hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f3459b23f33126c73c4b69fe300c1f2bcb8c3a0 | 1,011 | py | Python | 2019/day-02/2.py | Valokoodari/advent-of-code | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-27T18:59:11.000Z | 2022-01-10T02:31:36.000Z | 2019/day-02/2.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | null | null | null | 2019/day-02/2.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-23T17:29:10.000Z | 2021-12-24T03:21:49.000Z | inputFile = "2-input"
outputFile = "2-output"
def readFile():
file = open(inputFile, "r")
code = list(map(int, file.readline().split(",")))
    file.close()
return code
def writeFile(a, b):
file = open(outputFile, "w+")
file.write("Part 1: " + a + "\n")
file.write("Part 2: " + b)
file.close()
def func(code, noun, verb):
code[1] = noun
code[2] = verb
for i in range(0, len(code), 4):
if (code[i] == 1):
code[code[i+3]] = code[code[i+1]] + code[code[i+2]]
elif (code[i] == 2):
code[code[i+3]] = code[code[i+1]] * code[code[i+2]]
else:
break
return code[0]
def main():
result = 19690720
intCode = readFile()
solA = func(intCode[:], 12, 2)
solB = 0
for i in range(0, 100, 1):
verb = result - func(intCode[:], i, 0)
if (0 < verb < 100):
solB = 100 * i + verb
break
writeFile(str(solA), str(solB))
if __name__ == '__main__': main() | 22.466667 | 63 | 0.503462 |
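# A minimal usage sketch for the func() intcode helper above, with a made-up program
# (not the puzzle input). func() patches cells 1 and 2, then runs opcodes 1 (add),
# 2 (multiply) and 99 (halt); main()'s part-2 search assumes the output grows by
# exactly 1 per unit of verb, which lets it recover the verb by subtraction.
toy_program = [1, 0, 0, 0, 2, 3, 11, 0, 99, 30, 40, 50]   # hypothetical values
print(func(toy_program[:], 9, 10))                        # run with noun=9, verb=10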
f71ea6b4496eae33b99557e0515c9fe2901709df | 244 | py | Python | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | problems/303_range_sum_query_immutable.py | wasi0013/leet_code | c589c10f06043fa0ac7643e09ae3903d77c2f8e9 | [
"MIT"
] | null | null | null | from itertools import accumulate
from typing import List
class NumArray:
def __init__(self, nums: List[int]):
self.n = list(accumulate(nums))
def sumRange(self, left: int, right: int) -> int:
        return self.n[right] - (self.n[left - 1] if left > 0 else 0)
| 22.181818 | 63 | 0.545082 |
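# A minimal usage sketch for the NumArray class above: prefix sums are computed once
# in __init__, so each sumRange call is O(1). The sample values are hypothetical.
arr = NumArray([-2, 0, 3, -5, 2, -1])
assert arr.sumRange(0, 2) == 1 and arr.sumRange(2, 5) == -1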
cda52b581e5eea401f84ad9a343d8bde4e1fe529 | 5,411 | py | Python | ipyannotator/bbox_annotator.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 19 | 2020-10-12T19:52:10.000Z | 2022-02-07T18:23:26.000Z | ipyannotator/bbox_annotator.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 6 | 2021-08-02T09:36:32.000Z | 2022-01-05T15:29:30.000Z | ipyannotator/bbox_annotator.py | EnriqueMoran/ipyannotator | 5517f8ded24e9e1347d0d72c73d620778f7b3069 | [
"Apache-2.0"
] | 1 | 2020-12-01T22:42:01.000Z | 2020-12-01T22:42:01.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_bbox_annotator.ipynb (unless otherwise specified).
__all__ = ['BBoxAnnotator']
# Internal Cell
import os
import json
from ipyevents import Event
from ipywidgets import (AppLayout, Button, IntSlider, IntProgress,
HBox, VBox, Output,
Layout, Label)
from pathlib import Path
from traitlets import Int, observe, link, dlink, HasTraits, Bytes, Unicode, Dict
from .bbox_canvas import BBoxCanvas
from .navi_widget import Navi
from .storage import setup_project_paths, get_image_list_from_folder, AnnotationStorage
# Internal Cell
class BBoxAnnotatorGUI(AppLayout):
def __init__(self, canvas_size=(505, 50)):
self._navi = Navi()
self._save_btn = Button(description="Save",
layout=Layout(width='auto'))
self._controls_box = HBox([self._navi, self._save_btn],
layout=Layout(display='flex', flex_flow='row wrap', align_items='center'))
self._image_box = BBoxCanvas(*canvas_size)
super().__init__(header=None,
left_sidebar=None,
center=self._image_box,
right_sidebar=None,
footer=self._controls_box,
pane_widths=(2, 8, 0),
pane_heights=(1, 4, 1))
def on_client_ready(self, callback):
self._image_box.observe_client_ready(callback)
# Internal Cell
class BBoxAnnotatorLogic(HasTraits):
index = Int(0)
image_path = Unicode()
bbox_coords = Dict()
current_im_num = Int()
def __init__(self, project_path, file_name=None, image_dir='pics', results_dir=None):
self.project_path = Path(project_path)
self.image_dir, self.annotation_file_path = setup_project_paths(self.project_path,
file_name=file_name,
image_dir=image_dir,
results_dir=results_dir)
        # select images and bboxes only from the given annotation file
if self.annotation_file_path.is_file():
with self.annotation_file_path.open() as json_file:
data = json.load(json_file)
im_names = data.keys()
self.image_paths = sorted(im for im in get_image_list_from_folder(self.image_dir) if str(im) in im_names)
else:
self.image_paths = sorted(get_image_list_from_folder(self.image_dir))
if not self.image_paths:
            raise Exception("!! No images to display !!")
self.current_im_num = len(self.image_paths)
self.annotations = AnnotationStorage(self.image_paths)
if self.annotation_file_path.exists():
self.annotations.load(self.annotation_file_path)
else:
self.annotations.save(self.annotation_file_path)
def _update_im(self):
self.image_path = str(self.image_paths[self.index])
def _update_coords(self): # from annotations
self.bbox_coords = self.annotations.get(self.image_path) or {}
def _update_annotations(self, index): # from coordinates
self.annotations[str(self.image_paths[index])] = self.bbox_coords
def _save_annotations(self, *args, **kwargs): # to disk
index = kwargs.pop('old_index', self.index)
self._update_annotations(index)
self.annotations.save(self.annotation_file_path)
def _handle_client_ready(self):
self._update_im()
self._update_coords()
@observe('index')
def _idx_changed(self, change):
''' On index change save an old state
and update current image path and bbox coordinates for visualisation
'''
self._save_annotations(old_index = change['old'])
self._update_im()
self._update_coords()
# Cell
class BBoxAnnotator(BBoxAnnotatorGUI):
"""
Represents bounding box annotator.
    Gives the ability to iterate through an image dataset,
    draw 2D bounding box annotations for object detection and localization,
    and export the final annotations in JSON format.
"""
debug_output = Output()
def __init__(self, project_path, canvas_size=(200, 400), file_name=None, image_dir='pics', results_dir=None):
self._model = BBoxAnnotatorLogic(project_path, file_name=file_name,
image_dir=image_dir, results_dir=results_dir)
super().__init__(canvas_size=canvas_size)
self._save_btn.on_click(self._model._save_annotations)
# set correct slider max value based on image number
dlink((self._model, 'current_im_num'), (self._navi.model, 'max_im_number'))
# draw current image and bbox only when client is ready
self.on_client_ready(self._model._handle_client_ready)
# Link image path and bbox coordinates between model and the ImageWithBox widget
link((self._model, 'image_path'), (self._image_box, 'image_path'))
link((self._model, 'bbox_coords'), (self._image_box, 'bbox_coords'))
# Link current image index from controls to annotator model
link((self._navi.model, 'index'), (self._model, 'index'))
def to_dict(self, only_annotated=True):
return self._model.annotations.to_dict(only_annotated)
| 36.315436 | 117 | 0.64258 |
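# A hypothetical Jupyter usage sketch for the BBoxAnnotator widget above; the project
# layout ('my_project' with an image folder 'pics') is an assumption, not from the source.
# annotator = BBoxAnnotator(project_path='my_project', canvas_size=(640, 400), image_dir='pics')
# annotator             # displaying the AppLayout renders the canvas, navigation and Save button
# annotator.to_dict()   # export the annotations drawn so far as a dict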
588c39778a34dcb1d8cf1d67adb94ee69eabc529 | 103 | py | Python | geomstats/_backend/constants.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 743 | 2018-05-23T02:23:29.000Z | 2022-03-29T22:59:22.000Z | geomstats/_backend/constants.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 1,119 | 2018-05-15T05:29:38.000Z | 2022-03-31T18:27:02.000Z | geomstats/_backend/constants.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 159 | 2018-05-23T17:49:24.000Z | 2022-03-30T16:44:47.000Z | tf_atol = 1e-6
tf_rtol = 1e-5
pytorch_atol = 1e-6
pytorch_rtol = 1e-5
np_atol = 1e-12
np_rtol = 1e-6
| 11.444444 | 19 | 0.68932 |
40da53de9470f666dac92a7c61b0db930bcc11be | 3,772 | py | Python | test.py | spongezhang/pytorch-CycleGAN-and-pix2pix | 01875b21d537512c304f37fb0eb65fea7f57f4ba | [
"BSD-3-Clause"
] | 2 | 2021-04-10T10:51:24.000Z | 2021-09-27T07:13:35.000Z | test.py | spongezhang/pytorch-CycleGAN-and-pix2pix | 01875b21d537512c304f37fb0eb65fea7f57f4ba | [
"BSD-3-Clause"
] | null | null | null | test.py | spongezhang/pytorch-CycleGAN-and-pix2pix | 01875b21d537512c304f37fb0eb65fea7f57f4ba | [
"BSD-3-Clause"
] | null | null | null | """General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and save the results to --results_dir.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for --num_test images and save results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# create a website
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
for i, data in enumerate(dataset):
if opt.num_test>0 and i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
model.test() # run inference
#visuals = model.get_current_visuals() # get image results
#img_path = model.get_image_paths() # get image paths
#if i % 5 == 0: # save images to an HTML file
# print('processing (%04d)-th image... %s' % (i, img_path))
#save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
#webpage.save() # save the HTML
| 58.030769 | 126 | 0.72508 |
16e90b700d678aae614d7dee970c7344d942868d | 6,290 | py | Python | gammagl/layers/conv/simplehgn_conv.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | gammagl/layers/conv/simplehgn_conv.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | gammagl/layers/conv/simplehgn_conv.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | import tensorlayerx as tlx
from gammagl.layers.conv import MessagePassing
from gammagl.utils import segment_softmax
from gammagl.mpops import *
class SimpleHGNConv(MessagePassing):
r'''The SimpleHGN layer from the `"Are we really making much progress? Revisiting, benchmarking, and refining heterogeneous graph neural networks"
<https://dl.acm.org/doi/pdf/10.1145/3447548.3467350>`_ paper
    The model extends the original graph attention mechanism of GAT by including edge type information in the attention calculation.
Calculating the coefficient:
    .. math::
\alpha_{ij} = \frac{exp(LeakyReLU(a^T[Wh_i||Wh_j||W_r r_{\psi(<i,j>)}]))}{\Sigma_{k\in\mathcal{E}}{exp(LeakyReLU(a^T[Wh_i||Wh_k||W_r r_{\psi(<i,k>)}]))}} (1)
Residual connection including Node residual:
    .. math::
h_i^{(l)} = \sigma(\Sigma_{j\in \mathcal{N}_i} {\alpha_{ij}^{(l)}W^{(l)}h_j^{(l-1)}} + h_i^{(l-1)}) (2)
and Edge residual:
    .. math::
\alpha_{ij}^{(l)} = (1-\beta)\alpha_{ij}^{(l)}+\beta\alpha_{ij}^{(l-1)} (3)
Multi-heads:
    .. math::
h^{(l+1)}_j = \parallel^M_{m = 1}h^{(l + 1, m)}_j (4)
Residual:
    .. math::
h^{(l+1)}_j = h^{(l)}_j + \parallel^M_{m = 1}h^{(l + 1, m)}_j (5)
Parameters
----------
in_feats: int
the input dimension
out_feats: int
the output dimension
num_etypes: int
the number of the edge type
edge_feats: int
the edge dimension
heads: int
the number of heads in this layer
negative_slope: float
the negative slope used in the LeakyReLU
feat_drop: float
the feature drop rate
attn_drop: float
the attention score drop rate
residual: boolean
whether we need the residual operation
activation:
the activation function
bias:
whether we need the bias
beta: float
the hyperparameter used in edge residual
'''
def __init__(self,
in_feats,
out_feats,
num_etypes,
edge_feats,
heads=1,
negative_slope=0.2,
feat_drop=0.,
attn_drop=0.,
residual=False,
activation=None,
bias=False,
beta=0.,):
super().__init__()
self.in_feats = in_feats
self.out_feats = out_feats
self.edge_feats = edge_feats
self.heads = heads
self.out_feats = out_feats
self.edge_embedding = tlx.nn.Embedding(num_etypes, edge_feats)
self.fc_node = tlx.nn.Linear(out_feats * heads, in_features=in_feats, b_init=None, W_init=tlx.initializers.XavierNormal(gain=1.414))
self.fc_edge = tlx.nn.Linear(edge_feats * heads, in_features=edge_feats, b_init=None, W_init=tlx.initializers.XavierNormal(gain=1.414))
self.attn_src = self._get_weights('attn_l', shape=(1, heads, out_feats), init=tlx.initializers.XavierNormal(gain=1.414), order=True)
self.attn_dst = self._get_weights('attn_r', shape=(1, heads, out_feats), init=tlx.initializers.XavierNormal(gain=1.414), order=True)
self.attn_edge = self._get_weights('attn_e', shape=(1, heads, edge_feats), init=tlx.initializers.XavierNormal(gain=1.414), order=True)
self.feat_drop = tlx.nn.Dropout(feat_drop)
self.attn_drop = tlx.nn.Dropout(attn_drop)
self.leaky_relu = tlx.nn.LeakyReLU(negative_slope)
self.fc_res = tlx.nn.Linear(heads * out_feats, in_features=in_feats, b_init=None, W_init=tlx.initializers.XavierNormal(gain=1.414)) if residual else None
self.activation = activation
self.bias = self._get_weights("bias", (1, heads, out_feats)) if bias else None
self.beta = beta
def message(self, x, edge_index, edge_feat, num_nodes, res_alpha=None):
x_new = self.fc_node(x)
x_new = tlx.ops.reshape(x_new, shape=[-1, self.heads, self.out_feats])
x_new = self.feat_drop(x_new)
edge_feat = self.edge_embedding(edge_feat)
edge_feat = self.fc_edge(edge_feat)
edge_feat = tlx.ops.reshape(edge_feat, [-1, self.heads, self.edge_feats])
#calculate the alpha
node_src = edge_index[0, :]
node_dst = edge_index[1, :]
weight_src = tlx.ops.gather(tlx.reduce_sum(x_new * self.attn_src, -1), node_src)
weight_dst = tlx.ops.gather(tlx.reduce_sum(x_new * self.attn_dst, -1), node_dst)
weight_edge = tlx.reduce_sum(edge_feat * self.attn_edge, -1)
weight = self.leaky_relu(weight_src + weight_dst + weight_edge)
alpha = self.attn_drop(segment_softmax(weight, node_dst, num_nodes))
#edge residual
if res_alpha is not None:
            alpha = alpha * (1 - self.beta) + res_alpha * self.beta
rst = tlx.ops.gather(x_new, node_src) * tlx.ops.expand_dims(alpha, axis=-1)
rst = unsorted_segment_sum(rst, node_dst, num_nodes)
#node residual
if self.fc_res is not None:
res_val = self.fc_res(x)
res_val = tlx.ops.reshape(res_val, shape=[x.shape[0], -1, self.out_feats])
rst = rst + res_val
if self.bias is not None:
rst = rst + self.bias
if self.activation is not None:
rst = self.activation(rst)
x = rst
return x, alpha
def propagate(self, x, edge_index, aggr='sum', **kwargs):
"""
Function that perform message passing.
Args:
x: input node feature
edge_index: edges from src to dst
aggr: aggregation type, default='sum', optional=['sum', 'mean', 'max']
kwargs: other parameters dict
"""
if 'num_nodes' not in kwargs.keys() or kwargs['num_nodes'] is None:
kwargs['num_nodes'] = x.shape[0]
coll_dict = self.__collect__(x, edge_index, aggr, kwargs)
msg_kwargs = self.inspector.distribute('message', coll_dict)
x, alpha = self.message(**msg_kwargs)
x = self.update(x)
return x, alpha
def forward(self, x, edge_index, edge_feat, res_attn=None):
return self.propagate(x, edge_index, edge_feat=edge_feat)
| 37.218935 | 166 | 0.61097 |
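# A minimal forward-pass sketch for the SimpleHGNConv layer above; every shape, value
# and TensorLayerX helper name here is an assumption, not taken from the source.
# conv = SimpleHGNConv(in_feats=16, out_feats=8, num_etypes=3, edge_feats=4, heads=2)
# x = tlx.random_normal((10, 16))                             # 10 nodes, 16 features
# edge_index = tlx.convert_to_tensor([[0, 1, 2], [1, 2, 3]])  # 3 edges (src row, dst row)
# edge_feat = tlx.convert_to_tensor([0, 1, 2])                # one edge-type id per edge
# out, alpha = conv(x, edge_index, edge_feat)                 # out: (10, heads, out_feats)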
811fff18d4b2c95b7f379f7282b7a14bd74b88f7 | 600 | py | Python | tests/utils/helpers.py | hmajid2301/markdown-mermaid-to-images- | fb83e46552ecf2678f3bc96ae6150024f3306abd | [
"Apache-2.0"
] | 2 | 2020-11-18T00:39:57.000Z | 2021-12-15T00:03:37.000Z | tests/utils/helpers.py | hmajid2301/markdown-mermaid-to-images | fb83e46552ecf2678f3bc96ae6150024f3306abd | [
"Apache-2.0"
] | null | null | null | tests/utils/helpers.py | hmajid2301/markdown-mermaid-to-images | fb83e46552ecf2678f3bc96ae6150024f3306abd | [
"Apache-2.0"
] | null | null | null | import filecmp
import glob
import os
class Helpers:
@staticmethod
def remove_files_in_output():
files = glob.glob("tests/data/output/*")
for file_name in files:
if file_name != ".gitkeep":
os.remove(file_name)
@staticmethod
def compare_files():
files = glob.glob("tests/data/output/*.md")
for file_name in files:
expected_file = os.path.basename(file_name)
expected_file_path = os.path.join("tests", "data", "expected", expected_file)
assert filecmp.cmp(file_name, expected_file_path)
| 28.571429 | 89 | 0.626667 |
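# A sketch of how the Helpers class above could be exposed to tests via a conftest.py
# fixture; the fixture name and wiring are assumptions, not from the source.
# import pytest
# @pytest.fixture
# def helpers():
#     return Helpers()
# A test would then call helpers.remove_files_in_output() before generating output and
# helpers.compare_files() to assert the results match the files in tests/data/expected.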
a65805fb6444d2996853370ffdf0bfbd731e29d3 | 12,264 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class ListDevicesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'product_id': 'str',
'gateway_id': 'str',
'is_cascade_query': 'bool',
'node_id': 'str',
'device_name': 'str',
'limit': 'int',
'marker': 'str',
'offset': 'int',
'start_time': 'str',
'end_time': 'str',
'app_id': 'str'
}
attribute_map = {
'instance_id': 'Instance-Id',
'product_id': 'product_id',
'gateway_id': 'gateway_id',
'is_cascade_query': 'is_cascade_query',
'node_id': 'node_id',
'device_name': 'device_name',
'limit': 'limit',
'marker': 'marker',
'offset': 'offset',
'start_time': 'start_time',
'end_time': 'end_time',
'app_id': 'app_id'
}
def __init__(self, instance_id=None, product_id=None, gateway_id=None, is_cascade_query=None, node_id=None, device_name=None, limit=None, marker=None, offset=None, start_time=None, end_time=None, app_id=None):
"""ListDevicesRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._product_id = None
self._gateway_id = None
self._is_cascade_query = None
self._node_id = None
self._device_name = None
self._limit = None
self._marker = None
self._offset = None
self._start_time = None
self._end_time = None
self._app_id = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
if product_id is not None:
self.product_id = product_id
if gateway_id is not None:
self.gateway_id = gateway_id
if is_cascade_query is not None:
self.is_cascade_query = is_cascade_query
if node_id is not None:
self.node_id = node_id
if device_name is not None:
self.device_name = device_name
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if offset is not None:
self.offset = offset
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
if app_id is not None:
self.app_id = app_id
@property
def instance_id(self):
"""Gets the instance_id of this ListDevicesRequest.
        Instance ID. Unique identifier of each instance in the physical multi-tenant scenario. Regular HUAWEI CLOUD tenants do not need to pass this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:return: The instance_id of this ListDevicesRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListDevicesRequest.
        Instance ID. Unique identifier of each instance in the physical multi-tenant scenario. Regular HUAWEI CLOUD tenants do not need to pass this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:param instance_id: The instance_id of this ListDevicesRequest.
:type: str
"""
self._instance_id = instance_id
@property
def product_id(self):
"""Gets the product_id of this ListDevicesRequest.
        ID of the product associated with the device, which uniquely identifies a product model. It is allocated by the platform after the product model is imported on the management portal.
:return: The product_id of this ListDevicesRequest.
:rtype: str
"""
return self._product_id
@product_id.setter
def product_id(self, product_id):
"""Sets the product_id of this ListDevicesRequest.
        ID of the product associated with the device, which uniquely identifies a product model. It is allocated by the platform after the product model is imported on the management portal.
:param product_id: The product_id of this ListDevicesRequest.
:type: str
"""
self._product_id = product_id
@property
def gateway_id(self):
"""Gets the gateway_id of this ListDevicesRequest.
        Gateway ID, i.e. the device ID of the parent device the device belongs to. If this parameter is carried, child devices of that device are queried: direct child devices by default, or child devices at all levels if is_cascade_query is also set to true. If this parameter is not carried, all devices of the user are queried.
:return: The gateway_id of this ListDevicesRequest.
:rtype: str
"""
return self._gateway_id
@gateway_id.setter
def gateway_id(self, gateway_id):
"""Sets the gateway_id of this ListDevicesRequest.
        Gateway ID, i.e. the device ID of the parent device the device belongs to. If this parameter is carried, child devices of that device are queried: direct child devices by default, or child devices at all levels if is_cascade_query is also set to true. If this parameter is not carried, all devices of the user are queried.
:param gateway_id: The gateway_id of this ListDevicesRequest.
:type: str
"""
self._gateway_id = gateway_id
@property
def is_cascade_query(self):
"""Gets the is_cascade_query of this ListDevicesRequest.
        Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. Default value: false. - true: query child devices at all levels under the device whose device ID equals gateway_id. - false: query only first-level child devices under the device whose device ID equals gateway_id.
:return: The is_cascade_query of this ListDevicesRequest.
:rtype: bool
"""
return self._is_cascade_query
@is_cascade_query.setter
def is_cascade_query(self, is_cascade_query):
"""Sets the is_cascade_query of this ListDevicesRequest.
        Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. Default value: false. - true: query child devices at all levels under the device whose device ID equals gateway_id. - false: query only first-level child devices under the device whose device ID equals gateway_id.
:param is_cascade_query: The is_cascade_query of this ListDevicesRequest.
:type: bool
"""
self._is_cascade_query = is_cascade_query
@property
def node_id(self):
"""Gets the node_id of this ListDevicesRequest.
        Device identifier. The IMEI, MAC address, or serial number is usually used as the node_id.
:return: The node_id of this ListDevicesRequest.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""Sets the node_id of this ListDevicesRequest.
        Device identifier. The IMEI, MAC address, or serial number is usually used as the node_id.
:param node_id: The node_id of this ListDevicesRequest.
:type: str
"""
self._node_id = node_id
@property
def device_name(self):
"""Gets the device_name of this ListDevicesRequest.
        Device name.
:return: The device_name of this ListDevicesRequest.
:rtype: str
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""Sets the device_name of this ListDevicesRequest.
        Device name.
:param device_name: The device_name of this ListDevicesRequest.
:type: str
"""
self._device_name = device_name
@property
def limit(self):
"""Gets the limit of this ListDevicesRequest.
        Number of records displayed on each page in a paginated query. Default value: 10. Value range: an integer from 1 to 50.
:return: The limit of this ListDevicesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListDevicesRequest.
        Number of records displayed on each page in a paginated query. Default value: 10. Value range: an integer from 1 to 50.
:param limit: The limit of this ListDevicesRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListDevicesRequest.
        ID of the last record in the previous paginated query, returned by the platform in that query. The platform pages through records in descending order of record ID (newer records have larger IDs). If marker is provided, only records whose IDs are smaller than marker are queried; if it is not provided, the query starts from the newest record (the largest record ID). To iterate over all data, pass the marker value from each response into the next query.
:return: The marker of this ListDevicesRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListDevicesRequest.
        ID of the last record in the previous paginated query, returned by the platform in that query. The platform pages through records in descending order of record ID (newer records have larger IDs). If marker is provided, only records whose IDs are smaller than marker are queried; if it is not provided, the query starts from the newest record (the largest record ID). To iterate over all data, pass the marker value from each response into the next query.
:param marker: The marker of this ListDevicesRequest.
:type: str
"""
self._marker = marker
@property
def offset(self):
"""Gets the offset of this ListDevicesRequest.
        Offset after the marker from which the query starts. Default value: 0. Value range: an integer from 0 to 500. When offset is 0, the query starts from the first record after the marker. The maximum offset is limited for API performance; combine offset with marker to page through results. For example, with 50 records per page, pages 1-11 can be reached directly with offset, but beyond that the marker returned by page 11 must be used as the marker of the next query to reach pages 12-22.
:return: The offset of this ListDevicesRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListDevicesRequest.
        Offset after the marker from which the query starts. Default value: 0. Value range: an integer from 0 to 500. When offset is 0, the query starts from the first record after the marker. The maximum offset is limited for API performance; combine offset with marker to page through results. For example, with 50 records per page, pages 1-11 can be reached directly with offset, but beyond that the marker returned by page 11 must be used as the marker of the next query to reach pages 12-22.
:param offset: The offset of this ListDevicesRequest.
:type: int
"""
self._offset = offset
@property
def start_time(self):
"""Gets the start_time of this ListDevicesRequest.
        Query records whose device registration time is later than startTime. Format: yyyyMMdd'T'HHmmss'Z', for example 20151212T121212Z.
:return: The start_time of this ListDevicesRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListDevicesRequest.
        Query records whose device registration time is later than startTime. Format: yyyyMMdd'T'HHmmss'Z', for example 20151212T121212Z.
:param start_time: The start_time of this ListDevicesRequest.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ListDevicesRequest.
        Query records whose device registration time is earlier than endTime. Format: yyyyMMdd'T'HHmmss'Z', for example 20151212T121212Z.
:return: The end_time of this ListDevicesRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListDevicesRequest.
        Query records whose device registration time is earlier than endTime. Format: yyyyMMdd'T'HHmmss'Z', for example 20151212T121212Z.
:param end_time: The end_time of this ListDevicesRequest.
:type: str
"""
self._end_time = end_time
@property
def app_id(self):
"""Gets the app_id of this ListDevicesRequest.
        Resource space ID. This parameter is optional. Users with multiple resource spaces can carry it to query the device list in a specified resource space; if it is not carried, the device list of all devices under the user is queried.
:return: The app_id of this ListDevicesRequest.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this ListDevicesRequest.
        Resource space ID. This parameter is optional. Users with multiple resource spaces can carry it to query the device list in a specified resource space; if it is not carried, the device list of all devices under the user is queried.
:param app_id: The app_id of this ListDevicesRequest.
:type: str
"""
self._app_id = app_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListDevicesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.058824 | 230 | 0.630463 |
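# A minimal usage sketch for the generated request model above; the parameter values
# are placeholders, not taken from the source.
req = ListDevicesRequest(limit=10, app_id="my-app-id", device_name="sensor-01")
print(req.to_dict())   # only attributes declared in openapi_types are serialized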
9525dc348d7d3ab429dbd39f86e81954af99ff5b | 5,175 | py | Python | tests/test_ctu.py | cglewis/lim-cli | 8d87bf377afe35b5f2ba420fdb47ad3f4cc02a51 | [
"Apache-2.0"
] | null | null | null | tests/test_ctu.py | cglewis/lim-cli | 8d87bf377afe35b5f2ba420fdb47ad3f4cc02a51 | [
"Apache-2.0"
] | null | null | null | tests/test_ctu.py | cglewis/lim-cli | 8d87bf377afe35b5f2ba420fdb47ad3f4cc02a51 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_ctu
--------
Tests for `lim.ctu` module.
"""
import os
import unittest
from lim.ctu import CTU_Dataset
TEST_VALID_GROUP = 'malware'
TEST_INVALID_GROUP = 'erawlam'
TEST_CACHE = 'tests/test-ctu-cache.json'
class Test_CTU_Dataset(unittest.TestCase):
def setUp(self):
self.ctu_dataset = CTU_Dataset(cache_file=TEST_CACHE)
self.ctu_dataset.load_ctu_metadata()
def tearDown(self):
pass
def test_cache_exists(self):
self.assertTrue(os.path.exists(TEST_CACHE))
def test_get_default_group(self):
self.assertNotEqual(CTU_Dataset.get_default_group(), '')
def test_get_groups(self):
self.assertIs(type(CTU_Dataset.get_groups()), type(list()))
self.assertTrue(len(CTU_Dataset.get_groups()) > 0)
def test_get_group_VALID(self):
self.assertIn(TEST_VALID_GROUP, CTU_Dataset.get_groups())
def test_get_group_INVALID(self):
self.assertNotIn(TEST_INVALID_GROUP, CTU_Dataset.get_groups())
def test_get_url_for_group_valid(self):
self.assertTrue(CTU_Dataset.get_url_for_group(TEST_VALID_GROUP).find('://') != -1)
def test_get_url_for_group_invalid(self):
self.assertIs(CTU_Dataset.get_url_for_group(TEST_INVALID_GROUP), None)
def test_get_columns(self):
columns = CTU_Dataset.get_columns()
self.assertIs(type(columns), type(list()))
self.assertTrue(len(columns) > 0)
def test_get_disclaimer(self):
disclaimer = CTU_Dataset.get_disclaimer()
self.assertTrue("http://dx.doi.org/10.1016/j.cose.2014.05.011" in disclaimer)
def test_get_scenarios(self):
scenarios = self.ctu_dataset.get_scenarios()
self.assertIs(type(scenarios), type(dict()))
self.assertIn('CTU-Mixed-Capture-1', scenarios)
def test_get_scenario_names(self):
scenario_names = self.ctu_dataset.get_scenario_names()
self.assertIs(type(scenario_names), type(list()))
self.assertTrue(len(scenario_names) > 0)
self.assertEqual(scenario_names[0], 'CTU-Mixed-Capture-1',
msg='scenario_names[0]={}'.format(scenario_names[0]))
def test_is_valid_scenario_MATCH(self):
self.assertTrue(self.ctu_dataset.is_valid_scenario('CTU-Mixed-Capture-1'))
def test_is_valid_scenario_FAIL(self):
self.assertFalse(self.ctu_dataset.is_valid_scenario('CTU-Moxed-Cipture-1'))
def test_get_scenario_attribute_url_SUCCESS(self):
self.assertEqual(
self.ctu_dataset.get_scenario_attribute('CTU-Mixed-Capture-1', 'URL'),
'https://mcfp.felk.cvut.cz/publicDatasets/CTU-Mixed-Capture-1/')
def test_get_attributes(self):
items = [a for a in CTU_Dataset.__ATTRIBUTES__]
self.assertListEqual(items, self.ctu_dataset.get_attributes())
def test_get_attributes_lower(self):
items = [a.lower() for a in CTU_Dataset.__ATTRIBUTES__]
self.assertListEqual(items, self.ctu_dataset.get_attributes_lower())
def test_get_scenario_attribute_url_FAIL(self):
try:
_ = self.ctu_dataset.get_scenario_attribute('CTU-Mixed-Capture-1', 'ORL')
except RuntimeError as err:
self.assertIn('is not supported', str(err))
else:
raise
def test_get_scenario_attribute_pcap(self):
url = self.ctu_dataset.get_scenario_attribute('CTU-Mixed-Capture-1', 'PCAP')
self.assertEqual(url,
'https://mcfp.felk.cvut.cz/publicDatasets/CTU-Mixed-Capture-1/2015-07-28_mixed.pcap',
msg='url={}'.format(url))
def test_get_scenario_page(self):
self.assertIn('DOCTYPE HTML PUBLIC',
self.ctu_dataset.get_scenario_page('CTU-Mixed-Capture-1'))
def test_filename_from_url(self):
filename = self.ctu_dataset.filename_from_url(
'https://mcfp.felk.cvut.cz/publicDatasets/CTU-Mixed-Capture-1/2015-07-28_mixed.pcap')
self.assertEqual(filename, '2015-07-28_mixed.pcap',
msg='filename={}'.format(filename))
def test_get_fullname_short(self):
prefix = self.ctu_dataset.__CTU_PREFIX__
shortname = 'Botnet-1'
fullname = self.ctu_dataset.get_fullname(shortname)
self.assertEqual(fullname, prefix + shortname)
def test_get_fullname_typo(self):
prefix = self.ctu_dataset.__CTU_PREFIX__
typoname = 'CTU_Malware_Capture-Botnet-1'
fullname = self.ctu_dataset.get_fullname(typoname)
self.assertEqual(fullname, typoname)
def test_get_shortname_match(self):
actual_shortname = 'Botnet-1'
prefix = self.ctu_dataset.__CTU_PREFIX__
fullname = prefix + actual_shortname
shortname = self.ctu_dataset.get_shortname(fullname)
self.assertEqual(shortname, actual_shortname)
def test_get_shortname_nomatch(self):
actual_shortname = 'Botnet-1'
shortname = self.ctu_dataset.get_shortname(actual_shortname)
self.assertEqual(shortname, actual_shortname)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
# vim: set fileencoding=utf-8 ts=4 sw=4 tw=0 et :
| 36.188811 | 101 | 0.688889 |
43e88f6b4d38cbe970094ad09de74c5adcdd24f0 | 2,616 | py | Python | tests/bugs/core_0143_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_0143_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_0143_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_0143
# title: Using where params in SUM return incorrect results
# decription:
#                   30.10.2019. NB: a new datatype was introduced in FB 4.0: numeric(38,0).
#                   It can lead to additional indentation of values when we show them in the form "SET LIST ON",
# so we have to ignore all internal spaces - see added 'substitution' section below.
# Checked on:
# 4.0.0.1635 SS: 1.061s.
# 3.0.5.33182 SS: 0.754s.
# 2.5.9.27146 SC: 0.190s.
#
# tracker_id: CORE-0143
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table yeardata
(
id integer not null,
ayear integer,
avalue numeric( 18, 2),
constraint pk_yeardata primary key (id)
);
commit;
insert into yeardata(id, ayear, avalue) values (1, 2005, 3.40);
insert into yeardata(id, ayear, avalue) values (2, 2005, 6.60);
insert into yeardata(id, ayear, avalue) values (3, 2004, 5.20);
insert into yeardata(id, ayear, avalue) values (4, 2004, 5.80);
insert into yeardata(id, ayear, avalue) values (5, 2004, 5.00);
commit;
set list on;
select
sum(case when ayear = 2004 then avalue else null end) as avalue_2004_1
,sum(case when ayear = 2005 then avalue else null end) as avalue_2005_1
from yeardata;
set term ^;
execute block returns( avalue_2004_2 numeric( 18, 2), avalue_2005_2 numeric( 18, 2)) as
begin
execute statement
(
'select
sum(case when ayear = ? then avalue else null end)
,sum(case when ayear = ? then avalue else null end)
from yeardata'
) ( 2004, 2005 )
into avalue_2004_2, avalue_2005_2;
suspend;
end
^
set term ;^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
AVALUE_2004_1 16.00
AVALUE_2005_1 10.00
AVALUE_2004_2 16.00
AVALUE_2005_2 10.00
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 30.418605 | 102 | 0.582569 |
cee82efcb8841d1cf048637bc603d708c9c3237a | 68 | py | Python | rest_framework_json_schema/__init__.py | greenroxana/drf-json-schema | 7525a3f54d14ca0ac15efc6fad7a06cc6a05aced | [
"MIT"
] | 15 | 2017-01-31T22:58:59.000Z | 2021-07-02T14:19:38.000Z | rest_framework_json_schema/__init__.py | greenroxana/drf-json-schema | 7525a3f54d14ca0ac15efc6fad7a06cc6a05aced | [
"MIT"
] | 13 | 2017-04-05T17:53:10.000Z | 2021-06-02T22:21:55.000Z | rest_framework_json_schema/__init__.py | greenroxana/drf-json-schema | 7525a3f54d14ca0ac15efc6fad7a06cc6a05aced | [
"MIT"
] | 4 | 2021-04-17T18:46:23.000Z | 2021-05-18T08:35:15.000Z | """Top-level package for drf-json-schema."""
__version__ = "0.4.1"
| 17 | 44 | 0.661765 |
73bd8c5b5da0fc204e5a4c78c2cc7f6c46d33760 | 8,477 | py | Python | t5x/examples/decoder_only/network.py | dumpmemory/t5x | 463a23d577490a26498d9bbb2d7554be88afa316 | [
"Apache-2.0"
] | 2 | 2021-11-06T15:48:12.000Z | 2022-01-05T02:34:50.000Z | t5x/examples/decoder_only/network.py | dumpmemory/t5x | 463a23d577490a26498d9bbb2d7554be88afa316 | [
"Apache-2.0"
] | 14 | 2021-11-04T16:28:11.000Z | 2022-01-06T11:25:58.000Z | t5x/examples/decoder_only/network.py | dumpmemory/t5x | 463a23d577490a26498d9bbb2d7554be88afa316 | [
"Apache-2.0"
] | 2 | 2021-11-17T13:43:56.000Z | 2022-01-05T02:34:49.000Z | # Copyright 2021 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal decoder-only Transformer model."""
from typing import Any, Optional, Sequence
from flax import linen as nn
from flax import struct
import jax.numpy as jnp
from t5x.examples.decoder_only import layers
@struct.dataclass
class TransformerConfig:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int
# Activation dtypes.
dtype: Any = jnp.float32
emb_dim: int = 512
num_heads: int = 8
num_layers: int = 6
head_dim: int = 64
mlp_dim: int = 2048
# Activation functions are retrieved from Flax.
mlp_activations: Sequence[str] = ('relu',)
dropout_rate: float = 0.1
# If `True`, the embedding weights are used in the decoder output layer.
logits_via_embedding: bool = False
class DecoderLayer(nn.Module):
"""Transformer decoder layer."""
config: TransformerConfig
@nn.compact
def __call__(self,
inputs: jnp.ndarray,
decoder_mask: Optional[jnp.ndarray] = None,
deterministic: bool = False,
decode: bool = False,
max_decode_length: Optional[int] = None,
prefill: bool = False,
prefill_lengths: Optional[jnp.ndarray] = None):
"""Applies decoder block module."""
cfg = self.config
# Relative position embedding as attention biases.
l = max_decode_length if decode and max_decode_length else inputs.shape[-2]
decoder_bias = layers.RelativePositionBiases(
num_buckets=32,
max_distance=128,
num_heads=cfg.num_heads,
dtype=cfg.dtype,
embedding_init=nn.initializers.variance_scaling(1.0, 'fan_avg',
'uniform'),
name='relpos_bias')(l, l, False)
# `inputs` is layer input with a shape [batch, length, emb_dim].
x = layers.LayerNorm(
dtype=cfg.dtype, name='pre_self_attention_layer_norm')(
inputs)
# Self-attention block
x = layers.MultiHeadDotProductAttention(
num_heads=cfg.num_heads,
dtype=cfg.dtype,
head_dim=cfg.head_dim,
dropout_rate=cfg.dropout_rate,
name='self_attention')(
x,
x,
decoder_mask,
decoder_bias,
deterministic=deterministic,
decode=decode,
prefill=prefill,
prefill_lengths=prefill_lengths)
x = nn.Dropout(
rate=cfg.dropout_rate,
broadcast_dims=(-2,),
name='post_self_attention_dropout')(
x, deterministic=deterministic)
x = x + inputs
# MLP block.
y = layers.LayerNorm(dtype=cfg.dtype, name='pre_mlp_layer_norm')(x)
y = layers.MlpBlock(
intermediate_dim=cfg.mlp_dim,
activations=cfg.mlp_activations,
intermediate_dropout_rate=cfg.dropout_rate,
dtype=cfg.dtype,
name='mlp',
)(y, deterministic=deterministic)
y = nn.Dropout(
rate=cfg.dropout_rate, broadcast_dims=(-2,), name='post_mlp_dropout')(
y, deterministic=deterministic)
y = y + x
return y
class Decoder(nn.Module):
"""A stack of decoder layers."""
config: TransformerConfig
@nn.compact
def __call__(self,
decoder_input_tokens: jnp.ndarray,
decoder_target_tokens: jnp.ndarray,
decoder_segment_ids: Optional[jnp.ndarray] = None,
decoder_positions: Optional[jnp.ndarray] = None,
decoder_causal_attention: Optional[jnp.ndarray] = None,
*,
enable_dropout: bool = True,
decode: bool = False,
max_decode_length: Optional[int] = None,
prefill: Optional[bool] = None,
prefill_lengths: Optional[jnp.ndarray] = None):
"""Applies LanguageModel on the inputs.
For a decoder-only architecture with the notion of "prefix", e.g., a prefix
LM where the prefix corresponds to the "inputs" of a supervised dataset, we
perform the "prefill" operation to fill the autoregressive cache
corresponding to the prefix region in one go. Then the autoregressive
decoding starts after the prefix. This makes the decoding process more
efficient. In addition, it gives an option to use bidirectional attention in
the prefix region because the cache is filled simultaneously.
Args:
decoder_input_tokens: input token to the decoder.
decoder_target_tokens: target token to the decoder.
decoder_segment_ids: decoder segmentation info for packed examples.
decoder_positions: decoder subsequence positions for packed examples.
decoder_causal_attention: a binary mask indicating the portion of the
sequence to apply bidirectional attention to instead of causal. As an
example, useful to specify the "inputs" portion of a concatenated
sequence for a prefix LM.
enable_dropout: enables dropout if set to True.
decode: whether to prepare and use an autoregressive cache as opposed to
using teacher-forcing.
max_decode_length: maximum sequence length to be decoded.
prefill: whether to run a partial sequence to prefill the cache.
prefill_lengths: an array of shape [batch] denoting the length of each
partial sequence we are filling in the cache.
Returns:
logits array.
"""
cfg = self.config
deterministic = not enable_dropout
assert decoder_input_tokens.ndim == 2 # [batch, len]
if decode:
decoder_mask = None
else:
decoder_mask = layers.make_decoder_mask(
decoder_target_tokens=decoder_target_tokens,
dtype=cfg.dtype,
decoder_causal_attention=decoder_causal_attention,
decoder_segment_ids=decoder_segment_ids)
embedding = layers.Embed(
num_embeddings=cfg.vocab_size,
features=cfg.emb_dim,
dtype=cfg.dtype,
attend_dtype=jnp.float32, # for logit training stability
embedding_init=nn.initializers.normal(stddev=1.0),
one_hot=True,
name='token_embedder')
y = embedding(decoder_input_tokens.astype('int32'))
y = nn.Dropout(
rate=cfg.dropout_rate, broadcast_dims=(-2,), name='input_dropout')(
y, deterministic=deterministic)
y = y.astype(cfg.dtype)
for lyr in range(cfg.num_layers):
# [batch, length, emb_dim] -> [batch, length, emb_dim]
y = DecoderLayer(
config=cfg,
name=f'layers_{lyr}')(
y,
decoder_mask=decoder_mask,
deterministic=deterministic,
decode=decode,
max_decode_length=max_decode_length,
prefill=prefill,
prefill_lengths=prefill_lengths)
y = layers.LayerNorm(dtype=cfg.dtype, name='decoder_norm')(y)
y = nn.Dropout(
rate=cfg.dropout_rate, broadcast_dims=(-2,), name='output_dropout')(
y, deterministic=deterministic)
# [batch, length, emb_dim] -> [batch, length, vocab_size]
if cfg.logits_via_embedding:
# Use the transpose of embedding matrix for the logit transform.
logits = embedding.attend(y)
# Correctly normalize pre-softmax logits for this shared case.
logits = logits / jnp.sqrt(y.shape[-1])
else:
# Use a separate dense layer for the logit transform.
logits = layers.DenseGeneral(
cfg.vocab_size,
          dtype=jnp.float32,  # Use float32 for stability.
kernel_axes=('embed', 'vocab'),
name='logits_dense')(
y)
return logits
# TODO(hwchung): remove this after figuring out the name scope issue.
class DecoderWrapper(nn.Module):
"""Thin wrapper for the outer "decoder/" name scope."""
config: TransformerConfig
def setup(self):
self.decoder = Decoder(self.config, name='decoder')
def __call__(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
| 36.07234 | 80 | 0.659313 |
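# A minimal configuration sketch for the decoder-only model above; the hyperparameter
# values are assumptions chosen for illustration. Initializing the Flax module, e.g.
# DecoderWrapper(config=cfg).init(...), additionally needs dummy token arrays and a
# PRNG key and is omitted here.
cfg = TransformerConfig(
    vocab_size=32_000, emb_dim=512, num_heads=8, num_layers=6, head_dim=64, mlp_dim=2048)
model = DecoderWrapper(config=cfg)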
c8f71afb5594a3b32cde1a71900439227a7dd8ee | 2,289 | py | Python | scrape_twitter_v2.py | SKJNR/Crypto-currency-Price-Prediction-based-on-Sentiment | 89f8fd6b5f36a92a9cabeaa5bff6ad44dbef2bc7 | [
"Apache-2.0"
] | null | null | null | scrape_twitter_v2.py | SKJNR/Crypto-currency-Price-Prediction-based-on-Sentiment | 89f8fd6b5f36a92a9cabeaa5bff6ad44dbef2bc7 | [
"Apache-2.0"
] | null | null | null | scrape_twitter_v2.py | SKJNR/Crypto-currency-Price-Prediction-based-on-Sentiment | 89f8fd6b5f36a92a9cabeaa5bff6ad44dbef2bc7 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
import requests
import json
import pandas as pd
import nltk
nltk.download("vader_lexicon", "nltk_data")
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# Get datetimes for the query window (the last hour), correctly formatted for the Twitter API
dtformat = '%Y-%m-%dT%H:%M:%SZ'
# datetime.now() gives the local time, whereas datetime.utcnow() gives the current time
# in UTC; the API expects UTC, so using now() may be ahead of or behind the true UTC
# time and cause an error
time = datetime.utcnow()
start_time = time - timedelta(hours=1)
# Subtracting 15 seconds because api needs end_time must be a minimum of 10
# seconds prior to the request time
end_time = time - timedelta(seconds=15)
# convert to strings
start_time, end_time = start_time.strftime(
dtformat), end_time.strftime(dtformat)
# Function to get sentiment of a text
def get_sentiment(text):
d = sid.polarity_scores(text)
# Method 1:
d.pop('compound')
return d["pos"], d["neg"], d["neu"], max(d, key=d.get)
def get_seniment_of_tweets(df):
df['pos_score'], df['neg_score'], df['neu_score'], df['sentiment'] = zip(
*df['text'].map(get_sentiment))
return df
def scrape_tweets(coin):
_query = coin
max_results = str(100)
url = "https://api.twitter.com/2/tweets/search/recent?query=" + _query + "&start_time=" + start_time + \
"&end_time=" + end_time + "&max_results=" + \
max_results + "&tweet.fields=id,text,created_at,lang"
print(url)
payload = ""
headers = {
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAP2lawEAAAAA56AM1v9m%2B%2FJHcn1TtnaELFoJB10%3DZ1KKfrINgjvadccGnBcnDaWyeR3z2vG5OYUSZ96FuzXhucQRmN',
'Cookie': 'guest_id=v1%3A164858392161650785; guest_id_ads=v1%3A164858392161650785; guest_id_marketing=v1%3A164858392161650785; personalization_id="v1_iSN/fsfFJ6mIo42uRQX+uw=="'
}
response = requests.request("GET", url, headers=headers, data=payload)
if response.status_code == 200:
df = json.loads(response.text)["data"]
df = pd.DataFrame(df)
df["created_at"] = pd.to_datetime(df["created_at"])
df = get_seniment_of_tweets(df)
return df
else:
print("Error: " + str(response.status_code))
| 31.791667 | 183 | 0.697248 |
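# A minimal usage sketch for the get_sentiment() helper above; the sentence is a
# made-up example and no Twitter API call is involved.
print(get_sentiment("bitcoin is looking great today"))   # -> (pos, neg, neu, label)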
af33cb5c40dfe958d9e09886c192df5f6971ec7e | 723 | py | Python | dac_example.py | jeremyherbert/python-polyglot-turtle | 8172a4885c38c08b9410ca0f6aa18e4182e2539b | [
"MIT"
] | null | null | null | dac_example.py | jeremyherbert/python-polyglot-turtle | 8172a4885c38c08b9410ca0f6aa18e4182e2539b | [
"MIT"
] | null | null | null | dac_example.py | jeremyherbert/python-polyglot-turtle | 8172a4885c38c08b9410ca0f6aa18e4182e2539b | [
"MIT"
] | null | null | null | import time
from polyglot_turtle import PolyglotTurtleXiao, PinDirection
if __name__ == "__main__":
pt = PolyglotTurtleXiao()
maximum_dac_value, maximum_dac_voltage = pt.dac_get_info()
print("DAC will output", maximum_dac_voltage, "volts at the maximum level of", maximum_dac_value)
dac_gpio = 0
# the following code will output a ramp from 0 to the maximum value from the DAC on GPIO 0
for i in range(maximum_dac_value):
pt.dac_set(dac_gpio, i)
time.sleep(0.01)
# the DAC will hold the last value sent to it, if we wish to turn it back into a GPIO we can set the pin direction
pt.gpio_set_direction(dac_gpio, PinDirection.OUTPUT)
pt.gpio_set_level(dac_gpio, False) | 36.15 | 118 | 0.731674 |
625b4092b448f172a1c08279a4fd9480c4f3f72c | 627 | py | Python | botlander/resources/image_resource.py | LEMSantos/botlander-backend | e38e78e935a784a0e7840477dd30048b54905045 | [
"MIT"
] | null | null | null | botlander/resources/image_resource.py | LEMSantos/botlander-backend | e38e78e935a784a0e7840477dd30048b54905045 | [
"MIT"
] | null | null | null | botlander/resources/image_resource.py | LEMSantos/botlander-backend | e38e78e935a784a0e7840477dd30048b54905045 | [
"MIT"
] | null | null | null | from flask_restful import Resource, abort
from botlander.database.models import Bot
from http import HTTPStatus
import bson
from flask import send_file
class BotImageResource(Resource):
def get(self, bot_id):
try:
bot = Bot.objects.get(
id=bson.objectid.ObjectId(bot_id)
)
except Bot.DoesNotExist:
abort(HTTPStatus.NOT_FOUND)
except bson.errors.InvalidId:
abort(HTTPStatus.NOT_FOUND)
if not bool(bot.image):
abort(HTTPStatus.NOT_FOUND)
return send_file(bot.image, mimetype=f'image/{bot.image.format}')
| 26.125 | 73 | 0.650718 |
b1c226a3fbe12940bc560af4d10f931a7673f01e | 2,636 | py | Python | main.py | filmszillacloud/GoFile-Bot | 6c3c2388c2f91163dfbe2d78b838a5d86dd24dff | [
"MIT"
] | null | null | null | main.py | filmszillacloud/GoFile-Bot | 6c3c2388c2f91163dfbe2d78b838a5d86dd24dff | [
"MIT"
] | null | null | null | main.py | filmszillacloud/GoFile-Bot | 6c3c2388c2f91163dfbe2d78b838a5d86dd24dff | [
"MIT"
] | null | null | null | # Made with python3
# (C) @FayasNoushad
# Copyright permission under MIT License
# All rights reserved by FayasNoushad
# License -> https://github.com/FayasNoushad/GoFile-Bot/blob/main/LICENSE
import os
import urldl
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from gofile import uploadFile
Bot = Client(
"GoFile-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.private & filters.command("start"))
async def start(bot, update):
await update.reply_text(
text=f"Hello {update.from_user.mention}, Please send a media for gofile.io stream link.",
disable_web_page_preview=True,
quote=True
)
@Bot.on_message(filters.private & (filters.media | filters.text))
async def filter(bot, update):
    if update.text and not (update.text.startswith("http://") or update.text.startswith("https://")):
        return
message = await update.reply_text(
text="`Processing...`",
quote=True,
disable_web_page_preview=True
)
try:
await message.edit_text(
text="`Downloading...`",
disable_web_page_preview=True
)
if update.text:
            media = urldl.download(update.text)
else:
media = await update.download()
await message.edit_text(
text="`Uploading...`",
disable_web_page_preview=True
)
response = uploadFile(media)
try:
os.remove(media)
except:
pass
except Exception as error:
await message.edit_text(
text=f"Error :- <code>{error}</code>",
quote=True,
disable_web_page_preview=True
)
return
text = f"**File Name:** `{response['fileName']}`" + "\n"
text += f"**Download Page:** `{response['downloadPage']}`" + "\n"
text += f"**Direct Download Link:** `{response['directLink']}`" + "\n"
text += f"**Info:** `{response['info']}`"
reply_markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(text="Open Link", url=response['directLink']),
InlineKeyboardButton(text="Share Link", url=f"https://telegram.me/share/url?url={response['directLink']}")
],
[
InlineKeyboardButton(text="Join Updates Channel", url="https://t.me/FZBOTS")
]
]
)
await message.edit_text(
text=text,
reply_markup=reply_markup,
disable_web_page_preview=True
)
Bot.run()
| 29.954545 | 122 | 0.601669 |
dd158dae1bac01be5af1426bfc778fdf7ed4f53c | 630 | py | Python | Object detection and depth estimation/catkin_ws/build/f110-fall2018-skeletons/system/vesc/vesc_ackermann/catkin_generated/pkg.develspace.context.pc.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | Object detection and depth estimation/catkin_ws/build/f110-fall2018-skeletons/system/vesc/vesc_ackermann/catkin_generated/pkg.develspace.context.pc.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | Object detection and depth estimation/catkin_ws/build/f110-fall2018-skeletons/system/vesc/vesc_ackermann/catkin_generated/pkg.develspace.context.pc.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/f1tenth2/f110_ws/src/f110-fall2018-skeletons/system/vesc/vesc_ackermann/include".split(';') if "/home/f1tenth2/f110_ws/src/f110-fall2018-skeletons/system/vesc/vesc_ackermann/include" != "" else []
PROJECT_CATKIN_DEPENDS = "nodelet;pluginlib;roscpp;nav_msgs;std_msgs;geometry_msgs;tf;ackermann_msgs;vesc_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "vesc_ackermann"
PROJECT_SPACE_DIR = "/home/f1tenth2/f110_ws/devel"
PROJECT_VERSION = "0.0.1"
| 70 | 237 | 0.777778 |
e818cb794774bfdf39c619944575f0eab96b5f61 | 11,790 | py | Python | tfx/orchestration/launcher/kubernetes_component_launcher.py | alonsoir/tfx | 359dcc95e6104e183b685a683d502744305e5eba | [
"Apache-2.0"
] | 1 | 2021-10-04T21:53:34.000Z | 2021-10-04T21:53:34.000Z | tfx/orchestration/launcher/kubernetes_component_launcher.py | alonsoir/tfx | 359dcc95e6104e183b685a683d502744305e5eba | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/launcher/kubernetes_component_launcher.py | alonsoir/tfx | 359dcc95e6104e183b685a683d502744305e5eba | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker component launcher which launches a container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
import time
from typing import Any, Callable, Dict, List, Text, cast
import absl
from kubernetes import client
from kubernetes import config
from tfx import types
from tfx.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import kubernetes_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
# Pod env names from:
# https://github.com/kubeflow/pipelines/blob/0.1.32/sdk/python/kfp/compiler/_default_transformers.py
_KFP_POD_NAME_ENV = 'KFP_POD_NAME'
_KFP_NAMESPACE_ENV = 'KFP_NAMESPACE'
# Pod phases are defined in
# https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
_POD_PENDING_PHASE = 'Pending'
_POD_SUCCEEDED_PHASE = 'Succeeded'
_POD_FAILED_PHASE = 'Failed'
def _pod_is_not_pending(resp: client.V1Pod):
return resp.status.phase != _POD_PENDING_PHASE
def _pod_is_done(resp: client.V1Pod):
return resp.status.phase in [_POD_SUCCEEDED_PHASE, _POD_FAILED_PHASE]
def _sanitize_pod_name(pod_name: Text) -> Text:
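  # Kubernetes object names must be lowercase RFC 1123 labels, so replace any
  # disallowed character with '-', strip leading dashes and collapse dash runs.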
pod_name = re.sub(r'[^a-z0-9-]', '-', pod_name.lower())
pod_name = re.sub(r'^[-]+', '', pod_name)
return re.sub(r'[-]+', '-', pod_name)
class KubernetesComponentLauncher(base_component_launcher.BaseComponentLauncher
):
"""Responsible for launching a container executor on Kubernetes."""
# TODO(hongyes): add container spec into exec_properties for driver to check.
@classmethod
def can_launch(
cls,
component_executor_spec: executor_spec.ExecutorSpec,
component_config: base_component_config.BaseComponentConfig = None
) -> bool:
"""Checks if the launcher can launch the executor spec."""
if component_config and not isinstance(
component_config,
kubernetes_component_config.KubernetesComponentConfig):
return False
return isinstance(component_executor_spec,
executor_spec.ExecutorContainerSpec)
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation.
    Runs executor container in a Kubernetes Pod and waits until it goes into
`Succeeded` or `Failed` state.
Args:
execution_id: The ID of the execution.
input_dict: Input dict from input key to a list of Artifacts. These are
often outputs of another component in the pipeline and passed to the
component by the orchestration system.
output_dict: Output dict from output key to a list of Artifacts. These are
often consumed by a dependent component.
exec_properties: A dict of execution properties. These are inputs to
pipeline with primitive types (int, string, float) and fully
materialized when a pipeline is constructed. No dependency to other
component or later injection from orchestration systems is necessary or
possible on these values.
Raises:
RuntimeError: when the pod is in `Failed` state or unexpected failure from
Kubernetes API.
"""
container_spec = cast(executor_spec.ExecutorContainerSpec,
self._component_executor_spec)
# Replace container spec with jinja2 template.
container_spec = container_common.resolve_container_template(
container_spec, input_dict, output_dict, exec_properties)
pod_name = self._build_pod_name(execution_id)
# TODO(hongyes): replace the default value from component config.
namespace = os.getenv(_KFP_NAMESPACE_ENV, 'kubeflow')
pod_manifest = self._build_pod_manifest(pod_name, container_spec)
try:
is_in_cluster = True
config.load_incluster_config()
absl.logging.info('Loaded in cluster config.')
except config.config_exception.ConfigException:
is_in_cluster = False
config.load_kube_config()
absl.logging.info('Loaded kube config.')
core_api = client.CoreV1Api()
if is_in_cluster:
launcher_pod_name = os.getenv(_KFP_POD_NAME_ENV)
launcher_pod = self._get_pod(core_api, launcher_pod_name, namespace)
pod_manifest['spec']['serviceAccount'] = launcher_pod.spec.service_account
pod_manifest['spec'][
'serviceAccountName'] = launcher_pod.spec.service_account_name
pod_manifest['metadata'][
'ownerReferences'] = container_common.to_swagger_dict(
launcher_pod.metadata.owner_references)
absl.logging.info('Looking for pod "%s:%s".' % (namespace, pod_name))
resp = self._get_pod(core_api, pod_name, namespace)
if not resp:
absl.logging.info('Pod "%s:%s" does not exist. Creating it...' %
(namespace, pod_name))
absl.logging.info('Pod manifest: ' + str(pod_manifest))
try:
resp = core_api.create_namespaced_pod(
namespace=namespace, body=pod_manifest)
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to created container executor pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
absl.logging.info('Waiting for pod "%s:%s" to start.' %
(namespace, pod_name))
self._wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=_pod_is_not_pending,
condition_description='non-pending status')
absl.logging.info('Start log streaming for pod "%s:%s".' %
(namespace, pod_name))
try:
logs = core_api.read_namespaced_pod_log(
name=pod_name,
namespace=namespace,
container='main',
follow=True,
_preload_content=False).stream()
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
for log in logs:
absl.logging.info(log.decode().rstrip('\n'))
resp = self._wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=_pod_is_done,
condition_description='done state')
if resp.status.phase == _POD_FAILED_PHASE:
raise RuntimeError('Pod "%s:%s" failed with status "%s".' %
(namespace, pod_name, resp.status))
absl.logging.info('Pod "%s:%s" is done.' % (namespace, pod_name))
def _build_pod_manifest(
self, pod_name: Text,
container_spec: executor_spec.ExecutorContainerSpec) -> Dict[Text, Any]:
"""Build a pod spec.
The function builds a pod spec by patching executor container spec into
the pod spec from component config.
Args:
pod_name: The name of the pod.
container_spec: The resolved executor container spec.
Returns:
The pod manifest in dictionary format.
"""
if self._component_config:
kubernetes_config = cast(
kubernetes_component_config.KubernetesComponentConfig,
self._component_config)
pod_manifest = container_common.to_swagger_dict(kubernetes_config.pod)
else:
pod_manifest = {}
pod_manifest.update({
'apiVersion': 'v1',
'kind': 'Pod',
})
# TODO(hongyes): figure out a better way to figure out type hints for nested
# dict.
metadata = pod_manifest.setdefault('metadata', {}) # type: Dict[Text, Any]
metadata.update({'name': pod_name})
spec = pod_manifest.setdefault('spec', {}) # type: Dict[Text, Any]
spec.update({'restartPolicy': 'Never'})
containers = spec.setdefault('containers',
[]) # type: List[Dict[Text, Any]]
container = None # type: Dict[Text, Any]
for c in containers:
if c['name'] == 'main':
container = c
break
if not container:
container = {'name': 'main'}
containers.append(container)
container.update({
'image': container_spec.image,
'command': container_spec.command,
'args': container_spec.args,
})
return pod_manifest
def _get_pod(self, core_api: client.CoreV1Api, pod_name: Text,
namespace: Text) -> client.V1Pod:
"""Get a pod from Kubernetes metadata API.
Args:
core_api: Client of Core V1 API of Kubernetes API.
pod_name: The name of the POD.
namespace: The namespace of the POD.
Returns:
The found POD object. None if it's not found.
Raises:
RuntimeError: When it sees unexpected errors from Kubernetes API.
"""
try:
return core_api.read_namespaced_pod(name=pod_name, namespace=namespace)
except client.rest.ApiException as e:
if e.status != 404:
raise RuntimeError('Unknown error! \nReason: %s\nBody: %s' %
(e.reason, e.body))
return None
def _wait_pod(self,
core_api: client.CoreV1Api,
pod_name: Text,
namespace: Text,
exit_condition_lambda: Callable[[client.V1Pod], bool],
condition_description: Text,
timeout_sec: int = 100) -> client.V1Pod:
"""Wait for a POD to meet an exit condition.
Args:
core_api: Client of Core V1 API of Kubernetes API.
pod_name: The name of the POD.
namespace: The namespace of the POD.
      exit_condition_lambda: A lambda which will be called periodically to wait
for a POD to exit. The function returns True to exit.
condition_description: The description of the exit condition which will be
set in the error message if the wait times out.
timeout_sec: The seconds for the function to wait. Defaults to 100s.
Returns:
The POD object which meets the exit condition.
Raises:
RuntimeError: when the function times out.
"""
start_time = datetime.datetime.utcnow()
while True:
resp = self._get_pod(core_api, pod_name, namespace)
absl.logging.info(resp.status.phase)
if exit_condition_lambda(resp):
return resp
elapse_time = datetime.datetime.utcnow() - start_time
if elapse_time.seconds >= timeout_sec:
raise RuntimeError(
'Pod "%s:%s" does not reach "%s" within %s seconds.' %
(namespace, pod_name, condition_description, timeout_sec))
# TODO(hongyes): add exponential backoff here.
time.sleep(1)
def _build_pod_name(self, execution_id: int) -> Text:
if self._pipeline_info.run_id:
pipeline_name = (
self._pipeline_info.pipeline_name[:50] + '-' +
self._pipeline_info.run_id[:50])
else:
pipeline_name = self._pipeline_info.pipeline_name[:100]
pod_name = '%s-%s-%s' % (
pipeline_name, self._component_info.component_id[:50], execution_id)
return _sanitize_pod_name(pod_name)
| 36.84375 | 100 | 0.67693 |
1bf7753a6a0b11f00a4ae908d861b427dfd07bff | 17,169 | py | Python | deployer/deploy_actions.py | 2i2c-org/similar-hubs | 59e72743a098f2e61a20529fce133e1ef9197ceb | [
"BSD-3-Clause"
] | null | null | null | deployer/deploy_actions.py | 2i2c-org/similar-hubs | 59e72743a098f2e61a20529fce133e1ef9197ceb | [
"BSD-3-Clause"
] | 13 | 2020-09-29T17:42:32.000Z | 2020-10-06T16:52:06.000Z | deployer/deploy_actions.py | 2i2c-org/low-touch-hubs | 35ce60e604e820f09dc877b3cc9daf75e43b5d2c | [
"BSD-3-Clause"
] | null | null | null | """
Actions available when deploying many JupyterHubs to many Kubernetes clusters
"""
import base64
import json
import os
import shutil
import subprocess
import sys
from contextlib import redirect_stderr, redirect_stdout
from pathlib import Path
import pytest
from auth import KeyProvider
from cluster import Cluster
from config_validation import (
assert_single_auth_method_enabled,
validate_cluster_config,
validate_hub_config,
validate_support_config,
)
from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file
from helm_upgrade_decision import (
assign_staging_jobs_for_missing_clusters,
discover_modified_common_files,
ensure_support_staging_jobs_have_correct_keys,
generate_hub_matrix_jobs,
generate_support_matrix_jobs,
get_all_cluster_yaml_files,
move_staging_hubs_to_staging_matrix,
pretty_print_matrix_jobs,
)
from ruamel.yaml import YAML
from utils import print_colour
# Without `pure=True`, I get an exception about str / byte issues
yaml = YAML(typ="safe", pure=True)
helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts")
def use_cluster_credentials(cluster_name):
"""
Quickly gain command-line access to a cluster by updating the current
kubeconfig file to include the deployer's access credentials for the named
cluster and mark it as the cluster to work against by default.
This function is to be used with the `use-cluster-credentials` CLI
command only - it is not used by the rest of the deployer codebase.
"""
validate_cluster_config(cluster_name)
config_file_path = find_absolute_path_to_cluster_file(cluster_name)
with open(config_file_path) as f:
cluster = Cluster(yaml.load(f), config_file_path.parent)
# Cluster.auth() method has the context manager decorator so cannot call
# it like a normal function
with cluster.auth():
# This command will spawn a new shell with all the env vars (including
# KUBECONFIG) inherited, and once you quit that shell the python program
# will resume as usual.
# TODO: Figure out how to change the PS1 env var of the spawned shell
# to change the prompt to f"cluster-{cluster.spec['name']}". This will
# make it visually clear that the user is now operating in a different
# shell.
subprocess.check_call([os.environ["SHELL"], "-l"])
def deploy_support(cluster_name):
"""
Deploy support components to a cluster
"""
validate_cluster_config(cluster_name)
validate_support_config(cluster_name)
config_file_path = find_absolute_path_to_cluster_file(cluster_name)
with open(config_file_path) as f:
cluster = Cluster(yaml.load(f), config_file_path.parent)
if cluster.support:
with cluster.auth():
cluster.deploy_support()
def deploy_grafana_dashboards(cluster_name):
"""
Deploy grafana dashboards to a cluster that provide useful metrics
for operating a JupyterHub
Grafana dashboards and deployment mechanism in question are maintained in
this repo: https://github.com/jupyterhub/grafana-dashboards
"""
validate_cluster_config(cluster_name)
validate_support_config(cluster_name)
config_file_path = find_absolute_path_to_cluster_file(cluster_name)
with open(config_file_path) as f:
cluster = Cluster(yaml.load(f), config_file_path.parent)
# If grafana support chart is not deployed, then there's nothing to do
if not cluster.support:
print_colour(
"Support chart has not been deployed. Skipping Grafana dashboards deployment..."
)
return
grafana_token_file = (config_file_path.parent).joinpath(
"enc-grafana-token.secret.yaml"
)
# Read the cluster specific secret grafana token file
with get_decrypted_file(grafana_token_file) as decrypted_file_path:
with open(decrypted_file_path) as f:
config = yaml.load(f)
# Check GRAFANA_TOKEN exists in the secret config file before continuing
if "grafana_token" not in config.keys():
raise ValueError(
f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}"
)
# FIXME: We assume grafana_url and uses_tls config will be defined in the first
# file listed under support.helm_chart_values_files.
support_values_file = cluster.support.get("helm_chart_values_files", [])[0]
with open(config_file_path.parent.joinpath(support_values_file)) as f:
support_values_config = yaml.load(f)
# Get the url where grafana is running from the support values file
grafana_url = (
support_values_config.get("grafana", {}).get("ingress", {}).get("hosts", {})
)
uses_tls = (
support_values_config.get("grafana", {}).get("ingress", {}).get("tls", {})
)
if not grafana_url:
print_colour(
"Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..."
)
return
grafana_url = (
f"https://{grafana_url[0]}" if uses_tls else f"http://{grafana_url[0]}"
)
# Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana
print_colour("Cloning jupyterhub/grafana-dashboards...")
dashboards_dir = "grafana_dashboards"
subprocess.check_call(
[
"git",
"clone",
"https://github.com/jupyterhub/grafana-dashboards",
dashboards_dir,
]
)
# We need the existing env too for the deployer to be able to find jssonnet and grafonnet
deploy_env = os.environ.copy()
deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]})
try:
print_colour(f"Deploying grafana dashboards to {cluster_name}...")
subprocess.check_call(
["./deploy.py", grafana_url], env=deploy_env, cwd=dashboards_dir
)
print_colour(f"Done! Dashboards deployed to {grafana_url}.")
finally:
# Delete the directory where we cloned the repo.
# The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here.
# Might be because opening more than once of a temp file is tried
# (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile)
shutil.rmtree(dashboards_dir)
def deploy(cluster_name, hub_name, config_path):
"""
Deploy one or more hubs in a given cluster
"""
validate_cluster_config(cluster_name)
validate_hub_config(cluster_name, hub_name)
assert_single_auth_method_enabled(cluster_name, hub_name)
with get_decrypted_file(config_path) as decrypted_file_path:
with open(decrypted_file_path) as f:
config = yaml.load(f)
# Most of our hubs use Auth0 for Authentication. This lets us programmatically
# determine what auth provider each hub uses - GitHub, Google, etc. Without
# this, we'd have to manually generate credentials for each hub - and we
    # don't want to do that. Auth0 domains are tied to an account, and
# this is our auth0 domain for the paid account that 2i2c has.
auth0 = config["auth0"]
k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"])
# Each hub needs a unique proxy.secretToken. However, we don't want
# to manually generate & save it. We also don't want it to change with
# each deploy - that causes a pod restart with downtime. So instead,
# we generate it based on a single secret key (`PROXY_SECRET_KEY`)
# combined with the name of each hub. This way, we get unique,
# cryptographically secure proxy.secretTokens without having to
# keep much state. We can rotate them by changing `PROXY_SECRET_KEY`.
# However, if `PROXY_SECRET_KEY` leaks, that means all the hub's
# proxy.secretTokens have leaked. So let's be careful with that!
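    # (Illustration only: the per-hub derivation itself happens downstream in
    # hub.deploy(), which receives SECRET_KEY below; conceptually something like
    # HMAC(SECRET_KEY, hub_name), not a literal recipe documented here.)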
SECRET_KEY = bytes.fromhex(config["secret_key"])
config_file_path = find_absolute_path_to_cluster_file(cluster_name)
with open(config_file_path) as f:
cluster = Cluster(yaml.load(f), config_file_path.parent)
with cluster.auth():
hubs = cluster.hubs
if hub_name:
hub = next((hub for hub in hubs if hub.spec["name"] == hub_name), None)
print_colour(f"Deploying hub {hub.spec['name']}...")
hub.deploy(k, SECRET_KEY)
else:
for i, hub in enumerate(hubs):
print_colour(
f"{i+1} / {len(hubs)}: Deploying hub {hub.spec['name']}..."
)
hub.deploy(k, SECRET_KEY)
def generate_helm_upgrade_jobs(changed_filepaths):
"""Analyse added or modified files from a GitHub Pull Request and decide which
clusters and/or hubs require helm upgrades to be performed for their *hub helm
charts or the support helm chart.
Args:
changed_filepaths (list[str]): A list of files that have been added or
modified by a GitHub Pull Request
"""
(
upgrade_support_on_all_clusters,
upgrade_all_hubs_on_all_clusters,
) = discover_modified_common_files(changed_filepaths)
# Convert changed filepaths into absolute Posix Paths
changed_filepaths = [
Path(os.getcwd()).joinpath(filepath) for filepath in changed_filepaths
]
# Get a list of filepaths to all cluster.yaml files in the repo
cluster_files = get_all_cluster_yaml_files()
# Empty lists to store job definitions in
prod_hub_matrix_jobs = []
support_and_staging_matrix_jobs = []
for cluster_file in cluster_files:
# Read in the cluster.yaml file
with open(cluster_file) as f:
cluster_config = yaml.load(f)
# Get cluster's name and its cloud provider
cluster_name = cluster_config.get("name", {})
provider = cluster_config.get("provider", {})
# Generate template dictionary for all jobs associated with this cluster
cluster_info = {
"cluster_name": cluster_name,
"provider": provider,
"reason_for_redeploy": "",
}
# Check if this cluster file has been modified. If so, set boolean flags to True
intersection = set(changed_filepaths).intersection([str(cluster_file)])
if intersection:
print_colour(
f"This cluster.yaml file has been modified. Generating jobs to upgrade all hubs and the support chart on THIS cluster: {cluster_name}"
)
upgrade_all_hubs_on_this_cluster = True
upgrade_support_on_this_cluster = True
cluster_info["reason_for_redeploy"] = "cluster.yaml file was modified"
else:
upgrade_all_hubs_on_this_cluster = False
upgrade_support_on_this_cluster = False
# Generate a job matrix of all hubs that need upgrading on this cluster
prod_hub_matrix_jobs.extend(
generate_hub_matrix_jobs(
cluster_file,
cluster_config,
cluster_info,
set(changed_filepaths),
upgrade_all_hubs_on_this_cluster=upgrade_all_hubs_on_this_cluster,
upgrade_all_hubs_on_all_clusters=upgrade_all_hubs_on_all_clusters,
)
)
# Generate a job matrix for support chart upgrades
support_and_staging_matrix_jobs.extend(
generate_support_matrix_jobs(
cluster_file,
cluster_config,
cluster_info,
set(changed_filepaths),
upgrade_support_on_this_cluster=upgrade_support_on_this_cluster,
upgrade_support_on_all_clusters=upgrade_support_on_all_clusters,
)
)
# Clean up the matrix jobs
(
prod_hub_matrix_jobs,
support_and_staging_matrix_jobs,
) = move_staging_hubs_to_staging_matrix(
prod_hub_matrix_jobs, support_and_staging_matrix_jobs
)
support_and_staging_matrix_jobs = ensure_support_staging_jobs_have_correct_keys(
support_and_staging_matrix_jobs, prod_hub_matrix_jobs
)
support_and_staging_matrix_jobs = assign_staging_jobs_for_missing_clusters(
support_and_staging_matrix_jobs, prod_hub_matrix_jobs
)
# Pretty print the jobs using rich
pretty_print_matrix_jobs(prod_hub_matrix_jobs, support_and_staging_matrix_jobs)
# The existence of the CI environment variable is an indication that we are running
# in an GitHub Actions workflow
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-defining-outputs-for-a-job
# This will avoid errors trying to set CI output variables in an environment that
# doesn't exist.
ci_env = os.environ.get("CI", False)
if ci_env:
# Add these matrix jobs as output variables for use in another job
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
print(
f"::set-output name=prod-hub-matrix-jobs::{json.dumps(prod_hub_matrix_jobs)}"
)
print(
f"::set-output name=support-and-staging-matrix-jobs::{json.dumps(support_and_staging_matrix_jobs)}"
)
def run_hub_health_check(cluster_name, hub_name, check_dask_scaling=False):
"""Run a health check on a given hub on a given cluster. Optionally check scaling
of dask workers if the hub is a daskhub.
Args:
cluster_name (str): The name of the cluster where the hub is deployed
hub_name (str): The name of the hub to run a health check for
check_dask_scaling (bool, optional): If true, run an additional check that dask
workers can scale. Only applies to daskhubs. Defaults to False.
Returns
exit_code (int): The exit code of the pytest process. 0 for pass, any other
integer number greater than 0 for failure.
"""
# Read in the cluster.yaml file
config_file_path = find_absolute_path_to_cluster_file(cluster_name)
with open(config_file_path) as f:
cluster = Cluster(yaml.load(f), config_file_path.parent)
# Find the hub's config
hub_indx = [
indx for (indx, h) in enumerate(cluster.hubs) if h.spec["name"] == hub_name
]
if len(hub_indx) == 1:
hub = cluster.hubs[hub_indx[0]]
elif len(hub_indx) > 1:
print_colour("ERROR: More than one hub with this name found!")
sys.exit(1)
elif len(hub_indx) == 0:
print_colour("ERROR: No hubs with this name found!")
sys.exit(1)
print_colour(f"Running hub health check for {hub.spec['name']}...")
# Check if this hub has a domain override file. If yes, apply override.
if "domain_override_file" in hub.spec.keys():
domain_override_file = hub.spec["domain_override_file"]
with get_decrypted_file(
hub.cluster.config_path.joinpath(domain_override_file)
) as decrypted_path:
with open(decrypted_path) as f:
domain_override_config = yaml.load(f)
hub.spec["domain"] = domain_override_config["domain"]
# Retrieve hub's URL
hub_url = f'https://{hub.spec["domain"]}'
# Read in the service api token from a k8s Secret in the k8s cluster
with cluster.auth():
try:
service_api_token_b64encoded = subprocess.check_output(
[
"kubectl",
"get",
"secrets",
"hub",
f"--namespace={hub.spec['name']}",
r"--output=jsonpath={.data['hub\.services\.hub-health\.apiToken']}",
],
text=True,
)
except subprocess.CalledProcessError as e:
raise ValueError(
f"Failed to acquire a JupyterHub API token for the hub-health service: {e.stdout}"
)
service_api_token = base64.b64decode(service_api_token_b64encoded).decode()
# On failure, pytest prints out params to the test that failed.
# This can contain sensitive info - so we hide stderr
# FIXME: Don't use pytest - just call a function instead
#
# Show errors locally but redirect on CI
gh_ci = os.environ.get("CI", "false")
pytest_args = [
"-q",
"deployer/tests",
f"--hub-url={hub_url}",
f"--api-token={service_api_token}",
f"--hub-type={hub.spec['helm_chart']}",
]
if (hub.spec["helm_chart"] == "daskhub") and check_dask_scaling:
pytest_args.append("--check-dask-scaling")
if gh_ci == "true":
print_colour("Testing on CI, not printing output")
with open(os.devnull, "w") as dn, redirect_stderr(dn), redirect_stdout(dn):
exit_code = pytest.main(pytest_args)
else:
print_colour("Testing locally, do not redirect output")
exit_code = pytest.main(pytest_args)
if exit_code != 0:
print("Health check failed!", file=sys.stderr)
sys.exit(exit_code)
else:
print_colour("Health check succeeded!")
return exit_code
| 38.843891 | 150 | 0.676335 |
943e26e769fa8ea0cefd87ee786d42ec5e2350ba | 1,128 | py | Python | lists/minStack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null | lists/minStack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null | lists/minStack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null | from stack import *
class MinStack:
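    # Min-tracking stack: dataStk holds every pushed value while minStk mirrors
    # the running minimum (duplicates included), keeping getMin() and pop() O(1).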
def __init__(self):
self.dataStk = ArrayStack()
self.minStk = ArrayStack()
def pop(self):
data = self.dataStk.pop()
if data == self.minStk.peek():
self.minStk.pop()
return data
def push(self, val):
if not self.minStk.isEmpty():
            if val <= self.minStk.peek():
self.minStk.push(val)
else:
self.minStk.push(val)
self.dataStk.push(val)
def getMin(self):
return self.minStk.peek()
def peek(self):
return self.dataStk.peek()
if __name__ == "__main__":
mStk = MinStack()
mStk.push(14)
mStk.push(16)
mStk.push(14)
mStk.push(43)
mStk.push(15)
mStk.push(13)
mStk.push(13)
mStk.push(12)
print("top of mstk ", mStk.peek())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
print("pop - ", mStk.pop())
print("min - ", mStk.getMin())
"""
"""
| 20.142857 | 69 | 0.531028 |
4685d1a27e24185f693e141e05079dd91b06b2a5 | 32,954 | py | Python | hata/backend/reqrep.py | ToxicKidz/hata | f834c3cee3920d3095254815582325c5232022d7 | [
"0BSD"
] | null | null | null | hata/backend/reqrep.py | ToxicKidz/hata | f834c3cee3920d3095254815582325c5232022d7 | [
"0BSD"
] | null | null | null | hata/backend/reqrep.py | ToxicKidz/hata | f834c3cee3920d3095254815582325c5232022d7 | [
"0BSD"
] | null | null | null | import json, re, codecs
from http.cookies import SimpleCookie, CookieError, Morsel
from hashlib import md5, sha1, sha256
try:
import ssl as module_ssl
except ImportError:
module_ssl = None
try:
import cchardet as chardet
except ImportError:
try:
import chardet
except ImportError as err:
message = (
'chardet (or cchardet) is not installed, please make sure it is installed before importing '
f'{__spec__.parent}'
)
err.args = (message,)
err.msg = message
raise err from None
from .utils import imultidict
from .futures import Task, CancelledError
from .headers import (
METHOD_POST_ALL,
METHOD_CONNECT,
SET_COOKIE,
CONTENT_LENGTH,
CONNECTION,
ACCEPT,
ACCEPT_ENCODING,
HOST,
TRANSFER_ENCODING,
COOKIE,
CONTENT_ENCODING,
AUTHORIZATION,
CONTENT_TYPE,
)
from .helpers import BasicAuth
from .multipart import MimeType, create_payload
from .formdata import Formdata
from .protocol import HTTPStreamWriter
json_re = re.compile(r'^application/(?:[\w.+-]+?\+)?json')
class Fingerprint:
"""
HTTP fingerprinting can be used to automate information systems and security audits. Automated security testing
tools can use HTTP fingerprinting to narrow down the set of tests required, based on the specific platform or the
specific web server being audited.
Attributes
----------
fingerprint : `bytes`
The fingerprint's value.
hash_function : `function`
Hash function used by the fingerprint.
Class Attributes
----------------
HASH_FUNCTION_BY_DIGEST_LENGTH : `dict` of (`int`, `function`) items
`fingerprint`'s length - `hash-function` relation mapping.
"""
__slots__ = (
'fingerprint',
'hash_function',
)
HASH_FUNCTION_BY_DIGEST_LENGTH = {
16: md5,
20: sha1,
32: sha256,
}
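    # Digest lengths of 16/20/32 bytes correspond to md5/sha1/sha256 digests;
    # ``__new__`` below only accepts the sha256 variant.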
def __new__(cls, fingerprint):
"""
Creates a new ``Fingerprint`` instance with the given parameters.
Parameters
----------
fingerprint : `bytes`
Fingerprint value.
Raises
------
ValueError
- If `fingerprint`'s length is not any of the expected ones.
- If the detected `hash_function` is `md5` or `sha1`.
"""
fingerprint_length = len(fingerprint)
try:
hash_function = cls.HASH_FUNCTION_BY_DIGEST_LENGTH[fingerprint_length]
except KeyError:
raise ValueError(
f'`fingerprint` has invalid length, got {fingerprint_length!r}, {fingerprint!r}'
) from None
if hash_function is md5 or hash_function is sha1:
raise ValueError(
'`md5` and `sha1` are insecure and not supported, use `sha256`.'
)
self = object.__new__(cls)
self.hash_function = hash_function
self.fingerprint = fingerprint
return self
def check(self, transport):
"""
Checks whether the given transport's ssl data matches the fingerprint.
Parameters
----------
transport : `Any`
Asynchronous transport implementation.
Raises
------
ValueError
If the fingerprint don't match.
"""
if transport.get_extra_info('sslcontext') is None:
return
ssl_object = transport.get_extra_info('ssl_object')
cert = ssl_object.getpeercert(binary_form=True)
received = self.hash_function(cert).digest()
fingerprint = self.fingerprint
if received == fingerprint:
return
host, port, *_ = transport.get_extra_info('peername')
raise ValueError(
f'The expected fingerprint: {fingerprint!r} not matches the received; received={received!r}, '
f'host={host!r}, port={port!r}.'
)
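# Rough usage sketch (hypothetical digest value): pin a peer certificate by its
# sha256 digest and verify it once the transport is connected, e.g.
# fingerprint = Fingerprint(bytes.fromhex('ab' * 32))
# fingerprint.check(transport)  # raises ValueError on mismatch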
if module_ssl is None:
SSL_ALLOWED_TYPES = (type(None),)
else:
SSL_ALLOWED_TYPES = (module_ssl.SSLContext, bool, Fingerprint, type(None))
class ConnectionKey:
"""
Contains information about a host, like proxy, TLS to prevent reusing wrong connections from the pool.
Attributes
----------
host : `str`
The host's ip address.
is_ssl : `bool`
Whether the connection is secure.
port : `int`
The host's port.
proxy_auth : `None` or ``BasicAuth``
Proxy authorization.
proxy_url : `None` or ``URL``
Proxy's url.
ssl : `None`, ``SSLContext``, `bool`, ``Fingerprint``
The connection's ssl type.
"""
__slots__ = (
'host',
'is_ssl',
'port',
'proxy_auth',
'proxy_url',
'ssl',
) # + 'proxy_header_hash',
def __init__(self, request):
# proxy_headers = request.proxy_headers
# if request.proxy_headers is not None:
# proxy_header_hash = hash(tuple(proxy_headers.items()))
# else:
# proxy_header_hash = None
self.host = request.host
self.port = request.port
self.is_ssl = request.is_ssl()
self.ssl = request.ssl
self.proxy_auth = request.proxy_auth
self.proxy_url = request.proxy_url
# self.proxy_header_hash = proxy_header_hash
def __repr__(self):
"""Returns the connection key's representation."""
return f'<{self.__class__.__name__} host={self.host!r}, port={self.port!r}>'
def __eq__(self, other):
"""Returns whether the two connection keys are equal."""
if type(self) is not type(other):
return NotImplemented
if self.host != other.host:
return False
if self.port != other.port:
return False
if self.is_ssl != other.is_ssl:
return False
if self.ssl is None:
if other.ssl is not None:
return False
else:
if other.ssl is None:
return False
if self.ssl != other.ssl:
return False
if self.proxy_auth is None:
if other.proxy_auth is not None:
return False
else:
if other.proxy_auth is None:
return False
if self.proxy_auth != other.proxy_auth:
return False
if self.proxy_url is None:
if other.proxy_url is not None:
return False
else:
if other.proxy_url is None:
return False
if self.proxy_url != other.proxy_url:
return False
return True
def __hash__(self):
"""Returns the connection key's hash value."""
return (
hash(self.host)
^ (self.port << 17)
^ hash(self.is_ssl)
^ hash(self.ssl)
^ hash(self.proxy_auth)
^ hash(self.proxy_url)
)
class RequestInfo:
"""
Base information representing a request.
Attributes
----------
headers : ``imultidict``
The respective request's headers.
method : `str`
The respective request's method.
real_url : ``URL``
The url given to request.
url : ``URL``
The requested url without fragments. Can be same as ``.real_url``.
"""
__slots__ = (
'headers',
'method',
'real_url',
'url',
)
def __init__(self, request):
"""
Creates a new ``RequestInfo`` instance representing the given request.
Parameters
----------
request : ``ClientRequest``
The represented request.
"""
self.url = request.url
self.method = request.method
self.headers = request.headers
self.real_url = request.original_url
def __repr__(self):
"""Returns the request info's representation."""
return f'<{self.__class__.__name__} url={self.url!r}>'
DEFAULT_HEADERS = (
(ACCEPT, '*/*'),
(ACCEPT_ENCODING, 'gzip, deflate'),
)
class ClientRequest:
"""
Http request class used by ``HTTPClient``.
Attributes
----------
auth : `None` or ``BasicAuth``
Authorization sent with the request.
body : `None`, ``PayloadBase`` instance
The request's body.
chunked : `bool`
Whether the request is sent chunked.
compression : `None` or `str`
Compression used when sending the request.
headers : `imultidict`
The headers of the request.
loop : ``EventThread``
        The event loop, through which the request is executed.
method : `str`
The request's method.
original_url : ``URL``
The original url, what was asked to request.
proxy_auth : `None` or ``BasicAuth``
Proxy authorization sent with the request.
proxy_url : `None` or ``URL``
Proxy url to use if applicable.
response : `None` or ``ClientResponse``
Object representing the received response. Set as `None` till ``.send`` finishes.
ssl : `None` `None`, ``SSLContext``, `bool`, ``Fingerprint``
The connection's ssl type.
url : ``URL``
The url, what will be requested.
writer : `None` or ``Task`` of ``.write_bytes``
        Payload writer task, which is present while the request's payload is being sent.
"""
__slots__ = (
'auth',
'body',
'chunked',
'compression',
'headers',
'loop',
'method',
'original_url',
'proxy_auth',
'proxy_url',
'response',
'ssl',
'url',
'writer',
)
def __new__(
cls,
loop,
method,
url,
headers,
data,
params,
cookies,
auth,
proxy_url,
proxy_auth,
ssl,
):
"""
Creates a new ``ClientRequest`` instance with the given parameters.
Parameters
----------
loop : ``EventThread``
The event loop, trough what the request is executed.
method : `str`
The request's method.
url : ``URL``
The url to request.
headers : `None`, `dict` or ``imultidict``
Headers of the request.
        data : `None`, `bytes-like`, `io-like`, ``Formdata``
Data to send as the request's body.
params : `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items
Query string parameters.
cookies : `None` or ``CookieJar``
Cookies OwO.
auth : `None` or ``BasicAuth``
Authorization sent with the request.
proxy_url : `None` or ``URL``
Proxy url to use if applicable.
proxy_auth : `None` or ``BasicAuth``
Proxy authorization sent with the request.
ssl : `None` `None`, ``SSLContext``, `bool`, ``Fingerprint``
The connection's ssl type.
Raises
------
TypeError
- `proxy_auth`'s type is incorrect.
            - Cannot serialize a field of the given `data`.
ValueError
- Host could not be detected from `url`.
- The `proxy_url`'s scheme is not `http`.
- `compression` and `Content-Encoding` would be set at the same time.
- `chunked` cannot be set, because `Transfer-Encoding: chunked` is already set.
- `chunked` cannot be set, because `Content-Length` header is already present.
RuntimeError
- If one of `data`'s field's content has unknown content-encoding.
- If one of `data`'s field's content has unknown content-transfer-encoding.
"""
# Convert headers
headers = imultidict(headers)
# Add extra query parameters to the url and remove fragments
url = url.extend_query(params)
request_url = url.with_fragment(None)
if not url.host:
raise ValueError('Host could not be detected.')
# Check authorization
if auth is None:
            # If no authorization was given, try to detect it from the url.
username = url.user
password = url.password
if (username is not None) and username:
if password is None:
password = ''
auth = BasicAuth(username, password)
# Store auth in headers is applicable.
if auth is not None:
headers[AUTHORIZATION] = auth.encode()
for key, value in DEFAULT_HEADERS:
headers.setdefault(key, value)
# Add host to headers if not present.
if HOST not in headers:
netloc = request_url.raw_host
if not request_url.is_default_port():
netloc = f'{netloc}:{request_url.port}'
headers[HOST] = netloc
# Update cookies
if (cookies is not None) and cookies:
cookie = SimpleCookie()
if COOKIE in headers:
cookie.load(headers.get(COOKIE, ''))
del headers[COOKIE]
for key, value in cookies.items():
if isinstance(key, Morsel):
# Preserve coded_value
try:
morsel_value = value.get(value.key, None)
except KeyError:
morsel_value = Morsel()
morsel_value.set(value.key, value.value, value.coded_value)
value = morsel_value
cookie[key] = value
headers[COOKIE] = cookie.output(header='', sep=';').strip()
# Check proxy settings.
if proxy_url is not None:
if proxy_url.scheme != 'http':
raise ValueError(f'Only http proxies are supported, got {proxy_url!r}.')
if proxy_auth is not None:
proxy_auth_type = proxy_auth.__class__
if proxy_auth_type is not BasicAuth:
raise TypeError(
f'`proxy_auth` must be `None` or `{BasicAuth.__name__}`, got '
f'{proxy_auth_type.__name__}.'
)
# Needed for transfer data checks
chunked = True
compression = headers.get(CONTENT_ENCODING, None)
# Get request content encoding.
if data is not None:
if data:
if compression is not None:
if headers.get(CONTENT_ENCODING, ''):
raise ValueError(
'Compression can not be set if `Content-Encoding` header is set.'
)
chunked = True
# formdata
if isinstance(data, Formdata):
data = data()
else:
try:
data = create_payload(data, {'disposition': None})
except LookupError:
data = Formdata.from_fields(data)()
if not chunked:
if CONTENT_LENGTH not in headers:
size = data.size
if size is None:
chunked = True
else:
if CONTENT_LENGTH not in headers:
headers[CONTENT_LENGTH] = str(size)
if CONTENT_TYPE not in headers:
headers[CONTENT_TYPE] = data.content_type
data_headers = data.headers
if data_headers:
for key, value in data_headers.items():
headers.setdefault(key, value)
else:
data = None
# Analyze transfer-encoding header.
transfer_encoding = headers.get(TRANSFER_ENCODING, '').lower()
if 'chunked' in transfer_encoding:
if chunked:
raise ValueError(
'Chunked can not be set if `Transfer-Encoding: chunked` header is already set.'
)
elif chunked:
if CONTENT_LENGTH in headers:
raise ValueError(
'Chunked can not be set if `Content-Length` header is set.'
)
headers[TRANSFER_ENCODING] = 'chunked'
else:
if CONTENT_LENGTH not in headers:
headers[CONTENT_LENGTH] = '0' if data is None else str(len(data))
# Set default content-type.
if (method in METHOD_POST_ALL) and (CONTENT_TYPE not in headers):
headers[CONTENT_TYPE] = 'application/octet-stream'
# Everything seems correct, create the object.
self = object.__new__(cls)
self.original_url = url
self.url = request_url
self.method = method
self.loop = loop
self.ssl = ssl
self.chunked = chunked
self.compression = compression
self.body = data
self.auth = auth
self.writer = None
self.response = None
self.headers = headers
self.proxy_url = proxy_url
self.proxy_auth = proxy_auth
return self
def is_ssl(self):
"""
Returns whether the request is ssl.
Returns
-------
is_ssl : `bool`
"""
return self.url.scheme in ('https', 'wss')
@property
def connection_key(self):
"""
Returns the connection key of request.
Returns
-------
connection_key : ``ConnectionKey``
"""
return ConnectionKey(self)
@property
def request_info(self):
"""
Returns base information representing the request.
Returns
-------
request_info : ``RequestInfo``
"""
return RequestInfo(self)
@property
def host(self):
"""
Returns the request's host.
Returns
-------
host : `str`
"""
return self.url.host
@property
def port(self):
"""
Returns the request's port.
Returns
-------
port : `int`
"""
return self.url.port
async def write_bytes(self, writer, connection):
"""
        Writes the request's body.
This method is a coroutine.
Parameters
----------
writer : ``HTTPStreamWriter``
Writer used to write the request's body into the connection's transport.
connection : ``Connection``
Connection of the request with what the payload is sent.
"""
        # Support coroutines that yield bytes objects.
try:
body = self.body
if body is not None:
await self.body.write(writer)
await writer.write_eof()
except OSError as err:
new_err = OSError(
err.errno, f'Can not write request body for {self.url!r}.'
)
new_err.__context__ = err
new_err.__cause__ = err
connection.protocol.set_exception(new_err)
except CancelledError as err:
if not connection.closed:
connection.protocol.set_exception(err)
except BaseException as err:
connection.protocol.set_exception(err)
raise
finally:
self.writer = None
def send(self, connection):
"""
Sends the request.
Parameters
----------
connection : ``Connection``
Connection, what is used to send the request.
Returns
-------
response : `coroutine` of ``ClientResponse.start`` ->
"""
try:
url = self.url
if self.method == METHOD_CONNECT:
path = f'{url.raw_host}:{url.port}'
elif (self.proxy_url is not None) and (not self.is_ssl()):
path = str(url)
else:
path = url.raw_path
if url.raw_query_string:
path = f'{path}?{url.raw_query_string}'
protocol = connection.protocol
writer = HTTPStreamWriter(protocol, self.compression, self.chunked)
protocol.write_http_request(self.method, path, self.headers)
self.writer = Task(self.write_bytes(writer, connection), self.loop)
self.response = response = ClientResponse(self, connection)
return response.start()
except:
connection.close()
raise
def terminate(self):
"""
Terminates the request's writing task if applicable.
"""
writer = self.writer
if writer is not None:
self.writer = None
writer.cancel()
class ClientResponse:
"""
Http response class used by ``HTTPClient``.
Attributes
----------
_released : `bool`
Whether the connection is released.
body : `None` or `bytes`
The received response body. Set as `None` if the response body is not yet received, or if it is empty.
closed : `bool`
Whether the response is closed.
connection : `None` or ``Connection``
Connection used to receive the request response. Set as `None` if the response is ``.close``-d or
``.release``-d.
payload_waiter : `None` or ``Future``
        Future used to retrieve the response's body. Its result is set when the respective protocol's reader task
        finishes.
cookies : `http.cookies.SimpleCookie`
Received cookies with the response.
headers : `None` or ``imultidict``
Headers of the response. Set when the http response is successfully received.
history : `None` or `tuple` of ``ClientResponse``
Response history. Set as `tuple` of responses from outside.
loop : ``EventThread``
        The event loop, through which the request is executed.
method : `str`
Method of the respective request.
status : `None` or `int`
Received status code. Set as `0` by default.
url : ``URL``
The requested url.
writer : ``Task`` of ``ClientRequest.write_bytes``
Payload writer task of the respective request.
raw_message : `None` or ``RawResponseMessage``
Raw received http response.
"""
__slots__ = (
'_released',
'body',
'closed',
'connection',
'payload_waiter',
'cookies',
'headers',
'history',
'loop',
'method',
'status',
'url',
'writer',
'raw_message',
)
def __new__(cls, request, connection):
"""
        Creates a new ``ClientResponse`` instance from the given request and connection.
Parameters
----------
request : ``ClientRequest``
The respective request.
connection : ``Connection``
The connection used to send the request and receive the response.
"""
self = object.__new__(cls)
self.loop = request.loop
self.method = request.method
self.url = request.original_url
self.writer = request.writer
self.closed = False
self.cookies = SimpleCookie()
self._released = False
self.body = None
self.status = 0
self.payload_waiter = None
self.headers = None
self.connection = connection
self.raw_message = None
self.history = None # will be added later
return self
@property
def reason(self):
"""
Returns the server response reason.
reason : `str` or `None`
"""
message = self.raw_message
if message is not None:
reason = message.reason
if reason is not None:
return reason.decode()
def __del__(self):
"""releases the response if not yet closed."""
if self.closed:
return
self._release_connection()
def __repr__(self):
"""Returns the response's representation."""
ascii_encodable_url = str(self.url)
return f'<{self.__class__.__name__}({ascii_encodable_url}) [{self.status} {self.reason!r}]>'
    async def start(self):
"""
Starts response processing.
This method is a coroutine.
Returns
-------
self : ``ClientResponse``
"""
try:
protocol = self.connection.protocol
payload_waiter = protocol.set_payload_reader(protocol._read_http_response())
self.raw_message = message = await payload_waiter
protocol.handle_payload_waiter_cancellation()
payload_reader = protocol.get_payload_reader_task(message)
if payload_reader is None:
payload_waiter = None
self._response_eof(None)
else:
payload_waiter = protocol.set_payload_reader(payload_reader)
payload_waiter.add_done_callback(self._response_eof)
# response status
self.status = message.status
# headers
self.headers = message.headers
# OwO
self.payload_waiter = payload_waiter
# cookies
for header in self.headers.get_all(SET_COOKIE, ()):
try:
self.cookies.load(header)
except CookieError: # so sad
pass
except:
self.close()
raise
return self
def _response_eof(self, future):
"""
Future callback added to the payload waiter future, to release the used connection.
Parameters
----------
future : ``Future``
``.payload_waiter`` future.
"""
if self.closed:
return
self.payload_waiter = None
connection = self.connection
if connection is not None:
# Websocket, protocol could be `None`, because connection could be detached.
if (connection.protocol is not None) and self.raw_message.upgraded:
return
self._release_connection()
self.closed = True
self._cleanup_writer()
def _release_connection(self):
"""
Releases the response's connection.
If the connection type is "close", closes the protocol as well.
"""
connection = self.connection
if connection is None:
return
headers = self.headers
if headers is not None:
try:
connection_type = headers[CONNECTION]
except KeyError:
pass
else:
if connection_type == 'close':
protocol = connection.protocol
if protocol is not None:
protocol.close()
connection.release()
self.connection = None
def _notify_content(self):
"""
Called when response reading is cancelled or released. Sets `ConnectionError` to the respective protocol if
        the payload is still being read.
"""
payload_waiter = self.payload_waiter
if payload_waiter is not None:
connection = self.connection
if connection is not None:
connection.protocol.set_exception(ConnectionError('Connection closed.'))
self._released = True
def _cleanup_writer(self):
"""
Cancels the writer task of the respective request. Called when the response is cancelled or released, or if
reading the whole response is done.
"""
writer = self.writer
if writer is not None:
self.writer = None
writer.cancel()
async def read(self):
"""
Reads the response's body.
This method is a coroutine.
Returns
-------
body : `bytes`
"""
payload_waiter = self.payload_waiter
if payload_waiter is None:
body = self.body
else:
try:
self.body = body = await payload_waiter
finally:
self.payload_waiter = None
return body
def get_encoding(self):
"""
Gets the encoding of the response's body.
Returns
-------
encoding : `str`
Defaults to `'utf-8'`.
"""
content_type = self.headers.get(CONTENT_TYPE, '').lower()
mime_type = MimeType(content_type)
encoding = mime_type.parameters.get('charset', None)
if encoding is not None:
try:
codecs.lookup(encoding)
except LookupError:
encoding = None
if encoding is None:
if mime_type.type == 'application' and mime_type.sub_type == 'json':
encoding = (
'utf-8' # RFC 7159 states that the default encoding is UTF-8.
)
else:
encoding = chardet.detect(self.body)['encoding']
if not encoding:
encoding = 'utf-8'
return encoding
async def text(self, encoding=None, errors='strict'):
"""
Loads the response's content as text.
This method is a coroutine.
Parameters
----------
encoding : `None` or `str`, Optional
            If no encoding is given, then the encoding is detected from the payload.
errors : `str`, Optional
May be given to set a different error handling scheme. The default `errors` value is `'strict'`, meaning
that encoding errors raise a `UnicodeError`. Other possible values are `'ignore'`, `'replace'`,
`'xmlcharrefreplace'`, `'backslashreplace'` and any other name registered via `codecs.register_error()`.
Returns
-------
text : `str`
"""
body = await self.read()
if body is None:
return
if encoding is None:
encoding = self.get_encoding()
return body.decode(encoding, errors)
async def json(
self, encoding=None, loader=json.loads, content_type='application/json'
):
"""
Loads the response's content as a json.
This method is a coroutine.
Parameters
----------
        encoding : `None` or `str`, Optional
Encoding to use instead of the response's. If given as `None` (so by default), then will use the response's
own encoding.
loader : `callable`, Optional
            JSON loader. Defaults to `json.loads`.
content_type : `str`, Optional
Content type to use instead of the default one. Defaults to `'application/json'`.
Returns
-------
json : `Any`
Raises
------
TypeError
            If the response's mime_type does not match.
"""
body = await self.read()
if body is None:
return
if content_type is not None:
received_content_type = self.headers.get(CONTENT_TYPE, '').lower()
if (
(json_re.match(received_content_type) is None)
if (content_type == 'application/json')
else (content_type not in received_content_type)
):
raise TypeError(
f'Attempt to decode JSON with unexpected mime_type: {received_content_type!r}.'
)
stripped = body.strip()
if not stripped:
return None
if encoding is None:
encoding = self.get_encoding()
return loader(stripped.decode(encoding))
async def __aenter__(self):
"""
Enters the client response as an asynchronous context manager.
This method is a coroutine.
"""
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""
Releases the response if not yet closed.
This method is a coroutine.
"""
self.release()
return False
def close(self):
"""
Closes the response and it's connection. The used connection will not be reused after.
"""
if not self._released:
self._notify_content()
if self.closed:
return
self.closed = True
connection = self.connection
if connection is not None:
self.connection = None
connection.close()
self._cleanup_writer()
def release(self):
"""
Releases the response and it's connection. The used connection might be reused after.
"""
if not self._released:
self._notify_content()
if self.closed:
return
self.closed = True
self._release_connection()
self._cleanup_writer()
| 29.292444 | 119 | 0.549827 |
774db7717305eb9e0eb57238da718cda42eab4b9 | 2,143 | py | Python | pkg/suggestion/v1alpha1/NAS_Envelopenet/suggestion_param.py | zhenghuiwang/katib | de96a52dd9e9c8cf4165927b7fb17023cfd652fd | [
"Apache-2.0"
] | null | null | null | pkg/suggestion/v1alpha1/NAS_Envelopenet/suggestion_param.py | zhenghuiwang/katib | de96a52dd9e9c8cf4165927b7fb17023cfd652fd | [
"Apache-2.0"
] | 1 | 2019-06-14T21:22:35.000Z | 2019-06-14T21:22:35.000Z | pkg/suggestion/v1alpha1/NAS_Envelopenet/suggestion_param.py | zhenghuiwang/katib | de96a52dd9e9c8cf4165927b7fb17023cfd652fd | [
"Apache-2.0"
] | null | null | null | def parseSuggestionParam(params_raw):
param_standard = {
"gpus": ['categorical', list, []],
"gpu_usage": ['value', float, [1e-6, 1.0]],
"steps": ['value', int, [0, 'inf']],
"batch_size": ['value', int, [1, 'inf']],
"dataset": ['categorical', str, ["cifar10", "imagenet"]],
"iterations": ['value', int, [0, 20]],
"log_stats": ['categorical', bool, [True, False]],
"data_dir":['categorical', str, ["data/"]],
"max_layers_per_stage":['categorical', list, []]
}
suggestion_params = {
"data_dir":"data/",
"gpus": [],
"gpu_usage": 0.47,
"steps": 10000,
"batch_size": 50,
"dataset": "cifar10",
"iterations": 5,
"log_stats": True,
"max_layers_per_stage":[7,6,3]
}
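    # checktype() casts a raw value to its expected type and, for 'value'-mode
    # parameters, range-checks it before overwriting the default above.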
def checktype(param_name, param_value, check_mode, supposed_type, supposed_range=None):
correct = True
try:
converted_value = supposed_type(param_value)
except:
correct = False
print("Parameter {} is of wrong type. Set back to default value {}"
.format(param_name, suggestion_params[param_name]))
if correct and check_mode == 'value':
if not ((supposed_range[0] == '-inf' or converted_value >= supposed_range[0]) and
(supposed_range[1] == 'inf' or converted_value <= supposed_range[1])):
correct = False
print("Parameter {} out of range. Set back to default value {}"
.format(param_name, suggestion_params[param_name]))
if correct:
suggestion_params[param_name] = converted_value
for param in params_raw:
if param.name in suggestion_params.keys():
checktype(param.name,
param.value,
param_standard[param.name][0], # mode
param_standard[param.name][1], # type
param_standard[param.name][2]) # range
else:
print("Unknown Parameter name: {}".format(param.name))
return suggestion_params
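# Example (hypothetical input): a parameter named "steps" with value "20000"
# passes the [0, 'inf'] range check and overrides the default of 10000.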
| 36.948276 | 93 | 0.543164 |
24f4f344e9ab9658fe6a70acbdbc6d0a41a15488 | 361 | py | Python | configs/_base_/schedules/sgd_tsm_50e.py | rlleshi/mmaction2 | 6993693f178b1a59e5eb07f1a3db484d5e5de61a | [
"Apache-2.0"
] | 1,870 | 2020-07-11T09:33:46.000Z | 2022-03-31T13:21:36.000Z | configs/_base_/schedules/sgd_tsm_50e.py | rlleshi/mmaction2 | 6993693f178b1a59e5eb07f1a3db484d5e5de61a | [
"Apache-2.0"
] | 1,285 | 2020-07-11T11:18:57.000Z | 2022-03-31T08:41:17.000Z | configs/_base_/schedules/sgd_tsm_50e.py | rlleshi/mmaction2 | 6993693f178b1a59e5eb07f1a3db484d5e5de61a | [
"Apache-2.0"
] | 557 | 2020-07-11T09:51:57.000Z | 2022-03-31T13:21:35.000Z | # optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
| 27.769231 | 65 | 0.703601 |
98d402ba17d3081fac736a0afeb2f1c2c5786345 | 834 | py | Python | dosed/functions/__init__.py | BadrYoubiIdrissi/dosed | d4cefc11f85b0fca4c8dd947331dfa7b63ac9e26 | [
"MIT"
] | 40 | 2019-04-12T16:18:40.000Z | 2022-03-02T07:03:58.000Z | dosed/functions/__init__.py | BadrYoubiIdrissi/dosed | d4cefc11f85b0fca4c8dd947331dfa7b63ac9e26 | [
"MIT"
] | 5 | 2019-10-28T14:19:20.000Z | 2021-08-19T07:43:41.000Z | dosed/functions/__init__.py | BadrYoubiIdrissi/dosed | d4cefc11f85b0fca4c8dd947331dfa7b63ac9e26 | [
"MIT"
] | 15 | 2019-04-13T22:14:42.000Z | 2022-02-27T14:57:38.000Z | from .simple_loss import DOSEDSimpleLoss
from .worst_negative_mining_loss import DOSEDWorstNegativeMiningLoss
from .random_negative_mining_loss import DOSEDRandomNegativeMiningLoss
from .focal_loss import DOSEDFocalLoss
from .detection import Detection
from .metrics import precision_function, recall_function, f1_function
from .compute_metrics_dataset import compute_metrics_dataset
loss_functions = {
"simple": DOSEDSimpleLoss,
"worst_negative_mining": DOSEDWorstNegativeMiningLoss,
"focal": DOSEDFocalLoss,
"random_negative_mining": DOSEDRandomNegativeMiningLoss,
}
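# Keys above are looked up by config string, e.g. loss_functions["focal"]; the
# constructor arguments depend on the individual loss classes (illustrative).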
available_score_functions = {
"precision": precision_function(),
"recall": recall_function(),
"f1": f1_function(),
}
__all__ = [
"loss_functions"
"Detection",
"available_score_functions",
"compute_metrics_dataset",
]
| 28.758621 | 70 | 0.794964 |
d50267047f986a5401f502f081e64446cd1783f4 | 3,638 | py | Python | PyMysqlPool/mysql/connector/django/schema.py | prezi/python-mysql-pool | 7a5054eeac89bf2ee05f4fd8deaa2e416d5ed9c1 | [
"MIT"
] | 90 | 2017-08-22T07:50:58.000Z | 2022-03-11T07:28:59.000Z | PyMysqlPool/mysql/connector/django/schema.py | prezi/python-mysql-pool | 7a5054eeac89bf2ee05f4fd8deaa2e416d5ed9c1 | [
"MIT"
] | 8 | 2017-09-22T03:55:31.000Z | 2022-02-20T11:06:56.000Z | PyMysqlPool/mysql/connector/django/schema.py | prezi/python-mysql-pool | 7a5054eeac89bf2ee05f4fd8deaa2e416d5ed9c1 | [
"MIT"
] | 26 | 2017-08-23T05:37:35.000Z | 2021-12-27T06:54:38.000Z | # MySQL Connector/Python - MySQL driver written in Python.
# New file added for Django 1.7
import django
if django.VERSION >= (1, 8):
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
else:
from django.db.backends.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s " \
"%(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN " \
"KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s " \
"PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
from PyMysqlPool.mysql.connector.conversion import MySQLConverter
return MySQLConverter.quote(MySQLConverter.escape(value))
def skip_default(self, field):
"""
MySQL doesn't accept default values for longtext and longblob
and implicitly treats these columns as nullable.
"""
return field.db_type(self.connection) in ('longtext', 'longblob')
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
if (self.skip_default(field)
and field.default not in (None, NOT_PROVIDED)):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _model_indexes_sql(self, model):
# New in Django 1.8
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
if storage == "InnoDB":
for field in model._meta.local_fields:
if (field.db_index and not field.unique
and field.get_internal_type() == "ForeignKey"):
# Temporary setting db_index to False (in memory) to
# disable index creation for FKs (index automatically
# created by MySQL)
field.db_index = False
return super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
# New in Django 1.8
# Keep null property of old field, if it has changed, it will be
# handled separately
if old_field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(
table, old_field, new_field, new_type)
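# Editor's sketch (not part of the original file): Django hands this class out
# through the configured backend's schema_editor() context manager rather than
# instantiating it directly. "model" and "field" below are placeholders.
def _example_add_field(model, field):
    """Add a column using whatever schema editor the configured backend provides
    (this DatabaseSchemaEditor when the mysql-connector Django backend is active)."""
    from django.db import connection
    with connection.schema_editor() as editor:
        editor.add_field(model, field)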
| 41.816092 | 78 | 0.643485 |
4d85fe187bfb8c219ebee5337a4f4c548f12946f | 2,469 | py | Python | Validation/RecoTau/python/DQMMCValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Validation/RecoTau/python/DQMMCValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Validation/RecoTau/python/DQMMCValidation_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
from Validation.RecoTau.dataTypes.ValidateTausOnQCD_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnRealData_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnRealElectronsData_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnRealMuonsData_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnZEEFastSim_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnZEE_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnZMM_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnZTTFastSim_cff import *
from Validation.RecoTau.dataTypes.ValidateTausOnZTT_cff import *
from Validation.RecoTau.RecoTauValidation_cff import *
pfTauRunDQMValidation = cms.Sequence(
TauValNumeratorAndDenominatorQCD+
TauValNumeratorAndDenominatorRealData+
TauValNumeratorAndDenominatorRealElectronsData+
TauValNumeratorAndDenominatorRealMuonsData+
TauValNumeratorAndDenominatorZEE+
TauValNumeratorAndDenominatorZMM+
TauValNumeratorAndDenominatorZTT
)
from Configuration.Eras.Modifier_phase1Pixel_cff import phase1Pixel
produceDenoms = cms.Sequence(
produceDenominatorQCD+
produceDenominatorRealData+
produceDenominatorRealElectronsData+
produceDenominatorRealMuonsData+
produceDenominatorZEE+
produceDenominatorZMM+
produceDenominatorZTT
)
runTauEff = cms.Sequence(
efficienciesQCD+
efficienciesQCDSummary+
efficienciesRealData+
efficienciesRealDataSummary+
efficienciesRealElectronsData+
efficienciesRealElectronsDataSummary+
efficienciesRealMuonsData+
efficienciesRealMuonsDataSummary+
efficienciesZEE+
efficienciesZEESummary+
efficienciesZMM+
efficienciesZMMSummary+
efficienciesZTT+
efficienciesZTTSummary+
efficienciesTauValidationMiniAODZTT+
efficienciesTauValidationMiniAODZEE+
efficienciesTauValidationMiniAODZMM+
efficienciesTauValidationMiniAODQCD+
efficienciesTauValidationMiniAODRealData+
efficienciesTauValidationMiniAODRealElectronsData+
efficienciesTauValidationMiniAODRealMuonsData+
normalizePlotsZTT
)
##Full sequences, including normalizations
## TauEfficienciesQCD+
## TauEfficienciesRealData+
## TauEfficienciesRealElectronsData+
## TauEfficienciesRealMuonsData+
## TauEfficienciesZEEFastSim+
## TauEfficienciesZEE+
## TauEfficienciesZMM+
## TauEfficienciesZTTFastSim+
## TauEfficienciesZTT
makeBetterPlots = cms.Sequence() #Not used anymore/by now
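# Editor's sketch (not part of the original cfi): in a top-level configuration
# these sequences would typically be scheduled on a Path of the Process, roughly:
#
#     process = cms.Process("TAUVALIDATION")
#     process.load("Validation.RecoTau.DQMMCValidation_cfi")
#     process.tauValidation = cms.Path(process.produceDenoms + process.pfTauRunDQMValidation + process.runTauEff)
#
# The surrounding process setup (source, DQM output modules) is omitted and assumed.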
| 34.774648 | 78 | 0.836371 |
624271c1c66c579f75be392905348ba2fc53eba6 | 4,279 | py | Python | dreamplace/ops/abacus_legalize/abacus_legalize.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | ["BSD-3-Clause"] | 323 | 2019-02-28T10:09:53.000Z | 2022-03-24T04:00:01.000Z | dreamplace/ops/abacus_legalize/abacus_legalize.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | ["BSD-3-Clause"] | 61 | 2019-06-10T08:47:09.000Z | 2022-03-31T13:38:18.000Z | dreamplace/ops/abacus_legalize/abacus_legalize.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | ["BSD-3-Clause"] | 109 | 2019-03-22T17:32:16.000Z | 2022-03-26T14:31:05.000Z |
##
# @file abacus_legalize.py
# @author Yibo Lin
# @date Jun 2018
#
import math
import torch
from torch import nn
from torch.autograd import Function
import dreamplace.ops.abacus_legalize.abacus_legalize_cpp as abacus_legalize_cpp
class AbacusLegalizeFunction(Function):
""" Legalize cells with abacus approach
"""
@staticmethod
def forward(
init_pos,
pos,
node_size_x,
node_size_y,
node_weights,
flat_region_boxes,
flat_region_boxes_start,
node2fence_region_map,
xl,
yl,
xh,
yh,
site_width,
row_height,
num_bins_x,
num_bins_y,
num_movable_nodes,
num_terminal_NIs,
num_filler_nodes
):
if pos.is_cuda:
output = abacus_legalize_cpp.forward(
init_pos.view(init_pos.numel()).cpu(),
pos.view(pos.numel()).cpu(),
node_size_x.cpu(),
node_size_y.cpu(),
node_weights.cpu(),
flat_region_boxes.cpu(),
flat_region_boxes_start.cpu(),
node2fence_region_map.cpu(),
xl,
yl,
xh,
yh,
site_width,
row_height,
num_bins_x,
num_bins_y,
num_movable_nodes,
num_terminal_NIs,
num_filler_nodes
).cuda()
else:
output = abacus_legalize_cpp.forward(
init_pos.view(init_pos.numel()),
pos.view(pos.numel()),
node_size_x,
node_size_y,
node_weights,
flat_region_boxes,
flat_region_boxes_start,
node2fence_region_map,
xl,
yl,
xh,
yh,
site_width,
row_height,
num_bins_x,
num_bins_y,
num_movable_nodes,
num_terminal_NIs,
num_filler_nodes
)
return output
class AbacusLegalize(object):
""" Legalize cells with abacus approach
"""
def __init__(self, node_size_x, node_size_y, node_weights,
flat_region_boxes, flat_region_boxes_start, node2fence_region_map,
xl, yl, xh, yh, site_width, row_height, num_bins_x, num_bins_y, num_movable_nodes, num_terminal_NIs, num_filler_nodes):
super(AbacusLegalize, self).__init__()
self.node_size_x = node_size_x
self.node_size_y = node_size_y
self.node_weights = node_weights
self.flat_region_boxes = flat_region_boxes
self.flat_region_boxes_start = flat_region_boxes_start
self.node2fence_region_map = node2fence_region_map
self.xl = xl
self.yl = yl
self.xh = xh
self.yh = yh
self.site_width = site_width
self.row_height = row_height
self.num_bins_x = num_bins_x
self.num_bins_y = num_bins_y
self.num_movable_nodes = num_movable_nodes
self.num_terminal_NIs = num_terminal_NIs
self.num_filler_nodes = num_filler_nodes
def __call__(self, init_pos, pos):
"""
@param init_pos the reference position for displacement minization
@param pos current roughly legal position
"""
return AbacusLegalizeFunction.forward(
init_pos,
pos,
node_size_x=self.node_size_x,
node_size_y=self.node_size_y,
node_weights=self.node_weights,
flat_region_boxes=self.flat_region_boxes,
flat_region_boxes_start=self.flat_region_boxes_start,
node2fence_region_map=self.node2fence_region_map,
xl=self.xl,
yl=self.yl,
xh=self.xh,
yh=self.yh,
site_width=self.site_width,
row_height=self.row_height,
num_bins_x=self.num_bins_x,
num_bins_y=self.num_bins_y,
num_movable_nodes=self.num_movable_nodes,
num_terminal_NIs=self.num_terminal_NIs,
num_filler_nodes=self.num_filler_nodes,
)
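# Editor's sketch (not part of the original file): typical construction and use
# of the legalizer. All concrete values below are placeholders; in DREAMPlace
# the tensors and scalars come from the placement database / parameter objects.
#
#     legalizer = AbacusLegalize(
#         node_size_x=node_size_x, node_size_y=node_size_y, node_weights=node_weights,
#         flat_region_boxes=flat_region_boxes, flat_region_boxes_start=flat_region_boxes_start,
#         node2fence_region_map=node2fence_region_map,
#         xl=0.0, yl=0.0, xh=1000.0, yh=1000.0, site_width=1.0, row_height=10.0,
#         num_bins_x=1, num_bins_y=64, num_movable_nodes=num_movable_nodes,
#         num_terminal_NIs=0, num_filler_nodes=0)
#     legal_pos = legalizer(init_pos, pos)  # pos: flattened x/y coordinate tensor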
| 30.784173 | 136 | 0.566955 |
1604c94c2d9b007be75e9c0769e75d68d221012d | 17,122 | py | Python | tests/unit/orchestrate/flow/flow-construct/test_flow.py | hantwain/jina | 55d900d42f17211f3d6c7792fd36b937e4f1ef44 | ["Apache-2.0"] | 1 | 2022-03-04T01:53:51.000Z | 2022-03-04T01:53:51.000Z | tests/unit/orchestrate/flow/flow-construct/test_flow.py | hantwain/jina | 55d900d42f17211f3d6c7792fd36b937e4f1ef44 | ["Apache-2.0"] | null | null | null | tests/unit/orchestrate/flow/flow-construct/test_flow.py | hantwain/jina | 55d900d42f17211f3d6c7792fd36b937e4f1ef44 | ["Apache-2.0"] | null | null | null |
import datetime
import inspect
import json
import os
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray, Executor, requests, __windows__
from jina.enums import FlowBuildLevel
from jina.excepts import RuntimeFailToStart
from jina.serve.executors import BaseExecutor
from jina.helper import random_identity
from jina.orchestrate.deployments import BaseDeployment
from docarray.document.generators import from_ndarray
from jina.types.request.data import Response
from tests import random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.slow
def test_flow_with_jump(tmpdir):
f = (
Flow()
.add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8'])
)
with f:
_validate_flow(f)
f.save_config(os.path.join(str(tmpdir), 'tmp.yml'))
Flow.load_config(os.path.join(str(tmpdir), 'tmp.yml'))
with Flow.load_config(os.path.join(str(tmpdir), 'tmp.yml')) as f:
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_simple_flow(protocol):
bytes_gen = (Document() for _ in range(10))
def bytes_fn():
for _ in range(100):
yield Document()
f = Flow(protocol=protocol).add(name='executor0')
with f:
f.index(inputs=bytes_gen)
with f:
f.index(inputs=bytes_fn)
with f:
f.index(inputs=bytes_fn)
f.index(inputs=bytes_fn)
_validate_flow(f)
assert 'gateway' not in f
@pytest.mark.slow
def test_flow_identical(tmpdir):
with open(os.path.join(cur_dir, '../../../yaml/test-flow.yml')) as fp:
a = Flow.load_config(fp)
b = (
Flow()
.add(name='chunk_seg', shards=3)
.add(name='wqncode1', shards=2)
.add(name='encode2', shards=2, needs='chunk_seg')
.join(['wqncode1', 'encode2'])
)
a.save_config(os.path.join(str(tmpdir), 'test2.yml'))
c = Flow.load_config(os.path.join(str(tmpdir), 'test2.yml'))
assert a == b
assert a == c
with a as f:
_validate_flow(f)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_no_container(protocol):
f = Flow(protocol=protocol).add(
name='dummyEncoder',
uses=os.path.join(cur_dir, 'mwu-encoder/mwu_encoder.yml'),
)
with f:
f.index(inputs=random_docs(10))
@pytest.fixture
def docpb_workspace(tmpdir):
os.environ['TEST_DOCSHARD_WORKSPACE'] = str(tmpdir)
yield
del os.environ['TEST_DOCSHARD_WORKSPACE']
@pytest.mark.slow
def test_py_client():
f = (
Flow()
.add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8'])
)
with f:
_validate_flow(f)
def test_dry_run_with_two_pathways_diverging_at_gateway():
f = Flow().add(name='r2').add(name='r3', needs='gateway').join(['r2', 'r3'])
with f:
_validate_flow(f)
def test_dry_run_with_two_pathways_diverging_at_non_gateway():
f = (
Flow()
.add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.join(['r2', 'r3'])
)
with f:
_validate_flow(f)
def test_refactor_num_part():
f = (
Flow()
.add(name='r1', needs='gateway')
.add(name='r2', needs='gateway')
.join(['r1', 'r2'])
)
with f:
_validate_flow(f)
def test_refactor_num_part_proxy():
f = (
Flow()
.add(name='r1')
.add(name='r2', needs='r1')
.add(name='r3', needs='r1')
.join(['r2', 'r3'])
)
with f:
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_refactor_num_part_proxy_2(protocol):
f = (
Flow(protocol=protocol)
.add(name='r1')
.add(name='r2', needs='r1', shards=2)
.add(name='r3', needs='r1', shards=3, polling='ALL')
.needs(['r2', 'r3'])
)
with f:
f.index([Document(text='abbcs'), Document(text='efgh')])
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_refactor_num_part_2(protocol):
f = Flow(protocol=protocol).add(name='r1', needs='gateway', shards=3, polling='ALL')
with f:
f.index([Document(text='abbcs'), Document(text='efgh')])
f = Flow(protocol=protocol).add(name='r1', needs='gateway', shards=3)
with f:
f.index([Document(text='abbcs'), Document(text='efgh')])
@pytest.fixture()
def datauri_workspace(tmpdir):
os.environ['TEST_DATAURIINDEX_WORKSPACE'] = str(tmpdir)
yield
del os.environ['TEST_DATAURIINDEX_WORKSPACE']
class DummyOneHotTextEncoder(Executor):
@requests
def foo(self, docs, **kwargs):
for d in docs:
d.embedding = np.array([1, 2, 3])
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_with_publish_driver(protocol):
def validate(da):
for d in da:
assert d.embedding is not None
f = (
Flow(protocol=protocol)
.add(name='r2', uses=DummyOneHotTextEncoder)
.add(name='r3', uses=DummyOneHotTextEncoder, needs='gateway')
.join(needs=['r2', 'r3'])
)
with f:
da = f.index([Document(text='text_1'), Document(text='text_2')])
_validate_flow(f)
validate(da)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_arbitrary_needs(protocol):
f = (
Flow(protocol=protocol)
.add(name='p1')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.add(name='p4', needs='gateway')
.add(name='p5', needs='gateway')
.needs(['p2', 'p4'], name='r1')
.needs(['p3', 'p5'], name='r2')
.needs(['p1', 'r1'], name='r3')
.needs(['r2', 'r3'], name='r4')
)
with f:
f.index([Document(text='abbcs'), Document(text='efgh')])
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_flow_needs_all(protocol):
f = Flow(protocol=protocol).add(name='p1', needs='gateway').needs_all(name='r1')
assert f._deployment_nodes['r1'].needs == {'p1'}
f = (
Flow(protocol=protocol)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
)
assert f._deployment_nodes['r2'].needs == {'p3', 'r1'}
with f:
f.index(from_ndarray(np.random.random([10, 10])))
f = (
Flow(protocol=protocol)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
.add(name='p4', needs='r2')
)
assert f._deployment_nodes['r2'].needs == {'p3', 'r1'}
assert f._deployment_nodes['p4'].needs == {'r2'}
with f:
f.index(from_ndarray(np.random.random([10, 10])))
_validate_flow(f)
class EnvChecker1(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
class EnvChecker2(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert 'key1' not in os.environ
assert 'key2' not in os.environ
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_flow_with_pod_envs():
f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-with-envs.yml'))
with f:
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
@pytest.mark.parametrize('on_done', [None, lambda x: x])
def test_return_results_sync_flow(protocol, on_done):
with Flow(protocol=protocol).add() as f:
da = f.index(
from_ndarray(np.random.random([10, 2])),
on_done=on_done,
)
if on_done is None:
assert isinstance(da, DocumentArray)
assert len(da) == 10
for doc in da:
assert isinstance(doc, Document)
else:
assert da is None
_validate_flow(f)
@pytest.mark.parametrize(
'input, expect_host, expect_port',
[
('0.0.0.0', '0.0.0.0', None),
('0.0.0.0:12345', '0.0.0.0', 12345),
('123.124.125.0:45678', '123.124.125.0', 45678),
('api.jina.ai:45678', 'api.jina.ai', 45678),
],
)
def test_flow_host_expose_shortcut(input, expect_host, expect_port):
f = Flow().add(host=input).build()
assert f['executor0'].args.host == expect_host
if expect_port is not None:
assert f['executor0'].args.port_jinad == expect_port
def test_flow_workspace_id():
f = Flow().add().add().add().build()
assert len(f.workspace_id) == 4
assert len(set(f.workspace_id.values())) == 4
with pytest.raises(ValueError):
f.workspace_id = 'hello'
new_id = random_identity()
f.workspace_id = new_id
assert len(set(f.workspace_id.values())) == 1
assert list(f.workspace_id.values())[0] == new_id
@pytest.mark.slow
def test_bad_pod_graceful_termination():
def asset_bad_flow(f):
with pytest.raises(RuntimeFailToStart):
with f:
assert f._build_level == FlowBuildLevel.EMPTY
# bad remote pod
asset_bad_flow(Flow().add(name='exec1', host='hello-there'))
# bad local pod
asset_bad_flow(Flow().add(name='exec2', uses='hello-there'))
# bad local pod at second
asset_bad_flow(Flow().add().add(name='exec3', uses='hello-there'))
# bad remote pod at second
asset_bad_flow(Flow().add().add(name='exec4', host='hello-there'))
# bad local pod at second, with correct pod at last
asset_bad_flow(Flow().add().add(name='exec5', uses='hello-there').add())
# bad remote pod at second, with correct pod at last
asset_bad_flow(Flow().add().add(name='exec6', host='hello-there').add())
def test_socket_types_2_remote_one_local():
f = (
Flow()
.add(name='executor1', host='0.0.0.1')
.add(name='executor2', shards=2, host='0.0.0.2')
.add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])
.join(name='join', needs=['executor2', 'executor3'])
)
f.build()
_validate_flow(f)
def test_socket_types_2_remote_one_local_input_socket_pull_connect_from_remote():
f = (
Flow()
.add(name='executor1', host='0.0.0.1')
.add(name='executor2', shards=2, host='0.0.0.2')
.add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])
.join(name='join', needs=['executor2', 'executor3'])
)
f.build()
_validate_flow(f)
def test_single_document_flow_index():
d = Document()
with Flow().add() as f:
f.index(d)
f.index(lambda: d)
def test_flow_equalities():
f1 = (
Flow()
.add(name='executor0')
.add(name='executor1', needs='gateway')
.needs_all(name='joiner')
)
f2 = (
Flow()
.add(name='executor0')
.add(name='executor1', needs='gateway')
.add(name='joiner', needs=['executor0', 'executor1'])
)
assert f1 == f2
f2 = f2.add(name='executor0')
assert f1 != f2
def test_flow_get_item():
f1 = Flow().add().add(needs='gateway').needs_all(name='joiner')
assert isinstance(f1[1], BaseDeployment)
assert isinstance(f1['executor0'], BaseDeployment)
class CustomizedExecutor(BaseExecutor):
pass
def test_flow_add_class():
f = Flow().add(uses=BaseExecutor).add(uses=CustomizedExecutor)
with f:
pass
@pytest.mark.slow
def test_flow_allinone_yaml():
f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-allinone.yml'))
with f:
pass
f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-allinone-oldstyle.yml'))
with f:
pass
class MyExec(Executor):
@requests
def foo(self, parameters, **kwargs):
assert parameters['hello'] == 'world'
def test_flow_empty_data_request(mocker):
f = Flow().add(uses=MyExec)
mock = mocker.Mock()
with f:
f.post('/hello', parameters={'hello': 'world'}, on_done=mock)
mock.assert_called()
def test_flow_common_kwargs():
with Flow(name='hello', something_random=True).add() as f:
assert f._common_kwargs == {'something_random': True}
@pytest.mark.parametrize('is_async', [True, False])
def test_flow_set_asyncio_switch_post(is_async):
f = Flow(asyncio=is_async)
assert inspect.isasyncgenfunction(f.post) == is_async
@pytest.mark.skipif(__windows__, reason='timing comparison is broken for 2nd Flow')
def test_flow_routes_list():
def _time(time: str):
return datetime.datetime.strptime(time, '%Y-%m-%dT%H:%M:%S.%fZ')
def my_cb_one(resp: Response):
gateway_entry, pod1_entry = json.loads(resp.json())['routes']
assert gateway_entry['executor'] == 'gateway'
assert pod1_entry['executor'].startswith('executor1')
assert (
_time(gateway_entry['end_time'])
> _time(pod1_entry['end_time'])
> _time(pod1_entry['start_time'])
> _time(gateway_entry['start_time'])
)
def my_cb_two(resp: Response):
routes = json.loads(resp.json())['routes']
gateway_entry, *pods = routes
(
a1_entry,
a2_entry,
b1_entry,
gateway_entry,
merge_entry,
) = _extract_route_entries(gateway_entry, routes)
assert gateway_entry['executor'] == 'gateway'
assert a1_entry['executor'].startswith('a1')
assert a2_entry['executor'].startswith('a2')
assert b1_entry['executor'].startswith('b1')
assert merge_entry['executor'].startswith('merge')
assert (
_time(gateway_entry['end_time'])
> _time(merge_entry['end_time'])
> _time(merge_entry['start_time'])
> _time(a2_entry['end_time'])
> _time(a2_entry['start_time'])
> _time(a1_entry['start_time'])
> _time(gateway_entry['start_time'])
)
with Flow().add(name='executor1') as simple_flow:
simple_flow.index(inputs=Document(), on_done=my_cb_one)
with Flow().add(name='a1').add(name='a2').add(name='b1', needs='gateway').add(
name='merge', needs=['a2', 'b1']
) as shards_flow:
shards_flow.index(inputs=Document(), on_done=my_cb_two)
def _extract_route_entries(gateway_entry, routes):
for route in routes:
if route['executor'] == 'gateway':
gateway_entry = route
elif route['executor'] == 'a1':
a1_entry = route
elif route['executor'] == 'a2':
a2_entry = route
elif route['executor'] == 'b1':
b1_entry = route
elif route['executor'] == 'merge':
merge_entry = route
return a1_entry, a2_entry, b1_entry, gateway_entry, merge_entry
def test_flow_load_executor_yaml_extra_search_paths():
f = Flow(extra_search_paths=[os.path.join(cur_dir, 'executor')]).add(
uses='config.yml'
)
with f:
da = f.post('/', inputs=Document())
assert da[0].text == 'done'
def test_flow_load_yaml_extra_search_paths():
f = Flow.load_config(os.path.join(cur_dir, 'flow/flow.yml'))
with f:
da = f.post('/', inputs=Document())
assert da[0].text == 'done'
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_gateway_only_flows_no_error(capsys, protocol):
f = Flow(protocol=protocol)
with f:
pass
captured = capsys.readouterr()
assert not captured.err
def _validate_flow(f):
graph_dict = f._get_graph_representation()
addresses = f._get_deployments_addresses()
for name, pod in f:
if name != 'gateway':
assert (
addresses[name][0] == f'{pod.protocol}://{pod.host}:{pod.head_port_in}'
)
for n in pod.needs:
assert name in graph_dict[n if n != 'gateway' else 'start-gateway']
else:
for n in pod.needs:
assert 'end-gateway' in graph_dict[n]
| 27.571659 | 88 | 0.600689 |
49d2d83cdf2b4c68836dd98dadbdad03e62cbf59 | 3,297 | py | Python | Chapter05/Exercise5.03/business_site/business_site/settings.py | PacktPublishing/Web-Development-with-Django-Second-Edition | a9c3d8e46176af612e3b8fe7bc2a2a8effafe981 | ["MIT"] | 2 | 2022-01-03T22:17:21.000Z | 2022-03-04T13:32:36.000Z | Chapter05/Exercise5.03/business_site/business_site/settings.py | PacktPublishing/Web-Development-with-Django-Second-Edition | a9c3d8e46176af612e3b8fe7bc2a2a8effafe981 | ["MIT"] | null | null | null | Chapter05/Exercise5.03/business_site/business_site/settings.py | PacktPublishing/Web-Development-with-Django-Second-Edition | a9c3d8e46176af612e3b8fe7bc2a2a8effafe981 | ["MIT"] | 1 | 2022-02-25T13:53:37.000Z | 2022-02-25T13:53:37.000Z |
"""
Django settings for business_site project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-h2^@3hx1&z3!(_lyilhy#kt^k!3u=5r)enbjdk(^r!r2vs0$7q"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"landing",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "business_site.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "business_site.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
STATICFILES_DIRS = [BASE_DIR / "static"]
| 25.96063 | 91 | 0.702153 |
9d6e379632fa215e6ab47cae340b9082a9af4c22 | 929 | py | Python | apps/beeswax/src/beeswax/settings.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | ["Apache-2.0"] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | apps/beeswax/src/beeswax/settings.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | ["Apache-2.0"] | null | null | null | apps/beeswax/src/beeswax/settings.py | t3hi3x/hue | 36d71c1a8dd978b899ef2dc3eef8887b68fd99a8 | ["Apache-2.0"] | 5 | 2019-06-29T03:13:02.000Z | 2020-04-23T04:47:11.000Z |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = [ "beeswax" ]
NICE_NAME = "Hive Editor"
REQUIRES_HADOOP = True
ICON = "beeswax/art/icon_beeswax_48.png"
MENU_INDEX = 10
IS_URL_NAMESPACED = True
| 40.391304 | 74 | 0.768568 |
7be0fc4be1716a56a72529b56d91ce3ad732338c | 4,205 | py | Python | research/syntaxnet/dragnn/python/evaluation_test.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | ["Apache-2.0"] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | research/syntaxnet/dragnn/python/evaluation_test.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | ["Apache-2.0"] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | research/syntaxnet/dragnn/python/evaluation_test.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | ["Apache-2.0"] | 1,474 | 2018-02-01T04:33:18.000Z | 2022-03-08T07:02:20.000Z |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser evaluation."""
import tensorflow as tf
from dragnn.python import evaluation
from syntaxnet import sentence_pb2
class EvaluationTest(tf.test.TestCase):
def _add_sentence(self, tags, heads, labels, corpus):
"""Adds a sentence to the corpus."""
sentence = sentence_pb2.Sentence()
for tag, head, label in zip(tags, heads, labels):
sentence.token.add(word='x', start=0, end=0,
tag=tag, head=head, label=label)
corpus.append(sentence.SerializeToString())
def setUp(self):
self._gold_corpus = []
self._test_corpus = []
# A correct sentence.
self._add_sentence(['DT'], [-1], ['ROOT'], self._gold_corpus)
self._add_sentence(['DT'], [-1], ['ROOT'], self._test_corpus)
# An incorrect sentence. There is one POS mistake, two head mistakes, and
# one label mistake. NB: Since the label mistake occurs on the one token
# with a correct head, this sentence has three mistakes w.r.t. LAS.
self._add_sentence(['DT', 'JJ', 'NN'], [2, 2, -1], ['det', 'amod', 'ROOT'],
self._gold_corpus)
self._add_sentence(['xx', 'JJ', 'NN'], [1, 0, -1], ['det', 'amod', 'xxxx'],
self._test_corpus)
def testCalculateParseMetrics(self):
pos, uas, las = evaluation.calculate_parse_metrics(self._gold_corpus,
self._test_corpus)
self.assertEqual(75, pos)
self.assertEqual(50, uas)
self.assertEqual(25, las)
def testCalculateSegmentationMetrics(self):
self._gold_corpus = []
self._test_corpus = []
def add_sentence_for_segment_eval(starts, ends, corpus):
"""Adds a sentence to the corpus."""
sentence = sentence_pb2.Sentence()
for start, end in zip(starts, ends):
sentence.token.add(word='x', start=start, end=end)
corpus.append(sentence.SerializeToString())
# A test case with 5 gold words, 4 test words and 3 are correct.
# -gold tokens: 'This is a gold sentence'
# -test tokens: 'Thisis a gold sentence'
add_sentence_for_segment_eval(
[0, 5, 8, 10, 15], [3, 6, 8, 13, 22], self._gold_corpus)
add_sentence_for_segment_eval(
[0, 8, 10, 15], [6, 8, 13, 22], self._test_corpus)
# Another test case with 3 gold words, 5 test words and 2 correct words.
# -gold tokens: 'another gold sentence'
# -test tokens: 'another gold sen tence'
add_sentence_for_segment_eval([0, 8, 13], [6, 11, 20], self._gold_corpus)
add_sentence_for_segment_eval([0, 8, 13, 17, 21], [6, 11, 15, 19, 22],
self._test_corpus)
prec, rec, f1 = evaluation.calculate_segmentation_metrics(self._gold_corpus,
self._test_corpus)
self.assertEqual(55.56, prec)
self.assertEqual(62.50, rec)
self.assertEqual(58.82, f1)
summaries = evaluation.segmentation_summaries(self._gold_corpus,
self._test_corpus)
self.assertEqual({
'precision': 55.56,
'recall': 62.50,
'f1': 58.82,
'eval_metric': 58.82
}, summaries)
def testParserSummaries(self):
summaries = evaluation.parser_summaries(self._gold_corpus,
self._test_corpus)
self.assertEqual({
'POS': 75,
'UAS': 50,
'LAS': 25,
'eval_metric': 25 # equals LAS
}, summaries)
if __name__ == '__main__':
tf.test.main()
| 38.577982 | 80 | 0.613793 |
ebdcf61635a3bedc98c21a8e7c9bc38bbdc0665a | 2,893 | py | Python | scripts/loading/allele/update_allele_type.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | scripts/loading/allele/update_allele_type.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | scripts/loading/allele/update_allele_type.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | 7 | 2018-05-13T01:58:07.000Z | 2021-06-25T19:08:33.000Z |
import logging
import uuid
import pytest
import sdk_install
import sdk_jobs
from tests import config
no_strict_for_azure = pytest.mark.skipif(os.environ.get("SECURITY") == "strict",
reason="backup/restore doesn't work in strict as user needs to be root")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
test_jobs = []
try:
test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
# destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
for job in test_jobs:
sdk_jobs.install_job(job)
sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
# user=root because Azure CLI needs to run in root...
# We don't run the Azure tests in strict however, so don't set it then.
if os.environ.get("SECURITY") == "strict":
additional_options={"service": { "name": config.get_foldered_service_name() } }
else:
additional_options={"service": { "name": config.get_foldered_service_name(), "user": "root" } }
sdk_install.install(
config.PACKAGE_NAME,
config.get_foldered_service_name(),
config.DEFAULT_TASK_COUNT,
additional_options=additional_options)
yield # let the test session execute
finally:
return
# To disable these tests in local runs where you may lack the necessary credentials,
# use e.g. "TEST_TYPES=sanity and not aws and not azure":
@pytest.mark.azure
@no_strict_for_azure
@pytest.mark.sanity
def test_backup_and_restore_to_azure():
client_id = os.getenv('AZURE_CLIENT_ID')
if not client_id:
assert False, 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'
plan_parameters = {
'CLIENT_ID': client_id,
'CLIENT_SECRET': os.getenv('AZURE_CLIENT_SECRET'),
'TENANT_ID': os.getenv('AZURE_TENANT_ID'),
'AZURE_STORAGE_ACCOUNT': os.getenv('AZURE_STORAGE_ACCOUNT'),
'AZURE_STORAGE_KEY': os.getenv('AZURE_STORAGE_KEY'),
'CONTAINER_NAME': os.getenv('CONTAINER_NAME', 'cassandra-test'),
'SNAPSHOT_NAME': str(uuid.uuid1()),
'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
}
config.run_backup_and_restore(
config.get_foldered_service_name(),
'backup-azure',
'restore-azure',
plan_parameters,
config.get_foldered_node_address())
@pytest.mark.aws
@pytest.mark.sanity
def test_backup_and_restore_to_s3():
key_id = os.getenv('AWS_ACCESS_KEY_ID')
if not key_id:
assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
plan_parameters = {
'AWS_ACCESS_KEY_ID': key_id,
'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME', 'infinity-framework-test'),
'SNAPSHOT_NAME': str(uuid.uuid1()),
'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
}
config.run_backup_and_restore(
config.get_foldered_service_name(),
'backup-s3',
'restore-s3',
plan_parameters,
config.get_foldered_node_address())
@pytest.mark.sanity
def test_bkp_restore_uninstall():
sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name()) | 37.623656 | 126 | 0.687339 |
b8a647392a1a5258203b1801c4678f339cd07bdf | 7,549 | py | Python | conda_build/cli/main_render.py | isuruf/conda-build | 9f163925f5d03a46e921162892bf4c6bc86b1072 | [
"BSD-3-Clause"
] | null | null | null | conda_build/cli/main_render.py | isuruf/conda-build | 9f163925f5d03a46e921162892bf4c6bc86b1072 | [
"BSD-3-Clause"
] | null | null | null | conda_build/cli/main_render.py | isuruf/conda-build | 9f163925f5d03a46e921162892bf4c6bc86b1072 | [
"BSD-3-Clause"
] | null | null | null | # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
import argparse
import logging
import sys
from pprint import pprint
import yaml
from yaml.parser import ParserError
from conda_build.conda_interface import (ArgumentParser, add_parser_channels,
cc_conda_build)
from conda_build import __version__, api
from conda_build.config import get_or_merge_config, get_channel_urls
from conda_build.variants import get_package_variants, set_language_env_vars
from conda_build.utils import LoggingContext
on_win = (sys.platform == 'win32')
# see: https://stackoverflow.com/questions/29986185/python-argparse-dict-arg
class ParseYAMLArgument(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if len(values) != 1:
raise RuntimeError("This switch requires exactly one argument")
try:
my_dict = yaml.load(values[0], Loader=yaml.BaseLoader)
if not isinstance(my_dict, dict):
raise RuntimeError("The argument of {} is not a YAML dictionary.".format(option_string))
setattr(namespace, self.dest, my_dict)
except ParserError as e:
raise RuntimeError('The argument of {} is not a valid YAML. The parser error was: \n\n{}'.format(option_string, str(e)))
def get_render_parser():
p = ArgumentParser(
description="""
Tool for building conda packages. A conda package is a binary tarball
containing system-level libraries, Python modules, executable programs, or
other components. conda keeps track of dependencies between packages and
platform specifics, making it simple to create working environments from
different sets of packages.""",
conflict_handler='resolve'
)
p.add_argument(
'-V', '--version',
action='version',
help='Show the conda-build version number and exit.',
version='conda-build %s' % __version__,
)
p.add_argument(
'-n', "--no-source",
action="store_true",
help="When templating can't be completed, do not obtain the \
source to try fill in related template variables.",
)
p.add_argument(
"--output",
action="store_true",
help="Output the conda package filename which would have been "
"created",
)
p.add_argument(
'--python',
action="append",
help="Set the Python version used by conda build.",
)
p.add_argument(
'--perl',
action="append",
help="Set the Perl version used by conda build.",
)
p.add_argument(
'--numpy',
action="append",
help="Set the NumPy version used by conda build.",
)
p.add_argument(
'--R',
action="append",
help="""Set the R version used by conda build.""",
dest="r_base"
)
p.add_argument(
'--lua',
action="append",
help="Set the Lua version used by conda build.",
)
p.add_argument(
'--bootstrap',
help="""Provide initial configuration in addition to recipe.
Can be a path to or name of an environment, which will be emulated
in the package.""",
)
p.add_argument(
'--append-file',
help="""Append data in meta.yaml with fields from this file. Jinja2 is not done
on appended fields""",
dest='append_sections_file',
)
p.add_argument(
'--clobber-file',
help="""Clobber data in meta.yaml with fields from this file. Jinja2 is not done
on clobbered fields.""",
dest='clobber_sections_file',
)
p.add_argument(
'-m', '--variant-config-files',
action="append",
help="""Additional variant config files to add. These yaml files can contain
keys such as `c_compiler` and `target_platform` to form a build matrix."""
)
p.add_argument(
'-e', '--exclusive-config-files', '--exclusive-config-file',
action="append",
help="""Exclusive variant config files to add. Providing files here disables
searching in your home directory and in cwd. The files specified here come at the
start of the order, as opposed to the end with --variant-config-files. Any config
files in recipes and any config files specified with --variant-config-files will
override values from these files."""
)
p.add_argument(
"--old-build-string", dest="filename_hashing", action="store_false",
default=cc_conda_build.get('filename_hashing', 'true').lower() == 'true',
help=("Disable hash additions to filenames to distinguish package "
"variants from one another. NOTE: any filename collisions are "
"yours to handle. Any variants with overlapping names within a "
"build will clobber each other.")
)
p.add_argument('--variants',
nargs=1,
action=ParseYAMLArgument,
help=('Variants to extend the build matrix. Must be a valid YAML instance, '
'such as "{python: [3.6, 3.7]}"'))
add_parser_channels(p)
return p
def parse_args(args):
p = get_render_parser()
p.add_argument(
'-f', '--file',
help="write YAML to file, given as argument here.\
Overwrites existing files."
)
# we do this one separately because we only allow one entry to conda render
p.add_argument(
'recipe',
metavar='RECIPE_PATH',
help="Path to recipe directory.",
)
# this is here because we have a different default than build
p.add_argument(
'--verbose',
action='store_true',
help='Enable verbose output from download tools and progress updates',
)
args, _ = p.parse_known_args(args)
return p, args
def execute(args, print_results=True):
p, args = parse_args(args)
config = get_or_merge_config(None, **args.__dict__)
variants = get_package_variants(args.recipe, config, variants=args.variants)
set_language_env_vars(variants)
config.channel_urls = get_channel_urls(args.__dict__)
config.override_channels = args.override_channels
if args.output:
config.verbose = False
config.debug = False
metadata_tuples = api.render(args.recipe, config=config,
no_download_source=args.no_source,
variants=args.variants)
if print_results:
if args.output:
with LoggingContext(logging.CRITICAL + 1):
paths = api.get_output_file_paths(metadata_tuples, config=config)
print('\n'.join(sorted(paths)))
else:
logging.basicConfig(level=logging.INFO)
for (m, _, _) in metadata_tuples:
print("--------------")
print("Hash contents:")
print("--------------")
pprint(m.get_hash_contents())
print("----------")
print("meta.yaml:")
print("----------")
print(api.output_yaml(m, args.file, suppress_outputs=True))
else:
return metadata_tuples
def main():
return execute(sys.argv[1:])
if __name__ == '__main__':
main()
| 34.47032 | 132 | 0.622069 |
4178b2168ab0b7616ecdb1a10d7aca6d6ae530d1 | 2,893 | py | Python | scripts/loading/allele/update_allele_type.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 5 | 2015-11-24T23:09:46.000Z | 2019-11-06T17:48:13.000Z | scripts/loading/allele/update_allele_type.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 188 | 2017-08-28T22:39:03.000Z | 2022-03-02T14:53:46.000Z | scripts/loading/allele/update_allele_type.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 7 | 2018-05-13T01:58:07.000Z | 2021-06-25T19:08:33.000Z | import logging
import os
from datetime import datetime
import sys
from src.models import Source, So, Dbentity, Alleledbentity, So
from scripts.loading.database_session import get_session
__author__ = 'sweng66'
logging.basicConfig(format='%(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO)
generic_so_term = 'structural_variant'
def load_data(infile):
nex_session = get_session()
log.info(str(datetime.now()))
log.info("Getting data from database...")
so_to_id = dict([(x.term_name, x.so_id) for x in nex_session.query(So).all()])
allele_to_id = dict([(x.display_name.upper(), x.dbentity_id) for x in nex_session.query(Dbentity).filter_by(subclass='ALLELE').all()])
f = open(infile)
count = 0
    allele_id_to_so_id_desc = {}
for line in f:
pieces = line.strip().split("\t")
allele_name = pieces[0]
allele_id = allele_to_id.get(allele_name.upper())
if allele_id is None:
log.info("The allele = " + allele_name + " is not in the database.")
continue
so_term = pieces[1]
so_id = so_to_id.get(so_term)
if so_id is None:
log.info("The so term = " + so_term + " is not in the database.")
continue
allele_desc = pieces[2]
allele_id_to_so_id_desc[allele_id] = (so_id, allele_desc)
## update allele types here
so_id = so_to_id.get(generic_so_term)
if so_id is None:
log.info("The so term: " + generic_so_term + " is not in the database.")
return
all = nex_session.query(Alleledbentity).filter_by(so_id=so_id).all()
count = 0
for x in all:
if x.dbentity_id not in allele_id_to_so_id_desc:
log.info("The allele_id: " + str(x.dbentity_id) + " is not in the mapping file.")
continue
(so_id, desc) = allele_id_to_so_id_desc[x.dbentity_id]
updated = 0
if x.so_id != so_id:
x.so_id = so_id
updated = 1
if x.description != desc:
x.description = desc
updated = 1
if updated == 1:
nex_session.add(x)
count = count + 1
if count > 300:
# nex_session.rollback()
nex_session.commit()
count = 0
# nex_session.rollback()
nex_session.commit()
nex_session.close()
log.info("Done!")
log.info(str(datetime.now()))
if __name__ == "__main__":
infile = None
if len(sys.argv) >= 2:
infile = sys.argv[1]
else:
print("Usage: python scripts/loading/allele/update_allele_type.py allele_to_so_mapping_file_name")
print("Usage example: python scripts/loading/allele/update_allele_type.py scripts/loading/allele/data/allele_to_so_desc_mapping.txt")
exit()
load_data(infile)
| 29.824742 | 141 | 0.618044 |
ddc760f26a27a028bf9f126a7e78e3bff18808ff | 20,461 | py | Python | snpdb/migrations/0008_populate_column_vcf_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | snpdb/migrations/0008_populate_column_vcf_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | snpdb/migrations/0008_populate_column_vcf_info.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | ["RSA-MD"] | null | null | null |
# Generated by Django 3.1 on 2020-11-23 03:31
from django.db import migrations
from django.db.models import F
from library.django_utils import bulk_insert_class_data
def add_tags_global(apps, schema_editor):
VariantGridColumn = apps.get_model("snpdb", "VariantGridColumn")
CustomColumn = apps.get_model("snpdb", "CustomColumn")
tags_global = VariantGridColumn.objects.create(**{
'grid_column_name': 'tags_global',
'variant_column': 'tags_global',
'annotation_level': 'D',
'width': None,
'label': 'Tags in other analyses',
'description': "All Variant Tags",
'model_field': False,
'queryset_field': False
})
# Insert global tags after tags
tags = VariantGridColumn.objects.get(grid_column_name="tags")
for tag_column in CustomColumn.objects.filter(column=tags):
after_columns_qs = CustomColumn.objects.filter(custom_columns_collection=tag_column.custom_columns_collection,
sort_order__gt=tag_column.sort_order)
after_columns_qs.update(sort_order=F("sort_order") + 1)
CustomColumn.objects.create(custom_columns_collection=tag_column.custom_columns_collection,
column=tags_global,
sort_order=tag_column.sort_order + 1)
def populate_column_vcf_info(apps, schema_editor):
COLUMN_VCF_INFO = [
{'info_id': '1KG_AF', 'column_id': 'af_1kg', 'number': 1, 'type': 'F',
'description': 'Estimated allele frequency in the range (0,1), based on AC/AN'},
{'info_id': 'UK10K_AF', 'column_id': 'af_uk10k', 'number': None, 'type': 'F',
'description': 'Allele frequency in called genotypes, from UK10 project'},
{'info_id': 'GENE_BAND', 'column_id': 'band', 'number': None, 'type': 'S', 'description': 'Cytogenetic band'},
{'info_id': 'CADD_phred', 'column_id': 'cadd_phred', 'number': 1, 'type': 'F',
'description': 'Combined Annotation Dependent Depletion raw scores'},
{'info_id': 'CADD_raw', 'column_id': 'cadd_raw', 'number': 1, 'type': 'F',
'description': 'Phred-scaled score of Combined Annotation Dependent Depletion'},
{'info_id': 'GENE_ccds_ids', 'column_id': 'ccds_ids', 'number': None, 'type': 'S',
'description': 'Consensus Coding Sequence Project Ids'},
{'info_id': 'CLNSIG', 'column_id': 'clinical_significance', 'number': None, 'type': 'S',
'description': 'ClinVar Variant Clinical Significance, 0 - Uncertain significance, 1 - not provided, 2 - Benign, 3 - Likely benign, 4 - Likely pathogenic, 5 - Pathogenic, 6 - drug response, 7 - histocompatibility, 255 - other'},
{'info_id': 'ALLELEID', 'column_id': 'clinvar_allele_id', 'number': None, 'type': 'I',
'description': 'the ClinVar Allele ID'},
{'info_id': 'CLNVI', 'column_id': 'clinvar_clinical_sources', 'number': None, 'type': 'S',
'description': "the variant's clinical sources reported as tag-value pairs of database and variant identifier"},
{'info_id': 'CLNDISDB', 'column_id': 'clinvar_disease_database_name', 'number': None, 'type': 'S',
'description': 'Tag-value pairs of disease database name and identifier, e.g. OMIM:NNNNNN'},
{'info_id': 'CLNSIG_MAX', 'column_id': 'clinvar_highest_pathogenicity', 'number': None, 'type': 'S',
'description': 'ClinVar Highest Pathogenicty'},
{'info_id': 'ORIGIN', 'column_id': 'clinvar_origin', 'number': None, 'type': 'S',
'description': 'Allele origin. One or more of the following values may be added: 0 - unknown; 1 - germline; 2 - somatic; 4 - inherited; 8 - paternal; 16 - maternal; 32 - de-novo; 64 - biparental; 128 - uniparental; 256 - not-tested; 512 - tested-inconclusive; 1073741824 - other'},
{'info_id': 'CLNDN', 'column_id': 'clinvar_preferred_disease_name', 'number': None, 'type': 'S',
'description': "ClinVar's preferred disease name for the concept specified by disease identifiers in CLNDISDB"},
{'info_id': 'CLNREVSTAT', 'column_id': 'clinvar_review_status', 'number': None, 'type': 'S',
'description': 'ClinVar review status for the Variation ID'},
{'info_id': 'CLNVID', 'column_id': 'clinvar_variation_id', 'number': None, 'type': 'I',
'description': 'ClinVar Variant Accession'},
{'info_id': 'CONSEQUENCE', 'column_id': 'consequence', 'number': None, 'type': 'S',
'description': 'Molecular Consequence'},
{'info_id': 'COSMIC_CNT', 'column_id': 'cosmic_count', 'number': 1, 'type': 'I',
'description': 'Number of COSMIC samples with mutation'},
{'info_id': 'COSMIC_ID', 'column_id': 'cosmic_id', 'number': None, 'type': 'S', 'description': 'COSMIC_ID'},
{'info_id': 'COSMIC_LEGACY_ID', 'column_id': 'cosmic_legacy_id', 'number': 1, 'type': 'S',
'description': 'COSMIC Legacy ID'},
{'info_id': 'DBSCSNV_ADA', 'column_id': 'dbscsnv_ada_score', 'number': None, 'type': 'F',
'description': 'dbSCSNV Ada Score'},
{'info_id': 'DBSCSNV_RF', 'column_id': 'dbscsnv_rf_score', 'number': None, 'type': 'F',
'description': 'dbSCSNV RF Score'},
{'info_id': 'DBSNP_RS_ID', 'column_id': 'dbsnp_rs_id', 'number': None, 'type': 'S', 'description': 'dbSNP ID'},
{'info_id': 'DOMAINS', 'column_id': 'domains', 'number': None, 'type': 'S', 'description': 'Protein domains'},
{'info_id': 'GENE_END_POSITION', 'column_id': 'end_position', 'number': None, 'type': 'I',
'description': 'The Ensembl gene start and end coordinates correspond to the outermost transcript start and end coordinates (containing the UTRs). These can be part of the same transcript or belong to two different transcripts. An example of the latter is the human BRCA2 gene, where the start coordinate corresponds to the start coordinate of transcript ENST00000380152 (BRCA2-001) and the end coordinate to the end coordinate of transcript ENST00000544455 (BRCA2-201).'},
{'info_id': 'ENSEMBL_PROTEIN', 'column_id': 'ensembl_protein', 'number': None, 'type': 'S',
'description': 'Ensembl Protein ID'},
{'info_id': 'GENE_entrez_gene_id', 'column_id': 'entrez_gene_id', 'number': None, 'type': 'S',
'description': ''},
{'info_id': 'GENE_enzyme_ids', 'column_id': 'enzyme_ids', 'number': None, 'type': 'S', 'description': ''},
{'info_id': 'GENE_', 'column_id': 'external_gene_name', 'number': None, 'type': 'S', 'description': ''},
{'info_id': 'FATHMM_MAX', 'column_id': 'fathmm_pred_most_damaging', 'number': None, 'type': 'S',
'description': 'FATHMM Prediction (most damaging)'},
{'info_id': 'FLAGS', 'column_id': 'flags', 'number': None, 'type': 'S', 'description': 'Flags'},
{'info_id': 'GENE_function_from_uniprotkb', 'column_id': 'function_from_uniprotkb', 'number': None, 'type': 'S',
'description': ''},
{'info_id': 'GENE_family_description', 'column_id': 'gene_family_description', 'number': None, 'type': 'S',
'description': 'From HGNC database.'},
{'info_id': 'GENE_family_tag', 'column_id': 'gene_family_tag', 'number': None, 'type': 'S',
'description': 'From HGNC database.'},
{'info_id': 'GENE_symbol', 'column_id': 'gene_symbol', 'number': None, 'type': 'S', 'description': ''},
{'info_id': 'GERP_PP_RS', 'column_id': 'gerp_pp_rs', 'number': None, 'type': 'S', 'description': 'GERP++ RS'},
{'info_id': 'GNOMAD_AF', 'column_id': 'gnomad_af', 'number': None, 'type': 'F', 'description': 'gnomAD AF'},
{'info_id': 'GNOMAD_FILTERED', 'column_id': 'gnomad_filtered', 'number': 0, 'type': 'B',
'description': 'gnomAD Filtered'},
{'info_id': 'gnomAD_HOM_ALT', 'column_id': 'gnomad_hom_alt', 'number': None, 'type': 'I',
'description': 'Count of homozygous individuals'},
{'info_id': 'GNOMAD_POPMAX', 'column_id': 'gnomad_popmax', 'number': None, 'type': 'S',
'description': 'gnomAD PopMax population'},
{'info_id': 'GNOMAD_POPMAX_AF', 'column_id': 'gnomad_popmax_af', 'number': None, 'type': 'F',
'description': 'gnomAD PopMax AF'},
{'info_id': 'GRANTHAM', 'column_id': 'grantham', 'number': 1, 'type': 'I', 'description': 'Grantham'},
{'info_id': 'GENE_hgnc_id', 'column_id': 'hgnc_id', 'number': None, 'type': 'S', 'description': 'HGNC Id'},
{'info_id': 'GENE_hgnc_name', 'column_id': 'hgnc_name', 'number': None, 'type': 'S',
'description': 'HGNC Name'},
{'info_id': 'GENE_hgnc_symbol', 'column_id': 'hgnc_symbol', 'number': None, 'type': 'S',
'description': 'Gene symbol/name from HGNC. The HUGO Gene Nomenclature Committee (HGNC) is the only worldwide authority that assigns standardised nomenclature to human genes.'},
{'info_id': 'GENE_hgnc_symbol_lower', 'column_id': 'hgnc_symbol_lower', 'number': None, 'type': 'S',
'description': 'Lower case hgnc_symbol'},
{'info_id': 'HGVS_C', 'column_id': 'hgvs_c', 'number': None, 'type': 'S', 'description': 'c.HGVS'},
{'info_id': 'HGVS_P', 'column_id': 'hgvs_p', 'number': None, 'type': 'S', 'description': 'p.HGVS'},
{'info_id': 'IMPACT', 'column_id': 'impact', 'number': None, 'type': 'S', 'description': 'Impact'},
{'info_id': 'GENE_in_cancer_gene_census', 'column_id': 'in_cancer_gene_census', 'number': None, 'type': 'S',
'description': 'Cancer gene census is a list of genes for which mutations have been causally implicated in cancer. It is from the current most release of COSMIC. In SACGF here we only mark genes in the list, and if you want to see more details about those gene (e.g. what kind of tumor the gene causes), check http://cancer.sanger.ac.uk/cancergenome/projects/census/'},
{'info_id': 'INTERNAL_CLINSIG', 'column_id': 'internally_classified', 'number': None, 'type': 'S',
'description': 'Internally Classified'},
{'info_id': 'INTERPRO_DOMAIN', 'column_id': 'interpro_domain', 'number': None, 'type': 'S',
'description': 'InterPro domain'},
{'info_id': 'LOF_TOOL', 'column_id': 'loftool', 'number': None, 'type': 'F', 'description': 'LOF Tool'},
{'info_id': 'Mastermind_MMCNT1', 'column_id': 'mastermind_count_1_cdna', 'number': 1, 'type': 'I',
'description': "Count of <a href='https://www.genomenon.com/cvr/'>Mastermind</a> articles with cDNA matches for this specific variant"},
{'info_id': 'Mastermind_MMCNT2', 'column_id': 'mastermind_count_2_cdna_prot', 'number': 1, 'type': 'I',
'description': "Count of <a href='https://www.genomenon.com/cvr/'>Mastermind</a> articles with variants either explicitly matching at the cDNA level or given only at protein level"},
{'info_id': 'Mastermind_MMCNT3', 'column_id': 'mastermind_count_3_aa_change', 'number': 1, 'type': 'I',
'description': "Count of <a href='https://www.genomenon.com/cvr/'>Mastermind</a> articles including other DNA-level variants resulting in the same amino acid change."},
{'info_id': 'Mastermind_MMID3', 'column_id': 'mastermind_mmid3', 'number': 1, 'type': 'S',
'description': 'Mastermind MMID3 variant identifier(s), as gene:key'},
{'info_id': 'MAX_ENT_SCAN_ALT', 'column_id': 'maxentscan_alt', 'number': None, 'type': 'S',
'description': 'MaxEntScan Alt'},
{'info_id': 'MAX_ENT_SCAN_DIFF', 'column_id': 'maxentscan_diff', 'number': None, 'type': 'S',
'description': 'MaxEntScan Diff'},
{'info_id': 'MAXENTSCAN_PERCENT_DIFF', 'column_id': 'maxentscan_percent_diff_ref', 'number': 1, 'type': 'F',
     'description': 'MaxEntScan percent diff/ref'},
{'info_id': 'MAX_ENT_SCAN_REF', 'column_id': 'maxentscan_ref', 'number': None, 'type': 'S',
'description': 'MaxEntScan Ref'},
{'info_id': 'INTERNAL_CLINSIG_MAX', 'column_id': 'max_internal_classification', 'number': None, 'type': 'S',
'description': 'Max Internal Classification'},
{'info_id': 'GENE_mgi_id', 'column_id': 'mgi_id', 'number': None, 'type': 'S',
'description': 'Mouse Genome Informatics ID'},
{'info_id': 'MUTATION_ASSESSOR_MAX', 'column_id': 'mutation_assessor_pred_most_damaging', 'number': None,
'type': 'S', 'description': 'Mutation Assessor (Most damaging)'},
{'info_id': 'MUTATION_TASTER_MAX', 'column_id': 'mutation_taster_pred_most_damaging', 'number': None,
'type': 'S', 'description': 'Mutation Taster (Most damaging)'},
{'info_id': 'GENE_omim_id', 'column_id': 'omim_id', 'number': None, 'type': 'S',
'description': 'Online Mendelian Inheritance in Man Id'},
{'info_id': 'GENE_omim_phenotypes', 'column_id': 'omim_phenotypes', 'number': None, 'type': 'S',
'description': 'Online Mendelian Inheritance in Man Phenotypes'},
{'info_id': 'OVERLAPPING_SYMBOLS', 'column_id': 'overlapping_symbols', 'number': 1, 'type': 'S',
     'description': 'Overlapping symbols'},
{'info_id': 'GENE_pathway_from_uniprotkb', 'column_id': 'pathway_from_uniprotkb', 'number': None, 'type': 'S',
'description': ''},
{'info_id': 'GENE_percentage_gc_content', 'column_id': 'percentage_gc_content', 'number': None, 'type': 'F',
'description': ''},
{'info_id': 'GENE_phenotypes_from_ensembl', 'column_id': 'phenotypes_from_ensembl', 'number': None, 'type': 'S',
'description': 'Phenotype annotation from Ensembl, which incorporates OMIM, Orphanet, and DDG2P. Multiple entries are separated by :: (double colons).'},
{'info_id': 'POLYPHEN_HVAR_MAX', 'column_id': 'polyphen2_hvar_pred_most_damaging', 'number': None, 'type': 'S',
'description': 'Poplyphen2 HVAR (Most damaging)'},
{'info_id': 'GENE_previous_symbols', 'column_id': 'previous_symbols', 'number': None, 'type': 'S',
'description': ''},
{'info_id': 'PUBMED', 'column_id': 'pubmed', 'number': None, 'type': 'S', 'description': 'PubMed'},
{'info_id': 'GENE_refseq_gene_summary', 'column_id': 'refseq_gene_summary', 'number': None, 'type': 'S',
'description': ''},
{'info_id': 'REPEAT_MASKER', 'column_id': 'repeat_masker', 'number': None, 'type': 'S',
'description': 'Repeat masker'},
{'info_id': 'REVEL_SCORE', 'column_id': 'revel_score', 'number': None, 'type': 'S', 'description': 'REVEL'},
{'info_id': 'GENE_rgd_id', 'column_id': 'rgd_id', 'number': None, 'type': 'S', 'description': ''},
{'info_id': 'GENE_rvis_percentile', 'column_id': 'rvis_percentile', 'number': None, 'type': 'S',
     'description': 'Residual Variation Intolerance Score'},
{'info_id': 'SIFT', 'column_id': 'sift', 'number': 1, 'type': 'S',
'description': 'Effect prediction of single amino acid substitution by SIFT'},
{'info_id': 'SpliceAI_pred_DP_AG', 'column_id': 'spliceai_pred_dp_ag', 'number': 1, 'type': 'I',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta position for Acceptor Gain"},
{'info_id': 'SpliceAI_pred_DP_AL', 'column_id': 'spliceai_pred_dp_al', 'number': 1, 'type': 'I',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta position for Acceptor Loss"},
{'info_id': 'SpliceAI_pred_DP_DG', 'column_id': 'spliceai_pred_dp_dg', 'number': 1, 'type': 'I',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta position for Donor Gain"},
{'info_id': 'SpliceAI_pred_DP_DL', 'column_id': 'spliceai_pred_dp_dl', 'number': 1, 'type': 'I',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta position for Donor Loss"},
{'info_id': 'SpliceAI_pred_DS_AG', 'column_id': 'spliceai_pred_ds_ag', 'number': 1, 'type': 'F',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta score for Acceptor Gain"},
{'info_id': 'SpliceAI_pred_DS_AL', 'column_id': 'spliceai_pred_ds_al', 'number': 1, 'type': 'F',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta score for Acceptor Loss"},
{'info_id': 'SpliceAI_pred_DS_DG', 'column_id': 'spliceai_pred_ds_dg', 'number': 1, 'type': 'F',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta score for Donor Gain"},
{'info_id': 'SpliceAI_pred_DS_DL', 'column_id': 'spliceai_pred_ds_dl', 'number': 1, 'type': 'F',
'description': "<a href='https://pubmed.ncbi.nlm.nih.gov/30661751/'>SpliceAI</a> delta score for Donor Loss"},
{'info_id': 'SPLICE_REGION', 'column_id': 'splice_region', 'number': None, 'type': 'S',
'description': 'Splice Region'},
{'info_id': 'GENE_START_POSITION', 'column_id': 'start_position', 'number': None, 'type': 'I',
'description': 'The Ensembl gene start and end coordinates correspond to the outermost transcript start and end coordinates (containing the UTRs). These can be part of the same transcript or belong to two different transcripts. An example of the latter is the human BRCA2 gene, where the start coordinate corresponds to the start coordinate of transcript ENST00000380152 (BRCA2-001) and the end coordinate to the end coordinate of transcript ENST00000544455 (BRCA2-201).'},
{'info_id': 'GENE_STATUS', 'column_id': 'status', 'number': None, 'type': 'S',
'description': "From Ensembl. 'KNOWN', 'NOVEL', or 'PUTATIVE'. A known gene or transcript matches to a sequence in a public, scientific database such as UniProtKB and NCBI RefSeq. The match must be for the same species, otherwise the gene is considered to be novel for that species. Putative genes are sequences identical to human ESTs that splice into two or more exons but without an ORF."},
{'info_id': 'GENE_STRAND', 'column_id': 'strand', 'number': None, 'type': 'S', 'description': 'Gene Strand'},
{'info_id': 'GENE_synonyms', 'column_id': 'synonyms', 'number': None, 'type': 'S',
'description': 'Synonyms (From the HGNC)'},
{'info_id': 'TAGS', 'column_id': 'tags', 'number': None, 'type': 'S', 'description': 'Tags'},
# Added with this change
{'info_id': 'TAGS_GLOBAL', 'column_id': 'tags_global', 'number': None, 'type': 'S', 'description': 'Global Tags'},
{'info_id': 'GENE_tissue_specificity_from_uniprotkb', 'column_id': 'tissue_specificity_from_uniprotkb',
'number': None, 'type': 'S', 'description': 'From Universal Protein Knowledgebase'},
{'info_id': 'TOPMED_AF', 'column_id': 'topmed_af', 'number': None, 'type': 'F', 'description': 'TOPMed'},
{'info_id': 'NUM_DB_HET', 'column_id': 'total_db_het', 'number': None, 'type': 'I',
'description': 'Total Het in DB'},
{'info_id': 'NUM_DB_HOM', 'column_id': 'total_db_hom', 'number': None, 'type': 'I',
'description': 'Total Hom in DB'},
{'info_id': 'TRANSCRIPT_BIOTYPE', 'column_id': 'transcript_biotype', 'number': None, 'type': 'S',
'description': 'Transcript Biotype'},
{'info_id': 'GENE_transcript_count', 'column_id': 'transcript_count', 'number': None, 'type': 'S',
'description': 'The number of transcripts produced by that gene, from Ensembl.'},
{'info_id': 'GENE_ucsc_id', 'column_id': 'ucsc_id', 'number': None, 'type': 'S', 'description': ''},
{'info_id': 'GENE_uniprot_id', 'column_id': 'uniprot_id', 'number': None, 'type': 'S',
'description': 'Universal Protein Knowledgebase. The Swiss-Prot section is used, and it is manually annotated and reviewed and is of high-quality. Official website is www.uniprot.org'},
{'info_id': 'VARIANT_GRID_ID', 'column_id': 'variant', 'number': 1, 'type': 'S',
'description': 'VariantGrid ID'},
{'info_id': 'VARIANT_CLASS', 'column_id': 'variant_class', 'number': None, 'type': 'S',
'description': 'Variant Class'},
]
bulk_insert_class_data(apps, "snpdb", [("ColumnVCFInfo", COLUMN_VCF_INFO)])
class Migration(migrations.Migration):
dependencies = [
('snpdb', '0007_columnvcfinfo'),
]
operations = [
migrations.RunPython(add_tags_global),
migrations.RunPython(populate_column_vcf_info),
]
| 86.333333 | 482 | 0.641122 |
6546f11f96aa32d09707d618b3e12dc9fc830a50 | 13,440 | py | Python | code/python/StocksAPIforDigitalPortals/v3/fds/sdk/StocksAPIforDigitalPortals/model/inline_response20012_data_estimates_first_fiscal_year_ratios_price_free_cash_flow.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/StocksAPIforDigitalPortals/v3/fds/sdk/StocksAPIforDigitalPortals/model/inline_response20012_data_estimates_first_fiscal_year_ratios_price_free_cash_flow.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/StocksAPIforDigitalPortals/v3/fds/sdk/StocksAPIforDigitalPortals/model/inline_response20012_data_estimates_first_fiscal_year_ratios_price_free_cash_flow.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
Stocks API For Digital Portals
The stocks API features a screener to search for equity instruments based on stock-specific parameters. Parameters for up to three fiscal years might now be used in one request; data is available for the ten most recent completed fiscal years. Estimates are available for the current and two consecutive fiscal years. A separate endpoint returns the possible values and value ranges for the parameters that the endpoint /stock/notation/screener/search accepts: Application developers can request the values and value ranges only for a restricted set of notations that match predefined parameters. This functionality may be used to pre-fill the values and value ranges of the parameters of the /stock/notation/screener/search endpoint so that performing a search always leads to a non-empty set of notations. The endpoint /stock/notation/ranking/intraday/list ranks stocks notations using intraday figures, for example to build a gainers/losers list. Additional endpoints include end-of-day benchmark key figures, and selected fundamentals (as of end of fiscal year and with potentially daily updates). This API is fully integrated with the corresponding Quotes API, allowing access to detailed price and performance information of instruments, as well as basic security identifier cross-reference. For direct access to price histories, please refer to the Time Series API for Digital Portals. Similar criteria based screener APIs exist for fixed income instruments and securitized derivatives: See the Bonds API and the Securitized Derivatives API for details. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.StocksAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.StocksAPIforDigitalPortals.model.inline_response20012_data_estimates_first_fiscal_year_currency_dependent_estimates_ebit_mean import InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean
globals()['InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean'] = InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean
class InlineResponse20012DataEstimatesFirstFiscalYearRatiosPriceFreeCashFlow(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'mean': (InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'mean': 'mean', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse20012DataEstimatesFirstFiscalYearRatiosPriceFreeCashFlow - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mean (InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse20012DataEstimatesFirstFiscalYearRatiosPriceFreeCashFlow - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mean (InlineResponse20012DataEstimatesFirstFiscalYearCurrencyDependentEstimatesEbitMean): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 51.29771 | 1,585 | 0.622842 |
29d22f07bab56464c7b547b3d361a5c112debc06 | 1,316 | py | Python | Manager/UserManager.py | EnviableYapper0/SmartLibrary-Server | 2131d2a13277ebdab2356f0254c80b9f417621d2 | [
"MIT"
] | null | null | null | Manager/UserManager.py | EnviableYapper0/SmartLibrary-Server | 2131d2a13277ebdab2356f0254c80b9f417621d2 | [
"MIT"
] | null | null | null | Manager/UserManager.py | EnviableYapper0/SmartLibrary-Server | 2131d2a13277ebdab2356f0254c80b9f417621d2 | [
"MIT"
] | null | null | null | from Manager.DatabaseManager import DatabaseManager
from model import User
class UserManager(DatabaseManager):
def __init__(self):
DatabaseManager.__init__(self)
User.create_table()
def get_all_user(self):
return DatabaseManager.get_list(
User.select().where(User.is_active == True)
)
def register_new_user(self, json_data):
return User.create(**json_data)
def get_specific_user(self, user_id):
return User.get((User.user_id == user_id) & (User.is_active == True))
def get_user_by_rfid(self, rfid):
return User.get((User.rfid == rfid) & (User.is_active == True))
def update_user_data(self, user_id, json_data):
if 'user_id' in json_data.keys() and json_data['user_id'] != user_id:
raise IndexError("Id mismatched.")
User.set_by_id(user_id, json_data)
return self.get_specific_user(user_id)
def search(self, keyword):
return DatabaseManager.get_list(
User.select().where((User.name.contains(keyword)) & (User.is_active == True))
)
def mark_user_inactive(self, user_id):
User.set_by_id(user_id, {"is_active": False})
def update_user_line_token(self, user_id, line_token):
User.set_by_id(user_id, {'line_token': line_token}) | 32.9 | 89 | 0.665653 |
a0ab755df1c38e944574310fd483e0b48a3c0b88 | 822 | py | Python | manage.py | tob112/django_angular_project_2 | 4bd06f9a5c216ed1b6288fa64e0058a65f378991 | [
"MIT"
] | null | null | null | manage.py | tob112/django_angular_project_2 | 4bd06f9a5c216ed1b6288fa64e0058a65f378991 | [
"MIT"
] | null | null | null | manage.py | tob112/django_angular_project_2 | 4bd06f9a5c216ed1b6288fa64e0058a65f378991 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_angular_project_2.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.73913 | 88 | 0.649635 |
7eca72f6d971ad308eea02ceafde2e8b451e9410 | 771 | py | Python | Deployment/DispatchServer.py | w452144816/Savior | f91871b61d1dd8424af7c7e42c60c1d0b981f79e | [
"BSD-2-Clause"
] | 1 | 2021-03-19T06:13:15.000Z | 2021-03-19T06:13:15.000Z | Deployment/DispatchServer.py | w452144816/Savior | f91871b61d1dd8424af7c7e42c60c1d0b981f79e | [
"BSD-2-Clause"
] | null | null | null | Deployment/DispatchServer.py | w452144816/Savior | f91871b61d1dd8424af7c7e42c60c1d0b981f79e | [
"BSD-2-Clause"
] | null | null | null | import uvicorn
from fastapi import FastAPI
from Deployment.server_config import DEPLOY_VERSION, SERVER_NAME
from Deployment.DispatchInterfaces.DummyInterface import router as dummy_interface_router
from Deployment.DispatchInterfaces.OCRRelatedInterface import router as ocr_interface_router
app = FastAPI(title=SERVER_NAME, version=DEPLOY_VERSION)
# dummy_interface is used to check whether the server has started successfully
available_interfaces = [
(dummy_interface_router, '/dummy_interface'),
(ocr_interface_router, '/ocr_interface'),
]
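# To expose another interface module, import its APIRouter and append a
# (router, path_prefix) pair to available_interfaces, e.g. a hypothetical
# face detection service:
#   (face_detection_interface_router, '/face_detection_interface')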
for m_router, m_path_prefix in available_interfaces:
app.include_router(m_router, prefix=m_path_prefix)
if __name__ == '__main__':
from Deployment.server_config import DISPATCH_SERVER_PORT
uvicorn.run(app, host="0.0.0.0", port=DISPATCH_SERVER_PORT)
| 35.045455 | 92 | 0.824903 |
a13fb062ca3c391715314aeee6004c8146ed7689 | 372 | py | Python | src/cmdtree/_compat.py | winkidney/cmdtree | 8558be856f4c3044cf13d2d07a86b69877bb6491 | [
"MIT"
] | 63 | 2016-07-29T10:55:20.000Z | 2021-06-28T09:11:48.000Z | src/cmdtree/_compat.py | winkidney/cmdtree | 8558be856f4c3044cf13d2d07a86b69877bb6491 | [
"MIT"
] | 3 | 2016-09-22T08:42:18.000Z | 2016-12-10T12:02:01.000Z | src/cmdtree/_compat.py | winkidney/cmdtree | 8558be856f4c3044cf13d2d07a86b69877bb6491 | [
"MIT"
] | 3 | 2016-07-30T23:53:29.000Z | 2016-08-30T11:03:39.000Z | import sys
WIN = sys.platform.startswith('win')
def get_filesystem_encoding():
return sys.getfilesystemencoding() or sys.getdefaultencoding()
if WIN:
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() | 23.25 | 80 | 0.72043 |
7d6e65535abd477474def168f4edfa2cf10a9d37 | 346 | py | Python | Data Structure/Array Or Vector/Find Second Largest Number/SolutionByTanmay.py | rajethanm4/Programmers-Community | d16083eb0e84403159d999d4d1a8bbf652ca51f6 | [
"MIT"
] | 261 | 2019-09-30T19:47:29.000Z | 2022-03-29T18:20:07.000Z | Data Structure/Array Or Vector/Find Second Largest Number/SolutionByTanmay.py | rajethanm4/Programmers-Community | d16083eb0e84403159d999d4d1a8bbf652ca51f6 | [
"MIT"
] | 647 | 2019-10-01T16:51:29.000Z | 2021-12-16T20:39:44.000Z | Data Structure/Array Or Vector/Find Second Largest Number/SolutionByTanmay.py | rajethanm4/Programmers-Community | d16083eb0e84403159d999d4d1a8bbf652ca51f6 | [
"MIT"
] | 383 | 2019-09-30T19:32:07.000Z | 2022-03-24T16:18:26.000Z | # To Find Second Largest Number In An Array
n = int(input("Enter Size Of Array : "))
print("Enter The Elements : ")
c = [int(input()) for i in range(n)]
c = list(set(c))  # remove duplicates so the second *distinct* value is reported
if len(c) < 2:
    print("No Second Largest Number")
else:
    num = c.pop(c.index(max(c)))
    c.insert(0, num)
    print("The Second Largest Number Is : ", max(c[1:]))
| 24.714286 | 56 | 0.604046 |
9bc3bd2c63178bd3432adbffc18e76a376ddb5b9 | 664 | py | Python | shelved_cache/keys.py | mariushelf/shelved_cache | 50dd2ae1518a1961c61003a610fdb0d1992b22cd | [
"MIT"
] | 2 | 2021-06-02T11:40:59.000Z | 2021-06-03T12:46:41.000Z | shelved_cache/keys.py | mariushelf/shelved_cache | 50dd2ae1518a1961c61003a610fdb0d1992b22cd | [
"MIT"
] | null | null | null | shelved_cache/keys.py | mariushelf/shelved_cache | 50dd2ae1518a1961c61003a610fdb0d1992b22cd | [
"MIT"
] | null | null | null | import cachetools
def autotuple_hashkey(*args, **kwargs):
"""Convert lists in args or kwargs to tuple, then pass to `cachetools.keys.hashkey`.
Useful for function that accept lists at arguments. Converting them to a tuple makes
them cacheable and hence usable in `cachetools`.
This function is non-recursive, i.e., it does not work for nested lists.
It works for an argument like [1, 2, 3], but not for [1, 2, [1, 2, 3]].
"""
args = [tuple(arg) if isinstance(arg, list) else arg for arg in args]
kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}
return cachetools.keys.hashkey(*args, **kwargs)
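# Illustrative usage sketch (hypothetical example): autotuple_hashkey can be
# passed as the `key` function of a cachetools decorator so that list
# arguments become hashable cache keys.
if __name__ == "__main__":
    @cachetools.cached(cache={}, key=autotuple_hashkey)
    def total(values):
        # the list argument is converted to a tuple before hashing
        return sum(values)

    print(total([1, 2, 3]))  # computed on the first call
    print(total([1, 2, 3]))  # served from the cache on the second call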
| 41.5 | 88 | 0.685241 |
9682a4444aa8fcaf96f78c10abc41d0b63519bba | 1,989 | py | Python | homeassistant/components/switch/switchbot.py | atanarro/home-assistant | e4de44703aa74f9d45f9b5c58dad5942b4eb55b2 | [
"Apache-2.0"
] | 2 | 2017-12-30T12:40:59.000Z | 2020-07-11T17:23:48.000Z | homeassistant/components/switch/switchbot.py | atanarro/home-assistant | e4de44703aa74f9d45f9b5c58dad5942b4eb55b2 | [
"Apache-2.0"
] | 1 | 2018-08-29T19:33:22.000Z | 2018-08-30T06:26:27.000Z | homeassistant/components/switch/switchbot.py | atanarro/home-assistant | e4de44703aa74f9d45f9b5c58dad5942b4eb55b2 | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """
Support for Switchbot.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.switchbot
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_MAC
REQUIREMENTS = ['PySwitchbot==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Switchbot'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
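# Example configuration.yaml entry (illustrative MAC address and name):
#
# switch:
#   - platform: switchbot
#     mac: 'c0:ff:ee:c0:ff:ee'
#     name: 'Living room bot'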
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Switchbot devices."""
name = config.get(CONF_NAME)
mac_addr = config[CONF_MAC]
add_entities([SwitchBot(mac_addr, name)])
class SwitchBot(SwitchDevice):
"""Representation of a Switchbot."""
def __init__(self, mac, name) -> None:
"""Initialize the Switchbot."""
import switchbot
self._state = False
self._name = name
self._mac = mac
self._device = switchbot.Switchbot(mac=mac)
def turn_on(self, **kwargs) -> None:
"""Turn device on."""
if self._device.turn_on():
self._state = True
def turn_off(self, **kwargs) -> None:
"""Turn device off."""
if self._device.turn_off():
self._state = False
@property
def assumed_state(self) -> bool:
"""Return true if unable to access real state of entity."""
return True
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._state
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return self._mac.replace(':', '')
@property
def name(self) -> str:
"""Return the name of the switch."""
return self._name
| 26.878378 | 74 | 0.658622 |
285800e1b4987f404f7bf5186bd1e9363c5e8411 | 5,550 | py | Python | rewowr/rewowr_library/check_process/check_functions/check_worker_path.py | arturOnRails/ReWoWr | 45c2cef2e61ca5900082c11c325dd2be2ecca6fb | [
"MIT"
] | null | null | null | rewowr/rewowr_library/check_process/check_functions/check_worker_path.py | arturOnRails/ReWoWr | 45c2cef2e61ca5900082c11c325dd2be2ecca6fb | [
"MIT"
] | null | null | null | rewowr/rewowr_library/check_process/check_functions/check_worker_path.py | arturOnRails/ReWoWr | 45c2cef2e61ca5900082c11c325dd2be2ecca6fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Tuple, final
from rewowr.rewowr_library.check_process.errors.custom_errors import CheckWorkerPathError
from rewowr.rewowr_library.check_process.check_functions.check_worker import WorkerNode, \
EndStartNodes
@final
class _NextNodes(Enum):
nodes_in = 1
nodes_out = 2
@final
@dataclass
class _StartWorkerNode:
next_worker_id: str
workable: str
path_chain: Tuple[str, ...]
def _rec_walk(worker_con: _StartWorkerNode, worker_structure_chain: Dict[str, WorkerNode],
start_end_structure: EndStartNodes, node_pos: _NextNodes, /) -> None:
mom_node = worker_structure_chain[worker_con.next_worker_id]
if worker_con.next_worker_id in worker_con.path_chain:
raise CheckWorkerPathError(
f"A node {worker_con.next_worker_id} in worker path has been visited already!"
)
if not (worker_con.path_chain and worker_con.path_chain[-1] in mom_node.visited_list):
mom_node.visited += 1
if worker_con.path_chain:
mom_node.visited_list.append(worker_con.path_chain[-1])
if worker_con.workable != _get_next_wa(mom_node, node_pos)[0]:
raise CheckWorkerPathError(
"A connection {0} in worker path has different types: {1} {2}!".format(
worker_con.next_worker_id, _get_next_wa(mom_node, node_pos)[0],
worker_con.workable
)
)
for next_node in _get_next_nodes(mom_node, node_pos)[1]:
_rec_walk(
_StartWorkerNode(
next_node, _get_next_wa(mom_node, node_pos)[1],
(*worker_con.path_chain, worker_con.next_worker_id)
),
worker_structure_chain, start_end_structure, node_pos
)
if not _get_next_nodes(mom_node, node_pos)[1]:
to_add_l = False
if not worker_con.path_chain or len(worker_con.path_chain) == 1:
to_add_l = True
elif worker_con.path_chain[0] not in mom_node.visited_list:
mom_node.visited_list.append(worker_con.path_chain[0])
to_add_l = True
if to_add_l:
_increase_start_end_cnt(
start_end_structure, _get_next_wa(mom_node, node_pos)[1],
node_pos
)
def _increase_start_end_cnt(start_end_structure: EndStartNodes,
wa_id: str, node_pos: _NextNodes, /) -> None:
if _NextNodes.nodes_in == node_pos:
start_end_structure.end_nodes[wa_id].end_cnt += 1
if _NextNodes.nodes_out == node_pos:
start_end_structure.start_nodes[wa_id].reversed_end_cnt += 1
def _get_next_nodes(work_node: WorkerNode,
node_pos: _NextNodes, /) -> Tuple[List[str], List[str]]:
if _NextNodes.nodes_in == node_pos:
return work_node.in_nodes, work_node.out_nodes
if _NextNodes.nodes_out == node_pos:
return work_node.out_nodes, work_node.in_nodes
raise Exception("Function _get_next_nodes has missing _NextNodes possibilities!")
def _get_next_wa(work_node: WorkerNode,
node_pos: _NextNodes, /) -> Tuple[str, str]:
if _NextNodes.nodes_in == node_pos:
return work_node.in_wa, work_node.out_wa
if _NextNodes.nodes_out == node_pos:
return work_node.out_wa, work_node.in_wa
raise Exception("Function _get_next_wa has missing _NextNodes possibilities!")
def _check_if_visited(worker_structure_chain: Dict[str, WorkerNode],
node_pos: _NextNodes, /) -> None:
for worker_id, worker_value in worker_structure_chain.items():
vis_number = len(_get_next_nodes(worker_value, node_pos)[0])
vis_number = vis_number if vis_number else 1
if worker_value.visited != vis_number:
raise CheckWorkerPathError(
f"The worker {worker_id} has no connections to a start or end node!"
)
worker_value.visited = 0
worker_value.visited_list = []
def check_worker_path(worker_structure_chain: Dict[str, WorkerNode],
start_end_structure: EndStartNodes, /) -> None:
if not start_end_structure.start_nodes:
raise CheckWorkerPathError("StartNodes are empty!")
if not start_end_structure.end_nodes:
raise CheckWorkerPathError("EndNodes are empty!")
for started_workable, started_nodes in start_end_structure.start_nodes.items():
for node in started_nodes.nodes:
_rec_walk(
_StartWorkerNode(node, started_workable, tuple()),
worker_structure_chain, start_end_structure, _NextNodes.nodes_in
)
_check_if_visited(worker_structure_chain, _NextNodes.nodes_in)
for end_workable, end_nodes in start_end_structure.end_nodes.items():
if end_nodes.end_cnt != len(end_nodes.nodes):
raise CheckWorkerPathError(f"The EndNode {end_workable} was never reached!")
for node in end_nodes.nodes:
_rec_walk(
_StartWorkerNode(node, end_workable, tuple()),
worker_structure_chain, start_end_structure, _NextNodes.nodes_out
)
_check_if_visited(worker_structure_chain, _NextNodes.nodes_out)
for started_workable, started_nodes in start_end_structure.start_nodes.items():
if started_nodes.reversed_end_cnt != len(started_nodes.nodes):
raise CheckWorkerPathError(f"The StartNode {started_workable} was never reached!")
| 40.217391 | 94 | 0.683063 |
b8f748e85758c4c557fcaa4f1117e6082d6f8169 | 17,293 | py | Python | db.py | gpremel/cascada-master-server | ab1903025923149380ce9a8404305c94379dfd2a | [
"MIT"
] | null | null | null | db.py | gpremel/cascada-master-server | ab1903025923149380ce9a8404305c94379dfd2a | [
"MIT"
] | null | null | null | db.py | gpremel/cascada-master-server | ab1903025923149380ce9a8404305c94379dfd2a | [
"MIT"
] | null | null | null | """Defines the object used to communicate with the SQL server"""
import mysql.connector
import json
import logging
import math
from numbers import Number
from typing import Dict, Type, Tuple
import vartypes
class BDDException(Exception):
pass
class BDDExceptionTableNonValidee(BDDException):
"""The supplied name for the project matches an existing table that is not compatible with
the supplied scheme for the project"""
class BDDExceptionVariableNonValide(BDDException):
"""At least one of the names choosed for the variables has been rejected by the
database server.
Please choose another one"""
class BDDExceptionResultatsIngerable(BDDException):
"""Query result could not be interpreted by the program"""
class controlleurBDD():
def __init__(self, nom_projet: str,
schema: Dict[str, Type[vartypes.csc_var]], fichier_login: str):
"""Initializes the database driver
Params:
nom_projet: the name given to the project, used to create/open a table
schema: the data scheme used. For instance, for two outgoing variables:
- K: float
- n: uint32
that are going to be generated by the server, sent to the client
which will compute a new variable:
- E: float
the scheme is {"K": csc_float, "n": csc_uint32, "E": csc_float}
fichier_login: the name of the file used to connect to the database server
"""
self.nom_projet = nom_projet
self.schema = schema
if "id" in schema or "niv_densification" in schema:
raise BDDExceptionVariableNonValide
with open(fichier_login, "r") as f:
self.creds = json.load(f)
logging.info("Connexion à la BDD \"{}\" avec l'utilisateur {}@{}".format(
self.creds["db"], self.creds["user"], self.creds["host"]))
self.cnx = mysql.connector.connect(
host=self.creds["host"],
port=self.creds["port"],
user=self.creds["user"],
password=self.creds["pass"],
database=self.creds["db"]
)
self.seuil_densification = None
self.variable_densification = None
self.offset_cour_densification = 0
self.limite_select = None
self.densifie = False
self.a_afficher_erreur_plus_echantillon = False
# the densification depth
# we have no choice but to save it in the table; if we don't, each time we'll reach a
# new densification level, we run the risk of "drowning" the best candidates of the
# old level with new records
self.niveau_densification = 1
cursor = self.cnx.cursor()
# First off, we check whether a table with the same project name exists
r = cursor.execute("""SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = %s AND table_schema = %s
LIMIT 1""", (nom_projet, self.creds["db"]))
nb_tables = cursor.fetchone()[0]
schema_sql = [nom + " " + str(vartype.to_SQL_type()) for nom, vartype in schema.items()]
schema_sql.append("niv_densification INT UNSIGNED ")
schema_sql.append("id INT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id)")
if nb_tables != 0:
logging.info("Table {} already exists !".format(nom_projet))
# Check that the table is compatible
cursor.execute("DESCRIBE {};".format(nom_projet))
for r in cursor:
# Name...
if r[0] in schema:
# Type...
if str(r[1]).casefold() != str(schema[r[0]].to_SQL_type()).casefold():
raise BDDExceptionTableNonValidee
elif r[0] == "niv_densification":
if str(r[1]).casefold() != str(vartypes.csc_uint32.to_SQL_type()).casefold():
raise BDDExceptionTableNonValidee
logging.info("Table validated")
else: # simpler case: table doesn't exist
# Sadly, we have no choice but to use string substitution.
# However, that's not really such a big deal here; we're not reading user input, but
# extracting data from the running Python code.
# I guess that if a third-party is able to hijack Python code running on the server, we
# have a much bigger issue that a "simple" SQLi
cursor.execute("CREATE TABLE {} ({});".format(nom_projet, ", ".join(schema_sql)))
logging.info("La table {} n'existait pas, on vient de la créer !".format(nom_projet))
cursor.close()
def enregistrer(self, resultat: Dict[str, vartypes.csc_var]):
"""Saves the result of a computation in the database
Params:
resultat: the result of the computation, with the following shape:
{"variable name": variable_value}
variable_value being an instance of the cascada type matching the
type assigned to said variable in the scheme provided during the
initialization
Example:
{"K": csc_float(7.4), "n": csc_uint32(14638), "E": csc_float(15.9)}
Returns:
None
"""
cursor = self.cnx.cursor()
# Recasting
r2 = {}
for nom, type_var in self.schema.items():
r2[nom] = type_var(resultat[nom])
resultat = r2
resultat["niv_densification"] = vartypes.csc_int32(self.niveau_densification)
cursor.execute("""INSERT INTO {} ({}) VALUES ({})""".format(self.nom_projet,
", ".join(resultat.keys()),
", ".join(
[r'%s' for x
in resultat.keys()])),
tuple([str(x) for x in resultat.values()]))
self.cnx.commit()
cursor.close()
def existe(self, valeurs: Dict[str, vartypes.csc_var]) -> bool:
"""Checks if a record containing valeurs exists
Params:
valeurs: the name of the variables and their value, for instance:
"K": csc_float(7.4), "n": csc_uint32(14638), "E": csc_float(15.9)}
Returns:
bool: True if the record exists, False otherwise
"""
cursor = self.cnx.cursor(dictionary=True)
cursor.execute("""SELECT COUNT(*) AS C FROM {} WHERE {}""".format(self.nom_projet,
" AND ".join(
["{} = {}".format(
var,
str(val)
) for var, val in
valeurs.items()]
)
)
)
res = cursor.fetchone()["C"]
cursor.close()
return res != 0
def densifier(self, seuil: float, variable: str, offset: int = 0):
"""Sets up the DB for densification, ie finding the records MAXIMIZING the value of the
variable named variable.
This function won't return anything, but should be used when changing densification level.
Params:
            seuil: the ratio of records that should be kept, e.g. with seuil = 1. every
                   record is selected
variable: the name of the variable that should be used to find the best records
offset: the offset used when selecting best candidates. Defaults to 0.
"""
if not 0 < seuil <= 1:
raise ValueError
if variable not in self.schema:
raise KeyError
self.densifie = True
# Keeping tracks of the parameters provided, we could need them
self.seuil_densification = seuil
self.offset_cour_densification = offset
self.variable_densification = variable
# First off, finding the number of records to compute the number
# of records we're going to extract
cursor = self.cnx.cursor(dictionary=True)
cursor.execute(
"""SELECT COUNT(*) AS C FROM {} WHERE niv_densification={}""".format(
self.nom_projet, self.niveau_densification))
count = int(cursor.fetchone()["C"])
cursor.close()
logging.info(
"Densification procedure on {} initialized. Current densifcation level is {}; "
"this level has generated {} records".format(
self.variable_densification,
self.niveau_densification,
count))
# Computing the number of records we're keeping
if self.niveau_densification == 1:
limite = int(math.ceil(seuil * count))
else:
# We know that last time we drew limite_select configurations,
# therefore each configuration would have had count/limite_select iterations.
# Using a max to avoid division by zero errors
limite = int(math.ceil(seuil * count / max(self.limite_select, 1)))
if limite == 0 and not self.a_afficher_erreur_plus_echantillon:
logging.warning(
"No record found. "
"Likely cause: records for this step have all been previously computed. "
"Fix: adjust the numerical paramters for the densification. "
"The issue we keep happening for the other densifications but this warning will "
"only show up once."
)
self.a_afficher_erreur_plus_echantillon = True
self.limite_select = limite
logging.info("\tFor this densification step, {} schemes will be drawn".format(limite))
self.niveau_densification += 1
def schema_suivant(self,
schema_entree: Dict[str,
Type[vartypes.csc_var]]) -> Dict[str,
Tuple[Number,
Number,
Number]]:
"""Does most of the densification work.
This consists in finding the best/most promising values
for the parameters sent to the client and "zooming" on them
        and increasing the resolution (i.e. decreasing the step size between
        consecutive sample points) near these points.
Params:
            schema_entree: a scheme with the same shape as the scheme used to
                           initialize the database controller, but it should only
                           contain the so-called outgoing variables that will be
                           sent to the slave servers, as these are the ones the
                           master has full control over
Returns:
            dict: a dictionary whose keys are the keys of schema_entree and the values
being 3-value tuples (<center value>, <immediately inferior neighbor>,
<immediately superior neighbor>). The center value is the value that was
previously computed by the client and determined to be "promising". Each
of the neighbors coordinates (say there are N of them) are computed by
iteratively setting the other coordinates (N-1 variables) to their respective
                  value in the center vector, and then looking up the closest neighbors for the
remaining "free" coordinate amongst the records of the DB having the other,
N-1 variables set to their value.
Some neighbors might be missing, for instance if the selected record is the one
                  having the maximum values for the coordinates (in that case, there is no upper
neighbor), then they are replaced by <center value>.
If there are no new promising configurations to fetch (because the maximum number
of candidates as computed in densifier() has been reached), the function returns
None; in that case, one should call densifier() again.
"""
if not self.densifie:
return None
cursor = self.cnx.cursor(dictionary=True)
        # Start by extracting the record holding the promising coordinates
cursor.execute(
"""SELECT *
FROM {}
WHERE niv_densification = {}-1
ORDER BY {}
DESC LIMIT 1
OFFSET {}""".format(
self.nom_projet,
self.niveau_densification,
self.variable_densification,
self.offset_cour_densification))
sch = cursor.fetchone()
if sch is None:
self.densifie = False
return None
self.offset_cour_densification += 1
cursor.close()
cursor = self.cnx.cursor()
sch_vrai = {}
# Recasting (CRITICAL for floats, because of the precision:
# MySQL only displays the significant part of floats but expects a "long"
# value for them, that we get by recasting)
for n in schema_entree:
sch_vrai[n] = schema_entree[n](sch[n])
        # reuse the previous densification level: the records being refined belong to it
sch_vrai["niv_densification"] = vartypes.csc_int32(self.niveau_densification - 1)
# Then we find the closest neighbors for each coordinate
xs = {xn: "{} = {}".format(xn, xv) for xn, xv in sch_vrai.items()}
bornes = {}
for n, v in sch_vrai.items():
            # Skip the densification variable: it's the quantity we're trying to
            # maximize, not a coordinate we scan over
if n != self.variable_densification and n != "niv_densification":
val_python = sch[n]
s = " AND ".join(
filter(
None, [
xs[xn] if xn != n and xn != self.variable_densification
else None
for xn in sch_vrai
]
)
)
# cf https://ask.sqlservercentral.com/questions/103011/retrieve-closest-upper-and-lower-values.html # noqa
# fetch the closest neighbors
cursor.execute("""SELECT
CASE SIGN({}-{})
WHEN 1 THEN MIN({})
ELSE MAX({})
END
AS {}
FROM {} AS xV
WHERE {}
GROUP BY SIGN({}-{})
HAVING SIGN({}-{}) != 0
""".format(n, v, n, n, n, self.nom_projet,
s, n, v, n, v))
res = cursor.fetchall()
# Issue: the connector returns the data as a tuple
r = []
for x in res:
if isinstance(x, tuple):
r.append(*x)
else:
r.append(x)
res = sorted(r)
if len(res) == 2:
bornes[n] = tuple([val_python, *res])
elif len(res) == 1:
if res[0] < val_python:
bornes[n] = (val_python, res[0], val_python)
else:
bornes[n] = (val_python, val_python, res[0])
elif len(res) == 0:
bornes[n] = (val_python, val_python, val_python)
else:
raise BDDExceptionResultatsIngerable
if self.offset_cour_densification >= self.limite_select:
self.densifie = False
# offset_cour_densification-1 because we just incremented it
logging.info(
"\tNew scheme drawn: {}, offset {}".format(
bornes, self.offset_cour_densification - 1))
return bornes
def densification_en_cours(self) -> bool:
"""Checks whether a densification procedure is running
Returns:
True if a densification procedure is running; False if not
"""
return self.densifie
| 42.593596 | 122 | 0.523391 |
e902779aeb09ee32977c2ef9d6c086fa8d58200e | 5,149 | py | Python | lib/galaxy/tools/deps/docker_util.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/deps/docker_util.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/deps/docker_util.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | import os
DEFAULT_DOCKER_COMMAND = "docker"
DEFAULT_SUDO = True
DEFAULT_SUDO_COMMAND = "sudo"
DEFAULT_HOST = None
DEFAULT_VOLUME_MOUNT_TYPE = "rw"
DEFAULT_WORKING_DIRECTORY = None
DEFAULT_NET = None
DEFAULT_MEMORY = None
DEFAULT_VOLUMES_FROM = None
DEFAULT_AUTO_REMOVE = True
DEFAULT_SET_USER = "$UID"
class DockerVolume(object):
def __init__(self, path, to_path=None, how=DEFAULT_VOLUME_MOUNT_TYPE):
self.from_path = path
self.to_path = to_path or path
if not DockerVolume.__valid_how(how):
raise ValueError("Invalid way to specify docker volume %s" % how)
self.how = how
@staticmethod
def volumes_from_str(volumes_as_str):
if not volumes_as_str:
return []
volume_strs = [v.strip() for v in volumes_as_str.split(",")]
return map(DockerVolume.volume_from_str, volume_strs)
@staticmethod
def volume_from_str(as_str):
if not as_str:
raise ValueError("Failed to parse docker volume from %s" % as_str)
parts = as_str.split(":", 2)
kwds = dict(path=parts[0])
if len(parts) == 2:
if DockerVolume.__valid_how(parts[1]):
kwds["how"] = parts[1]
else:
kwds["to_path"] = parts[1]
elif len(parts) == 3:
kwds["to_path"] = parts[1]
kwds["how"] = parts[2]
return DockerVolume(**kwds)
@staticmethod
def __valid_how(how):
return how in ["ro", "rw"]
def __str__(self):
return ":".join([self.from_path, self.to_path, self.how])
def build_command(
image,
docker_build_path,
**kwds
):
if os.path.isfile(docker_build_path):
docker_build_path = os.path.dirname(os.path.abspath(docker_build_path))
build_command_parts = __docker_prefix(**kwds)
build_command_parts.extend(["build", "-t", image, docker_build_path])
return build_command_parts
def build_save_image_command(
image,
destination,
**kwds
):
build_command_parts = __docker_prefix(**kwds)
build_command_parts.extend(["save", "-o", destination, image])
return build_command_parts
def build_docker_cache_command(
image,
**kwds
):
inspect_command_parts = __docker_prefix(**kwds)
inspect_command_parts.extend(["inspect", image])
inspect_image_command = " ".join(inspect_command_parts)
pull_command_parts = __docker_prefix(**kwds)
pull_command_parts.extend(["pull", image])
pull_image_command = " ".join(pull_command_parts)
cache_command = "%s > /dev/null 2>&1\n[ $? -ne 0 ] && %s > /dev/null 2>&1\n" % (inspect_image_command, pull_image_command)
return cache_command
def build_docker_images_command(truncate=True, **kwds):
images_command_parts = __docker_prefix(**kwds)
images_command_parts.append("images")
if not truncate:
images_command_parts.append("--no-trunc")
return " ".join(images_command_parts)
def build_docker_load_command(**kwds):
load_command_parts = __docker_prefix(**kwds)
load_command_parts.append("load")
return " ".join(load_command_parts)
def build_docker_run_command(
container_command,
image,
interactive=False,
tag=None,
volumes=[],
volumes_from=DEFAULT_VOLUMES_FROM,
memory=DEFAULT_MEMORY,
env_directives=[],
working_directory=DEFAULT_WORKING_DIRECTORY,
name=None,
net=DEFAULT_NET,
docker_cmd=DEFAULT_DOCKER_COMMAND,
sudo=DEFAULT_SUDO,
sudo_cmd=DEFAULT_SUDO_COMMAND,
auto_rm=DEFAULT_AUTO_REMOVE,
set_user=DEFAULT_SET_USER,
host=DEFAULT_HOST,
):
command_parts = __docker_prefix(
docker_cmd=docker_cmd,
sudo=sudo,
sudo_cmd=sudo_cmd,
host=host
)
command_parts.append("run")
if interactive:
command_parts.append("-i")
for env_directive in env_directives:
command_parts.extend(["-e", env_directive])
for volume in volumes:
command_parts.extend(["-v", str(volume)])
if volumes_from:
command_parts.extend(["--volumes-from", str(volumes_from)])
if memory:
command_parts.extend(["-m", memory])
if name:
command_parts.extend(["-name", name])
if working_directory:
command_parts.extend(["-w", working_directory])
if net:
command_parts.extend(["--net", net])
if auto_rm:
command_parts.append("--rm")
if set_user:
user = set_user
if set_user == DEFAULT_SET_USER:
user = str(os.geteuid())
command_parts.extend(["-u", user])
full_image = image
if tag:
full_image = "%s:%s" % (full_image, tag)
command_parts.append(full_image)
command_parts.append(container_command)
return " ".join(command_parts)
def __docker_prefix(
docker_cmd=DEFAULT_DOCKER_COMMAND,
sudo=DEFAULT_SUDO,
sudo_cmd=DEFAULT_SUDO_COMMAND,
host=DEFAULT_HOST,
**kwds
):
""" Prefix to issue a docker command.
"""
command_parts = []
if sudo:
command_parts.append(sudo_cmd)
command_parts.append(docker_cmd)
if host:
command_parts.extend(["-H", host])
return command_parts
| 28.605556 | 126 | 0.662265 |
017bca609cc6686946337b0987273c9c017d9d00 | 7,816 | py | Python | mlflow/store/artifact_repo.py | kmr0877/mlflow | 04a5fee5df5ea3eaeeb4ce98b0a58425983c8bb0 | [
"Apache-2.0"
] | 1 | 2020-03-13T20:57:04.000Z | 2020-03-13T20:57:04.000Z | mlflow/store/artifact_repo.py | anabranch/mlflow | b58a420ec1ef935b45c4f9cb3bbe91b17b0f1628 | [
"Apache-2.0"
] | null | null | null | mlflow/store/artifact_repo.py | anabranch/mlflow | b58a420ec1ef935b45c4f9cb3bbe91b17b0f1628 | [
"Apache-2.0"
] | 1 | 2018-11-30T17:56:47.000Z | 2018-11-30T17:56:47.000Z | from abc import abstractmethod, ABCMeta
import shutil
from six.moves import urllib
from distutils import dir_util
import os
import boto3
from mlflow.utils.file_utils import (mkdir, exists, list_all, get_relative_path,
get_file_info, build_path, TempDir)
from mlflow.entities.file_info import FileInfo
class ArtifactRepository:
"""
Defines how to upload (log) and download potentially large artifacts from different
storage backends.
"""
__metaclass__ = ABCMeta
def __init__(self, artifact_uri):
self.artifact_uri = artifact_uri
@abstractmethod
def log_artifact(self, local_file, artifact_path=None):
"""
Logs a local file as an artifact, optionally taking an ``artifact_path`` to place it in
within the run's artifacts. Run artifacts can be organized into directories, so you can
place the artifact in a directory this way.
:param local_file: Path to artifact to log
:param artifact_path: Directory within the run's artifact directory in which to log the
artifact
"""
pass
@abstractmethod
def log_artifacts(self, local_dir, artifact_path=None):
"""
Logs the files in the specified local directory as artifacts, optionally taking
an ``artifact_path`` to place them in within the run's artifacts.
:param local_dir: Directory of local artifacts to log
:param artifact_path: Directory within the run's artifact directory in which to log the
artifacts
"""
pass
@abstractmethod
def list_artifacts(self, path):
"""
Return all the artifacts for this run_uuid directly under path.
        :param path: Relative source path that contains the desired artifacts
:return: List of artifacts as FileInfo listed directly under path.
"""
pass
@abstractmethod
def download_artifacts(self, artifact_path):
"""
Download an artifact file or directory to a local directory if applicable, and return a
local path for it.
        :param artifact_path: Relative source path to the desired artifact
        :return: Full path to the desired artifact.
"""
# TODO: Probably need to add a more efficient method to stream just a single artifact
# without downloading it, or to get a pre-signed URL for cloud storage.
pass
@staticmethod
def from_artifact_uri(artifact_uri):
"""
Given an artifact URI for an Experiment Run (e.g., /local/file/path or s3://my/bucket),
returns an ArtifactReposistory instance capable of logging and downloading artifacts
on behalf of this URI.
"""
if artifact_uri.startswith("s3:/"):
return S3ArtifactRepository(artifact_uri)
else:
return LocalArtifactRepository(artifact_uri)
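# Illustrative dispatch (hypothetical URIs):
#   ArtifactRepository.from_artifact_uri("s3://my-bucket/0/run1/artifacts") -> S3ArtifactRepository
#   ArtifactRepository.from_artifact_uri("/tmp/mlruns/0/run1/artifacts")    -> LocalArtifactRepository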
class LocalArtifactRepository(ArtifactRepository):
"""Stores artifacts as files in a local directory."""
def log_artifact(self, local_file, artifact_path=None):
artifact_dir = build_path(self.artifact_uri, artifact_path) \
if artifact_path else self.artifact_uri
if not exists(artifact_dir):
mkdir(artifact_dir)
shutil.copy(local_file, artifact_dir)
def log_artifacts(self, local_dir, artifact_path=None):
artifact_dir = build_path(self.artifact_uri, artifact_path) \
if artifact_path else self.artifact_uri
if not exists(artifact_dir):
mkdir(artifact_dir)
dir_util.copy_tree(src=local_dir, dst=artifact_dir)
def list_artifacts(self, path=None):
artifact_dir = self.artifact_uri
list_dir = build_path(artifact_dir, path) if path else artifact_dir
artifact_files = list_all(list_dir, full_path=True)
infos = [get_file_info(f, get_relative_path(artifact_dir, f)) for f in artifact_files]
return sorted(infos, key=lambda f: f.path)
def download_artifacts(self, artifact_path):
"""Since this is a local file store, just return the artifacts' local path."""
return build_path(self.artifact_uri, artifact_path)
class S3ArtifactRepository(ArtifactRepository):
"""Stores artifacts on Amazon S3."""
@staticmethod
def parse_s3_uri(uri):
"""Parse an S3 URI, returning (bucket, path)"""
parsed = urllib.parse.urlparse(uri)
if parsed.scheme != "s3":
raise Exception("Not an S3 URI: %s" % uri)
path = parsed.path
if path.startswith('/'):
path = path[1:]
return parsed.netloc, path
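    # Example (hypothetical URI):
    #   parse_s3_uri("s3://my-bucket/path/to/run") -> ("my-bucket", "path/to/run")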
def log_artifact(self, local_file, artifact_path=None):
(bucket, dest_path) = self.parse_s3_uri(self.artifact_uri)
if artifact_path:
dest_path = build_path(dest_path, artifact_path)
dest_path = build_path(dest_path, os.path.basename(local_file))
boto3.client('s3').upload_file(local_file, bucket, dest_path)
def log_artifacts(self, local_dir, artifact_path=None):
(bucket, dest_path) = self.parse_s3_uri(self.artifact_uri)
if artifact_path:
dest_path = build_path(dest_path, artifact_path)
s3 = boto3.client('s3')
local_dir = os.path.abspath(local_dir)
for (root, _, filenames) in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = get_relative_path(local_dir, root)
upload_path = build_path(dest_path, rel_path)
for f in filenames:
s3.upload_file(build_path(root, f), bucket, build_path(upload_path, f))
def list_artifacts(self, path=None):
(bucket, artifact_path) = self.parse_s3_uri(self.artifact_uri)
dest_path = artifact_path
if path:
dest_path = build_path(dest_path, path)
infos = []
prefix = dest_path + "/"
paginator = boto3.client('s3').get_paginator("list_objects_v2")
results = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter='/')
for result in results:
# Subdirectories will be listed as "common prefixes" due to the way we made the request
for obj in result.get("CommonPrefixes", []):
subdir = obj.get("Prefix")[len(artifact_path)+1:]
if subdir.endswith("/"):
subdir = subdir[:-1]
infos.append(FileInfo(subdir, True, None))
# Objects listed directly will be files
for obj in result.get('Contents', []):
name = obj.get("Key")[len(artifact_path)+1:]
size = int(obj.get('Size'))
infos.append(FileInfo(name, False, size))
return sorted(infos, key=lambda f: f.path)
def download_artifacts(self, artifact_path):
with TempDir(remove_on_exit=False) as tmp:
return self._download_artifacts_into(artifact_path, tmp.path())
def _download_artifacts_into(self, artifact_path, dest_dir):
"""Private version of download_artifacts that takes a destination directory."""
basename = os.path.basename(artifact_path)
local_path = build_path(dest_dir, basename)
listing = self.list_artifacts(artifact_path)
if len(listing) > 0:
# Artifact_path is a directory, so make a directory for it and download everything
os.mkdir(local_path)
for file_info in listing:
self._download_artifacts_into(file_info.path, local_path)
else:
(bucket, s3_path) = self.parse_s3_uri(self.artifact_uri)
s3_path = build_path(s3_path, artifact_path)
boto3.client('s3').download_file(bucket, s3_path, local_path)
return local_path
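# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of how the repositories above are meant to be used; the paths are
# hypothetical, and actually using the S3 branch would additionally require boto3 to
# find valid AWS credentials and an existing bucket.
if __name__ == "__main__":
    with open("/tmp/model.txt", "w") as demo_file:       # a throwaway artifact to log
        demo_file.write("demo")
    repo = ArtifactRepository.from_artifact_uri("/tmp/artifact-store")
    repo.log_artifact("/tmp/model.txt")                  # copied into the local store
    print([info.path for info in repo.list_artifacts()])
    # URI-based dispatch picks the S3 backend for s3:// URIs ("my-bucket" is hypothetical).
    print(type(ArtifactRepository.from_artifact_uri("s3://my-bucket/exp")).__name__)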
| 41.136842 | 99 | 0.651868 |
2030c1ee741640e1002668ba150e627c636ef0a4 | 9,124 | py | Python | bh20seqanalyzer/main.py | uniqueg/bh20-seq-resource | ddabd9390d2b221786ef58a6d85200eecf82ca2f | [
"Apache-2.0"
] | null | null | null | bh20seqanalyzer/main.py | uniqueg/bh20-seq-resource | ddabd9390d2b221786ef58a6d85200eecf82ca2f | [
"Apache-2.0"
] | null | null | null | bh20seqanalyzer/main.py | uniqueg/bh20-seq-resource | ddabd9390d2b221786ef58a6d85200eecf82ca2f | [
"Apache-2.0"
] | null | null | null | import argparse
import arvados
import arvados.collection
import time
import subprocess
import tempfile
import json
import logging
import ruamel.yaml
from bh20sequploader.qc_metadata import qc_metadata
logging.basicConfig(format="[%(asctime)s] %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
logging.getLogger("googleapiclient.discovery").setLevel(logging.WARN)
def validate_upload(api, collection, validated_project,
fastq_project, fastq_workflow_uuid):
col = arvados.collection.Collection(collection["uuid"])
# validate the collection here. Check metadata, etc.
valid = True
if "metadata.yaml" not in col:
logging.warn("Upload '%s' missing metadata.yaml", collection["name"])
valid = False
else:
metadata_content = ruamel.yaml.round_trip_load(col.open("metadata.yaml"))
#valid = qc_metadata(metadata_content) and valid
if not valid:
logging.warn("Failed metadata qc")
if valid:
if "sequence.fasta" not in col:
if "reads.fastq" in col:
start_fastq_to_fasta(api, collection, fastq_project, fastq_workflow_uuid)
return False
else:
valid = False
logging.warn("Upload '%s' missing sequence.fasta", collection["name"])
dup = api.collections().list(filters=[["owner_uuid", "=", validated_project],
["portable_data_hash", "=", col.portable_data_hash()]]).execute()
if dup["items"]:
# This exact collection has been uploaded before.
valid = False
logging.warn("Upload '%s' is duplicate" % collection["name"])
if valid:
logging.info("Added '%s' to validated sequences" % collection["name"])
# Move it to the "validated" project to be included in the next analysis
api.collections().update(uuid=collection["uuid"], body={
"owner_uuid": validated_project,
"name": "%s (%s)" % (collection["name"], time.asctime(time.gmtime()))}).execute()
else:
# It is invalid, delete it.
logging.warn("Deleting '%s'" % collection["name"])
api.collections().delete(uuid=collection["uuid"]).execute()
return valid
def run_workflow(api, parent_project, workflow_uuid, name, inputobj):
project = api.groups().create(body={
"group_class": "project",
"name": name,
"owner_uuid": parent_project,
}, ensure_unique_name=True).execute()
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(json.dumps(inputobj, indent=2).encode('utf-8'))
tmp.flush()
cmd = ["arvados-cwl-runner",
"--submit",
"--no-wait",
"--debug",
"--project-uuid=%s" % project["uuid"],
"arvwf:%s" % workflow_uuid,
tmp.name]
logging.info("Running %s" % ' '.join(cmd))
comp = subprocess.run(cmd, capture_output=True)
if comp.returncode != 0:
logging.error(comp.stderr.decode('utf-8'))
return project
def start_fastq_to_fasta(api, collection,
analysis_project,
fastq_workflow_uuid):
newproject = run_workflow(api, analysis_project, fastq_workflow_uuid, "FASTQ to FASTA", {
"fastq_forward": {
"class": "File",
"location": "keep:%s/reads.fastq" % collection["portable_data_hash"]
},
"metadata": {
"class": "File",
"location": "keep:%s/metadata.yaml" % collection["portable_data_hash"]
},
"ref_fasta": {
"class": "File",
"location": "keep:ffef6a3b77e5e04f8f62a7b6f67264d1+556/SARS-CoV2-NC_045512.2.fasta"
}
})
api.collections().update(uuid=collection["uuid"],
body={"owner_uuid": newproject["uuid"]}).execute()
def start_pangenome_analysis(api,
analysis_project,
pangenome_workflow_uuid,
validated_project):
validated = arvados.util.list_all(api.collections().list, filters=[["owner_uuid", "=", validated_project]])
inputobj = {
"inputReads": [],
"metadata": [],
"subjects": []
}
for v in validated:
inputobj["inputReads"].append({
"class": "File",
"location": "keep:%s/sequence.fasta" % v["portable_data_hash"]
})
inputobj["metadata"].append({
"class": "File",
"location": "keep:%s/metadata.yaml" % v["portable_data_hash"]
})
inputobj["subjects"].append("keep:%s/sequence.fasta" % v["portable_data_hash"])
run_workflow(api, analysis_project, pangenome_workflow_uuid, "Pangenome analysis", inputobj)
def get_workflow_output_from_project(api, uuid):
cr = api.container_requests().list(filters=[['owner_uuid', '=', uuid],
["requesting_container_uuid", "=", None]]).execute()
if cr["items"] and cr["items"][0]["output_uuid"]:
return cr["items"][0]
else:
return None
def copy_most_recent_result(api, analysis_project, latest_result_uuid):
most_recent_analysis = api.groups().list(filters=[['owner_uuid', '=', analysis_project]],
order="created_at desc", limit=1).execute()
for m in most_recent_analysis["items"]:
wf = get_workflow_output_from_project(api, m["uuid"])
if wf:
src = api.collections().get(uuid=wf["output_uuid"]).execute()
dst = api.collections().get(uuid=latest_result_uuid).execute()
if src["portable_data_hash"] != dst["portable_data_hash"]:
logging.info("Copying latest result from '%s' to %s", m["name"], latest_result_uuid)
api.collections().update(uuid=latest_result_uuid,
body={"manifest_text": src["manifest_text"],
"description": "Result from %s %s" % (m["name"], wf["uuid"])}).execute()
break
def move_fastq_to_fasta_results(api, analysis_project, uploader_project):
projects = api.groups().list(filters=[['owner_uuid', '=', analysis_project],
["properties.moved_output", "!=", True]],
order="created_at desc",).execute()
for p in projects["items"]:
wf = get_workflow_output_from_project(api, p["uuid"])
if wf:
logging.info("Moving completed fastq2fasta result %s back to uploader project", wf["output_uuid"])
api.collections().update(uuid=wf["output_uuid"],
body={"owner_uuid": uploader_project}).execute()
p["properties"]["moved_output"] = True
api.groups().update(uuid=p["uuid"], body={"properties": p["properties"]}).execute()
def main():
parser = argparse.ArgumentParser(description='Analyze collections uploaded to a project')
parser.add_argument('--uploader-project', type=str, default='lugli-j7d0g-n5clictpuvwk8aa', help='')
parser.add_argument('--pangenome-analysis-project', type=str, default='lugli-j7d0g-y4k4uswcqi3ku56', help='')
parser.add_argument('--fastq-project', type=str, default='lugli-j7d0g-xcjxp4oox2u1w8u', help='')
parser.add_argument('--validated-project', type=str, default='lugli-j7d0g-5ct8p1i1wrgyjvp', help='')
parser.add_argument('--pangenome-workflow-uuid', type=str, default='lugli-7fd4e-mqfu9y3ofnpnho1', help='')
parser.add_argument('--fastq-workflow-uuid', type=str, default='lugli-7fd4e-2zp9q4jo5xpif9y', help='')
parser.add_argument('--latest-result-collection', type=str, default='lugli-4zz18-z513nlpqm03hpca', help='')
args = parser.parse_args()
api = arvados.api()
logging.info("Starting up, monitoring %s for uploads" % (args.uploader_project))
while True:
move_fastq_to_fasta_results(api, args.fastq_project, args.uploader_project)
new_collections = api.collections().list(filters=[['owner_uuid', '=', args.uploader_project]]).execute()
at_least_one_new_valid_seq = False
for c in new_collections["items"]:
at_least_one_new_valid_seq = validate_upload(api, c,
args.validated_project,
args.fastq_project,
args.fastq_workflow_uuid) or at_least_one_new_valid_seq
if at_least_one_new_valid_seq:
start_pangenome_analysis(api,
args.pangenome_analysis_project,
args.pangenome_workflow_uuid,
args.validated_project)
copy_most_recent_result(api,
args.pangenome_analysis_project,
args.latest_result_collection)
time.sleep(15)
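# --- Editor's note (illustrative addition, not present in the original file) ---
# main() is presumably exposed as a console-script entry point when the package is
# installed; the guard below is a hypothetical convenience for running the monitor
# loop directly, assuming ARVADOS_API_HOST/ARVADOS_API_TOKEN are configured.
if __name__ == "__main__":
    main()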
| 43.865385 | 119 | 0.589215 |
027fa847fb9c97192878f5c9dd03d6163d6cfd93 | 7,013 | py | Python | tests/qos/files/mellanox/qos_param_generator.py | macikgozwa/sonic-mgmt | 86338be8b2e55fd03d4913037d0e641e443762b0 | [
"Apache-2.0"
] | null | null | null | tests/qos/files/mellanox/qos_param_generator.py | macikgozwa/sonic-mgmt | 86338be8b2e55fd03d4913037d0e641e443762b0 | [
"Apache-2.0"
] | 1 | 2021-02-24T13:48:41.000Z | 2021-02-24T13:48:41.000Z | tests/qos/files/mellanox/qos_param_generator.py | macikgozwa/sonic-mgmt | 86338be8b2e55fd03d4913037d0e641e443762b0 | [
"Apache-2.0"
] | null | null | null | import math
class QosParamMellanox(object):
def __init__(self, qos_params, asic_type, speed_cable_len, ingressLosslessProfile, ingressLossyProfile, egressLosslessProfile, egressLossyProfile):
asic_param_dic = {
'spc1': {
'cell_size': 96,
'headroom_overhead': 95
},
'spc2': {
'cell_size': 144,
'headroom_overhead': 64
},
'spc3': {
'cell_size': 144,
'headroom_overhead': 64
}
}
self.asic_type = asic_type
self.cell_size = asic_param_dic[asic_type]['cell_size']
self.headroom_overhead = asic_param_dic[asic_type]['headroom_overhead']
if speed_cable_len[0:6] == '400000':
self.headroom_overhead += 59
            # for 400G ports we need an extra margin in case it is filled unevenly between two buffer units
self.extra_margin = 16
else:
self.extra_margin = 0
self.speed_cable_len = speed_cable_len
self.lossless_profile = "pg_lossless_{}_profile".format(speed_cable_len)
self.pools_info = {}
self.qos_parameters = {}
self.qos_params_mlnx = qos_params
self.qos_params_mlnx[self.speed_cable_len] = self.qos_params_mlnx['profile']
self.ingressLosslessProfile = ingressLosslessProfile
self.ingressLossyProfile = ingressLossyProfile
self.egressLosslessProfile = egressLosslessProfile
self.egressLossyProfile = egressLossyProfile
return
def run(self):
"""
Main method of the class
Returns the dictionary containing all the parameters required for the qos test
"""
self.collect_qos_configurations()
self.calculate_parameters()
return self.qos_params_mlnx
def collect_qos_configurations(self):
"""
Collect qos configuration from the following fixtures
ingressLosslessProfile
egressLossyProfile
"""
xon = int(math.ceil(float(self.ingressLosslessProfile['xon']) / self.cell_size))
xoff = int(math.ceil(float(self.ingressLosslessProfile['xoff']) / self.cell_size))
size = int(math.ceil(float(self.ingressLosslessProfile['size']) / self.cell_size))
headroom = size
hysteresis = headroom - (xon + xoff)
ingress_lossless_size = int(math.ceil(float(self.ingressLosslessProfile['static_th']) / self.cell_size)) - headroom
egress_lossy_size = int(math.ceil(float(self.egressLossyProfile['static_th']) / self.cell_size))
pkts_num_trig_pfc = ingress_lossless_size + xon + hysteresis
pkts_num_trig_ingr_drp = ingress_lossless_size + headroom - self.headroom_overhead
pkts_num_dismiss_pfc = ingress_lossless_size + 1
pkts_num_trig_egr_drp = egress_lossy_size + 1
self.qos_parameters['pkts_num_trig_pfc'] = pkts_num_trig_pfc
self.qos_parameters['pkts_num_trig_ingr_drp'] = pkts_num_trig_ingr_drp
self.qos_parameters['pkts_num_dismiss_pfc'] = pkts_num_dismiss_pfc
self.qos_parameters['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp
self.qos_parameters['pkts_num_hysteresis'] = hysteresis
def calculate_parameters(self):
"""
Generate qos test parameters based on the configuration
xon
xoff
wm_pg_headroom
wm_pg_shared_lossless
wm_q_shared_lossless
lossy_queue_1
wm_pg_shared_lossy
wm_q_shared_lossy
wm_buf_pool_lossless
wm_buf_pool_lossy
"""
pkts_num_trig_pfc = self.qos_parameters['pkts_num_trig_pfc']
pkts_num_trig_ingr_drp = self.qos_parameters['pkts_num_trig_ingr_drp']
pkts_num_dismiss_pfc = self.qos_parameters['pkts_num_dismiss_pfc']
pkts_num_trig_egr_drp = self.qos_parameters['pkts_num_trig_egr_drp']
pkts_num_hysteresis = self.qos_parameters['pkts_num_hysteresis']
xoff = {}
xoff['pkts_num_trig_pfc'] = pkts_num_trig_pfc
xoff['pkts_num_trig_ingr_drp'] = pkts_num_trig_ingr_drp
        # One motivation for the margin is to tolerate deviation.
# We need a larger margin on SPC2/3
if self.asic_type != 'spc1':
xoff['pkts_num_margin'] = 3
self.qos_params_mlnx[self.speed_cable_len]['xoff_1'].update(xoff)
self.qos_params_mlnx[self.speed_cable_len]['xoff_2'].update(xoff)
xon = {}
xon['pkts_num_trig_pfc'] = pkts_num_trig_pfc
xon['pkts_num_dismiss_pfc'] = pkts_num_dismiss_pfc + self.extra_margin
xon['pkts_num_hysteresis'] = pkts_num_hysteresis + 16
if self.asic_type == 'spc2':
xon['pkts_num_margin'] = 2
elif self.asic_type == 'spc3':
xon['pkts_num_margin'] = 3
self.qos_params_mlnx['xon_1'].update(xon)
self.qos_params_mlnx['xon_2'].update(xon)
wm_pg_headroom = self.qos_params_mlnx[self.speed_cable_len]['wm_pg_headroom']
wm_pg_headroom['pkts_num_trig_pfc'] = pkts_num_trig_pfc
wm_pg_headroom['pkts_num_trig_ingr_drp'] = pkts_num_trig_ingr_drp
wm_pg_headroom['cell_size'] = self.cell_size
if self.asic_type == 'spc3':
wm_pg_headroom['pkts_num_margin'] = 3
else:
wm_pg_headroom['pkts_num_margin'] = 2
wm_pg_shared_lossless = self.qos_params_mlnx['wm_pg_shared_lossless']
wm_pg_shared_lossless['pkts_num_trig_pfc'] = pkts_num_dismiss_pfc
wm_pg_shared_lossless['cell_size'] = self.cell_size
wm_q_shared_lossless = self.qos_params_mlnx[self.speed_cable_len]['wm_q_shared_lossless']
wm_q_shared_lossless['pkts_num_trig_ingr_drp'] = pkts_num_trig_ingr_drp
wm_q_shared_lossless['cell_size'] = self.cell_size
lossy_queue = self.qos_params_mlnx['lossy_queue_1']
lossy_queue['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp - 1
lossy_queue['cell_size'] = self.cell_size
wm_shared_lossy = {}
wm_shared_lossy['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp
wm_shared_lossy['cell_size'] = self.cell_size
self.qos_params_mlnx['wm_pg_shared_lossy'].update(wm_shared_lossy)
self.qos_params_mlnx['wm_q_shared_lossy'].update(wm_shared_lossy)
wm_buf_pool_lossless = self.qos_params_mlnx['wm_buf_pool_lossless']
wm_buf_pool_lossless['pkts_num_trig_pfc'] = pkts_num_trig_pfc
wm_buf_pool_lossless['pkts_num_trig_ingr_drp'] = pkts_num_trig_ingr_drp
wm_buf_pool_lossless['cell_size'] = self.cell_size
wm_buf_pool_lossy = self.qos_params_mlnx['wm_buf_pool_lossy']
wm_buf_pool_lossy['pkts_num_trig_egr_drp'] = pkts_num_trig_egr_drp
wm_buf_pool_lossy['cell_size'] = self.cell_size
for i in range(4):
self.qos_params_mlnx['ecn_{}'.format(i+1)]['cell_size'] = self.cell_size
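# --- Editor's illustration (not part of the original test helper) ---
# A rough sketch of how the generator is driven, with hypothetical buffer-profile
# numbers; in the real test the profile dictionaries come from the ingress/egress
# buffer-profile fixtures passed into the constructor.
if __name__ == "__main__":
    profile_keys = ['xoff_1', 'xoff_2', 'wm_pg_headroom', 'wm_q_shared_lossless']
    top_keys = (['xon_1', 'xon_2', 'wm_pg_shared_lossless', 'lossy_queue_1',
                 'wm_pg_shared_lossy', 'wm_q_shared_lossy',
                 'wm_buf_pool_lossless', 'wm_buf_pool_lossy'] +
                ['ecn_{}'.format(i) for i in range(1, 5)])
    qos_params = {key: {} for key in top_keys}
    qos_params['profile'] = {key: {} for key in profile_keys}
    buffer_profile = {'xon': 19456, 'xoff': 32768, 'size': 58368, 'static_th': 1048576}
    generator = QosParamMellanox(qos_params, 'spc1', '100000_40m',
                                 buffer_profile, buffer_profile, buffer_profile, buffer_profile)
    print(generator.run()['xon_1'])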
| 45.245161 | 151 | 0.667902 |
c7757e9941f2139aed7efe182d9cfd910c395e84 | 11,578 | py | Python | alexia/api/v1/test/test_juliana.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 8 | 2015-06-29T20:01:22.000Z | 2020-10-19T13:49:38.000Z | alexia/api/v1/test/test_juliana.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 67 | 2015-10-05T16:57:14.000Z | 2022-03-28T19:57:36.000Z | alexia/api/v1/test/test_juliana.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 6 | 2015-10-05T13:54:34.000Z | 2021-11-30T05:11:58.000Z | from __future__ import unicode_literals
import datetime
from django.test.testcases import SimpleTestCase
from django.utils import timezone
from alexia.api.exceptions import ForbiddenError, InvalidParamsError
from alexia.apps.billing.models import RfidCard
from alexia.test import APITestCase, TestCase
from ..common import format_authorization
from ..methods.juliana import _get_validate_event, rfid_to_identifier
class JulianaRfidToIdentifierTest(SimpleTestCase):
"""
Tests for the api.v1.juliana.rfid_to_identifier method.
"""
def test_mifare_clasic_1k(self):
rfid = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
self.assertEqual(rfid_to_identifier(rfid), '02,98:ab:54:ef')
def test_mifare_clasic_4k(self):
rfid = {
'atqa': '00:02',
'sak': '18',
'uid': '98:ab:54:ef',
}
self.assertEqual(rfid_to_identifier(rfid), '03,98:ab:54:ef')
def test_mifare_clasic_desfire(self):
rfid = {
'atqa': '03:44',
'sak': '20',
'uid': '98:ab:54:ef:10:cb:76',
}
self.assertEqual(rfid_to_identifier(rfid), '04,98:ab:54:ef:10:cb:76')
def test_mifare_clasic_ultralight(self):
rfid = {
'atqa': '00:44',
'sak': '00',
'uid': '98:ab:54:ef:10:cb:76',
}
self.assertEqual(rfid_to_identifier(rfid), '05,98:ab:54:ef:10:cb:76')
def test_mifare_invalid_combination(self):
rfid = {
'atqa': '00:05',
'sak': '08',
'uid': '98:ab:54:ef',
}
with self.assertRaises(InvalidParamsError):
rfid_to_identifier(rfid)
def test_mifare_no_atqa(self):
rfid = {
'sak': '08',
'uid': '98:ab:54:ef',
}
with self.assertRaises(InvalidParamsError):
rfid_to_identifier(rfid)
def test_mifare_no_sak(self):
rfid = {
'atqa': '00:04',
'uid': '98:ab:54:ef',
}
with self.assertRaises(InvalidParamsError):
rfid_to_identifier(rfid)
def test_mifare_no_uid(self):
rfid = {
'atqa': '00:04',
'sak': '08',
}
with self.assertRaises(InvalidParamsError):
rfid_to_identifier(rfid)
class JulianaGetValidateEventTest(TestCase):
"""
Tests for the api.v1.juliana._get_validate_event method.
"""
def setUp(self):
super(JulianaGetValidateEventTest, self).setUp()
self.load_organization_data()
self.load_billing_data()
self.load_scheduling_data()
data = self.data
# Make event1 current
now = timezone.now()
self.data['event1'].starts_at = now - datetime.timedelta(hours=1)
self.data['event1'].ends_at = now + datetime.timedelta(hours=1)
self.data['event1'].save()
# Add user2 as tender for organization 1
data['organization1'].membership_set.create(user=data['user2'], is_tender=True)
# Add user2 as tender for event1
self.data['event1'].bartender_availabilities.create(user=self.data['user2'],
availability=self.data['availability1'])
class MockRequest(object):
def __init__(self, user):
super(MockRequest, self).__init__()
self.user = user
self.request = MockRequest(self.data['user2'])
def test_normal(self):
event_id = self.data['event1'].id
self.assertEqual(_get_validate_event(self.request, event_id), self.data['event1'])
def test_no_event(self):
event_id = self.data['event1'].id * 100
with self.assertRaises(InvalidParamsError):
_get_validate_event(self.request, event_id)
def test_no_tender(self):
event_id = self.data['event1'].id
# Mark bartender as not available
bartender_availability = self.data['event1'].bartender_availabilities.get(user=self.data['user2'])
bartender_availability.availability = self.data['availability3']
bartender_availability.save()
with self.assertRaises(ForbiddenError):
_get_validate_event(self.request, event_id)
def test_event_past(self):
event_id = self.data['event1'].id
# Make event in the past
now = timezone.now()
self.data['event1'].starts_at = now - datetime.timedelta(days=1, hours=2)
self.data['event1'].ends_at = now - datetime.timedelta(days=1, hours=1)
self.data['event1'].save()
with self.assertRaises(ForbiddenError):
_get_validate_event(self.request, event_id)
def test_event_future(self):
event_id = self.data['event1'].id
# Make event in the future
now = timezone.now()
self.data['event1'].starts_at = now + datetime.timedelta(days=1, hours=1)
self.data['event1'].ends_at = now + datetime.timedelta(days=1, hours=2)
self.data['event1'].save()
with self.assertRaises(ForbiddenError):
_get_validate_event(self.request, event_id)
def test_superuser(self):
event_id = self.data['event1'].id
# Make event in the future
now = timezone.now()
self.data['event1'].starts_at = now + datetime.timedelta(days=1, hours=1)
self.data['event1'].ends_at = now + datetime.timedelta(days=1, hours=2)
self.data['event1'].save()
# Login superuser
self.request.user = self.data['user1']
# Call should still be allowed
self.assertEqual(_get_validate_event(self.request, event_id), self.data['event1'])
class JulianaTest(APITestCase):
"""
Tests for the api.v1.juliana API methods.
"""
def setUp(self):
super(JulianaTest, self).setUp()
self.load_billing_data()
self.load_scheduling_data()
data = self.data
# Make event1 current
now = timezone.now()
self.data['event1'].starts_at = now - datetime.timedelta(hours=1)
self.data['event1'].ends_at = now + datetime.timedelta(hours=1)
self.data['event1'].save()
# Add user2 as tender for organization 1
data['organization1'].membership_set.create(user=data['user2'], is_tender=True)
# Add user2 as tender for event1
self.data['event1'].bartender_availabilities.create(user=self.data['user2'],
availability=self.data['availability1'])
# Login user2
self.login(username=self.data['user2'].username,
password=self.data['password2'],
organization_slug=self.data['organization1'].slug)
def test_rfid_get(self):
event_id = self.data['event1'].id
rfid_data = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
self.data['user2'].rfidcard_set.create(identifier='02,98:ab:54:ef', is_active=True)
authorization = self.data['user2'].authorizations.create(organization=self.data['organization1'])
# Ignore microseconds
authorization.start_date = authorization.start_date.replace(microsecond=0)
authorization.save()
expected_result = {
'user': {
'id': self.data['user2'].id,
'first_name': self.data['user2'].first_name,
'last_name': self.data['user2'].last_name,
'username': self.data['user2'].username,
},
'authorization': format_authorization(authorization),
}
self.send_and_compare_request('juliana.rfid.get', [event_id, rfid_data], expected_result)
def test_rfid_get_no_rfid(self):
event_id = self.data['event1'].id
rfid_data = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
self.send_and_compare_request_error(
'juliana.rfid.get', [event_id, rfid_data],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: RFID card not found',
)
def test_rfid_get_no_authorization(self):
event_id = self.data['event1'].id
rfid_data = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
rfidcard = RfidCard(identifier='02,98:ab:54:ef', is_active=True, user=self.data['user2'])
rfidcard.save()
self.send_and_compare_request_error(
'juliana.rfid.get', [event_id, rfid_data],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: No authorization found for user',
)
def test_rfid_get_other_authorization(self):
event_id = self.data['event1'].id
rfid_data = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
self.data['user2'].rfidcard_set.create(identifier='02,98:ab:54:ef', is_active=True)
self.data['user2'].authorizations.create(organization=self.data['organization2'])
self.send_and_compare_request_error(
'juliana.rfid.get', [event_id, rfid_data],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: No authorization found for user',
)
def test_rfid_get_invalid_event(self):
event_id = self.data['event1'].id * 100
rfid_data = {
'atqa': '00:04',
'sak': '08',
'uid': '98:ab:54:ef',
}
self.send_and_compare_request_error(
'juliana.rfid.get', [event_id, rfid_data],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: Event does not exist',
)
def test_user_check_no_orders(self):
event_id = self.data['event1'].id
user_id = self.data['user1'].id
self.send_and_compare_request('juliana.user.check', [event_id, user_id], 0)
def test_user_check_with_orders(self):
# Add some orders to event1
self.load_billing_order_data()
event_id = self.data['event1'].id
user_id = self.data['user1'].id
expected_amount = self.data['order1'].amount + self.data['order2'].amount
expected_result = int(expected_amount * 100)
self.send_and_compare_request('juliana.user.check', [event_id, user_id], expected_result)
def test_user_check_invalid_event(self):
event_id = self.data['event1'].id * 100
user_id = self.data['user1'].id
self.send_and_compare_request_error(
'juliana.user.check', [event_id, user_id],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: Event does not exist',
)
def test_user_check_invalid_user(self):
event_id = self.data['event1'].id
user_id = self.data['user1'].id * 100
self.send_and_compare_request_error(
'juliana.user.check', [event_id, user_id],
status_code=404,
error_code=404,
error_name='ObjectNotFoundError',
error_message='ObjectNotFoundError: User does not exist',
)
| 32.892045 | 106 | 0.594317 |
81e58b675347491115da987a25241a41e7a0c40b | 7,971 | py | Python | code/02_SNODAS.py | kashingtonDC/RS_GW | 1c88ac5175005c727edcf57a7ed53901f4f70e3f | [
"MIT",
"Unlicense"
] | null | null | null | code/02_SNODAS.py | kashingtonDC/RS_GW | 1c88ac5175005c727edcf57a7ed53901f4f70e3f | [
"MIT",
"Unlicense"
] | null | null | null | code/02_SNODAS.py | kashingtonDC/RS_GW | 1c88ac5175005c727edcf57a7ed53901f4f70e3f | [
"MIT",
"Unlicense"
] | null | null | null | import os
import time
import gzip
import shutil
import ftplib
import datetime
import subprocess
from tqdm import tqdm
# Helpers
def login_to_ftp(ftproot,data_dir):
'''
Use ftplib to login to the NSIDC ftp server, return ftp object cd'd into data dir
'''
f = ftplib.FTP(ftproot)
f.login()
f.cwd(data_dir)
return f
def download_snodat(yeardir, ftp, writedir):
'''
Given list of the directories containing data for each year, navigate to each dir, download files, return list of files downloaded
'''
ftp.cwd(yeardir)
months = [x for x in ftp.nlst() if "." not in x]
print("Processing SNODAS for {}".format(yeardir))
allfiles = []
for month in months[:]:
monthdir = os.path.join(yeardir,month)
ftp.cwd(monthdir)
mfiles = [x for x in ftp.nlst() if x.endswith("tar")]
for mf in tqdm(mfiles[:]):
outfn = os.path.join(writedir,mf)
if not os.path.exists(outfn): # If file already exists, skip
with open(outfn, 'wb') as fp:
ftp.retrbinary('RETR {}'.format(mf), fp.write)
allfiles.append(mfiles)
# unnest the lists
flatfiles = [item for sublist in allfiles for item in sublist]
print("Wrote SNODAS files for {}".format(yeardir))
return [os.path.join(writedir,x) for x in flatfiles]
def extract_snofiles(filelist, writedir):
'''
    For each downloaded tar archive, extract its members into write_dir and return the paths of the extracted .gz files
'''
for file in filelist:
subprocess.Popen(['tar', '-xvf', file, "-C", writedir],stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
print("extracted tar files in {}".format(writedir))
return [os.path.join(writedir,x) for x in os.listdir(writedir) if x.endswith(".gz")]
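# --- Editor's illustration (not part of the original script) ---
# SNODAS archive members encode the product code in their file names (the name below is
# a representative example); process_tarfile() slices out the 4-digit variable code the
# same way, so "1034" selects snow water equivalent.
def _demo_variable_code(name="us_ssmv11034tS__T0001TTNATS2004010105HP001.dat.gz"):
    return name[name.find("ssmv") + 5:name.find("ssmv") + 9]   # -> '1034'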
def process_tarfile(tarfile, writedir, variables = ["1034"]):
'''
For the files that have been downloaded,
(0) untar file,
(1) extract desired variables :
1025: Precipitation
1034: Snow water equivalent X
1036: Snow depth
1038: Snow pack average temperature
1039: Blowing snow sublimation
1044: Snow melt
1050: Snow pack sublimation
    return lists of the dat files (to convert to GTiff), the txt files (to convert to hdr), and the remaining .gz files
'''
# Extract date from og tar file for string matching
date = os.path.splitext(os.path.split(tarfile)[1])[0].replace("SNODAS_","")
# Untar the file we want
cmd = '''tar -xvf {} -C {}'''.format(tarfile,writedir)
print(cmd)
os.system(cmd)
# subprocess.Popen(['tar', '-xvf', tarfile, "-C", writedir],stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
# Find the untarred gz files
gz_files = [os.path.join(writedir,x) for x in os.listdir(writedir) if date in x if x.endswith(".gz")]
# Get the variable strings from each file
varstrs = [x[x.find("ssmv")+5:x.find("ssmv")+9] for x in gz_files]
# Compare to list of vars we want, extract if we want it
for varstr,file in zip(varstrs,gz_files):
outfn = os.path.splitext(file)[0]
if varstr in variables:
with gzip.open(file, 'r') as f_in, open(outfn, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
continue
datfiles = [os.path.join(writedir,x) for x in os.listdir(writedir) if date in x if x.endswith(".dat")]
txtfiles = [os.path.join(writedir,x) for x in os.listdir(writedir) if date in x if x.endswith(".txt")]
gz_files = [os.path.join(writedir,x) for x in os.listdir(writedir) if date in x if x.endswith(".gz")]
return [datfiles,txtfiles, gz_files]
def txt2hdr(txtfiles, writedir):
dates = [x[x.find("TS")+2:x.find("TS")+10] for x in txtfiles]
ymd = [datetime.datetime.strptime(x, '%Y%m%d') for x in dates]
hdrfiles = []
# Account for the absurd datum change in 2013...
for date,file in zip(ymd, txtfiles):
if date < datetime.datetime(2013, 10, 1):
hdrfile = os.path.join(writedir,"../pre_10_2013.hdr")
if date >= datetime.datetime(2013, 10, 1):
hdrfile = os.path.join(writedir,"../post_10_2013.hdr")
# Spec dest file
snofn = os.path.split((os.path.splitext(file)[0]))[1] + ".hdr"
snowpath = os.path.join(writedir,snofn)
hdrfiles.append(snowpath)
shutil.copy(hdrfile,snowpath)
return hdrfiles
def dat2tif(datfiles, writedir):
prod_lookup = dict({
"1025": "PREC",
"1034": "SNWE",
"1036": "SNOD",
"1038": "SPAT",
"1039": "BlSS",
"1044": "SMLT",
"1050": "SSUB",
})
outfnsv1 = {}
for file in datfiles:
date= file[file.find("TS")+2:file.find("TS")+10]
for k,v in prod_lookup.items():
if k in file:
outfnsv1[file] = date+v+".tif"
outfnsvf = {}
for k,v in outfnsv1.items():
if "PREC" in v:
if "L01" in k:
outfnsvf[k] = os.path.join(writedir,os.path.splitext(v)[0]+"LQD.tif")
if "L00" in k:
outfnsvf[k] = os.path.join(writedir,os.path.splitext(v)[0]+"SOL.tif")
else:
outfnsvf[k]= os.path.join(writedir,v)
outfiles = []
for infile, outfile in outfnsvf.items():
if not os.path.exists(outfile): # Dont write if already there
cmd = '''gdal_translate -of GTiff -a_srs '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs' -a_nodata -9999 -a_ullr -124.73333333 52.87500000 -66.94166667 24.95000000 {} {}'''.format(infile,outfile)
os.system(cmd)
else:
print("{} already exists.... Moving to next file".format(outfile))
outfiles.append(outfile)
return outfiles
def clip_tifs(tifs,dst_dir, shapefile = "../shape/argus_grace.shp"):
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for tif in tifs:
outfn = os.path.join(dst_dir,os.path.split(tif)[1])
if not os.path.exists(outfn): # Dont write if already there
cmd = '''gdalwarp -cutline {} {} {}'''.format(shapefile,tif, outfn)
print(cmd)
os.system(cmd)
return
def main():
# Setup write dirs and hdr files
if not os.path.exists("../data"):
os.mkdir("../data")
if not os.path.exists("../data/SNODAS"):
os.mkdir("../data/SNODAS")
if not os.path.exists("../data/SNODAS/pre_10_2013.hdr"):
shutil.copyfile("pre_10_2013.hdr","../data/SNODAS/pre_10_2013.hdr")
if not os.path.exists("../data/SNODAS/post_10_2013.hdr"):
shutil.copyfile("pre_10_2013.hdr","../data/SNODAS/post_10_2013.hdr")
# Set some directories and global vars
ftproot = 'sidads.colorado.edu'
data_dir = '/DATASETS/NOAA/G02158/masked/'
tar_dir = os.path.join("../data/SNODAS",'SNODAS_tar')
gz_dir = os.path.join("../data/SNODAS",'SNODAS_tar_gz')
tif_dir = os.path.join("../data/SNODAS",'SNODAS_tifs')
out_dir = os.path.join("../data/SNODAS",'SNODAS_processed')
for fdir in [tar_dir,gz_dir,tif_dir,out_dir]:
if not os.path.exists(fdir):
os.mkdir(fdir)
# Main routine
ftp = login_to_ftp(ftproot,data_dir)
dirlist = ftp.nlst()
yeardirs = [os.path.join(data_dir,x) for x in dirlist if "." not in x]
for y in yeardirs[:]:
ftp = login_to_ftp(ftproot,data_dir)
tarfiles = download_snodat(y, ftp, writedir = tar_dir)
# Make sure files are sorted by date
tarfiles.sort()
# Quit the connection so it does not become stale
ftp.close()
for tarfile in tarfiles[:]:
# Quick check to see if the outfn already was processed. If so, skip.
proctif_fn = os.path.splitext(os.path.split(tarfile)[1])[0].replace("SNODAS_","")
procfiles = [x for x in os.listdir(out_dir) if proctif_fn in x]
# If the files are not processed, execute functions above
if len(procfiles) == 0:
print("=====" * 10)
print("Processing {}".format(tarfile))
print("=====" * 10)
datfiles,txtfiles,gz_files = process_tarfile(tarfile, writedir = gz_dir)
hdrfiles = txt2hdr(txtfiles,writedir = gz_dir)
tiffiles = dat2tif(datfiles, writedir = tif_dir)
clipped = clip_tifs(tiffiles, dst_dir = out_dir)
# clean up
print(len(gz_files), len(datfiles), len(txtfiles), len(hdrfiles))
for gzf in gz_files:
os.remove(gzf)
for a,b,c,d in zip(datfiles,txtfiles,hdrfiles,tiffiles):
os.remove(a)
os.remove(b)
os.remove(c)
os.remove(d)
# Cleanup everything in gz dir just in case
gz_leftovers = [os.path.join(gz_dir,x) for x in os.listdir(gz_dir)]
for gzl in gz_leftovers:
os.remove(gzl)
if __name__ == '__main__':
main() | 32.271255 | 201 | 0.682599 |
b0b814caf1c9af840e881e7a608e7900afab9775 | 4,194 | py | Python | web/src/p2k16/web/tool_blueprint.py | bitraf/p2k16 | 266143944d3c287b3b4ba37337336be80c146c26 | [
"MIT"
] | 5 | 2018-03-28T17:53:44.000Z | 2021-07-27T19:20:12.000Z | web/src/p2k16/web/tool_blueprint.py | bitraf/p2k16 | 266143944d3c287b3b4ba37337336be80c146c26 | [
"MIT"
] | 88 | 2017-04-20T11:05:36.000Z | 2022-03-12T01:01:28.000Z | web/src/p2k16/web/tool_blueprint.py | bitraf/p2k16 | 266143944d3c287b3b4ba37337336be80c146c26 | [
"MIT"
] | 8 | 2018-03-23T16:43:56.000Z | 2021-09-06T11:13:28.000Z | import logging
import flask
import flask_login
import p2k16.core.door
from flask import Blueprint, abort, jsonify, request
from p2k16.core import P2k16UserException, event_management
from p2k16.core.door import DoorClient
from p2k16.core.models import db, ToolDescription, ToolCheckout, Circle
from p2k16.web.utils import validate_schema, DataServiceTool, require_circle_membership
from p2k16.web.core_blueprint import model_to_json
logger = logging.getLogger(__name__)
tool = Blueprint('tool', __name__, template_folder='templates')
registry = DataServiceTool("ToolDataService", "tool-data-service.js", tool)
tool_form = {
"type": "object",
"properties": {
"tool": {"type": "integer"},
},
"required": ["tool"]
}
@registry.route('/service/tool/recent-events', methods=['GET'])
def recent_events():
from datetime import datetime, timedelta
start = datetime.now() - timedelta(days=7)
return jsonify([e.to_dict() for e in event_management.get_tool_recent_events(start)])
@registry.route('/service/tool/checkout', methods=['POST'])
@validate_schema(tool_form)
@flask_login.login_required
def checkout_tool():
account = flask_login.current_user.account
client = flask.current_app.config.tool_client # type: DoorClient
tool = ToolDescription.find_by_id(request.json["tool"])
client.checkout_tool(account, tool)
db.session.commit()
return data_tool_list()
@registry.route('/service/tool/checkin', methods=['POST'])
@validate_schema(tool_form)
@flask_login.login_required
def checkin_tool():
account = flask_login.current_user.account
client = flask.current_app.config.tool_client # type: DoorClient
tool = ToolDescription.find_by_id(request.json["tool"])
client.checkin_tool(account, tool)
db.session.commit()
return data_tool_list()
def tool_to_json(tool: ToolDescription):
checkout = ToolCheckout.find_by_tool(tool)
checkout_model = {}
if checkout is not None:
checkout_model = {
"active": True,
"started": checkout.started,
"account": checkout.account,
"username": checkout.account.username,
}
else:
checkout_model = {
"active": False,
}
return {**model_to_json(tool), **{
"name": tool.name,
"description": tool.description,
"circle": tool.circle.name,
"checkout": checkout_model,
}}
@registry.route('/data/tool')
def data_tool_list():
tools = ToolDescription.query.all()
return jsonify([tool_to_json(tool) for tool in tools])
@registry.route('/data/tool/<int:tool_id>')
def data_tool(tool_id: int):
tool = ToolDescription.find_by_id(tool_id)
if tool is None:
abort(404)
return jsonify(tool_to_json(tool))
@registry.route('/data/tool', methods=["PUT"])
def data_tool_update():
return _data_tool_save()
@registry.route('/data/tool', methods=["POST"])
def data_tool_add():
return _data_tool_save()
@require_circle_membership("despot")
def _data_tool_save():
circle_name = request.json["circle"]
circle = Circle.find_by_name(circle_name)
if not circle:
raise P2k16UserException("No such circle: {}".format(circle_name))
_id = request.json.get("id", None)
if _id:
tool = ToolDescription.find_by_id(_id)
if tool is None:
abort(404)
logger.info("Updating tool: {}".format(tool))
tool.name = request.json["name"]
tool.circle = circle
tool.description = request.json["description"]
else:
logger.info("Creating new tooldescription: {}".format(request.json["name"]))
tool = ToolDescription(request.json["name"], request.json["description"], circle)
db.session.add(tool)
db.session.commit()
db.session.flush()
logger.info("Update tool: {}".format(tool.name))
return jsonify(tool_to_json(tool))
@registry.route('/tool-data-service.js')
def door_service():
content = door_service.content
if not content:
content = registry.generate()
door_service.content = content
return content, {'Content-Type': 'application/javascript'}
door_service.content = None
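# --- Editor's illustration (not part of the original blueprint) ---
# The checkout/checkin endpoints expect a JSON body that validates against tool_form,
# e.g. {"tool": 42}. A rough sketch with Flask's test client, assuming an application
# object "app" with this blueprint registered (hypothetical here):
#
#     with app.test_client() as client:
#         client.post('/service/tool/checkout', json={'tool': 42})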
| 27.592105 | 89 | 0.687649 |
4b761fe5ec43ef212eba50591de969d78c85b98b | 966 | py | Python | ex084.py | qnomon/Python-Studies | dbd592cf2a161bb9ddbec66f020c602bddc6d44b | [
"MIT"
] | null | null | null | ex084.py | qnomon/Python-Studies | dbd592cf2a161bb9ddbec66f020c602bddc6d44b | [
"MIT"
] | null | null | null | ex084.py | qnomon/Python-Studies | dbd592cf2a161bb9ddbec66f020c602bddc6d44b | [
"MIT"
] | null | null | null | dados = list()
lista = list()
totleve = list()
totpesado = list()
c = 0
while True:
lista.append(str(input('Digite o nome: ')))
lista.append((float(input('Digite o peso: '))))
dados.append(lista[:])
lista.clear()
c += 1
cont = str(input('Deseja continuar?[S/N]: ')).strip().upper()
while cont not in 'SN':
        cont = str(input('Deseja continuar?[S/N]: ')).strip().upper()
if cont == 'N':
break
for s in range(0, len(dados)):
for p in dados:
if s == 0:
leve = p[1]
pesado = p[1]
if p[1] < leve:
leve = p[1]
if p[1] > pesado:
pesado = p[1]
for v in dados:
if v[1]== leve:
totleve.append(v[0])
if v[1] == pesado:
totpesado.append(v[0])
print(f'Foram adicionados {c} pessoas no cadastro')
print(f'O maior peso registrado foi {pesado}Kg, Peso de: {totpesado}')
print(f'O menor peso registrado foi {leve}Kg, Peso de: {totleve}')
| 28.411765 | 70 | 0.549689 |
e71ec0dce0eb74e1d0d07f8237455af544324d3f | 416 | py | Python | process.py | elecabfer/Bowtie | 0d6fc6d6da833deaf5a8eca1e5b33513a2009f4e | [
"MIT"
] | null | null | null | process.py | elecabfer/Bowtie | 0d6fc6d6da833deaf5a8eca1e5b33513a2009f4e | [
"MIT"
] | null | null | null | process.py | elecabfer/Bowtie | 0d6fc6d6da833deaf5a8eca1e5b33513a2009f4e | [
"MIT"
] | null | null | null | rm 1_*
rm 21_*
head *error.txt >> info_gg_unpaired.txt
source /mnt/common/epfl/etc/bbcf_bashrc ### to load all the bbcf programs
module add UHTS/Analysis/samtools/1.2;
python -c "from bbcflib import mapseq"
for i in {2..48}
do
add_nh_flag "$i"_16S_gg.sam "$i"_SE_gg.bam
samtools sort "$i"_SE_gg.bam "$i"_SE_gg_s
samtools view -F0x4 $i"_SE_gg_s.bam | cut -f 3 | uniq -c >> "$i"_counts.txt
done
| 32 | 85 | 0.709135 |
52a8d45e4d46bbbabbd068ec2bfa563b0b93df59 | 51 | py | Python | plugin/src/test/resources/joinLines/StringOneQuotePlainU-after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | null | null | null | plugin/src/test/resources/joinLines/StringOneQuotePlainU-after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | 11 | 2017-02-27T22:35:32.000Z | 2021-12-24T08:07:40.000Z | plugin/src/test/resources/joinLines/StringOneQuotePlainU-after.py | consulo/consulo-python | 586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d | [
"Apache-2.0"
] | null | null | null | a = (u"Ein Deutsche Text. "
"An English text")
| 17 | 27 | 0.588235 |
21a411f52c7ff6196ab314546d3b2a0f7c068235 | 17,655 | py | Python | lib/python2.7/site-packages/scipy/sparse/lil.py | wfehrnstrom/harmonize | e5661d24b2021739e8ac4bf1d3a530eda4e155b3 | [
"MIT"
] | 18 | 2018-02-23T11:28:54.000Z | 2021-09-23T08:19:54.000Z | SLpackage/private/thirdparty/pythonpkgs/scipy/scipy_0.19.1/lib/python2.7/site-packages/scipy/sparse/lil.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 8 | 2020-09-26T00:55:16.000Z | 2022-03-12T00:23:07.000Z | SLpackage/private/thirdparty/pythonpkgs/scipy/scipy_0.19.1/lib/python2.7/site-packages/scipy/sparse/lil.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 5 | 2019-03-12T14:24:18.000Z | 2021-06-23T13:42:58.000Z | """LInked List sparse matrix class
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['lil_matrix','isspmatrix_lil']
import numpy as np
from scipy._lib.six import xrange
from .base import spmatrix, isspmatrix
from .sputils import (getdtype, isshape, isscalarlike, IndexMixin,
upcast_scalar, get_index_dtype, isintlike)
from . import _csparsetools
class lil_matrix(spmatrix, IndexMixin):
"""Row-based linked list sparse matrix
This is a structure for constructing sparse matrices incrementally.
Note that inserting a single item can take linear time in the worst case;
to construct a matrix efficiently, make sure the items are pre-sorted by
index, per row.
This can be instantiated in several ways:
lil_matrix(D)
with a dense matrix or rank-2 ndarray D
lil_matrix(S)
with another sparse matrix S (equivalent to S.tolil())
lil_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
LIL format data array of the matrix
rows
LIL format row index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the LIL format
- supports flexible slicing
- changes to the matrix sparsity structure are efficient
Disadvantages of the LIL format
- arithmetic operations LIL + LIL are slow (consider CSR or CSC)
- slow column slicing (consider CSC)
- slow matrix vector products (consider CSR or CSC)
Intended Usage
- LIL is a convenient format for constructing sparse matrices
- once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- consider using the COO format when constructing large matrices
Data Structure
- An array (``self.rows``) of rows, each of which is a sorted
list of column indices of non-zero elements.
- The corresponding nonzero values are stored in similar
fashion in ``self.data``.
"""
format = 'lil'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
spmatrix.__init__(self)
self.dtype = getdtype(dtype, arg1, default=float)
# First get the shape
if isspmatrix(arg1):
if isspmatrix_lil(arg1) and copy:
A = arg1.copy()
else:
A = arg1.tolil()
if dtype is not None:
A = A.astype(dtype)
self.shape = A.shape
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
elif isinstance(arg1,tuple):
if isshape(arg1):
if shape is not None:
raise ValueError('invalid use of shape parameter')
M, N = arg1
self.shape = (M,N)
self.rows = np.empty((M,), dtype=object)
self.data = np.empty((M,), dtype=object)
for i in range(M):
self.rows[i] = []
self.data[i] = []
else:
raise TypeError('unrecognized lil_matrix constructor usage')
else:
# assume A is dense
try:
A = np.asmatrix(arg1)
except TypeError:
raise TypeError('unsupported matrix type')
else:
from .csr import csr_matrix
A = csr_matrix(A, dtype=dtype).tolil()
self.shape = A.shape
self.dtype = A.dtype
self.rows = A.rows
self.data = A.data
def set_shape(self,shape):
shape = tuple(shape)
if len(shape) != 2:
raise ValueError("Only two-dimensional sparse arrays "
"are supported.")
try:
shape = int(shape[0]),int(shape[1]) # floats, other weirdness
except:
raise TypeError('invalid shape')
if not (shape[0] >= 0 and shape[1] >= 0):
raise ValueError('invalid shape')
if (self._shape != shape) and (self._shape is not None):
try:
self = self.reshape(shape)
except NotImplementedError:
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
self._shape = shape
set_shape.__doc__ = spmatrix.set_shape.__doc__
shape = property(fget=spmatrix.get_shape, fset=set_shape)
def __iadd__(self,other):
self[:,:] = self + other
return self
def __isub__(self,other):
self[:,:] = self - other
return self
def __imul__(self,other):
if isscalarlike(other):
self[:,:] = self * other
return self
else:
return NotImplemented
def __itruediv__(self,other):
if isscalarlike(other):
self[:,:] = self / other
return self
else:
return NotImplemented
# Whenever the dimensions change, empty lists should be created for each
# row
def getnnz(self, axis=None):
if axis is None:
return sum([len(rowvals) for rowvals in self.data])
if axis < 0:
axis += 2
if axis == 0:
out = np.zeros(self.shape[1], dtype=np.intp)
for row in self.rows:
out[row] += 1
return out
elif axis == 1:
return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
else:
raise ValueError('axis out of bounds')
def count_nonzero(self):
return sum(np.count_nonzero(rowvals) for rowvals in self.data)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def __str__(self):
val = ''
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
return val[:-1]
def getrowview(self, i):
"""Returns a view of the 'i'th row (without copying).
"""
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i]
new.data[0] = self.data[i]
return new
def getrow(self, i):
"""Returns a copy of the 'i'th row.
"""
i = self._check_row_bounds(i)
new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
new.rows[0] = self.rows[i][:]
new.data[0] = self.data[i][:]
return new
def _check_row_bounds(self, i):
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('row index out of bounds')
return i
def _check_col_bounds(self, j):
if j < 0:
j += self.shape[1]
if j < 0 or j >= self.shape[1]:
raise IndexError('column index out of bounds')
return j
def __getitem__(self, index):
"""Return the element(s) index=(i, j), where j may be a slice.
This always returns a copy for consistency, since slices into
Python lists return copies.
"""
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Other types are
# handled below.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
# Utilities found in IndexMixin
i, j = self._unpack_index(index)
# Proper check for other scalar index types
i_intlike = isintlike(i)
j_intlike = isintlike(j)
if i_intlike and j_intlike:
v = _csparsetools.lil_get1(self.shape[0], self.shape[1],
self.rows, self.data,
i, j)
return self.dtype.type(v)
elif j_intlike or isinstance(j, slice):
# column slicing fast path
if j_intlike:
j = self._check_col_bounds(j)
j = slice(j, j+1)
if i_intlike:
i = self._check_row_bounds(i)
i = xrange(i, i+1)
i_shape = None
elif isinstance(i, slice):
i = xrange(*i.indices(self.shape[0]))
i_shape = None
else:
i = np.atleast_1d(i)
i_shape = i.shape
if i_shape is None or len(i_shape) == 1:
return self._get_row_ranges(i, j)
i, j = self._index_to_arrays(i, j)
if i.size == 0:
return lil_matrix(i.shape, dtype=self.dtype)
new = lil_matrix(i.shape, dtype=self.dtype)
i, j = _prepare_index_for_memoryview(i, j)
_csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
i, j)
return new
def _get_row_ranges(self, rows, col_slice):
"""
Fast path for indexing in the case where column index is slice.
This gains performance improvement over brute force by more
efficient skipping of zeros, by accessing the elements
column-wise in order.
Parameters
----------
rows : sequence or xrange
Rows indexed. If xrange, must be within valid bounds.
col_slice : slice
Columns indexed
"""
j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
col_range = xrange(j_start, j_stop, j_stride)
nj = len(col_range)
new = lil_matrix((len(rows), nj), dtype=self.dtype)
_csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
rows,
j_start, j_stop, j_stride, nj)
return new
def __setitem__(self, index, x):
# Scalar fast path first
if isinstance(index, tuple) and len(index) == 2:
i, j = index
# Use isinstance checks for common index types; this is
# ~25-50% faster than isscalarlike. Scalar index
# assignment for other types is handled below together
# with fancy indexing.
if ((isinstance(i, int) or isinstance(i, np.integer)) and
(isinstance(j, int) or isinstance(j, np.integer))):
x = self.dtype.type(x)
if x.size > 1:
# Triggered if input was an ndarray
raise ValueError("Trying to assign a sequence to an item")
_csparsetools.lil_insert(self.shape[0], self.shape[1],
self.rows, self.data, i, j, x)
return
# General indexing
i, j = self._unpack_index(index)
# shortcut for common case of full matrix assign:
if (isspmatrix(x) and isinstance(i, slice) and i == slice(None) and
isinstance(j, slice) and j == slice(None)
and x.shape == self.shape):
x = lil_matrix(x, dtype=self.dtype)
self.rows = x.rows
self.data = x.data
return
i, j = self._index_to_arrays(i, j)
if isspmatrix(x):
x = x.toarray()
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
x, _ = np.broadcast_arrays(x, i)
if x.shape != i.shape:
raise ValueError("shape mismatch in assignment")
# Set values
i, j, x = _prepare_index_for_memoryview(i, j, x)
_csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
self.rows, self.data,
i, j, x)
def _mul_scalar(self, other):
if other == 0:
# Multiply by zero: return the zero matrix
new = lil_matrix(self.shape, dtype=self.dtype)
else:
res_dtype = upcast_scalar(self.dtype, other)
new = self.copy()
new = new.astype(res_dtype)
# Multiply this scalar by every element.
for j, rowvals in enumerate(new.data):
new.data[j] = [val*other for val in rowvals]
return new
def __truediv__(self, other): # self / other
if isscalarlike(other):
new = self.copy()
# Divide every element by this scalar
for j, rowvals in enumerate(new.data):
new.data[j] = [val/other for val in rowvals]
return new
else:
return self.tocsr() / other
def copy(self):
from copy import deepcopy
new = lil_matrix(self.shape, dtype=self.dtype)
new.data = deepcopy(self.data)
new.rows = deepcopy(self.rows)
return new
copy.__doc__ = spmatrix.copy.__doc__
def reshape(self, shape, order='C'):
if type(order) != str or order != 'C':
raise ValueError(("Sparse matrices do not support "
"an 'order' parameter."))
if type(shape) != tuple:
raise TypeError("a tuple must be passed in for 'shape'")
if len(shape) != 2:
raise ValueError("a length-2 tuple must be passed in for 'shape'")
new = lil_matrix(shape, dtype=self.dtype)
j_max = self.shape[1]
# Size is ambiguous for sparse matrices, so in order to check 'total
# dimension', we need to take the product of their dimensions instead
if new.shape[0] * new.shape[1] != self.shape[0] * self.shape[1]:
raise ValueError("the product of the dimensions for the new sparse "
"matrix must equal that of the original matrix")
for i, row in enumerate(self.rows):
for col, j in enumerate(row):
new_r, new_c = np.unravel_index(i*j_max + j, shape)
new[new_r, new_c] = self[i, j]
return new
reshape.__doc__ = spmatrix.reshape.__doc__
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
d = self._process_toarray_args(order, out)
for i, row in enumerate(self.rows):
for pos, j in enumerate(row):
d[i, j] = self.data[i][pos]
return d
def transpose(self, axes=None, copy=False):
return self.tocsr().transpose(axes=axes, copy=copy).tolil()
transpose.__doc__ = spmatrix.transpose.__doc__
def tolil(self, copy=False):
if copy:
return self.copy()
else:
return self
tolil.__doc__ = spmatrix.tolil.__doc__
def tocsr(self, copy=False):
lst = [len(x) for x in self.rows]
idx_dtype = get_index_dtype(maxval=max(self.shape[1], sum(lst)))
indptr = np.asarray(lst, dtype=idx_dtype)
indptr = np.concatenate((np.array([0], dtype=idx_dtype),
np.cumsum(indptr, dtype=idx_dtype)))
indices = []
for x in self.rows:
indices.extend(x)
indices = np.asarray(indices, dtype=idx_dtype)
data = []
for x in self.data:
data.extend(x)
data = np.asarray(data, dtype=self.dtype)
from .csr import csr_matrix
return csr_matrix((data, indices, indptr), shape=self.shape)
tocsr.__doc__ = spmatrix.tocsr.__doc__
def _prepare_index_for_memoryview(i, j, x=None):
"""
Convert index and data arrays to form suitable for passing to the
Cython fancy getset routines.
The conversions are necessary since to (i) ensure the integer
index arrays are in one of the accepted types, and (ii) to ensure
the arrays are writable so that Cython memoryview support doesn't
choke on them.
Parameters
----------
i, j
Index arrays
x : optional
Data arrays
Returns
-------
i, j, x
Re-formatted arrays (x is omitted, if input was None)
"""
if i.dtype > j.dtype:
j = j.astype(i.dtype)
elif i.dtype < j.dtype:
i = i.astype(j.dtype)
if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
i = i.astype(np.intp)
if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
j = j.astype(np.intp)
if x is not None:
if not x.flags.writeable:
x = x.copy()
return i, j, x
else:
return i, j
def isspmatrix_lil(x):
return isinstance(x, lil_matrix)
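# --- Editor's illustration (not part of the original scipy source) ---
# A small sketch of the usage pattern the lil_matrix docstring recommends: build the
# matrix incrementally in LIL format, then convert to CSR for fast arithmetic.
def _lil_usage_demo():
    A = lil_matrix((4, 4))
    A[0, 1] = 1.0
    A[2, 3] = 2.5            # single-element insertion is cheap per (sorted) row
    A[3, :] = np.arange(4)   # row assignment through fancy indexing
    return A.tocsr()         # convert before doing heavy arithmetic or matvecs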
| 33.374291 | 83 | 0.54891 |
ce5bdfadd5bc8be0ace3634d8889e7584e40ad4b | 330 | py | Python | 01-Basics/13-json.py | michaelcw02/Python | fc5a589305d9961b92aa8730401570427a5c0069 | [
"MIT"
] | null | null | null | 01-Basics/13-json.py | michaelcw02/Python | fc5a589305d9961b92aa8730401570427a5c0069 | [
"MIT"
] | null | null | null | 01-Basics/13-json.py | michaelcw02/Python | fc5a589305d9961b92aa8730401570427a5c0069 | [
"MIT"
] | null | null | null | #SIMPLE EXAMPLE FOR RETRIEVING JSON FILES FROM WEB
import codecs
import json
import urllib.request
url = "https://api.github.com/users/michaelcw02/repos"
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
reader = codecs.getreader("utf-8")
data = json.load(reader(response))
for i in data:
print(i)
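# --- Editor's note (illustrative addition) ---
# Each element of `data` is a dict describing one repository; printing selected fields
# is usually more readable than dumping whole objects, e.g.:
#     for repo in data:
#         print(repo["name"], repo["stargazers_count"])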
| 19.411765 | 54 | 0.751515 |
1c4465c58914ba00924c3db332f745c7350c02b5 | 5,321 | py | Python | sandbox/mixed_poisson_hypre_2d.py | MiroK/fenics_ii | 58c41f0e8dba720962830395851e081b057269cc | [
"MIT"
] | 10 | 2017-06-22T21:05:17.000Z | 2020-09-25T08:36:59.000Z | sandbox/mixed_poisson_hypre_2d.py | MiroK/fenics_ii | 58c41f0e8dba720962830395851e081b057269cc | [
"MIT"
] | 2 | 2018-04-14T08:43:59.000Z | 2018-09-19T14:51:46.000Z | sandbox/mixed_poisson_hypre_2d.py | MiroK/fenics_ii | 58c41f0e8dba720962830395851e081b057269cc | [
"MIT"
] | 6 | 2018-04-13T20:33:53.000Z | 2020-09-25T08:37:01.000Z | from dolfin import *
from petsc4py import PETSc
from mpi4py import MPI as pyMPI
from sympy.printing import ccode
import sympy as sp
import numpy as np
from block import block_assemble, block_mat
from block.iterative import MinRes
from block.algebraic.petsc import LU, LumpedInvDiag
from block.block_base import block_base
# MMS utils
def expr_body(expr, **kwargs):
if not hasattr(expr, '__len__'):
# Defined in terms of some coordinates
xyz = set(sp.symbols('x[0], x[1], x[2]'))
xyz_used = xyz & expr.free_symbols
assert xyz_used <= xyz
# Expression params which need default values
params = (expr.free_symbols - xyz_used) & set(kwargs.keys())
# Body
expr = ccode(expr).replace('M_PI', 'pi')
# Default to zero
kwargs.update(dict((str(p), 0.) for p in params))
# Convert
return expr
# Vectors, Matrices as iterables of expressions
else:
return [expr_body(e, **kwargs) for e in expr]
def as_expression(expr, degree=4, **kwargs):
'''Turns sympy expressions to Dolfin expressions.'''
return Expression(expr_body(expr), degree=degree, **kwargs)
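# Illustrative note (not part of the original file): a minimal usage sketch of the
# helpers above, assuming sympy symbols named after the coordinates 'x[0]', 'x[1]'.
#
#   x, y = sp.symbols('x[0] x[1]')
#   f_expr = as_expression(sp.sin(sp.pi*x)*y)   # dolfin Expression of degree 4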
def vec(x):
return as_backend_type(x).vec()
def mat(A):
return as_backend_type(A).mat()
class HypreAMS(block_base):
'''AMG auxiliary space preconditioner for Hdiv(0) norm'''
def __init__(self, V, hdiv0=False, bc=None):
# FIXME: lift
assert V.ufl_element().family() == 'Raviart-Thomas'
assert V.ufl_element().degree() == 1
mesh = V.mesh()
assert mesh.geometry().dim() == 2
sigma, tau = TrialFunction(V), TestFunction(V)
a = inner(div(sigma), div(tau))*dx
if not hdiv0:
a += inner(sigma, tau)*dx
f = Constant(np.zeros(V.ufl_element().value_shape()))
L = inner(tau, f)*dx
A, _ = assemble_system(a, L, bc)
# AMS setup
Q = FunctionSpace(mesh, 'CG', 1)
G = DiscreteOperators.build_gradient(V, Q)
pc = PETSc.PC().create(mesh.mpi_comm().tompi4py())
pc.setType('hypre')
pc.setHYPREType('ams')
# Attach gradient
pc.setHYPREDiscreteGradient(mat(G))
# Constant nullspace (in case not mass and bcs)
constants = [vec(interpolate(c, V).vector())
for c in (Constant((1, 0)), Constant((0, 1)))]
pc.setHYPRESetEdgeConstantVectors(*constants)
# NOTE: the mass term is accounted for automatically by Hypre
# unless pc.setPoissonBetaMatrix(None)
if hdiv0: pc.setHYPRESetBetaPoissonMatrix(None)
pc.setOperators(mat(A))
# FIXME: some defaults
pc.setFromOptions()
pc.setUp()
self.pc = pc
self.A = A # For creating vec
def matvec(self, b):
if not isinstance(b, GenericVector):
return NotImplemented
x = self.A.create_vec(dim=1)
if len(x) != len(b):
raise RuntimeError(
'incompatible dimensions for PETSc matvec, %d != %d'%(len(x),len(b)))
self.pc.apply(vec(b), vec(x))
return x
def main(n):
'''Solves grad-div problem in 2d with HypreAMS preconditioning'''
# Exact solution
x, y = sp.symbols('x[0] x[1]')
u = sp.sin(pi*x*(1-x)*y*(1-y))
sp_div = lambda f: f[0].diff(x, 1) + f[1].diff(y, 1)
sp_grad = lambda f: sp.Matrix([f.diff(x, 1), f.diff(y, 1)])
sigma = sp_grad(u)
f = -sp_div(sigma) + u
sigma_expr, u_expr, f_expr = list(map(as_expression, (sigma, u, f)))
# The discrete problem
mesh = UnitSquareMesh(n, n)
V = FunctionSpace(mesh, 'RT', 1)
Q = FunctionSpace(mesh, 'DG', 0)
W = (V, Q)
sigma, u = list(map(TrialFunction, W))
tau, v = list(map(TestFunction, W))
a00 = inner(sigma, tau)*dx
a01 = inner(div(tau), u)*dx
a10 = inner(div(sigma), v)*dx
a11 = -inner(u, v)*dx
L0 = inner(Constant((0, 0)), tau)*dx
L1 = inner(-f_expr, v)*dx
AA = block_assemble([[a00, a01], [a10, a11]])
bb = block_assemble([L0, L1])
# b00 = inner(sigma, tau)*dx + inner(div(sigma), div(tau))*dx
# B00 = LU(assemble(b00))
B00 = HypreAMS(V)
b11 = inner(u, v)*dx
B11 = LumpedInvDiag(assemble(b11))
BB = block_mat([[B00, 0], [0, B11]])
AAinv = MinRes(AA, precond=BB, tolerance=1e-10, maxiter=500, show=2)
# Compute solution
sigma_h, u_h = AAinv * bb
sigma_h, u_h = Function(V, sigma_h), Function(Q, u_h)
niters = len(AAinv.residuals) - 1
# error = sqrt(errornorm(sigma_expr, sigma_h, 'Hdiv', degree_rise=1)**2 +
# errornorm(u_expr, u_h, 'L2', degree_rise=1)**2)
hmin = mesh.mpi_comm().tompi4py().allreduce(mesh.hmin(), pyMPI.MIN)
error = 1.
return hmin, V.dim()+Q.dim(), niters, error
# --------------------------------------------------------------------------
if __name__ == '__main__':
msg = 'hmin = %g #dofs = %d, niters = %d, error = %g(%.2f)'
h0, error0 = None, None
for n in (8, 16, 32, 64, 128, 256, 512, 1024):
h, ndofs, niters, error = main(n)
if error0 is not None:
rate = ln(error/error0)/ln(h/h0)
else:
rate = -1
h0, error0 = h, error
print((msg % (h, ndofs, niters, error, rate)))
| 28.454545 | 85 | 0.577147 |
636faec433c33f27ee805266a085becb9415d66e | 7,602 | py | Python | build/lib.macosx-10.9-x86_64-3.9/mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py | hanggaoh/mediapipe | 9eafb85cd7daa8986be7e4cc8af2f256f6f83451 | [
"Apache-2.0"
] | 2 | 2021-08-11T15:50:12.000Z | 2021-09-03T17:53:47.000Z | build/lib.macosx-10.9-x86_64-3.9/mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py | hanggaoh/mediapipe | 9eafb85cd7daa8986be7e4cc8af2f256f6f83451 | [
"Apache-2.0"
] | 1 | 2022-01-20T11:17:50.000Z | 2022-01-20T11:17:50.000Z | build/lib.macosx-10.9-x86_64-3.9/mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py | hanggaoh/mediapipe | 9eafb85cd7daa8986be7e4cc8af2f256f6f83451 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/audio/mfcc_mel_calculators.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/audio/mfcc_mel_calculators.proto',
package='mediapipe',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n6mediapipe/calculators/audio/mfcc_mel_calculators.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\xd5\x01\n\x1cMelSpectrumCalculatorOptions\x12\x19\n\rchannel_count\x18\x01 \x01(\x05:\x02\x32\x30\x12 \n\x13min_frequency_hertz\x18\x02 \x01(\x02:\x03\x31\x32\x35\x12!\n\x13max_frequency_hertz\x18\x03 \x01(\x02:\x04\x33\x38\x30\x30\x32U\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xb4\xa0\xbc% \x01(\x0b\x32\'.mediapipe.MelSpectrumCalculatorOptions\"\xc5\x01\n\x15MfccCalculatorOptions\x12\x44\n\x13mel_spectrum_params\x18\x01 \x01(\x0b\x32\'.mediapipe.MelSpectrumCalculatorOptions\x12\x16\n\nmfcc_count\x18\x02 \x01(\r:\x02\x31\x33\x32N\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x89\x9e\xb4% \x01(\x0b\x32 .mediapipe.MfccCalculatorOptions'
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_MELSPECTRUMCALCULATOROPTIONS = _descriptor.Descriptor(
name='MelSpectrumCalculatorOptions',
full_name='mediapipe.MelSpectrumCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel_count', full_name='mediapipe.MelSpectrumCalculatorOptions.channel_count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=20,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min_frequency_hertz', full_name='mediapipe.MelSpectrumCalculatorOptions.min_frequency_hertz', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(125),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_frequency_hertz', full_name='mediapipe.MelSpectrumCalculatorOptions.max_frequency_hertz', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(3800),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.MelSpectrumCalculatorOptions.ext', index=0,
number=78581812, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=321,
)
_MFCCCALCULATOROPTIONS = _descriptor.Descriptor(
name='MfccCalculatorOptions',
full_name='mediapipe.MfccCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='mel_spectrum_params', full_name='mediapipe.MfccCalculatorOptions.mel_spectrum_params', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mfcc_count', full_name='mediapipe.MfccCalculatorOptions.mfcc_count', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=13,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.MfccCalculatorOptions.ext', index=0,
number=78450441, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=324,
serialized_end=521,
)
_MFCCCALCULATOROPTIONS.fields_by_name['mel_spectrum_params'].message_type = _MELSPECTRUMCALCULATOROPTIONS
DESCRIPTOR.message_types_by_name['MelSpectrumCalculatorOptions'] = _MELSPECTRUMCALCULATOROPTIONS
DESCRIPTOR.message_types_by_name['MfccCalculatorOptions'] = _MFCCCALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MelSpectrumCalculatorOptions = _reflection.GeneratedProtocolMessageType('MelSpectrumCalculatorOptions', (_message.Message,), {
'DESCRIPTOR' : _MELSPECTRUMCALCULATOROPTIONS,
'__module__' : 'mediapipe.calculators.audio.mfcc_mel_calculators_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.MelSpectrumCalculatorOptions)
})
_sym_db.RegisterMessage(MelSpectrumCalculatorOptions)
MfccCalculatorOptions = _reflection.GeneratedProtocolMessageType('MfccCalculatorOptions', (_message.Message,), {
'DESCRIPTOR' : _MFCCCALCULATOROPTIONS,
'__module__' : 'mediapipe.calculators.audio.mfcc_mel_calculators_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.MfccCalculatorOptions)
})
_sym_db.RegisterMessage(MfccCalculatorOptions)
_MELSPECTRUMCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _MELSPECTRUMCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_MELSPECTRUMCALCULATOROPTIONS.extensions_by_name['ext'])
_MFCCCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _MFCCCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_MFCCCALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| 48.420382 | 792 | 0.801631 |
dbd1c2ce389e70211d2d2ba0fe138607afa1bcc4 | 2,321 | py | Python | livemark/plugins/table/plugin.py | gabrielbdornas/livemark | 1c717e14e05e29f6feda4900d1e8e79025b8117d | [
"MIT"
] | 73 | 2021-06-07T13:28:36.000Z | 2022-03-26T05:37:59.000Z | livemark/plugins/table/plugin.py | gabrielbdornas/livemark | 1c717e14e05e29f6feda4900d1e8e79025b8117d | [
"MIT"
] | 120 | 2021-06-04T12:51:01.000Z | 2022-03-21T11:11:36.000Z | livemark/plugins/table/plugin.py | gabrielbdornas/livemark | 1c717e14e05e29f6feda4900d1e8e79025b8117d | [
"MIT"
] | 7 | 2021-09-22T11:38:26.000Z | 2022-03-26T05:35:58.000Z | import json
import yaml
from frictionless import Resource, Detector
from ...plugin import Plugin
class TablePlugin(Plugin):
identity = "table"
priority = 60
# Process
def process_document(self, document):
self.__count = 0
def process_snippet(self, snippet):
if self.document.format == "html":
if snippet.type == "table" and snippet.lang in ["yaml", "json"]:
if snippet.lang == "yaml":
spec = yaml.safe_load(str(snippet.input).strip())
if snippet.lang == "json":
spec = json.loads(str(snippet.input).strip())
detector = Detector(field_float_numbers=True)
with Resource(spec.pop("data", []), detector=detector) as resource:
header = resource.header
rows = resource.read_rows()
columns = spec.get("columns", [])
if not columns:
for label in header:
columns.append({"data": label})
width = spec.pop("width", "100%")
if isinstance(width, int):
width = f"{width}px"
spec.setdefault("columnDefs", [])
spec["columnDefs"].append(
{"targets": "_all", "orderSequence": ["desc", "asc"]}
)
spec = json.dumps(spec, ensure_ascii=False)
spec = spec.replace("'", "\\'")
self.__count += 1
card = snippet.props.get("card")
elem = f"livemark-table-{self.__count}"
if card:
elem += "-card"
snippet.output = (
self.read_asset(
"markup.html",
card=card,
elem=elem,
spec=spec,
rows=rows,
columns=columns,
width=width,
)
+ "\n"
)
def process_markup(self, markup):
if self.__count:
url = "https://cdn.datatables.net/1.11.3"
markup.add_style(f"{url}/css/jquery.dataTables.css")
markup.add_script(f"{url}/js/jquery.dataTables.js")
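# Illustrative note (not part of the original file): process_snippet above consumes
# a ``table`` snippet whose body is YAML or JSON. A minimal YAML spec it would accept
# (keys as used above; the CSV path is a made-up example) looks like:
#
#   data: data/table.csv
#   width: 600
#   columns:
#     - data: id
#     - data: name
#
# "data" is loaded into a frictionless Resource, "width" and "columns" are consumed
# here, and the remaining keys are passed through to DataTables as the JSON spec.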
| 36.84127 | 83 | 0.46187 |
0c94ed4d94110cbc15830139579838216e8fdb23 | 7,169 | py | Python | moderngl/vertex_array.py | vpoulailleau/moderngl | bff23bd538372821d9c42f4633ff6978929a0a92 | [
"MIT"
] | null | null | null | moderngl/vertex_array.py | vpoulailleau/moderngl | bff23bd538372821d9c42f4633ff6978929a0a92 | [
"MIT"
] | null | null | null | moderngl/vertex_array.py | vpoulailleau/moderngl | bff23bd538372821d9c42f4633ff6978929a0a92 | [
"MIT"
] | null | null | null | from typing import Tuple
__all__ = ['VertexArray',
'POINTS', 'LINES', 'LINE_LOOP', 'LINE_STRIP', 'TRIANGLES', 'TRIANGLE_STRIP', 'TRIANGLE_FAN',
'LINES_ADJACENCY', 'LINE_STRIP_ADJACENCY', 'TRIANGLES_ADJACENCY', 'TRIANGLE_STRIP_ADJACENCY', 'PATCHES']
POINTS = 0x0000
LINES = 0x0001
LINE_LOOP = 0x0002
LINE_STRIP = 0x0003
TRIANGLES = 0x0004
TRIANGLE_STRIP = 0x0005
TRIANGLE_FAN = 0x0006
LINES_ADJACENCY = 0x000A
LINE_STRIP_ADJACENCY = 0x000B
TRIANGLES_ADJACENCY = 0x000C
TRIANGLE_STRIP_ADJACENCY = 0x000D
PATCHES = 0x000E
class VertexArray:
'''
A VertexArray object is an OpenGL object that stores all of the state
needed to supply vertex data. It stores the format of the vertex data
as well as the Buffer objects providing the vertex data arrays.
In ModernGL, the VertexArray object also stores a reference
for a :py:class:`Program` object, and some Subroutine information.
A VertexArray object cannot be instantiated directly, it requires a context.
Use :py:meth:`Context.vertex_array` or :py:meth:`Context.simple_vertex_array`
to create one.
'''
__slots__ = ['mglo', '_program', '_index_buffer', '_index_element_size', '_glo', 'ctx', 'extra', 'scope']
def __init__(self):
self.mglo = None
self._program = None
self._index_buffer = None
self._index_element_size = None
self._glo = None
self.ctx = None
self.extra = None #: Any - Attribute for storing user defined objects
self.scope = None
raise TypeError()
def __repr__(self):
return '<VertexArray: %d>' % self.glo
def __eq__(self, other):
return type(self) is type(other) and self.mglo is other.mglo
@property
def program(self) -> 'Program':
'''
Program: The program assigned to the VertexArray.
The program used when rendering or transforming primitives.
'''
return self._program
@property
def index_buffer(self) -> 'Buffer':
'''
Buffer: The index buffer if the index_buffer is set, otherwise ``None``.
'''
return self._index_buffer
@property
def index_element_size(self) -> int:
'''
int: The byte size of each element in the index buffer
'''
return self._index_element_size
@property
def vertices(self) -> int:
'''
int: The number of vertices detected.
This is the minimum of the number of vertices possible per Buffer.
The size of the index_buffer determines the number of vertices.
Per-instance vertex attributes do not affect this number.
'''
return self.mglo.vertices
@vertices.setter
def vertices(self, value):
self.mglo.vertices = int(value)
@property
def instances(self) -> int:
return self.mglo.instances
@instances.setter
def instances(self, value):
self.mglo.instances = int(value)
@property
def subroutines(self) -> Tuple[int, ...]:
'''
tuple: The subroutines assigned to the VertexArray.
The subroutines used when rendering or transforming primitives.
'''
return self.mglo.subroutines
@subroutines.setter
def subroutines(self, value):
self.mglo.subroutines = tuple(value)
@property
def glo(self) -> int:
'''
int: The internal OpenGL object.
This value is provided for debug purposes only.
'''
return self._glo
def render(self, mode=None, vertices=-1, *, first=0, instances=-1) -> None:
'''
The render primitive (mode) must be the same as
the input primitive of the GeometryShader.
Args:
mode (int): By default :py:data:`TRIANGLES` will be used.
vertices (int): The number of vertices to transform.
Keyword Args:
first (int): The index of the first vertex to start with.
instances (int): The number of instances.
'''
if mode is None:
mode = TRIANGLES
if self.scope:
with self.scope:
self.mglo.render(mode, vertices, first, instances)
else:
self.mglo.render(mode, vertices, first, instances)
def render_indirect(self, buffer, mode=None, count=-1, *, first=0) -> None:
'''
The render primitive (mode) must be the same as
the input primitive of the GeometryShader.
The draw commands are 5 integers: (count, instanceCount, firstIndex, baseVertex, baseInstance).
Args:
buffer (Buffer): Indirect drawing commands.
mode (int): By default :py:data:`TRIANGLES` will be used.
count (int): The number of draws.
Keyword Args:
first (int): The index of the first indirect draw command.
'''
if mode is None:
mode = TRIANGLES
if self.scope:
with self.scope:
self.mglo.render_indirect(buffer.mglo, mode, count, first)
else:
self.mglo.render_indirect(buffer.mglo, mode, count, first)
def transform(self, buffer, mode=None, vertices=-1, *, first=0, instances=-1) -> None:
'''
Transform vertices.
Stores the output in a single buffer.
The transform primitive (mode) must be the same as
the input primitive of the GeometryShader.
Args:
buffer (Buffer): The buffer to store the output.
mode (int): By default :py:data:`POINTS` will be used.
vertices (int): The number of vertices to transform.
Keyword Args:
first (int): The index of the first vertex to start with.
instances (int): The number of instances.
'''
if mode is None:
mode = POINTS
if self.scope:
with self.scope:
self.mglo.transform(buffer.mglo, mode, vertices, first, instances)
else:
self.mglo.transform(buffer.mglo, mode, vertices, first, instances)
def bind(self, attribute, cls, buffer, fmt, *, offset=0, stride=0, divisor=0, normalize=False) -> None:
'''
Bind individual attributes to buffers.
Args:
attribute (int): The attribute location.
cls (str): The attribute class. Valid values are ``f``, ``i`` or ``d``.
buffer (Buffer): The buffer.
fmt (str): The buffer format.
Keyword Args:
offset (int): The offset.
stride (int): The stride.
divisor (int): The divisor.
normalize (bool): The normalize parameter, if applicable.
'''
self.mglo.bind(attribute, cls, buffer.mglo, fmt, offset, stride, divisor, normalize)
def release(self) -> None:
'''
Release the ModernGL object.
'''
self.mglo.release()
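# Illustrative note (not part of the original module): a minimal usage sketch,
# assuming a standalone context and GLSL sources VERT_SRC/FRAG_SRC with a single
# vertex attribute named 'in_vert' (all of these names are assumptions).
#
#   import moderngl
#   import numpy as np
#
#   ctx = moderngl.create_standalone_context()
#   prog = ctx.program(vertex_shader=VERT_SRC, fragment_shader=FRAG_SRC)
#   vbo = ctx.buffer(np.array([-1, -1, 1, -1, 0, 1], dtype='f4').tobytes())
#   vao = ctx.simple_vertex_array(prog, vbo, 'in_vert')
#   vao.render(mode=moderngl.TRIANGLES)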
| 32.147982 | 115 | 0.590319 |
eccc966648f878995db09ecde157aed43e5f1437 | 1,589 | py | Python | nyud-fcn16s-color/solve.py | yeLer/fcn | 2e8a816814dfcb1e12d340f81d56ae41dbca843e | [
"BSD-2-Clause"
] | 1 | 2022-02-12T19:39:58.000Z | 2022-02-12T19:39:58.000Z | nyud-fcn16s-color/solve.py | yeLer/fcn | 2e8a816814dfcb1e12d340f81d56ae41dbca843e | [
"BSD-2-Clause"
] | 1 | 2019-06-14T08:54:22.000Z | 2019-06-14T08:54:22.000Z | nyud-fcn16s-color/solve.py | yeLer/fcn | 2e8a816814dfcb1e12d340f81d56ae41dbca843e | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
'''
@author: lele Ye
@contact: 1750112338@qq.com
@software: pycharm 2018.2
@file: solve.py
@time: 2019/1/2 17:21
@desc: Definition file for the nyud-fcn16s-color network structure, used to generate the trainval.prototxt and test.prototxt files
'''
CAFFE_ROOT = "/home/bxx-yll/caffe"
import sys
sys.path.insert(0, CAFFE_ROOT + '/python')
import caffe
import surgery, score
import numpy as np
import os
try:
import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))
# Get the current path (os.path.basename returns its last component)
# e.g. if os.getcwd() returns /home/zhangrf/fcn, then os.path.basename() gives fcn;
# setproctitle is used to change the process title, like the main() entry point in C++
except:
pass
# vgg_weights = '../ilsvrc-nets/VGG_ILSVRC_16_layers.caffemodel' # FCN parameters used for fine-tuning
# vgg_proto = '../ilsvrc-nets/VGG_ILSVRC_16_layers_deploy.prototxt' # VGGNet model
# This time we fine-tune from the fcn32s model
weights = '../nyud-fcn32s-color/snapshot/train_iter_100000.caffemodel'
# init
# caffe.set_device(int(sys.argv[1]))
# Get the command-line arguments; sys.argv[0] is the file name and argv[1] is the argument that follows it
caffe.set_device(2) # GPU device id; here the third GPU is selected
caffe.set_mode_gpu()
solver = caffe.SGDSolver('solver.prototxt') # Use the SGD (stochastic gradient descent) solver; solver.prototxt provides the required parameters
solver.net.copy_from(weights) # This call only copies parameters from the vgg-16 model; it does not rebuild the original network, which is the root cause of non-convergence
# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k] # interp_layers are the upscore layers
surgery.interp(solver.net, interp_layers) # Initialize the weights of every upscore layer to a bilinear interpolation kernel.
# scoring
test = np.loadtxt('../data/nyud/test.txt', dtype=str) # Load the test image list
for _ in range(50):
solver.step(2000) # Run 2000 training iterations at a time, then execute the function below
score.seg_tests(solver, False, test, layer='score') # Evaluate on the test images
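# Note (added for clarity, not in the original script): the loop above runs
# 50 x 2000 = 100,000 SGD iterations in total, evaluating segmentation metrics
# on the test list after every 2000-iteration step.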
| 29.425926 | 92 | 0.754563 |
617f360ad3a47a60efa43e8757b9dcde63c328b3 | 96 | py | Python | venv/lib/python3.8/site-packages/pkginfo/installed.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pkginfo/installed.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pkginfo/installed.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/98/af/99/d8f3066ff66831f7838f689b6fcd3a942e55b62b011851aa430df6be6f | 96 | 96 | 0.895833 |
5c58aa5f5b3e5165e3bae078176cf3594f8cc089 | 1,078 | py | Python | tests/test_published_examples.py | Bezier89/conda-build | 95a118f8f06230120514fe0066a52a152ec2349b | [
"BSD-3-Clause"
] | null | null | null | tests/test_published_examples.py | Bezier89/conda-build | 95a118f8f06230120514fe0066a52a152ec2349b | [
"BSD-3-Clause"
] | null | null | null | tests/test_published_examples.py | Bezier89/conda-build | 95a118f8f06230120514fe0066a52a152ec2349b | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
from conda_build import api
from conda_build.utils import check_call_env
from .utils import metadata_dir, is_valid_dir
published_examples = os.path.join(os.path.dirname(metadata_dir), 'published_code')
@pytest.mark.serial
def test_skeleton_pypi(testing_workdir):
"""published in docs at http://conda.pydata.org/docs/build_tutorials/pkgs.html"""
cmd = 'conda skeleton pypi pyinstrument'
check_call_env(cmd.split())
cmd = 'conda build pyinstrument'
check_call_env(cmd.split())
@pytest.fixture(params=[dirname for dirname in os.listdir(published_examples)
if is_valid_dir(published_examples, dirname)])
def recipe(request):
return os.path.join(published_examples, request.param)
# This tests any of the folders in the test-recipes/published_code folder that don't start with _
def test_recipe_builds(recipe, testing_config, testing_workdir):
# These variables are defined solely for testing purposes,
# so they can be checked within build scripts
api.build(recipe, config=testing_config)
| 33.6875 | 97 | 0.763451 |
041ba5400f5236b69c841bf5e801e3cea223db14 | 621 | py | Python | src/backend/ros_services/parameter_service.py | Hegl1/vu-robotik-project-bt5 | 7d7da6823693320d6deaa8c0f784760c6777d5b6 | [
"MIT"
] | null | null | null | src/backend/ros_services/parameter_service.py | Hegl1/vu-robotik-project-bt5 | 7d7da6823693320d6deaa8c0f784760c6777d5b6 | [
"MIT"
] | null | null | null | src/backend/ros_services/parameter_service.py | Hegl1/vu-robotik-project-bt5 | 7d7da6823693320d6deaa8c0f784760c6777d5b6 | [
"MIT"
] | null | null | null | #this file contains a service
# class that handles interaction with parameters from the parameter server.
import rospy
from configuration import config
class Parameter_service:
def __init__(self, config):
self.config = config
def get_parameters(self):
'''
Method that retrieves all configured parameters from the parameter server.
'''
result = dict()
for parameter in self.config.parameters:
try:
result[parameter] = rospy.get_param(parameter)
except KeyError:
result[parameter] = None
return result
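# Illustrative note (not part of the original file): a minimal usage sketch,
# assuming a config object whose `parameters` attribute lists ROS parameter names
# (the names and values below are made up).
#
#   service = Parameter_service(config)       # e.g. config.parameters = ['/robot/speed', '/robot/name']
#   params = service.get_parameters()          # {'/robot/speed': 0.5, '/robot/name': None, ...}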
| 23.884615 | 76 | 0.640902 |
9cfcba315eae70fd8c3e050face5daddcb323d4d | 49 | py | Python | bert4keras/__init__.py | d294270681/bert4keras | 1a39f3e0c681d9ed51441d7f6a99c3d4c7731809 | [
"Apache-2.0"
] | null | null | null | bert4keras/__init__.py | d294270681/bert4keras | 1a39f3e0c681d9ed51441d7f6a99c3d4c7731809 | [
"Apache-2.0"
] | null | null | null | bert4keras/__init__.py | d294270681/bert4keras | 1a39f3e0c681d9ed51441d7f6a99c3d4c7731809 | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
__version__ = '0.10.1'
| 12.25 | 24 | 0.510204 |
8c988f676eb9899dd50bb876069b9670efc5f443 | 357 | py | Python | data/urls.py | tblxdezhu/STP | db3db2183a70b39e155a0b3a061dc5d3e29e6c9f | [
"MIT"
] | null | null | null | data/urls.py | tblxdezhu/STP | db3db2183a70b39e155a0b3a061dc5d3e29e6c9f | [
"MIT"
] | null | null | null | data/urls.py | tblxdezhu/STP | db3db2183a70b39e155a0b3a061dc5d3e29e6c9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/18 1:13 PM
# @Author : Zhenxuan Xu
# @File : urls.py
# @Software: Pycharm professional
from django.conf.urls import include, url
from data import views
urlpatterns = [
url(r'^large$', views.large_data),
url(r'^mini$', views.mini_data),
url(r'^error$', views.error_data)
]
| 22.3125 | 41 | 0.641457 |
582116282d012c160a25ab2f92882249cd7891f2 | 1,664 | py | Python | my_web_scraper_app/views.py | MisterLenivec/leniva_list | fc416f1afe9886b60bf46c9152fa5eb095def5df | [
"MIT"
] | null | null | null | my_web_scraper_app/views.py | MisterLenivec/leniva_list | fc416f1afe9886b60bf46c9152fa5eb095def5df | [
"MIT"
] | 2 | 2020-06-07T01:38:32.000Z | 2021-06-04T22:27:50.000Z | my_web_scraper_app/views.py | MisterLenivec/leniva_list | fc416f1afe9886b60bf46c9152fa5eb095def5df | [
"MIT"
] | null | null | null | import requests
from django.shortcuts import render
from requests.compat import quote_plus
from bs4 import BeautifulSoup
from .models import Search
BASE_CRAIGSLIST_URL = 'https://sfbay.craigslist.org/search/sfc/?query={}'
BASE_IMAGE_URL = 'https://images.craigslist.org/{}_300x300.jpg'
def home(request):
return render(request, 'base.html')
def new_search(request):
search = request.POST.get('search')
Search.objects.create(search=search)
final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
response = requests.get(final_url)
data = response.text
soup = BeautifulSoup(data, features='html.parser')
post_listings = soup.find_all('li', {'class': 'result-row'})
final_postings = []
for post in post_listings:
post_title = post.find(class_='result-title').text
post_url = post.find('a').get('href')
if post.find(class_='result-price'):
post_price = post.find(class_='result-price').text
else:
post_price = 'N/A'
post_image_urls = []
if post.find(class_='result-image').get('data-ids'):
post_image_ids = [i[2:] for i in post.find(class_='result-image').get('data-ids').split(',')]
for i in post_image_ids:
post_image_urls.append(BASE_IMAGE_URL.format(i))
else:
post_image_urls.append('https://craigslist.org/images/peace.jpg')
final_postings.append((post_title, post_url, post_price, post_image_urls))
context = {
'search': search,
'final_postings': final_postings,
}
return render(request, 'my_web_scraper_app/new_search.html', context)
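# Illustrative note (not part of the original file): the 'data-ids' attribute parsed
# above is assumed to look like "1:00101_abcdef,1:00202_ghijkl"; slicing each entry
# with i[2:] drops the "1:" prefix so the remaining image id can be substituted into
# BASE_IMAGE_URL to build a thumbnail link.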
| 30.254545 | 105 | 0.664663 |
b5b23bafedf5ff32cd86ae90e72ab722c1566c87 | 1,742 | py | Python | tests/utils/cartesian.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 70 | 2015-12-05T12:33:10.000Z | 2022-03-03T04:56:58.000Z | tests/utils/cartesian.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 711 | 2015-10-06T11:01:48.000Z | 2022-02-09T12:40:47.000Z | tests/utils/cartesian.py | kbh2o/slash | 532b7e3acdf46103ece5b86f21c29f9b58587289 | [
"BSD-3-Clause"
] | 37 | 2015-10-13T11:00:51.000Z | 2022-02-08T07:28:11.000Z | import itertools
class Cartesian(object):
def __init__(self):
super(Cartesian, self).__init__()
self.sets = {}
self._assigns = []
def assign_all(self, source_name, target_name):
"""
For every expected combination, assigns one key from the other
"""
self._assigns.append((source_name, target_name))
def __getattr__(self, attr):
if attr.startswith("_"):
raise AttributeError(attr)
return SetMaker(self, attr)
def __len__(self):
if not self.sets:
return 0
returned = 1
for x in self.sets.values():
returned *= len(x)
return returned
def check(self, iterator):
names = list(self.sets)
sets = [self.sets[name] for name in names]
expected = sorted((self._build_combination(names, combination) for combination in itertools.product(*sets)), key=lambda d: sorted(d.items()))
got = sorted(iterator, key=lambda d: sorted(d.items()))
assert got == expected
def _build_combination(self, names, combination):
returned = {}
for name, value in zip(names, combination):
returned[name] = value
for assign_source, assign_target in self._assigns:
returned[assign_target] = returned[assign_source]
return returned
class SetMaker(object):
def __init__(self, cartesian, name):
super(SetMaker, self).__init__()
self.cartesian = cartesian
self.name = name
def make_set(self, size=3):
assert self.name not in self.cartesian.sets
returned = self.cartesian.sets[self.name] = ["{}{}".format(self.name, i) for i in range(size)]
return returned
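# Illustrative note (not part of the original file): a minimal usage sketch of the
# two classes above, with made-up set names.
#
#   cartesian = Cartesian()
#   cartesian.color.make_set(2)   # -> ['color0', 'color1']
#   cartesian.size.make_set(2)    # -> ['size0', 'size1']
#   assert len(cartesian) == 4
#   cartesian.check({'color': c, 'size': s}
#                   for c in ['color0', 'color1'] for s in ['size0', 'size1'])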
| 30.561404 | 149 | 0.614237 |
e52f04df98f3cfc855d2902bd79bf43942a4f2b3 | 7,902 | py | Python | backend/the_closer_33482/settings.py | crowdbotics-apps/the-closer-33482 | b0de1c392c7085f81b06761e3783ea54f79d6741 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/the_closer_33482/settings.py | crowdbotics-apps/the-closer-33482 | b0de1c392c7085f81b06761e3783ea54f79d6741 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/the_closer_33482/settings.py | crowdbotics-apps/the-closer-33482 | b0de1c392c7085f81b06761e3783ea54f79d6741 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for the_closer_33482 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'the_closer_33482.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'the_closer_33482.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
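# Illustrative note (not part of the original settings file): the values read above
# come from a .env file (or a Secret Manager payload) via django-environ. A minimal
# example with placeholder values could look like:
#
#   DEBUG=False
#   SECRET_KEY=replace-me
#   HOST=example.com
#   DATABASE_URL=postgres://user:pass@localhost:5432/app
#   SENDGRID_USERNAME=apikey
#   SENDGRID_PASSWORD=replace-me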
| 30.392308 | 112 | 0.736902 |
7aec43916d3f7080d60706ec40c7da1c49800719 | 172,108 | py | Python | nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py | riddopic/nfv | e5ced4ade4916910646bcf8018dfabadef447fc2 | [
"Apache-2.0"
] | null | null | null | nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py | riddopic/nfv | e5ced4ade4916910646bcf8018dfabadef447fc2 | [
"Apache-2.0"
] | null | null | null | nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py | riddopic/nfv | e5ced4ade4916910646bcf8018dfabadef447fc2 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2016-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
import uuid
from nfv_common import strategy as common_strategy
from nfv_vim import nfvi
from nfv_vim.objects import HOST_PERSONALITY
from nfv_vim.objects import SW_UPDATE_ALARM_RESTRICTION
from nfv_vim.objects import SW_UPDATE_APPLY_TYPE
from nfv_vim.objects import SW_UPDATE_INSTANCE_ACTION
from nfv_vim.objects import SwPatch
from nfv_vim.strategy._strategy import SwPatchStrategy
from . import sw_update_testcase # noqa: H304
def create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
swift_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
max_parallel_worker_hosts=10,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT,
single_controller=False):
"""
Create a software update strategy
"""
return SwPatchStrategy(
uuid=str(uuid.uuid4()),
controller_apply_type=controller_apply_type,
storage_apply_type=storage_apply_type,
swift_apply_type=swift_apply_type,
worker_apply_type=worker_apply_type,
max_parallel_worker_hosts=max_parallel_worker_hosts,
default_instance_action=default_instance_action,
alarm_restrictions=alarm_restrictions,
ignore_alarms=[],
single_controller=single_controller
)
@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', sw_update_testcase.fake_save)
@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', sw_update_testcase.fake_timer)
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', sw_update_testcase.fake_host_name)
@mock.patch('nfv_vim.event_log._instance._event_issue', sw_update_testcase.fake_event_issue)
@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', sw_update_testcase.fake_nfvi_compute_plugin_disabled)
class TestSwPatchStrategy(sw_update_testcase.SwUpdateStrategyTestCase):
"""
Software Patch Strategy Unit Tests
"""
def test_sw_patch_strategy_worker_stages_ignore(self):
"""
Test the sw_patch strategy add worker strategy stages:
- ignore apply
- stop start instance action
Verify:
- stages not created
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=sorted_worker_hosts,
reboot=True)
assert success is True, "Strategy creation failed"
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 0
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
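# Note (added for clarity, not in the original test): every test in this class follows
# the same pattern -- build hosts/instances, create a SwPatchStrategy with a given
# apply type, generate the worker stages, and compare the resulting apply phase
# against an expected_results dict via sw_update_testcase.validate_phase().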
def test_sw_patch_strategy_worker_stages_parallel_migrate_anti_affinity(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
- hosts with no instances patched first
- anti-affinity policy enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_migrate_ten_hosts(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
- hosts with no instances patched first
- instances migrated
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host('compute-4')
self.create_host('compute-5')
self.create_host('compute-6')
self.create_host('compute-7')
self.create_host('compute-8')
self.create_host('compute-9')
self.create_instance('small', "test_instance_0", 'compute-0')
self.create_instance('small', "test_instance_2", 'compute-2')
self.create_instance('small', "test_instance_3", 'compute-3')
self.create_instance('small', "test_instance_4", 'compute-4')
self.create_instance('small', "test_instance_6", 'compute-6')
self.create_instance('small', "test_instance_7", 'compute-7')
self.create_instance('small', "test_instance_8", 'compute-8')
self.create_instance('small', "test_instance_9", 'compute-9')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 5,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0',
'test_instance_2']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_3',
'test_instance_4']},
{'name': 'lock-hosts',
'entity_names': ['compute-3', 'compute-4']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3', 'compute-4']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3', 'compute-4']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_6',
'test_instance_7']},
{'name': 'lock-hosts',
'entity_names': ['compute-6', 'compute-7']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-6', 'compute-7']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-6', 'compute-7']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_8',
'test_instance_9']},
{'name': 'lock-hosts',
'entity_names': ['compute-8', 'compute-9']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-8', 'compute-9']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-8', 'compute-9']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_migrate_host_aggregate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
- hosts with no instances patched first
- host aggregate limits enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host('compute-4')
self.create_host('compute-5')
self.create_host('compute-6')
self.create_host('compute-7')
self.create_host('compute-8')
self.create_host('compute-9')
self.create_host_aggregate('aggregate-1', ['compute-0',
'compute-1',
'compute-2',
'compute-3',
'compute-4'])
self.create_host_aggregate('aggregate-2', ['compute-5',
'compute-6',
'compute-7',
'compute-8',
'compute-9'])
self.create_instance('small', "test_instance_0", 'compute-0')
self.create_instance('small', "test_instance_2", 'compute-2')
self.create_instance('small', "test_instance_3", 'compute-3')
self.create_instance('small', "test_instance_4", 'compute-4')
self.create_instance('small', "test_instance_6", 'compute-6')
self.create_instance('small', "test_instance_7", 'compute-7')
self.create_instance('small', "test_instance_8", 'compute-8')
self.create_instance('small', "test_instance_9", 'compute-9')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 5,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0',
'test_instance_6']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_2',
'test_instance_7']},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_3',
'test_instance_8']},
{'name': 'lock-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_4',
'test_instance_9']},
{'name': 'lock-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_migrate_overlap_host_aggregate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
- hosts with no instances patched first
- host aggregate limits enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host('compute-4')
self.create_host('compute-5')
self.create_host('compute-6')
self.create_host('compute-7')
self.create_host('compute-8')
self.create_host('compute-9')
self.create_host_aggregate('aggregate-1', ['compute-0',
'compute-1',
'compute-2',
'compute-3',
'compute-4'])
self.create_host_aggregate('aggregate-2', ['compute-5',
'compute-6',
'compute-7',
'compute-8',
'compute-9'])
self.create_host_aggregate('aggregate-3', ['compute-0',
'compute-1',
'compute-2',
'compute-3',
'compute-4',
'compute-5',
'compute-6',
'compute-7',
'compute-8',
'compute-9'])
self.create_instance('small', "test_instance_0", 'compute-0')
self.create_instance('small', "test_instance_2", 'compute-2')
self.create_instance('small', "test_instance_3", 'compute-3')
self.create_instance('small', "test_instance_4", 'compute-4')
self.create_instance('small', "test_instance_6", 'compute-6')
self.create_instance('small', "test_instance_7", 'compute-7')
self.create_instance('small', "test_instance_8", 'compute-8')
self.create_instance('small', "test_instance_9", 'compute-9')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 5,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1', 'compute-5']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0',
'test_instance_6']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-6']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_2',
'test_instance_7']},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-7']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_3',
'test_instance_8']},
{'name': 'lock-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3', 'compute-8']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_4',
'test_instance_9']},
{'name': 'lock-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-4', 'compute-9']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_migrate_small_host_aggregate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- migrate instance action
Verify:
- hosts with no instances patched first
- small host aggregate handled
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host('compute-4')
self.create_host('compute-5')
self.create_host('compute-6')
self.create_host('compute-7')
self.create_host('compute-8')
self.create_host('compute-9')
self.create_host_aggregate('aggregate-1', ['compute-0',
'compute-1'])
self.create_host_aggregate('aggregate-2', ['compute-2',
'compute-3',
'compute-4',
'compute-5',
'compute-6'])
self.create_host_aggregate('aggregate-3', ['compute-7',
'compute-8',
'compute-9'])
self.create_instance('small', "test_instance_0", 'compute-0')
self.create_instance('small', "test_instance_1", 'compute-1')
self.create_instance('small', "test_instance_2", 'compute-2')
self.create_instance('small', "test_instance_3", 'compute-3')
self.create_instance('small', "test_instance_4", 'compute-4')
self.create_instance('small', "test_instance_5", 'compute-5')
self.create_instance('small', "test_instance_6", 'compute-6')
self.create_instance('small', "test_instance_7", 'compute-7')
self.create_instance('small', "test_instance_8", 'compute-8')
self.create_instance('small', "test_instance_9", 'compute-9')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
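# Every host has an instance here, so there is no instance-free first stage;
# hosts are still paired across different aggregates, two per stage.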
expected_results = {
'total_stages': 5,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0',
'test_instance_2']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_1',
'test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-1', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1', 'compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_4',
'test_instance_7']},
{'name': 'lock-hosts',
'entity_names': ['compute-4', 'compute-7']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-4', 'compute-7']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-4', 'compute-7']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_5',
'test_instance_8']},
{'name': 'lock-hosts',
'entity_names': ['compute-5', 'compute-8']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-5', 'compute-8']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-5', 'compute-8']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'disable-host-services'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_6',
'test_instance_9']},
{'name': 'lock-hosts',
'entity_names': ['compute-6', 'compute-9']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-6', 'compute-9']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-6', 'compute-9']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
Verify:
- hosts with no instances patched first
- anti-affinity policy enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
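# compute-2 and compute-3 have no instances and are patched together first; the
# anti-affinity policy keeps test_instance_0 and test_instance_1 (and therefore
# compute-0 and compute-1) in separate stages.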
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_locked_instance(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
- locked instance in instance group
Verify:
- stage creation fails
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1',
admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=sorted_worker_hosts,
reboot=True)
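# The locked instance cannot be stopped and started, so stage creation is
# expected to fail for reboot patches.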
assert success is False, "Strategy creation did not fail"
def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
- test both reboot and no reboot cases
Verify:
- hosts with no instances patched first
- host aggregate limits enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1'])
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
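# compute-2 and compute-3 (no instances) are patched together first; aggregate-1
# then forces compute-0 and compute-1 into separate stages.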
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches.
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
max_parallel_worker_hosts=3,
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
# Perform no-reboot parallel worker patches without any
# grouping by aggregates or determining which hosts have VMs
# max_parallel_worker_hosts is 3 (for 4 hosts) resulting in 2 stages
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1', 'compute-2']},
{'name': 'system-stabilize', 'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize', 'timeout': 30}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_locked_host(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
- locked host
Verify:
- hosts with no instances patched first
- locked host patched and rebooted
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3',
admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED)
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
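# compute-3 is already locked, so it is patched alongside compute-2 in the first
# stage and rebooted in place instead of being locked and unlocked.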
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize', 'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2']},
{'name': 'reboot-hosts',
'entity_names': ['compute-3']},
{'name': 'wait-alarms-clear', 'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0', 'test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize', 'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_0', 'test_instance_1']},
{'name': 'wait-alarms-clear', 'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_locked_instance(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
- locked instance not in an instance group
Verify:
- hosts with no instances patched first
- host aggregate limits enforced
- locked instance not stopped or started
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1'])
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1',
admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
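# The locked instance on compute-1 is neither stopped nor started, so that host
# gets a shorter 6-step stage with no instance action steps.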
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_single_host(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
Verify:
- host aggregates with a single host are patched in parallel
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host_aggregate('aggregate-1', ['compute-0'])
self.create_host_aggregate('aggregate-2', ['compute-1'])
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
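# Each aggregate holds a single host, so the aggregate limit imposes no
# serialization and both hosts are patched in a single parallel stage.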
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0', 'test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_0', 'test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_host_aggregate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
Verify:
- hosts with no instances patched first
- anti-affinity policy and host aggregates enforced at the same time
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_host_aggregate('aggregate-1', ['compute-1', 'compute-2'])
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance('small',
"test_instance_2",
'compute-2')
self.create_instance('small',
"test_instance_3",
'compute-3')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
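# compute-1 cannot join the first stage: it shares aggregate-1 with compute-2 and
# hosts the anti-affinity peer of test_instance_0 on compute-0, so it is patched
# alone in the second stage.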
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0', 'test_instance_2', 'test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-2', 'compute-3']},
{'name': 'start-instances',
'entity_names': ['test_instance_0', 'test_instance_2', 'test_instance_3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- serial apply
- stop start instance action
- test both reboot and no reboot cases
Verify:
- hosts with no instances patched first
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
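# Serial apply patches one host per stage, starting with the instance-free hosts
# compute-2 and compute-3.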
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2']},
{'name': 'wait-alarms-clear'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3']},
{'name': 'wait-alarms-clear'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_no_openstack(self):
"""
Test the sw_patch strategy with no openstack, add worker strategy stages:
- serial apply
- no stop start instance action
- test both reboot and no reboot cases
Verify:
- hosts are patched in order and do not wait for alarms to clear
"""
self.create_host('compute-0', openstack_installed=False)
self.create_host('compute-1', openstack_installed=False)
self.create_host('compute-2', openstack_installed=False)
self.create_host('compute-3', openstack_installed=False)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
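# With no openstack installed there are no instance or wait-alarms-clear steps;
# each stage ends with a longer 60 second system-stabilize instead.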
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 60}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 30}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_no_openstack(self):
"""
Test the sw_patch strategy with no openstack, add worker strategy stages:
- parallel apply
- no migrate instance action
- test both reboot and no reboot cases
Verify:
- hosts are patched and do not wait for alarms to clear
"""
self.create_host('compute-0', openstack_installed=False)
self.create_host('compute-1', openstack_installed=False)
self.create_host('compute-2', openstack_installed=False)
self.create_host('compute-3', openstack_installed=False)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
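# With no openstack and no instances, hosts are simply batched by
# max_parallel_worker_hosts (2), giving two stages that end in system-stabilize
# rather than wait-alarms-clear.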
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 60}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
max_parallel_worker_hosts=2
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2', 'compute-3']},
{'name': 'system-stabilize',
'timeout': 30},
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_stop_start_locked_host(self):
"""
Test the sw_patch strategy add worker strategy stages:
- serial apply
- stop start instance action
- locked host
- test both reboot and no reboot cases
Verify:
- hosts with no instances patched first
- locked host patched and rebooted
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2',
admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED)
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance('small',
"test_instance_2",
'compute-3')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
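# The locked compute-2 is handled first: it is patched and rebooted in place,
# then the remaining hosts are patched serially with their instances stopped
# and restarted.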
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 5,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'reboot-hosts',
'entity_names': ['compute-2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_2']},
{'name': 'lock-hosts',
'entity_names': ['compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3']},
{'name': 'start-instances',
'entity_names': ['test_instance_2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_max_hosts(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
Verify:
- maximum host limit enforced
"""
for x in range(0, 13):
self.create_host('compute-%02d' % x)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
max_parallel_worker_hosts=5
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
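# Thirteen hosts with max_parallel_worker_hosts=5 are expected to split into
# three stages of 5, 5 and 3 hosts.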
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-00',
'compute-01',
'compute-02',
'compute-03',
'compute-04']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-00',
'compute-01',
'compute-02',
'compute-03',
'compute-04']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-00',
'compute-01',
'compute-02',
'compute-03',
'compute-04']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-05',
'compute-06',
'compute-07',
'compute-08',
'compute-09']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-05',
'compute-06',
'compute-07',
'compute-08',
'compute-09']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-05',
'compute-06',
'compute-07',
'compute-08',
'compute-09']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-10',
'compute-11',
'compute-12']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-10',
'compute-11',
'compute-12']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-10',
'compute-11',
'compute-12']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_migrate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- serial apply
- migrate instance action
- test both reboot and no reboot cases
Verify:
- hosts with no instances patched first
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
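# Serial migrate stages include a migrate-instances-from-host step even for the
# instance-free hosts (compute-2 and compute-3), with an empty entity list.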
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'migrate-instances-from-host',
'entity_names': []},
{'name': 'lock-hosts',
'entity_names': ['compute-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'migrate-instances-from-host',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 30},
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_migrate_locked_instance(self):
"""
Test the sw_patch strategy add worker strategy stages:
- serial apply
- migrate instance action
- locked instance in instance group
- test both reboot and no reboot cases
Verify:
- stages not created for reboot case
- for no reboot case:
- hosts with no instances patched first
- locked instance is not migrated
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0',
admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
self.create_instance('small',
"test_instance_1",
'compute-1')
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=sorted_worker_hosts,
reboot=True)
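# The locked instance cannot be migrated, so stage creation is expected to fail
# for reboot patches.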
assert success is False, "Strategy creation did not fail"
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-2']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-3']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 30},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 30},
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_ignore(self):
"""
Test the sw_patch strategy add storage strategy stages:
- ignore apply
Verify:
- stages not created
"""
self.create_host('storage-0')
self.create_host('storage-1')
self.create_host('storage-2')
self.create_host('storage-3')
self.create_host_group('group-0',
['storage-0', 'storage-1'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
self.create_host_group('group-1',
['storage-2', 'storage-3'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
storage_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.STORAGE in host.personality:
storage_hosts.append(host)
# Sort hosts so the order of the steps is deterministic
sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE
)
success, reason = strategy._add_storage_strategy_stages(
storage_hosts=sorted_storage_hosts,
reboot=True)
assert success is True, "Strategy creation failed"
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 0
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_parallel_host_group(self):
"""
Test the sw_patch strategy add storage strategy stages:
- parallel apply
- test both reboot and no reboot cases
Verify:
- host groups enforced
"""
self.create_host('storage-0')
self.create_host('storage-1')
self.create_host('storage-2')
self.create_host('storage-3')
self.create_host_group('group-0',
['storage-0', 'storage-1'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
self.create_host_group('group-1',
['storage-2', 'storage-3'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
storage_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.STORAGE in host.personality:
storage_hosts.append(host)
# Sort hosts so the order of the steps is deterministic
sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
storage_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL
)
strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
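# Storage replication peers (group-0 and group-1 members) are never patched in
# the same stage; each stage takes one host from each group and then waits for
# data sync.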
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-0', 'storage-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-0', 'storage-2']},
{'name': 'system-stabilize', 'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-0', 'storage-2']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
},
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-1', 'storage-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-1', 'storage-3']},
{'name': 'system-stabilize', 'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-1', 'storage-3']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
storage_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL
)
strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-storage-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-0', 'storage-2']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-storage-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-1', 'storage-3']},
{'name': 'system-stabilize',
'timeout': 30}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_serial(self):
"""
Test the sw_patch strategy add storage strategy stages:
- serial apply
"""
self.create_host('storage-0')
self.create_host('storage-1')
self.create_host('storage-2')
self.create_host('storage-3')
self.create_host_group('group-0',
['storage-0', 'storage-1'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
self.create_host_group('group-1',
['storage-2', 'storage-3'],
[nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
storage_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.STORAGE in host.personality:
storage_hosts.append(host)
# Sort hosts so the order of the steps is deterministic
sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
storage_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
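# Serial apply patches each storage host in its own stage and waits for data
# sync (up to 1800 seconds) before moving on.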
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-0']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
},
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-1']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
},
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-2']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-2']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-2']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
},
{'name': 'sw-patch-storage-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['storage-3']},
{'name': 'sw-patch-hosts',
'entity_names': ['storage-3']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['storage-3']},
{'name': 'wait-data-sync',
'ignore_alarms': ['900.001',
'900.005',
'900.101',
'200.001',
'700.004',
'280.002'],
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_ignore(self):
"""
Test the sw_patch strategy add controller strategy stages:
- ignore apply
Verify:
- stages not created
"""
self.create_host('controller-0')
self.create_host('controller-1')
controller_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.CONTROLLER in host.personality:
controller_hosts.append(host)
# Test reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE
)
success, reason = strategy._add_controller_strategy_stages(
controllers=controller_hosts,
reboot=True)
assert success is True, "Strategy creation failed"
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 0
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_serial(self):
"""
Test the sw_patch strategy add controller strategy stages:
- serial apply
- test both reboot and no reboot cases
Verify:
- patch mate controller first
"""
self.create_host('controller-0')
self.create_host('controller-1')
controller_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.CONTROLLER in host.personality:
controller_hosts.append(host)
# Test reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
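        # For reboot patches each controller is swacted away from, locked, patched,
        # unlocked, and alarms must clear before the next stage; the mate controller
        # (controller-1 here) is patched first and the active controller last.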
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_serial_openstack_not_installed(self):
"""
        Test the sw_patch strategy add controller strategy stages:
        - serial apply
        - openstack not installed on the controllers
        - test both reboot and no reboot cases
        Verify:
        - patch mate controller first
"""
self.create_host('controller-0', openstack_installed=False)
self.create_host('controller-1', openstack_installed=False)
controller_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.CONTROLLER in host.personality:
controller_hosts.append(host)
# Test reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_duplex_stages_parallel_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- parallel apply treated as serial
- stop start instance action
- test both reboot and no reboot cases
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
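        # Although PARALLEL apply is requested, AIO controllers also run the control
        # plane, so they are handled one at a time (two single-host stages).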
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize'}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_duplex_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_duplex_stages_serial_stop_start_no_instances(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_duplex_stages_serial_stop_start_no_openstack(self):
"""
        Test the sw_patch strategy add worker strategy stages:
        - aio hosts (openstack not installed)
        - no instances
        - serial apply
        - stop start instance action
"""
self.create_host('controller-0', aio=True, openstack_installed=False)
self.create_host('controller-1', aio=True, openstack_installed=False)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_parallel_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- parallel apply treated as serial
- stop start instance action
- test both reboot and no reboot cases
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
self.create_host('compute-0')
self.create_host('compute-1')
self.create_instance('small',
"test_instance_2",
'compute-0')
self.create_instance('small',
"test_instance_3",
'compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
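        # The two AIO controllers still get their own serial stages; the dedicated
        # compute hosts can then be patched together in one parallel stage.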
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_2', 'test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_2', 'test_instance_3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize'}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
self.create_host('compute-0')
self.create_host('compute-1')
self.create_instance('small',
"test_instance_2",
'compute-0')
self.create_instance('small',
"test_instance_3",
'compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_2']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_2']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_3']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_serial_stop_start_no_instances(
self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_host('compute-0')
self.create_host('compute-1')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_migrate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- serial apply
- migrate instance action
Verify:
- stage creation fails
"""
self.create_host('controller-0', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-0')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
single_controller=True
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=worker_hosts,
reboot=True)
assert success is False, "Strategy creation did not fail"
def test_sw_patch_strategy_aio_simplex_stages_serial_no_openstack(
self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host (no openstack)
- serial apply
- no migrate instance action
"""
self.create_host('controller-0', aio=True, openstack_installed=False)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 1800},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start_no_instances(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
worker_hosts = []
for host in list(self._host_table.values()):
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 1800}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_build_complete_parallel_stop_start(self):
"""
Test the sw_patch strategy build_complete:
- parallel apply
- stop start instance action
Verify:
- hosts with no instances patched first
- anti-affinity policy enforced
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_instance('small',
"test_instance_0",
'compute-0')
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
fake_patch_obj = SwPatch()
strategy.sw_update_obj = fake_patch_obj
nfvi_sw_patches = list()
sw_patch = nfvi.objects.v1.SwPatch(
'PATCH_0001', '12.01', 'Applied', 'Available')
nfvi_sw_patches.append(sw_patch)
strategy.nfvi_sw_patches = nfvi_sw_patches
nfvi_sw_patch_hosts = list()
for host_name in ['compute-0', 'compute-1']:
host = nfvi.objects.v1.HostSwPatch(
host_name, 'worker', '12.01', True, False, 'idle', False,
False)
nfvi_sw_patch_hosts.append(host)
strategy.nfvi_sw_patch_hosts = nfvi_sw_patch_hosts
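        # build_complete turns the queried patch/host state into the apply phase;
        # compute-1 (no instances) is expected to be patched before compute-0.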
strategy.build_complete(common_strategy.STRATEGY_RESULT.SUCCESS, "")
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
| 40.890473 | 110 | 0.454058 |
ce8450a86a80ddff5f355ce4bb349939b5ee8734 | 9,152 | py | Python | ppdet/modeling/backbones/cspresnet.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/backbones/cspresnet.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/backbones/cspresnet.py | Amanda-Barbara/PaddleDetection | 65ac13074eaaa2447c644a2df71969d8a3dd1fae | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from ppdet.modeling.ops import get_act_fn
from ppdet.core.workspace import register, serializable
from ..shape_spec import ShapeSpec
__all__ = ['CSPResNet', 'BasicBlock', 'EffectiveSELayer', 'ConvBNLayer']
class ConvBNLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=1,
groups=1,
padding=0,
act=None):
super(ConvBNLayer, self).__init__()
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
bias_attr=False)
self.bn = nn.BatchNorm2D(
ch_out,
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
self.act = get_act_fn(act) if act is None or isinstance(act, (
str, dict)) else act
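        # `act` may be an activation name or config dict (resolved by get_act_fn)
        # or an already-built callable, which is then used as-is.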
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
return x
class RepVggBlock(nn.Layer):
def __init__(self, ch_in, ch_out, act='relu'):
super(RepVggBlock, self).__init__()
self.ch_in = ch_in
self.ch_out = ch_out
self.conv1 = ConvBNLayer(
ch_in, ch_out, 3, stride=1, padding=1, act=None)
self.conv2 = ConvBNLayer(
ch_in, ch_out, 1, stride=1, padding=0, act=None)
self.act = get_act_fn(act) if act is None or isinstance(act, (
str, dict)) else act
def forward(self, x):
if hasattr(self, 'conv'):
y = self.conv(x)
else:
y = self.conv1(x) + self.conv2(x)
y = self.act(y)
return y
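    # Re-parameterization: for deployment the parallel 3x3 and 1x1 branches (each
    # followed by BatchNorm) are folded into a single biased 3x3 convolution,
    # which is mathematically equivalent but cheaper at inference time.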
def convert_to_deploy(self):
if not hasattr(self, 'conv'):
self.conv = nn.Conv2D(
in_channels=self.ch_in,
out_channels=self.ch_out,
kernel_size=3,
stride=1,
padding=1,
groups=1)
kernel, bias = self.get_equivalent_kernel_bias()
self.conv.weight.set_value(kernel)
self.conv.bias.set_value(bias)
self.__delattr__('conv1')
self.__delattr__('conv2')
def get_equivalent_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
return kernel3x3 + self._pad_1x1_to_3x3_tensor(
kernel1x1), bias3x3 + bias1x1
def _pad_1x1_to_3x3_tensor(self, kernel1x1):
if kernel1x1 is None:
return 0
else:
return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
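    # Folding a BatchNorm into its preceding conv:
    #   w' = w * gamma / sqrt(var + eps)
    #   b' = beta - mean * gamma / sqrt(var + eps)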
def _fuse_bn_tensor(self, branch):
if branch is None:
return 0, 0
kernel = branch.conv.weight
running_mean = branch.bn._mean
running_var = branch.bn._variance
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn._epsilon
std = (running_var + eps).sqrt()
t = (gamma / std).reshape((-1, 1, 1, 1))
return kernel * t, beta - running_mean * gamma / std
class BasicBlock(nn.Layer):
def __init__(self, ch_in, ch_out, act='relu', shortcut=True):
super(BasicBlock, self).__init__()
assert ch_in == ch_out
self.conv1 = ConvBNLayer(ch_in, ch_out, 3, stride=1, padding=1, act=act)
self.conv2 = RepVggBlock(ch_out, ch_out, act=act)
self.shortcut = shortcut
def forward(self, x):
y = self.conv1(x)
y = self.conv2(y)
if self.shortcut:
return paddle.add(x, y)
else:
return y
class EffectiveSELayer(nn.Layer):
""" Effective Squeeze-Excitation
From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
"""
def __init__(self, channels, act='hardsigmoid'):
super(EffectiveSELayer, self).__init__()
self.fc = nn.Conv2D(channels, channels, kernel_size=1, padding=0)
self.act = get_act_fn(act) if act is None or isinstance(act, (
str, dict)) else act
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.fc(x_se)
return x * self.act(x_se)
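# A Cross Stage Partial (CSP) stage: after an optional stride-2 downsample, two
# 1x1 convs split the features; one path runs through the residual blocks, the
# paths are concatenated, optionally re-weighted by EffectiveSELayer, and fused
# with a final 1x1 conv.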
class CSPResStage(nn.Layer):
def __init__(self,
block_fn,
ch_in,
ch_out,
n,
stride,
act='relu',
attn='eca'):
super(CSPResStage, self).__init__()
ch_mid = (ch_in + ch_out) // 2
if stride == 2:
self.conv_down = ConvBNLayer(
ch_in, ch_mid, 3, stride=2, padding=1, act=act)
else:
self.conv_down = None
self.conv1 = ConvBNLayer(ch_mid, ch_mid // 2, 1, act=act)
self.conv2 = ConvBNLayer(ch_mid, ch_mid // 2, 1, act=act)
self.blocks = nn.Sequential(*[
block_fn(
ch_mid // 2, ch_mid // 2, act=act, shortcut=True)
for i in range(n)
])
if attn:
self.attn = EffectiveSELayer(ch_mid, act='hardsigmoid')
else:
self.attn = None
self.conv3 = ConvBNLayer(ch_mid, ch_out, 1, act=act)
def forward(self, x):
if self.conv_down is not None:
x = self.conv_down(x)
y1 = self.conv1(x)
y2 = self.blocks(self.conv2(x))
y = paddle.concat([y1, y2], axis=1)
if self.attn is not None:
y = self.attn(y)
y = self.conv3(y)
return y
@register
@serializable
class CSPResNet(nn.Layer):
__shared__ = ['width_mult', 'depth_mult', 'trt']
def __init__(self,
layers=[3, 6, 6, 3],
channels=[64, 128, 256, 512, 1024],
act='swish',
return_idx=[0, 1, 2, 3, 4],
depth_wise=False,
use_large_stem=False,
width_mult=1.0,
depth_mult=1.0,
trt=False):
super(CSPResNet, self).__init__()
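        # width_mult / depth_mult scale the channel and block counts so the same
        # definition yields smaller or larger backbone variants.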
channels = [max(round(c * width_mult), 1) for c in channels]
layers = [max(round(l * depth_mult), 1) for l in layers]
act = get_act_fn(
act, trt=trt) if act is None or isinstance(act,
(str, dict)) else act
if use_large_stem:
self.stem = nn.Sequential(
('conv1', ConvBNLayer(
3, channels[0] // 2, 3, stride=2, padding=1, act=act)),
('conv2', ConvBNLayer(
channels[0] // 2,
channels[0] // 2,
3,
stride=1,
padding=1,
act=act)), ('conv3', ConvBNLayer(
channels[0] // 2,
channels[0],
3,
stride=1,
padding=1,
act=act)))
else:
self.stem = nn.Sequential(
('conv1', ConvBNLayer(
3, channels[0] // 2, 3, stride=2, padding=1, act=act)),
('conv2', ConvBNLayer(
channels[0] // 2,
channels[0],
3,
stride=1,
padding=1,
act=act)))
n = len(channels) - 1
self.stages = nn.Sequential(*[(str(i), CSPResStage(
BasicBlock, channels[i], channels[i + 1], layers[i], 2, act=act))
for i in range(n)])
self._out_channels = channels[1:]
self._out_strides = [4, 8, 16, 32]
self.return_idx = return_idx
def forward(self, inputs):
x = inputs['image']
x = self.stem(x)
outs = []
for idx, stage in enumerate(self.stages):
x = stage(x)
if idx in self.return_idx:
outs.append(x)
return outs
@property
def out_shape(self):
return [
ShapeSpec(
channels=self._out_channels[i], stride=self._out_strides[i])
for i in self.return_idx
]
| 32.339223 | 102 | 0.535948 |
35b9ea1ac6a4a98c9f207338a03146ef649c0eb6 | 14,160 | py | Python | fairseq/optim/fp16_optimizer.py | Luccadoremi/Model-Compression---DAQ- | a1efb348921c7728a12ca5670302aca75306946a | [
"MIT"
] | 1 | 2021-04-17T16:57:11.000Z | 2021-04-17T16:57:11.000Z | fairseq/optim/fp16_optimizer.py | Luccadoremi/Model-Compression---DAQ- | a1efb348921c7728a12ca5670302aca75306946a | [
"MIT"
] | null | null | null | fairseq/optim/fp16_optimizer.py | Luccadoremi/Model-Compression---DAQ- | a1efb348921c7728a12ca5670302aca75306946a | [
"MIT"
] | 1 | 2021-07-12T09:45:43.000Z | 2021-07-12T09:45:43.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import chain
import torch
from fairseq import optim, utils
class DynamicLossScaler(object):
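    """Dynamic loss scaling for FP16 (mixed precision) training: the scale grows
    by ``scale_factor`` after ``scale_window`` overflow-free updates and shrinks
    when the recent overflow rate exceeds ``tolerance`` (never dropping below
    ``threshold`` if one is set).
    """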
def __init__(
self, init_scale=2.**15, scale_factor=2., scale_window=2000,
tolerance=0.05, threshold=None,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
def update_scale(self, overflow):
iter_since_rescale = self._iter - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
@staticmethod
def has_overflow(grad_norm):
# detect inf and nan
if grad_norm == float('inf') or grad_norm != grad_norm:
return True
return False
class FP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args)
self.retain_graph = hasattr(args, "quantization_config_path")
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
# create FP32 copy of parameters and grads
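        # All master parameters are packed into one flat FP32 tensor, so the
        # wrapped optimizer sees a single parameter and grad syncing is one copy.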
total_param_size = sum(p.data.numel() for p in params)
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:offset+numel].copy_(p.data.view(-1))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward(retain_graph=self.retain_graph)
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
if self._needs_sync:
# copy FP16 grads to FP32
offset = 0
for p in self.fp16_params:
if not p.requires_grad:
continue
grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
numel = grad_data.numel()
self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
offset += numel
# correct for dynamic loss scaler
self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)
self._needs_sync = False
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
else:
self.fp32_params.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
# copy FP32 params back into FP16 model
offset = 0
for p in self.fp16_params:
if not p.requires_grad:
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
offset += numel
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
self._needs_sync = False
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(self, args, params, optimizer):
if not optimizer.supports_memory_efficient_fp16:
raise ValueError(
'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
)
super().__init__(args)
self.wrapped_optimizer = optimizer
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
# Hack: PyTorch automatically casts the optimizer state to match the
# type of the current parameters. But with --memory-efficient-fp16 the
# params are FP16 while the optimizer state is FP32 and we don't want
# to cast. A workaround is to manually copy back the original state
# after the optimizer has been loaded.
groups = self.optimizer.param_groups
saved_groups = state_dict['param_groups']
id_map = {
old_id: p
for old_id, p in zip(
chain(*(g['params'] for g in saved_groups)),
chain(*(g['params'] for g in groups))
)
}
for k, v in state_dict['state'].items():
if k in id_map:
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._grads_are_scaled = True
def _unscale_grads(self, multiply_grads=1.):
if self._grads_are_scaled:
self._grads_are_scaled = False
# correct for dynamic loss scaler
self.wrapped_optimizer.multiply_grads(multiply_grads / self.scaler.loss_scale)
else:
assert multiply_grads == 1.
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
if self._grads_are_scaled:
self._unscale_grads(c)
else:
self.wrapped_optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._unscale_grads()
grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._unscale_grads()
self.wrapped_optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
self._grads_are_scaled = False
| 37.659574 | 97 | 0.634534 |
6dea260b841baef24c80a3714abe83383c367fa0 | 7,901 | py | Python | tests/utils_tests/test_encoding.py | jommerce/django | baf9604ed8fed3e6e7ddfaca2d83c377c81399ae | [
"BSD-3-Clause",
"0BSD"
] | 16 | 2019-08-10T12:24:06.000Z | 2020-05-21T09:11:14.000Z | tests/utils_tests/test_encoding.py | jommerce/django | baf9604ed8fed3e6e7ddfaca2d83c377c81399ae | [
"BSD-3-Clause",
"0BSD"
] | 12 | 2019-08-10T11:55:29.000Z | 2020-05-21T04:46:30.000Z | tests/utils_tests/test_encoding.py | jommerce/django | baf9604ed8fed3e6e7ddfaca2d83c377c81399ae | [
"BSD-3-Clause",
"0BSD"
] | 3 | 2019-08-20T13:29:34.000Z | 2020-01-30T22:05:10.000Z | import datetime
import sys
import unittest
from pathlib import Path
from unittest import mock
from urllib.parse import quote_plus
from django.test import SimpleTestCase
from django.utils.encoding import (
DjangoUnicodeDecodeError,
escape_uri_path,
filepath_to_uri,
force_bytes,
force_str,
get_system_encoding,
iri_to_uri,
repercent_broken_unicode,
smart_bytes,
smart_str,
uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy
class TestEncodingUtils(SimpleTestCase):
def test_force_str_exception(self):
"""
Broken __str__ actually raises an error.
"""
class MyString:
def __str__(self):
return b"\xc3\xb6\xc3\xa4\xc3\xbc"
# str(s) raises a TypeError if the result is not a text type.
with self.assertRaises(TypeError):
force_str(MyString())
def test_force_str_lazy(self):
s = SimpleLazyObject(lambda: "x")
self.assertIs(type(force_str(s)), str)
def test_force_str_DjangoUnicodeDecodeError(self):
msg = (
"'utf-8' codec can't decode byte 0xff in position 0: invalid "
"start byte. You passed in b'\\xff' (<class 'bytes'>)"
)
with self.assertRaisesMessage(DjangoUnicodeDecodeError, msg):
force_str(b"\xff")
def test_force_bytes_exception(self):
"""
        force_bytes knows how to convert an exception containing
        non-ASCII characters in its args to bytes.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
self.assertEqual(force_bytes(exc), error_msg.encode())
self.assertEqual(
force_bytes(exc, encoding="ascii", errors="ignore"),
b"This is an exception, voil",
)
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_force_bytes_encoding(self):
error_msg = "This is an exception, voilà".encode()
result = force_bytes(error_msg, encoding="ascii", errors="ignore")
self.assertEqual(result, b"This is an exception, voil")
def test_force_bytes_memory_view(self):
data = b"abc"
result = force_bytes(memoryview(data))
# Type check is needed because memoryview(bytes) == bytes.
self.assertIs(type(result), bytes)
self.assertEqual(result, data)
def test_smart_bytes(self):
class Test:
def __str__(self):
return "ŠĐĆŽćžšđ"
lazy_func = gettext_lazy("x")
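        # smart_bytes() must return lazy translation objects unchanged so they
        # are only evaluated later (unlike force_bytes()).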
self.assertIs(smart_bytes(lazy_func), lazy_func)
self.assertEqual(
smart_bytes(Test()),
b"\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91",
)
self.assertEqual(smart_bytes(1), b"1")
self.assertEqual(smart_bytes("foo"), b"foo")
def test_smart_str(self):
class Test:
def __str__(self):
return "ŠĐĆŽćžšđ"
lazy_func = gettext_lazy("x")
self.assertIs(smart_str(lazy_func), lazy_func)
self.assertEqual(
smart_str(Test()), "\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"
)
self.assertEqual(smart_str(1), "1")
self.assertEqual(smart_str("foo"), "foo")
def test_get_default_encoding(self):
with mock.patch("locale.getlocale", side_effect=Exception):
self.assertEqual(get_system_encoding(), "ascii")
def test_repercent_broken_unicode_recursion_error(self):
# Prepare a string long enough to force a recursion error if the tested
# function uses recursion.
data = b"\xfc" * sys.getrecursionlimit()
try:
self.assertEqual(
repercent_broken_unicode(data), b"%FC" * sys.getrecursionlimit()
)
except RecursionError:
self.fail("Unexpected RecursionError raised.")
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertIsNone(filepath_to_uri(None))
self.assertEqual(
filepath_to_uri("upload\\чубака.mp4"),
"upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4",
)
self.assertEqual(filepath_to_uri(Path("upload/test.png")), "upload/test.png")
self.assertEqual(filepath_to_uri(Path("upload\\test.png")), "upload/test.png")
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
("red%09rosé#red", "red%09ros%C3%A9#red"),
("/blog/for/Jürgen Münster/", "/blog/for/J%C3%BCrgen%20M%C3%BCnster/"),
(
"locations/%s" % quote_plus("Paris & Orléans"),
"locations/Paris+%26+Orl%C3%A9ans",
),
# Reserved chars remain unescaped.
("%&", "%&"),
("red&♥ros%#red", "red&%E2%99%A5ros%#red"),
(gettext_lazy("red&♥ros%#red"), "red&%E2%99%A5ros%#red"),
]
for iri, uri in cases:
with self.subTest(iri):
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
(None, None),
# Valid UTF-8 sequences are decoded.
("/%e2%89%Ab%E2%99%a5%E2%89%aB/", "/≫♥≫/"),
("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"),
("/%41%5a%6B/", "/AZk/"),
# Reserved and non-URL valid ASCII chars are not decoded.
("/%25%20%02%41%7b/", "/%25%20%02A%7b/"),
# Broken UTF-8 sequences remain escaped.
("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"),
("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"),
("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"),
("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"),
(
"/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93",
"/♥♥/?utf8=%9C%93✓%9C%93",
),
]
for uri, iri in cases:
with self.subTest(uri):
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
(
"/blog/for/J%C3%BCrgen%20M%C3%BCnster/",
"/blog/for/J\xfcrgen%20M\xfcnster/",
),
("%&", "%&"),
("red&%E2%99%A5ros%#red", "red&♥ros%#red"),
("/%E2%99%A5%E2%99%A5/", "/♥♥/"),
("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"),
("/%25%20%02%7b/", "/%25%20%02%7b/"),
("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"),
("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"),
("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"),
("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"),
(
"/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93",
"/♥♥/?utf8=%9C%93✓%9C%93",
),
]
for uri, iri in cases:
with self.subTest(uri):
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
cases = [
(
"/;some/=awful/?path/:with/@lots/&of/+awful/chars",
"/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars",
),
("/foo#bar", "/foo%23bar"),
("/foo?bar", "/foo%3Fbar"),
]
for uri, expected in cases:
with self.subTest(uri):
self.assertEqual(escape_uri_path(uri), expected)
| 35.751131 | 86 | 0.555246 |
b8b46a9e7cfe9685d97d4886e26a3f32d016675b | 14,934 | py | Python | on-chain-privacy/eth_erc721_privacy.py | saanvijay/vmware-blockchain-samples | a4262b8556400697a56984e8e9aac06342d9a035 | [
"MIT"
] | null | null | null | on-chain-privacy/eth_erc721_privacy.py | saanvijay/vmware-blockchain-samples | a4262b8556400697a56984e8e9aac06342d9a035 | [
"MIT"
] | null | null | null | on-chain-privacy/eth_erc721_privacy.py | saanvijay/vmware-blockchain-samples | a4262b8556400697a56984e8e9aac06342d9a035 | [
"MIT"
] | null | null | null | import sys
import time
import urllib3
import logging
import json
from web3 import Web3
from web3.middleware import geth_poa_middleware
urllib3.disable_warnings()
ethrpcApiUrl = "http://localhost:8545"
w3 = None
chainid = 5000
contract_deploy = True
contract1_use = True
contract2_use = True
account_perm_test = False
contract1_address = ""
contract2_address = ""
logging.basicConfig(level = logging.INFO)
log = logging.getLogger("logger")
def main():
global w3
w3 = Web3(Web3.HTTPProvider(ethrpcApiUrl, request_kwargs={"verify": False}))
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
global contract_deploy_account
global contract_deploy_account_key
contract_deploy_account = "f17f52151EbEF6C7334FAD080c5704D77216b732"
contract_deploy_account = contract_deploy_account.lower()
contract_deploy_account = w3.toChecksumAddress(contract_deploy_account)
contract_deploy_account_key = "ae6ae8e5ccbfb04590405997ee2d52d2b330726137b875053c36d94e974d162f"
global account1
global account1_key
global account1_str
account1_str = "fe3b557e8fb62b89f4916b721be55ceb828dbd73"
account1_str = account1_str.lower()
account1 = w3.toChecksumAddress(account1_str)
account1_key = "8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63"
global account2
global account2_key
global account2_str
account2_str = "627306090abaB3A6e1400e9345bC60c78a8BEf57"
account2_str = account2_str.lower()
account2 = w3.toChecksumAddress(account2_str)
account2_key = "c87509a1c067bbde78beb793e6fa76530b6382a4c0241e5e4a9ec0a0f44dc0d3"
global account3
global account3_key
global account3_str
account3_str = "0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E"
account3_str = account3_str.lower()
account3 = w3.toChecksumAddress(account3_str)
account3_key = "b25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364"
global gas
global gas_price
gas = 2000000
gas_price = 0
global abi1
with open("./contracts/SupplyChainItemPrivacy.abi/SupplyChainItem.abi", "r") as f:
abi1 = f.read()
global bin1
with open("./contracts/SupplyChainItemPrivacy.bin/SupplyChainItem.bin", "r") as f:
bin1 = f.read()
if (contract_deploy):
erc721_contract_deploy()
global contract1_address
f = open("contract1_address.txt", "r")
contract1_address = f.read()
f.close()
global contract2_address
f = open("contract2_address.txt", "r")
contract2_address = f.read()
f.close()
if (contract1_use):
erc721_asset_transfer(contract1_address, account1, account2)
if (contract2_use):
erc721_asset_transfer(contract2_address, account1, account3)
if (account_perm_test):
erc721_account_perm_test(contract1_address)
def erc721_contract_deploy():
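    """
    Deploy two SupplyChainItem (ERC-721) contracts, save their addresses to
    contract1_address.txt / contract2_address.txt, then grant contract-level
    permissions: account1 on both contracts, account2 on contract 1 and
    account3 on contract 2.
    """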
#
# i=1 deploy nft_contract 1
# i=2 deploy nft_contract 2
#
for i in range(1, 3, 1):
contract_deploy_status = False
contract = w3.eth.contract(abi=abi1, bytecode=bin1)
#
# Submit the transaction that deploys the nft contract
#
construct_txn = contract.constructor().buildTransaction(
{
"from": contract_deploy_account,
"gas": gas,
"gasPrice": gas_price,
"nonce": w3.eth.get_transaction_count(contract_deploy_account),
"chainId": chainid,
}
)
signed_txn = w3.eth.account.sign_transaction(construct_txn, contract_deploy_account_key)
# Validating transaction hash
tx_hash_send = signed_txn.hash
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
assert tx_hash_send == tx_hash, "smart contract deploy transaction hash mismatch error"
log.debug("smart contract deploy transaction hash successfully validated")
# Validating transaction receipt
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
log.debug("smart contract deploy transaction receipt:'{}'".format(tx_receipt))
if tx_receipt.status == 1:
log.info(
"smart contract deploy success, contract address:'{}'".format(
tx_receipt.contractAddress
)
)
contract_deploy_status = True
else:
assert contract_deploy_status, "smart contract deploy failed"
if (i == 1):
global contract1_address
contract1_address = tx_receipt.contractAddress
f = open("contract1_address.txt", "w")
f.write(contract1_address)
f.close()
elif (i == 2):
global contract2_address
contract2_address = tx_receipt.contractAddress
f = open("contract2_address.txt", "w")
f.write(contract2_address)
f.close()
#
# i=1 use nft_contract 1
# i=2 use nft_contract 2
#
for i in range(1, 3, 1):
if (i == 1):
contract_address = contract1_address
elif (i == 2):
contract_address = contract2_address
nft_contract = w3.eth.contract(address=contract_address, abi=abi1)
#
# j=1 give permissions to account1 for nft_contract
# j=2 give permissions to account2 or account3 for nft_contract
#
for j in range(1, 3, 1):
if (j == 1):
accountx = account1
elif (j == 2):
if (i == 1):
accountx = account2
elif (i == 2):
accountx = account3
log.info("account permissioning for account:'{}' nft_contract:'{}'\n".format(accountx, contract_address))
construct_txn = nft_contract.functions.addPerm(accountx).buildTransaction({
'from': contract_deploy_account,
'gas': gas,
'gasPrice': gas_price,
'nonce': w3.eth.get_transaction_count(contract_deploy_account),
'chainId': chainid
})
signed_txn = w3.eth.account.sign_transaction(construct_txn, contract_deploy_account_key)
# Validating transaction hash
tx_hash_send = signed_txn.hash
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
assert tx_hash_send == tx_hash, "tx hash mismatch"
# Validating transaction receipt
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
log.debug("transaction receipt: '{}'".format(tx_receipt))
assert tx_receipt.status == 1, "transaction failed"
def erc721_asset_transfer(contract_address, accountx, accounty):
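    """
    Mint two NFTs for accountx on the given contract, then transfer item 1
    back and forth between accountx and accounty, verifying balances and the
    Transfer event logs after each hop.
    """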
nft_item_id1 = 0
nft_item_id2 = 0
    accountx_key = ""
    accounty_key = ""
    # Lowercase hex form of the addresses (without the 0x prefix), used below to
    # check the from/to fields recorded in the Transfer event topics.
    accountx_str = accountx.lower()[2:]
    accounty_str = accounty.lower()[2:]
nft_contract = w3.eth.contract(address=contract_address, abi=abi1)
log.info("### nft_smart_contract:'{}' ###".format(contract_address))
#
    # transaction submission in a loop
# i=1 Submit the transaction that creates an nft item 1) for accountx
# i=2 Submit the transaction that creates an nft item 2) for accountx
#
for i in range(1, 3, 1):
log.info("new nft_item for accountx: '{}'".format(accountx))
construct_txn = nft_contract.functions.newItem(accountx).buildTransaction({
'from': accountx,
'gas': gas,
'gasPrice': gas_price,
'nonce': w3.eth.get_transaction_count(accountx),
'chainId': chainid
})
if (accountx == account1):
accountx_key = account1_key
elif (accountx == account2):
accountx_key = account2_key
elif (accountx == account3):
accountx_key = account3_key
signed_txn = w3.eth.account.sign_transaction(construct_txn, accountx_key)
# Validating transaction hash
tx_hash_send = signed_txn.hash
tx_hash = 0
try:
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
except ValueError as e:
# log.error("json rpc error: '{}'".format(json.loads(e)))
log.error("json rpc error: '{}'".format(e))
return
assert tx_hash_send == tx_hash, "tx hash mismatch"
# Validating transaction receipt
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
log.debug("transaction receipt: '{}'".format(tx_receipt))
assert tx_receipt.status == 1, "transaction failed"
# https://stackoverflow.com/questions/67803090/how-to-get-erc-721-tokenid
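        # For a standard ERC-721 Transfer(from, to, tokenId) event (which the
        # SupplyChainItem contract is assumed to emit), topics[0] is the event
        # signature hash, topics[1]/topics[2] are the zero-padded from/to
        # addresses, and topics[3] is the tokenId, hence the indexing below.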
if (i == 1):
nft_item_id1 = int(tx_receipt['logs'][0]['topics'][3].hex(), 16)
log.info("new nft_item_id accountx: {}".format(nft_item_id1))
elif (i == 2):
nft_item_id2 = int(tx_receipt['logs'][0]['topics'][3].hex(), 16)
log.info("new nft_item_id accountx: {}".format(nft_item_id2))
nft_item_count = nft_contract.functions.balanceOf(accountx).call()
log.info("nft_item_count accountx: {} \n".format(nft_item_count))
num_repeats = 1
start_time = [None] * num_repeats
end_time = [None] * num_repeats
num_account_transfers = 2
for j in range(0, num_repeats, 1):
start_time[j] = time.time()
#
# i is odd, Submit the transaction that transfers nft item 1) from accountx to accounty
# i is even, Submit the transaction that transfers nft item 1) from accounty to accountx
#
for i in range(1, num_account_transfers + 1, 1):
if (i % 2 == 1):
construct_txn = nft_contract.functions.transferFrom(accountx, accounty, nft_item_id1).buildTransaction({
'from': accountx,
'gas': gas,
'gasPrice': gas_price,
'nonce': w3.eth.get_transaction_count(accountx),
'chainId': chainid
})
if (accountx == account1):
accountx_key = account1_key
signed_txn = w3.eth.account.sign_transaction(construct_txn, accountx_key)
tx_hash_send = signed_txn.hash
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
else:
construct_txn = nft_contract.functions.transferFrom(accounty, accountx, nft_item_id1).buildTransaction({
'from': accounty,
'gas': gas,
'gasPrice': gas_price,
'nonce': w3.eth.get_transaction_count(accounty),
'chainId': chainid
})
if (accounty == account2):
accounty_key = account2_key
elif (accounty == account3):
accounty_key = account3_key
signed_txn = w3.eth.account.sign_transaction(construct_txn, accounty_key)
tx_hash_send = signed_txn.hash
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# Validating transaction hash
assert tx_hash_send == tx_hash, "tx hash mismatch"
# Validating transaction receipt
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
log.debug("transaction receipt: '{}'".format(tx_receipt))
assert tx_receipt.status == 1, "transaction failed"
log.info("nft transfer loop index: {}".format(i))
nft_item_count = nft_contract.functions.balanceOf(accountx).call()
log.info("nft_item_count accountx: {}".format(nft_item_count))
nft_item_count = nft_contract.functions.balanceOf(accounty).call()
log.info("nft_item_count accounty: {}".format(nft_item_count))
if (i % 2 == 1):
account_log = tx_receipt['logs'][1]['topics'][1].hex()
ret = account_log.find(accountx_str)
assert ret != -1, "NFT transfer log mismatch accountx"
account_log = tx_receipt['logs'][1]['topics'][2].hex()
ret = account_log.find(accounty_str)
assert ret != -1, "NFT transfer log mismatch accounty"
nft_item_id1_log = int(tx_receipt['logs'][1]['topics'][3].hex(), 16)
log.info("nft_item_id transferred from accountx to accounty: {}\n".format(nft_item_id1_log))
assert nft_item_id1_log == nft_item_id1, "NFT transfer log mismatch id"
else:
account_log = tx_receipt['logs'][1]['topics'][1].hex()
ret = account_log.find(accounty_str)
assert ret != -1, "NFT transfer log mismatch accounty"
account_log = tx_receipt['logs'][1]['topics'][2].hex()
ret = account_log.find(accountx_str)
assert ret != -1, "NFT transfer log mismatch accountx"
nft_item_id1_log = int(tx_receipt['logs'][1]['topics'][3].hex(), 16)
log.info("nft_item_id transferred from accounty to accountx: {}\n".format(nft_item_id1_log))
assert nft_item_id1_log == nft_item_id1, "NFT transfer log mismatch id"
end_time[j] = time.time()
# time.sleep(1)
avg_time = 0
for j in range(0, num_repeats, 1):
time1 = end_time[j] - start_time[j]
avg_time += time1
avg_time = avg_time / num_repeats
#log.info("average time taken for {} account transfers {} seconds".format(num_account_transfers, avg_time))
#tx_rate = num_account_transfers / avg_time
#log.info("average transactions per second {}\n".format(tx_rate))
# typical time taken for 10 account transfers on a hermes nimbus vm with 4 vCPUs, 16GB RAM is 1.5s
# jenkins node with 16 vCPUs, 32GB RAM is faster than hermes nimbus VM
#assert tx_rate >= 1, "NFT account transfer taking too long"
def erc721_account_perm_test(contract_address):
erc721_asset_transfer(contract_address, account3, account1)
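# Typical invocations (a sketch; assumes the JSON-RPC endpoint configured in
# ethrpcApiUrl above is reachable):
#   python3 eth_erc721_privacy.py --contract_deploy    # deploy both contracts and set permissions
#   python3 eth_erc721_privacy.py --contract1_use      # mint/transfer on contract 1 (account1 <-> account2)
#   python3 eth_erc721_privacy.py --contract2_use      # mint/transfer on contract 2 (account1 <-> account3)
#   python3 eth_erc721_privacy.py --account_perm_test  # account3 is not permissioned on contract 1, so the
#                                                      # transaction is expected to be rejected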
if __name__ == "__main__":
#print(f"Arguments count: {len(sys.argv)}")
for i, arg in enumerate(sys.argv):
#print(f"Argument {i:>6}: {arg}")
if (arg == "--contract_deploy"):
contract_deploy = True
contract1_use = False
contract2_use = False
account_perm_test = False
elif (arg == "--contract1_use"):
contract_deploy = False
contract1_use = True
contract2_use = False
account_perm_test = False
elif (arg == "--contract2_use"):
contract_deploy = False
contract1_use = False
contract2_use = True
account_perm_test = False
elif (arg == "--account_perm_test"):
contract_deploy = False
contract1_use = False
contract2_use = False
account_perm_test = True
# call main
main()
| 38.589147 | 120 | 0.624816 |
0a5f6518e120e18d4f954e00587e42deb3c0e7a9 | 15,979 | py | Python | socks-relay.py | guillon/socks-relay | a2e7cebf6596e3767c0b4f3e3e7faca6ed08167e | [
"MIT"
] | 6 | 2019-07-10T09:28:12.000Z | 2022-02-22T14:57:11.000Z | socks-relay.py | guillon/socks-relay | a2e7cebf6596e3767c0b4f3e3e7faca6ed08167e | [
"MIT"
] | 1 | 2020-10-17T05:10:52.000Z | 2020-10-17T05:10:52.000Z | socks-relay.py | guillon/socks-relay | a2e7cebf6596e3767c0b4f3e3e7faca6ed08167e | [
"MIT"
] | 2 | 2021-07-02T23:59:59.000Z | 2022-01-11T16:45:00.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2019 Christophe Guillon
# Copyright (c) 2018 Artem Golubin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This work is a derivative work from the toy socks server published
# at https://github.com/rushter/socks5
# which is itself under the MIT license and copyright reproduced above.
#
#
# socks-relay implements a SOCK5 server which can optionally relay to
# a further SOCKS5 server.
# A typical use case is to expose a no-auth server in front of an authenticating
# server.
#
# For instance install a socks server bound to localhost:1080
# with auth user1/password1 which relays to another socks server
# socks.example.org:1080 with auth user2/password2:
#
# SERVER_USER=user1 SERVER_PASSWORD=password1 SOCKS5_SERVER=socks.example.org:1080 \
# SOCKS5_USER=user2 SOCKS5_PASSWORD=password2 ./socks-relay.py localhost:1080'
#
# Or the same with no password for the local server:
#
# SOCKS5_SERVER=socks.example.org:1080 SOCKS5_USER=user2 SOCKS5_PASSWORD=password2 \
# ./socks-relay.py localhost:1080
#
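# A minimal client-side sketch (not part of the relay itself): with PySocks
# installed, a program can reach a remote host through this relay, assuming the
# relay listens on localhost:1080 with the user1/password1 credentials above:
#
#   import socks
#   s = socks.socksocket()
#   s.set_proxy(socks.SOCKS5, "localhost", 1080, username="user1", password="password1")
#   s.connect(("example.org", 80))
#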
import sys, os
import logging
import select
import socket
import selectors
import struct
import socks
import time
import re
from socketserver import ThreadingMixIn, TCPServer, BaseRequestHandler
logger = logging.getLogger("socks-relay")
logger_hdl = logging.StreamHandler()
logger_hdl.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
logger.addHandler(logger_hdl)
logger.setLevel(logging.INFO)
SOCKS_VERSION = 5
SOCKS5_METHOD_NOAUTH = 0x00
SOCKS5_METHOD_GSSAPI = 0x01
SOCKS5_METHOD_USERPASS = 0x02
SOCKS5_METHOD_NONE_ACCEPTABLE = 0xFF
SOCKS5_METHODS = {
SOCKS5_METHOD_NOAUTH: 'NO AUTHENTICATION REQUIRED',
SOCKS5_METHOD_GSSAPI: 'GSSAPI',
SOCKS5_METHOD_USERPASS: 'USERNAME/PASSWORD',
SOCKS5_METHOD_NONE_ACCEPTABLE: 'NO ACCEPTABLE METHODS'
}
SOCKS5_ATYPE_IPV4 = 0x01
SOCKS5_ATYPE_DOMAIN = 0x03
SOCKS5_ATYPE_IPV6 = 0x04
SOCKS5_ATYPES = {
SOCKS5_ATYPE_IPV4: 'IPV4',
SOCKS5_ATYPE_DOMAIN: 'DOMAINNAME',
SOCKS5_ATYPE_IPV6: 'IPV6',
}
SOCKS5_CMD_CONNECT = 0x01
SOCKS5_CMD_BIND = 0x02
SOCKS5_CMD_UDP_ASSOCIATE = 0x03
SOCKS5_CMDS = {
SOCKS5_CMD_CONNECT: 'CONNECT',
SOCKS5_CMD_BIND: 'BIND',
SOCKS5_CMD_UDP_ASSOCIATE: 'UDP ASSOCIATE',
}
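# For reference, the RFC 1928 messages handled below (summarized, not a full
# restatement of the RFC), next to the struct formats used to parse/build them:
#   client greeting : VER | NMETHODS | METHODS...                    ("!BB" + NMETHODS bytes)
#   server choice   : VER | METHOD                                   ("!BB")
#   client request  : VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT   ("!BBBB" + addr + "!H")
#   server reply    : VER | REP | RSV | ATYP | BND.ADDR | BND.PORT   ("!BBBBIH" for an IPv4 reply)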
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
pass
class ConnectionInterrupted(Exception):
pass
class SocksProxy(BaseRequestHandler):
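    """
    Per-connection SOCKS5 handler.

    Optionally authenticates the client with SERVER_USER/SERVER_PASSWORD,
    filters client and remote addresses against the configured regex lists,
    and opens the outgoing connection either directly or through the upstream
    SOCKS5 server given by SOCKS5_SERVER/SOCKS5_USER/SOCKS5_PASSWORD.
    """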
def setup(self):
super(SocksProxy, self).setup()
self.request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.username = os.environ.get('SERVER_USER', None)
self.password = os.environ.get('SERVER_PASSWORD', None)
if self.username:
if not self.password: self.password = ""
self.method = SOCKS5_METHOD_USERPASS
else:
self.method = SOCKS5_METHOD_NOAUTH
proxy_server = os.environ.get('SOCKS5_SERVER', None)
if proxy_server != None:
self.proxy_host, self.proxy_port = proxy_server.rsplit(":", 1)
self.proxy_port = int(self.proxy_port)
else:
self.proxy_host, self.proxy_port = (None, None)
self.proxy_username = os.environ.get('SOCKS5_USER', None)
self.proxy_password = os.environ.get('SOCKS5_PASSWORD', None)
self.resolve_list = os.environ.get('SERVER_RESOLVE_MAP', '')
self.resolve_map = {}
for pair in [x for x in self.resolve_list.split(",") if x]:
x, y = pair.split("=>")
self.resolve_map[x] = y
self.clients_allowed = os.environ.get('SERVER_CLIENTS_ALLOWED', '.*')
self.clients_refused = os.environ.get('SERVER_CLIENTS_REFUSED', '')
self.clients_re_white_list = []
self.clients_re_black_list = []
for allowed in [x for x in self.clients_allowed.split(",") if x]:
self.clients_re_white_list.append(re.compile(allowed))
for refused in [x for x in self.clients_refused.split(",") if x]:
self.clients_re_black_list.append(re.compile(refused))
self.remotes_allowed = os.environ.get('SERVER_REMOTES_ALLOWED', '.*')
self.remotes_refused = os.environ.get('SERVER_REMOTES_REFUSED', '')
self.remotes_re_white_list = []
self.remotes_re_black_list = []
for allowed in [x for x in self.remotes_allowed.split(",") if x]:
self.remotes_re_white_list.append(re.compile(allowed))
for refused in [x for x in self.remotes_refused.split(",") if x]:
self.remotes_re_black_list.append(re.compile(refused))
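        # A hedged configuration example (host names are illustrative only):
        #   SERVER_RESOLVE_MAP="intranet.example:443=>10.0.0.5:8443,oldhost=>newhost"
        #   SERVER_CLIENTS_ALLOWED="127\.0\.0\.1,10\..*"
        #   SERVER_REMOTES_REFUSED=".*\.internal\.example"
        # Entries are comma-separated; resolve entries are either
        # "host:port=>host:port" or "host=>host", and the allow/refuse entries
        # are regular expressions matched (with re.match) against the client IP
        # or the requested remote address.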
if os.environ.get('SERVER_DEBUG'): logger.setLevel(logging.DEBUG)
logger.info("Clients allowed: %s, refused: %s" % (self.clients_allowed, self.clients_refused))
logger.info("Remotes allowed: %s, refused: %s" % (self.remotes_allowed, self.remotes_refused))
logger.info("Resolve mapping: %s" % (self.resolve_list,))
def finish(self):
super(SocksProxy, self).finish()
def recv(self, sock, n):
try:
return sock.recv(n)
except Exception as e:
raise ConnectionInterrupted('in recv() %s: %s' % (sock, e))
def recvall(self, sock, n):
parts = []
total = 0
while total < n:
try:
part = sock.recv(n - total)
except Exception as e:
raise ConnectionInterrupted('in recvall() %s: %s' % (sock, e))
if len(part) == 0: break
total += len(part)
parts.append(part)
if total < n:
raise ConnectionInterrupted('in recvall() %s: unexpected end of stream' % sock)
return b''.join(parts)
def sendall(self, sock, msg):
try:
return sock.sendall(msg)
except Exception as e:
raise ConnectionInterrupted('sock.sendall %s: %s' % (sock, e))
def resolve_addr_port(self, address, port):
resolved = self.resolve_map.get("%s:%s" % (address, port))
if resolved != None:
resolved_address, resolved_port = resolved.rsplit(":", 1)
else:
resolved = self.resolve_map.get(address)
if resolved != None:
resolved_address, resolved_port = resolved, port
else:
resolved_address, resolved_port = address, port
if (resolved_address, resolved_port) != (address, port):
return self.resolve_addr_port(resolved_address, resolved_port)
return (resolved_address, resolved_port)
def verify_client_addr(self, address):
white = False
for authorized in self.clients_re_white_list:
if authorized.match(address):
white = True
break
if not white: return False
black = False
for rejected in self.clients_re_black_list:
if rejected.match(address):
black = True
break
if black: return False
return True
def verify_remote_addr(self, address):
white = False
for authorized in self.remotes_re_white_list:
if authorized.match(address):
white = True
break
if not white: return False
black = False
for rejected in self.remotes_re_black_list:
if rejected.match(address):
black = True
break
if black: return False
return True
def handle(self):
logger.info('client %s: Accepting connection: %s' % (self.client_address, self.request))
try:
# greeting header
header = self.recvall(self.request, 2)
version, nmethods = struct.unpack("!BB", header)
# asserts socks 5
assert version == SOCKS_VERSION
assert nmethods > 0
# get available methods
methods = set(self.recvall(self.request, nmethods))
logger.debug("client %s: Received requested methods: %s" % (self.client_address, methods))
if self.method not in methods:
logger.error("client %s: Authentication methods not available: %s" % (self.client_address, methods))
self.sendall(self.request, struct.pack("!BB", SOCKS_VERSION, SOCKS5_METHOD_NONE_ACCEPTABLE))
return
# send welcome message
logger.debug("client %s: Sending method: %s" % (self.client_address, self.method))
self.sendall(self.request, struct.pack("!BB", SOCKS_VERSION, self.method))
# send credentials
if self.method == SOCKS5_METHOD_USERPASS:
if not self.verify_credentials():
return
# request
version, cmd, _, address_type = struct.unpack("!BBBB", self.recvall(self.request, 4))
assert version == SOCKS_VERSION
if address_type not in [SOCKS5_ATYPE_IPV4, SOCKS5_ATYPE_DOMAIN]:
logger.error("client %s: Address Type not supported: %d (%s)" % (self.client_address, address_type, SOCKS5_ATYPES.get(address_type, "unknown")))
reply = self.generate_failed_reply(0x08) # Address type not supported
self.sendall(self.request, reply)
return
if address_type == SOCKS5_ATYPE_IPV4:
address = socket.inet_ntoa(self.recvall(self.request, 4))
elif address_type == SOCKS5_ATYPE_DOMAIN:
domain_length = self.recvall(self.request, 1)[0]
address = self.recvall(self.request, domain_length).decode('ascii')
port = struct.unpack('!H', self.recvall(self.request, 2))[0]
if cmd not in [SOCKS5_CMD_CONNECT]:
logger.error("client %s: Command not supported: %d (%s)" % (self.client_address, cmd, SOCKS5_CMDS.get(cmd, "unknown")))
reply = self.generate_failed_reply(0x07) # Command not supported
self.sendall(self.request, reply)
return
logger.info("client %s: Received command %d for %s:%s" % (self.client_address, cmd, address, port))
if not self.verify_client_addr(self.client_address[0]):
logger.error("client %s: client address '%s' rejected: returning connection refused" % (self.client_address, self.client_address[0]))
reply = self.generate_failed_reply(0x05) # Connection refused
self.sendall(self.request, reply)
return
if not self.verify_remote_addr(address):
logger.error("client %s: remote address '%s' rejected: returning connection refused" % (self.client_address, address))
reply = self.generate_failed_reply(0x05) # Connection refused
self.sendall(self.request, reply)
return
resolved_address, resolved_port = self.resolve_addr_port(address, port)
logger.info("client %s: resolved remote address '%s:%s' as: '%s:%s'" % (self.client_address, address, port, resolved_address, resolved_port))
if self.proxy_host and self.proxy_port:
socket_class = socks.socksocket
else:
socket_class = socket.socket
with socket_class() as remote:
try:
if self.proxy_host and self.proxy_port:
remote.set_proxy(socks.SOCKS5, self.proxy_host, self.proxy_port, username=self.proxy_username, password=self.proxy_password)
remote.connect((resolved_address, resolved_port))
except Exception as err:
logger.error("client %s: could not connect to remote: %s" % (self.client_address, err))
reply = self.generate_failed_reply(0x05) # Connection refused
self.sendall(self.request, reply)
return
logger.info('client %s: Connected to %s:%s: %s' % (self.client_address, resolved_address, resolved_port, remote))
bind_address = remote.getsockname()
addr = struct.unpack("!I", socket.inet_aton(bind_address[0]))[0]
port = bind_address[1]
reply = struct.pack("!BBBBIH", SOCKS_VERSION, 0, 0, SOCKS5_ATYPE_IPV4, addr, port)
self.sendall(self.request, reply)
self.exchange_loop(self.request, remote)
except ConnectionInterrupted as e:
logger.info("client %s: Connection interrupted: %s" % (self.client_address, e))
finally:
self.server.close_request(self.request)
logger.info('client %s: Closed connection' % (self.client_address,))
def verify_credentials(self):
version = self.recvall(self.request, 1)[0]
assert version == 1
username_len = self.recvall(self.request, 1)[0]
username = self.recvall(self.request, username_len).decode('utf-8')
password_len = self.recvall(self.request, 1)[0]
password = self.recvall(self.request, password_len).decode('utf-8')
if username == self.username and password == self.password:
# success, status = 0
logger.debug("client %s: Sending succesfull authentication" % (self.client_address,))
response = struct.pack("!BB", version, 0)
self.sendall(self.request, response)
return True
# failure, status != 0
logger.error("client %s: Authentication failure for username '%s'" % (self.client_address, username))
response = struct.pack("!BB", version, 0xFF)
self.sendall(self.request, response)
return False
def generate_failed_reply(self, error_number):
return struct.pack("!BBBBIH", SOCKS_VERSION, error_number, 0, SOCKS5_ATYPE_IPV4, 0, 0)
def exchange_loop(self, client, remote):
sel = selectors.DefaultSelector()
client.setblocking(False)
sel.register(client, selectors.EVENT_READ, remote)
remote.setblocking(False)
sel.register(remote, selectors.EVENT_READ, client)
while len(sel.get_map().keys()) == 2:
events = sel.select()
for key, mask in events:
data = self.recv(key.fileobj, 4096)
if len(data) > 0:
self.sendall(key.data, data)
else:
sel.unregister(key.fileobj)
sel.close()
if __name__ == '__main__':
host, port = sys.argv[1].rsplit(":", 1)
if host == "": host = 'localhost'
elif host == "*": host = '0.0.0.0'
port = int(port)
logger.info("Socks relay listening for %s:%d" % (host, port))
try:
server = ThreadingTCPServer((host, port), SocksProxy)
except OSError as e:
        if e.errno == 98:  # EADDRINUSE: the address is already in use
logger.error("cannot bind %s:%d: %s" % (host, port, e))
sys.exit(1)
raise
try:
server.serve_forever()
finally:
server.server_close()
| 41.503896 | 160 | 0.635396 |
25c3762d813020f12097a5df2c5375ab50884bc6 | 60,527 | py | Python | Lib/test/test_urlparse.py | native-api/cpython | 63799136e6c0491bb5d6f4a234d5a775db3458db | [
"PSF-2.0"
] | 2 | 2018-05-23T05:18:49.000Z | 2021-08-01T08:43:23.000Z | Lib/test/test_urlparse.py | native-api/cpython | 63799136e6c0491bb5d6f4a234d5a775db3458db | [
"PSF-2.0"
] | null | null | null | Lib/test/test_urlparse.py | native-api/cpython | 63799136e6c0491bb5d6f4a234d5a775db3458db | [
"PSF-2.0"
] | null | null | null | import unittest
import urllib.parse
import warnings
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = 'http://a/b/c/d;p?q'
SIMPLE_BASE = 'http://a/b/c/d'
# Each parse_qsl testcase is a two-tuple that contains
# a string with the query and a list with the expected result.
parse_qsl_test_cases = [
("", []),
("&", []),
("&&", []),
("=", [('', '')]),
("=a", [('', 'a')]),
("a", [('a', '')]),
("a=", [('a', '')]),
("&a=b", [('a', 'b')]),
("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1&a=2", [('a', '1'), ('a', '2')]),
(b"", []),
(b"&", []),
(b"&&", []),
(b"=", [(b'', b'')]),
(b"=a", [(b'', b'a')]),
(b"a", [(b'a', b'')]),
(b"a=", [(b'a', b'')]),
(b"&a=b", [(b'a', b'b')]),
(b"a=a+b&b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
(b"a=1&a=2", [(b'a', b'1'), (b'a', b'2')]),
(";", []),
(";;", []),
(";a=b", [('a', 'b')]),
("a=a+b;b=b+c", [('a', 'a b'), ('b', 'b c')]),
("a=1;a=2", [('a', '1'), ('a', '2')]),
(b";", []),
(b";;", []),
(b";a=b", [(b'a', b'b')]),
(b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
(b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
]
# Each parse_qs testcase is a two-tuple that contains
# a string with the query and a dictionary with the expected result.
parse_qs_test_cases = [
("", {}),
("&", {}),
("&&", {}),
("=", {'': ['']}),
("=a", {'': ['a']}),
("a", {'a': ['']}),
("a=", {'a': ['']}),
("&a=b", {'a': ['b']}),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=1&a=2", {'a': ['1', '2']}),
(b"", {}),
(b"&", {}),
(b"&&", {}),
(b"=", {b'': [b'']}),
(b"=a", {b'': [b'a']}),
(b"a", {b'a': [b'']}),
(b"a=", {b'a': [b'']}),
(b"&a=b", {b'a': [b'b']}),
(b"a=a+b&b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1&a=2", {b'a': [b'1', b'2']}),
(";", {}),
(";;", {}),
(";a=b", {'a': ['b']}),
("a=a+b;b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=1;a=2", {'a': ['1', '2']}),
(b";", {}),
(b";;", {}),
(b";a=b", {b'a': [b'b']}),
(b"a=a+b;b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
(b"a=1;a=2", {b'a': [b'1', b'2']}),
]
class UrlParseTestCase(unittest.TestCase):
def checkRoundtrips(self, url, parsed, split):
result = urllib.parse.urlparse(url)
self.assertEqual(result, parsed)
t = (result.scheme, result.netloc, result.path,
result.params, result.query, result.fragment)
self.assertEqual(t, parsed)
# put it back together and it should be the same
result2 = urllib.parse.urlunparse(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# the result of geturl() is a fixpoint; we can always parse it
# again to get the same result:
result3 = urllib.parse.urlparse(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.params, result.params)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
# check the roundtrip using urlsplit() as well
result = urllib.parse.urlsplit(url)
self.assertEqual(result, split)
t = (result.scheme, result.netloc, result.path,
result.query, result.fragment)
self.assertEqual(t, split)
result2 = urllib.parse.urlunsplit(result)
self.assertEqual(result2, url)
self.assertEqual(result2, result.geturl())
# check the fixpoint property of re-parsing the result of geturl()
result3 = urllib.parse.urlsplit(result.geturl())
self.assertEqual(result3.geturl(), result.geturl())
self.assertEqual(result3, result)
self.assertEqual(result3.scheme, result.scheme)
self.assertEqual(result3.netloc, result.netloc)
self.assertEqual(result3.path, result.path)
self.assertEqual(result3.query, result.query)
self.assertEqual(result3.fragment, result.fragment)
self.assertEqual(result3.username, result.username)
self.assertEqual(result3.password, result.password)
self.assertEqual(result3.hostname, result.hostname)
self.assertEqual(result3.port, result.port)
def test_qsl(self):
for orig, expect in parse_qsl_test_cases:
result = urllib.parse.parse_qsl(orig, keep_blank_values=True)
self.assertEqual(result, expect, "Error parsing %r" % orig)
expect_without_blanks = [v for v in expect if len(v[1])]
result = urllib.parse.parse_qsl(orig, keep_blank_values=False)
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
def test_qs(self):
for orig, expect in parse_qs_test_cases:
result = urllib.parse.parse_qs(orig, keep_blank_values=True)
self.assertEqual(result, expect, "Error parsing %r" % orig)
expect_without_blanks = {v: expect[v]
for v in expect if len(expect[v][0])}
result = urllib.parse.parse_qs(orig, keep_blank_values=False)
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
def test_roundtrips(self):
str_cases = [
('file:///tmp/junk.txt',
('file', '', '/tmp/junk.txt', '', '', ''),
('file', '', '/tmp/junk.txt', '', '')),
('imap://mail.python.org/mbox1',
('imap', 'mail.python.org', '/mbox1', '', '', ''),
('imap', 'mail.python.org', '/mbox1', '', '')),
('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '', ''),
('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
'', '')),
('nfs://server/path/to/file.txt',
('nfs', 'server', '/path/to/file.txt', '', '', ''),
('nfs', 'server', '/path/to/file.txt', '', '')),
('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '', ''),
('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
'', '')),
('git+ssh://git@github.com/user/project.git',
('git+ssh', 'git@github.com','/user/project.git',
'','',''),
('git+ssh', 'git@github.com','/user/project.git',
'', '')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
for url, parsed, split in str_cases + bytes_cases:
self.checkRoundtrips(url, parsed, split)
def test_http_roundtrips(self):
# urllib.parse.urlsplit treats 'http:' as an optimized special case,
# so we test both 'http:' and 'https:' in all the following.
# Three cheers for white box knowledge!
str_cases = [
('://www.python.org',
('www.python.org', '', '', '', ''),
('www.python.org', '', '', '')),
('://www.python.org#abc',
('www.python.org', '', '', '', 'abc'),
('www.python.org', '', '', 'abc')),
('://www.python.org?q=abc',
('www.python.org', '', '', 'q=abc', ''),
('www.python.org', '', 'q=abc', '')),
('://www.python.org/#abc',
('www.python.org', '/', '', '', 'abc'),
('www.python.org', '/', '', 'abc')),
('://a/b/c/d;p?q#f',
('a', '/b/c/d', 'p', 'q', 'f'),
('a', '/b/c/d;p', 'q', 'f')),
]
def _encode(t):
return (t[0].encode('ascii'),
tuple(x.encode('ascii') for x in t[1]),
tuple(x.encode('ascii') for x in t[2]))
bytes_cases = [_encode(x) for x in str_cases]
str_schemes = ('http', 'https')
bytes_schemes = (b'http', b'https')
str_tests = str_schemes, str_cases
bytes_tests = bytes_schemes, bytes_cases
for schemes, test_cases in (str_tests, bytes_tests):
for scheme in schemes:
for url, parsed, split in test_cases:
url = scheme + url
parsed = (scheme,) + parsed
split = (scheme,) + split
self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected):
str_components = (base, relurl, expected)
self.assertEqual(urllib.parse.urljoin(base, relurl), expected)
bytes_components = baseb, relurlb, expectedb = [
x.encode('ascii') for x in str_components]
self.assertEqual(urllib.parse.urljoin(baseb, relurlb), expectedb)
def test_unparse_parse(self):
        str_cases = ['Python', './Python', 'x-newscheme://foo.com/stuff', 'x://y', 'x:/y', 'x:/', '/']
bytes_cases = [x.encode('ascii') for x in str_cases]
for u in str_cases + bytes_cases:
self.assertEqual(urllib.parse.urlunsplit(urllib.parse.urlsplit(u)), u)
self.assertEqual(urllib.parse.urlunparse(urllib.parse.urlparse(u)), u)
def test_RFC1808(self):
# "normal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, 'g:h', 'g:h')
self.checkJoin(RFC1808_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC1808_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, '/g', 'http://a/g')
self.checkJoin(RFC1808_BASE, '//g', 'http://g')
self.checkJoin(RFC1808_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC1808_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC1808_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC1808_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC1808_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC1808_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC1808_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC1808_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC1808_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC1808_BASE, '..', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../', 'http://a/b/')
self.checkJoin(RFC1808_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, '../..', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../', 'http://a/')
self.checkJoin(RFC1808_BASE, '../../g', 'http://a/g')
# "abnormal" cases from RFC 1808:
self.checkJoin(RFC1808_BASE, '', 'http://a/b/c/d;p?q#f')
self.checkJoin(RFC1808_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC1808_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC1808_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC1808_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC1808_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC1808_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC1808_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC1808_BASE, 'g/../h', 'http://a/b/c/h')
# RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
# so we'll not actually run these tests (which expect 1808 behavior).
#self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
#self.checkJoin(RFC1808_BASE, 'http:', 'http:')
# XXX: The following tests are no longer compatible with RFC3986
# self.checkJoin(RFC1808_BASE, '../../../g', 'http://a/../g')
# self.checkJoin(RFC1808_BASE, '../../../../g', 'http://a/../../g')
# self.checkJoin(RFC1808_BASE, '/./g', 'http://a/./g')
# self.checkJoin(RFC1808_BASE, '/../g', 'http://a/../g')
def test_RFC2368(self):
# Issue 11467: path that starts with a number is not parsed correctly
self.assertEqual(urllib.parse.urlparse('mailto:1337@example.org'),
('mailto', '', '1337@example.org', '', '', ''))
def test_RFC2396(self):
# cases from RFC 2396
self.checkJoin(RFC2396_BASE, 'g:h', 'g:h')
self.checkJoin(RFC2396_BASE, 'g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, './g', 'http://a/b/c/g')
self.checkJoin(RFC2396_BASE, 'g/', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, '/g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '//g', 'http://g')
self.checkJoin(RFC2396_BASE, 'g?y', 'http://a/b/c/g?y')
self.checkJoin(RFC2396_BASE, '#s', 'http://a/b/c/d;p?q#s')
self.checkJoin(RFC2396_BASE, 'g#s', 'http://a/b/c/g#s')
self.checkJoin(RFC2396_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
self.checkJoin(RFC2396_BASE, 'g;x', 'http://a/b/c/g;x')
self.checkJoin(RFC2396_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
self.checkJoin(RFC2396_BASE, '.', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, './', 'http://a/b/c/')
self.checkJoin(RFC2396_BASE, '..', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../', 'http://a/b/')
self.checkJoin(RFC2396_BASE, '../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, '../..', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../', 'http://a/')
self.checkJoin(RFC2396_BASE, '../../g', 'http://a/g')
self.checkJoin(RFC2396_BASE, '', RFC2396_BASE)
self.checkJoin(RFC2396_BASE, 'g.', 'http://a/b/c/g.')
self.checkJoin(RFC2396_BASE, '.g', 'http://a/b/c/.g')
self.checkJoin(RFC2396_BASE, 'g..', 'http://a/b/c/g..')
self.checkJoin(RFC2396_BASE, '..g', 'http://a/b/c/..g')
self.checkJoin(RFC2396_BASE, './../g', 'http://a/b/g')
self.checkJoin(RFC2396_BASE, './g/.', 'http://a/b/c/g/')
self.checkJoin(RFC2396_BASE, 'g/./h', 'http://a/b/c/g/h')
self.checkJoin(RFC2396_BASE, 'g/../h', 'http://a/b/c/h')
self.checkJoin(RFC2396_BASE, 'g;x=1/./y', 'http://a/b/c/g;x=1/y')
self.checkJoin(RFC2396_BASE, 'g;x=1/../y', 'http://a/b/c/y')
self.checkJoin(RFC2396_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
self.checkJoin(RFC2396_BASE, 'g?y/../x', 'http://a/b/c/g?y/../x')
self.checkJoin(RFC2396_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
self.checkJoin(RFC2396_BASE, 'g#s/../x', 'http://a/b/c/g#s/../x')
# XXX: The following tests are no longer compatible with RFC3986
# self.checkJoin(RFC2396_BASE, '../../../g', 'http://a/../g')
# self.checkJoin(RFC2396_BASE, '../../../../g', 'http://a/../../g')
# self.checkJoin(RFC2396_BASE, '/./g', 'http://a/./g')
# self.checkJoin(RFC2396_BASE, '/../g', 'http://a/../g')
def test_RFC3986(self):
self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
self.checkJoin(RFC3986_BASE, ';x', 'http://a/b/c/;x')
self.checkJoin(RFC3986_BASE, 'g:h','g:h')
self.checkJoin(RFC3986_BASE, 'g','http://a/b/c/g')
self.checkJoin(RFC3986_BASE, './g','http://a/b/c/g')
self.checkJoin(RFC3986_BASE, 'g/','http://a/b/c/g/')
self.checkJoin(RFC3986_BASE, '/g','http://a/g')
self.checkJoin(RFC3986_BASE, '//g','http://g')
self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
self.checkJoin(RFC3986_BASE, 'g?y','http://a/b/c/g?y')
self.checkJoin(RFC3986_BASE, '#s','http://a/b/c/d;p?q#s')
self.checkJoin(RFC3986_BASE, 'g#s','http://a/b/c/g#s')
self.checkJoin(RFC3986_BASE, 'g?y#s','http://a/b/c/g?y#s')
self.checkJoin(RFC3986_BASE, ';x','http://a/b/c/;x')
self.checkJoin(RFC3986_BASE, 'g;x','http://a/b/c/g;x')
self.checkJoin(RFC3986_BASE, 'g;x?y#s','http://a/b/c/g;x?y#s')
self.checkJoin(RFC3986_BASE, '','http://a/b/c/d;p?q')
self.checkJoin(RFC3986_BASE, '.','http://a/b/c/')
self.checkJoin(RFC3986_BASE, './','http://a/b/c/')
self.checkJoin(RFC3986_BASE, '..','http://a/b/')
self.checkJoin(RFC3986_BASE, '../','http://a/b/')
self.checkJoin(RFC3986_BASE, '../g','http://a/b/g')
self.checkJoin(RFC3986_BASE, '../..','http://a/')
self.checkJoin(RFC3986_BASE, '../../','http://a/')
self.checkJoin(RFC3986_BASE, '../../g','http://a/g')
self.checkJoin(RFC3986_BASE, '../../../g', 'http://a/g')
# Abnormal Examples
        # The 'abnormal scenarios' are incompatible with RFC3986 parsing
# Tests are here for reference.
self.checkJoin(RFC3986_BASE, '../../../g','http://a/g')
self.checkJoin(RFC3986_BASE, '../../../../g','http://a/g')
self.checkJoin(RFC3986_BASE, '/./g','http://a/g')
self.checkJoin(RFC3986_BASE, '/../g','http://a/g')
self.checkJoin(RFC3986_BASE, 'g.','http://a/b/c/g.')
self.checkJoin(RFC3986_BASE, '.g','http://a/b/c/.g')
self.checkJoin(RFC3986_BASE, 'g..','http://a/b/c/g..')
self.checkJoin(RFC3986_BASE, '..g','http://a/b/c/..g')
self.checkJoin(RFC3986_BASE, './../g','http://a/b/g')
self.checkJoin(RFC3986_BASE, './g/.','http://a/b/c/g/')
self.checkJoin(RFC3986_BASE, 'g/./h','http://a/b/c/g/h')
self.checkJoin(RFC3986_BASE, 'g/../h','http://a/b/c/h')
self.checkJoin(RFC3986_BASE, 'g;x=1/./y','http://a/b/c/g;x=1/y')
self.checkJoin(RFC3986_BASE, 'g;x=1/../y','http://a/b/c/y')
self.checkJoin(RFC3986_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin(RFC3986_BASE, 'g?y/../x','http://a/b/c/g?y/../x')
self.checkJoin(RFC3986_BASE, 'g#s/./x','http://a/b/c/g#s/./x')
self.checkJoin(RFC3986_BASE, 'g#s/../x','http://a/b/c/g#s/../x')
#self.checkJoin(RFC3986_BASE, 'http:g','http:g') # strict parser
self.checkJoin(RFC3986_BASE, 'http:g','http://a/b/c/g') #relaxed parser
# Test for issue9721
self.checkJoin('http://a/b/c/de', ';x','http://a/b/c/;x')
def test_urljoins(self):
self.checkJoin(SIMPLE_BASE, 'g:h','g:h')
self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
self.checkJoin(SIMPLE_BASE, 'g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, './g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'g/','http://a/b/c/g/')
self.checkJoin(SIMPLE_BASE, '/g','http://a/g')
self.checkJoin(SIMPLE_BASE, '//g','http://g')
self.checkJoin(SIMPLE_BASE, '?y','http://a/b/c/d?y')
self.checkJoin(SIMPLE_BASE, 'g?y','http://a/b/c/g?y')
self.checkJoin(SIMPLE_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin(SIMPLE_BASE, '.','http://a/b/c/')
self.checkJoin(SIMPLE_BASE, './','http://a/b/c/')
self.checkJoin(SIMPLE_BASE, '..','http://a/b/')
self.checkJoin(SIMPLE_BASE, '../','http://a/b/')
self.checkJoin(SIMPLE_BASE, '../g','http://a/b/g')
self.checkJoin(SIMPLE_BASE, '../..','http://a/')
self.checkJoin(SIMPLE_BASE, '../../g','http://a/g')
self.checkJoin(SIMPLE_BASE, './../g','http://a/b/g')
self.checkJoin(SIMPLE_BASE, './g/.','http://a/b/c/g/')
self.checkJoin(SIMPLE_BASE, 'g/./h','http://a/b/c/g/h')
self.checkJoin(SIMPLE_BASE, 'g/../h','http://a/b/c/h')
self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
self.checkJoin(SIMPLE_BASE, 'http:?y','http://a/b/c/d?y')
self.checkJoin(SIMPLE_BASE, 'http:g?y','http://a/b/c/g?y')
self.checkJoin(SIMPLE_BASE, 'http:g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin('http:///', '..','http:///')
self.checkJoin('', 'http://a/b/c/g?y/./x','http://a/b/c/g?y/./x')
self.checkJoin('', 'http://a/./g', 'http://a/./g')
self.checkJoin('svn://pathtorepo/dir1', 'dir2', 'svn://pathtorepo/dir2')
self.checkJoin('svn+ssh://pathtorepo/dir1', 'dir2', 'svn+ssh://pathtorepo/dir2')
self.checkJoin('ws://a/b','g','ws://a/g')
self.checkJoin('wss://a/b','g','wss://a/g')
# XXX: The following tests are no longer compatible with RFC3986
# self.checkJoin(SIMPLE_BASE, '../../../g','http://a/../g')
# self.checkJoin(SIMPLE_BASE, '/./g','http://a/./g')
# test for issue22118 duplicate slashes
self.checkJoin(SIMPLE_BASE + '/', 'foo', SIMPLE_BASE + '/foo')
# Non-RFC-defined tests, covering variations of base and trailing
# slashes
self.checkJoin('http://a/b/c/d/e/', '../../f/g/', 'http://a/b/c/f/g/')
self.checkJoin('http://a/b/c/d/e', '../../f/g/', 'http://a/b/f/g/')
self.checkJoin('http://a/b/c/d/e/', '/../../f/g/', 'http://a/f/g/')
self.checkJoin('http://a/b/c/d/e', '/../../f/g/', 'http://a/f/g/')
self.checkJoin('http://a/b/c/d/e/', '../../f/g', 'http://a/b/c/f/g')
self.checkJoin('http://a/b/', '../../f/g/', 'http://a/f/g/')
# issue 23703: don't duplicate filename
self.checkJoin('a', 'b', 'b')
def test_RFC2732(self):
str_cases = [
('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
('http://[::1]:5432/foo/', '::1', 5432),
('http://[dead:beef::1]:5432/foo/', 'dead:beef::1', 5432),
('http://[dead:beef::]:5432/foo/', 'dead:beef::', 5432),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:5432/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', 5432),
('http://[::12.34.56.78]:5432/foo/', '::12.34.56.78', 5432),
('http://[::ffff:12.34.56.78]:5432/foo/',
'::ffff:12.34.56.78', 5432),
('http://Test.python.org/foo/', 'test.python.org', None),
('http://12.34.56.78/foo/', '12.34.56.78', None),
('http://[::1]/foo/', '::1', None),
('http://[dead:beef::1]/foo/', 'dead:beef::1', None),
('http://[dead:beef::]/foo/', 'dead:beef::', None),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
('http://[::12.34.56.78]/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]/foo/',
'::ffff:12.34.56.78', None),
('http://Test.python.org:/foo/', 'test.python.org', None),
('http://12.34.56.78:/foo/', '12.34.56.78', None),
('http://[::1]:/foo/', '::1', None),
('http://[dead:beef::1]:/foo/', 'dead:beef::1', None),
('http://[dead:beef::]:/foo/', 'dead:beef::', None),
('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:/foo/',
'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]:/foo/',
'::ffff:12.34.56.78', None),
]
def _encode(t):
return t[0].encode('ascii'), t[1].encode('ascii'), t[2]
bytes_cases = [_encode(x) for x in str_cases]
for url, hostname, port in str_cases + bytes_cases:
urlparsed = urllib.parse.urlparse(url)
self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
str_cases = [
'http://::12.34.56.78]/',
'http://[::1/foo/',
'ftp://[::1/foo/bad]/bad',
'http://[::1/foo/bad]/bad',
'http://[::ffff:12.34.56.78']
bytes_cases = [x.encode('ascii') for x in str_cases]
for invalid_url in str_cases + bytes_cases:
self.assertRaises(ValueError, urllib.parse.urlparse, invalid_url)
def test_urldefrag(self):
str_cases = [
('http://python.org#frag', 'http://python.org', 'frag'),
('http://python.org', 'http://python.org', ''),
('http://python.org/#frag', 'http://python.org/', 'frag'),
('http://python.org/', 'http://python.org/', ''),
('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
('http://python.org/?q', 'http://python.org/?q', ''),
('http://python.org/p#frag', 'http://python.org/p', 'frag'),
('http://python.org/p?q', 'http://python.org/p?q', ''),
(RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
(RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
]
def _encode(t):
return type(t)(x.encode('ascii') for x in t)
bytes_cases = [_encode(x) for x in str_cases]
for url, defrag, frag in str_cases + bytes_cases:
result = urllib.parse.urldefrag(url)
self.assertEqual(result.geturl(), url)
self.assertEqual(result, (defrag, frag))
self.assertEqual(result.url, defrag)
self.assertEqual(result.fragment, frag)
def test_urlsplit_scoped_IPv6(self):
p = urllib.parse.urlsplit('http://[FE80::822a:a8ff:fe49:470c%tESt]:1234')
self.assertEqual(p.hostname, "fe80::822a:a8ff:fe49:470c%tESt")
self.assertEqual(p.netloc, '[FE80::822a:a8ff:fe49:470c%tESt]:1234')
p = urllib.parse.urlsplit(b'http://[FE80::822a:a8ff:fe49:470c%tESt]:1234')
self.assertEqual(p.hostname, b"fe80::822a:a8ff:fe49:470c%tESt")
self.assertEqual(p.netloc, b'[FE80::822a:a8ff:fe49:470c%tESt]:1234')
def test_urlsplit_attributes(self):
url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, None)
# geturl() won't return exactly the original URL in this case
# since the scheme is always case-normalized
# We handle this by ignoring the first 4 characters of the URL
self.assertEqual(p.geturl()[4:], url[4:])
url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Addressing issue1698, which suggests Username can contain
# "@" characters. Though not RFC compliant, many ftp sites allow
# and request email addresses as usernames.
url = "http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, "http")
self.assertEqual(p.netloc, "User@example.com:Pass@www.python.org:080")
self.assertEqual(p.path, "/doc/")
self.assertEqual(p.query, "query=yes")
self.assertEqual(p.fragment, "frag")
self.assertEqual(p.username, "User@example.com")
self.assertEqual(p.password, "Pass")
self.assertEqual(p.hostname, "www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# And check them all again, only with bytes this time
url = b"HTTP://WWW.PYTHON.ORG/doc/#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"WWW.PYTHON.ORG")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, None)
self.assertEqual(p.geturl()[4:], url[4:])
url = b"http://User:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"User:Pass@www.python.org:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"User")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
url = b"http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
p = urllib.parse.urlsplit(url)
self.assertEqual(p.scheme, b"http")
self.assertEqual(p.netloc, b"User@example.com:Pass@www.python.org:080")
self.assertEqual(p.path, b"/doc/")
self.assertEqual(p.query, b"query=yes")
self.assertEqual(p.fragment, b"frag")
self.assertEqual(p.username, b"User@example.com")
self.assertEqual(p.password, b"Pass")
self.assertEqual(p.hostname, b"www.python.org")
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
# Verify an illegal port raises ValueError
url = b"HTTP://WWW.PYTHON.ORG:65536/doc/#frag"
p = urllib.parse.urlsplit(url)
with self.assertRaisesRegex(ValueError, "out of range"):
p.port
def test_attributes_bad_port(self):
"""Check handling of invalid ports."""
for bytes in (False, True):
for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
for port in ("foo", "1.5", "-1", "0x10"):
with self.subTest(bytes=bytes, parse=parse, port=port):
netloc = "www.example.net:" + port
url = "http://" + netloc
if bytes:
netloc = netloc.encode("ascii")
url = url.encode("ascii")
p = parse(url)
self.assertEqual(p.netloc, netloc)
with self.assertRaises(ValueError):
p.port
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
# should allow the username, hostname, and port to be filled
# in, but doesn't. Since it's a URI and doesn't use the
# scheme://netloc syntax, the netloc and related attributes
# should be left empty.
uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
# You guessed it, repeating the test with bytes input
uri = b"sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urllib.parse.urlsplit(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urllib.parse.urlparse(uri)
self.assertEqual(p.netloc, b"")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
def test_noslash(self):
# Issue 1637: http://foo.com?query is legal
self.assertEqual(urllib.parse.urlparse("http://example.com?blahblah=/foo"),
('http', 'example.com', '', '', 'blahblah=/foo', ''))
self.assertEqual(urllib.parse.urlparse(b"http://example.com?blahblah=/foo"),
(b'http', b'example.com', b'', b'', b'blahblah=/foo', b''))
def test_withoutscheme(self):
# Test urlparse without scheme
# Issue 754016: urlparse goes wrong with IP:port without scheme
# RFC 1808 specifies that netloc should start with //, urlparse expects
# the same, otherwise it classifies the portion of url as path.
self.assertEqual(urllib.parse.urlparse("path"),
('','','path','','',''))
self.assertEqual(urllib.parse.urlparse("//www.python.org:80"),
('','www.python.org:80','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# Repeat for bytes input
self.assertEqual(urllib.parse.urlparse(b"path"),
(b'',b'',b'path',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"//www.python.org:80"),
(b'',b'www.python.org:80',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_portseparator(self):
# Issue 754016 makes changes for port separator ':' from scheme separator
self.assertEqual(urllib.parse.urlparse("path:80"),
('','','path:80','','',''))
self.assertEqual(urllib.parse.urlparse("http:"),('http','','','','',''))
self.assertEqual(urllib.parse.urlparse("https:"),('https','','','','',''))
self.assertEqual(urllib.parse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
# As usual, need to check bytes input as well
self.assertEqual(urllib.parse.urlparse(b"path:80"),
(b'',b'',b'path:80',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http:"),(b'http',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"https:"),(b'https',b'',b'',b'',b'',b''))
self.assertEqual(urllib.parse.urlparse(b"http://www.python.org:80"),
(b'http',b'www.python.org:80',b'',b'',b'',b''))
def test_usingsys(self):
# Issue 3314: sys module is used in the error
self.assertRaises(TypeError, urllib.parse.urlencode, "foo")
def test_anyscheme(self):
# Issue 7904: s3://foo.com/stuff has netloc "foo.com".
self.assertEqual(urllib.parse.urlparse("s3://foo.com/stuff"),
('s3', 'foo.com', '/stuff', '', '', ''))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff"),
('x-newscheme', 'foo.com', '/stuff', '', '', ''))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query#fragment"),
('x-newscheme', 'foo.com', '/stuff', '', 'query', 'fragment'))
self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query"),
('x-newscheme', 'foo.com', '/stuff', '', 'query', ''))
# And for bytes...
self.assertEqual(urllib.parse.urlparse(b"s3://foo.com/stuff"),
(b's3', b'foo.com', b'/stuff', b'', b'', b''))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'', b''))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query#fragment"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b'fragment'))
self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"),
(b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b''))
def test_default_scheme(self):
# Exercise the scheme parameter of urlparse() and urlsplit()
for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
with self.subTest(function=func):
result = func("http://example.net/", "ftp")
self.assertEqual(result.scheme, "http")
result = func(b"http://example.net/", b"ftp")
self.assertEqual(result.scheme, b"http")
self.assertEqual(func("path", "ftp").scheme, "ftp")
self.assertEqual(func("path", scheme="ftp").scheme, "ftp")
self.assertEqual(func(b"path", scheme=b"ftp").scheme, b"ftp")
self.assertEqual(func("path").scheme, "")
self.assertEqual(func(b"path").scheme, b"")
self.assertEqual(func(b"path", "").scheme, b"")
def test_parse_fragments(self):
# Exercise the allow_fragments parameter of urlparse() and urlsplit()
tests = (
("http:#frag", "path", "frag"),
("//example.net#frag", "path", "frag"),
("index.html#frag", "path", "frag"),
(";a=b#frag", "params", "frag"),
("?a=b#frag", "query", "frag"),
("#frag", "path", "frag"),
("abc#@frag", "path", "@frag"),
("//abc#@frag", "path", "@frag"),
("//abc:80#@frag", "path", "@frag"),
("//abc#@frag:80", "path", "@frag:80"),
)
for url, attr, expected_frag in tests:
for func in (urllib.parse.urlparse, urllib.parse.urlsplit):
if attr == "params" and func is urllib.parse.urlsplit:
attr = "path"
with self.subTest(url=url, function=func):
result = func(url, allow_fragments=False)
self.assertEqual(result.fragment, "")
self.assertTrue(
getattr(result, attr).endswith("#" + expected_frag))
self.assertEqual(func(url, "", False).fragment, "")
result = func(url, allow_fragments=True)
self.assertEqual(result.fragment, expected_frag)
self.assertFalse(
getattr(result, attr).endswith(expected_frag))
self.assertEqual(func(url, "", True).fragment,
expected_frag)
self.assertEqual(func(url).fragment, expected_frag)
def test_mixed_types_rejected(self):
# Several functions that process either strings or ASCII encoded bytes
# accept multiple arguments. Check they reject mixed type input
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlparse(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit("www.python.org", b"http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlsplit(b"www.python.org", "http")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(( b"http", "www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunparse(("http", b"www.python.org","","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit((b"http", "www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urlunsplit(("http", b"www.python.org","","",""))
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin("http://python.org", b"http://python.org")
with self.assertRaisesRegex(TypeError, "Cannot mix str"):
urllib.parse.urljoin(b"http://python.org", "http://python.org")
def _check_result_type(self, str_type):
num_args = len(str_type._fields)
bytes_type = str_type._encoded_counterpart
self.assertIs(bytes_type._decoded_counterpart, str_type)
str_args = ('',) * num_args
bytes_args = (b'',) * num_args
str_result = str_type(*str_args)
bytes_result = bytes_type(*bytes_args)
encoding = 'ascii'
errors = 'strict'
self.assertEqual(str_result, str_args)
self.assertEqual(bytes_result.decode(), str_args)
self.assertEqual(bytes_result.decode(), str_result)
self.assertEqual(bytes_result.decode(encoding), str_args)
self.assertEqual(bytes_result.decode(encoding), str_result)
self.assertEqual(bytes_result.decode(encoding, errors), str_args)
self.assertEqual(bytes_result.decode(encoding, errors), str_result)
self.assertEqual(bytes_result, bytes_args)
self.assertEqual(str_result.encode(), bytes_args)
self.assertEqual(str_result.encode(), bytes_result)
self.assertEqual(str_result.encode(encoding), bytes_args)
self.assertEqual(str_result.encode(encoding), bytes_result)
self.assertEqual(str_result.encode(encoding, errors), bytes_args)
self.assertEqual(str_result.encode(encoding, errors), bytes_result)
def test_result_pairs(self):
# Check encoding and decoding between result pairs
result_types = [
urllib.parse.DefragResult,
urllib.parse.SplitResult,
urllib.parse.ParseResult,
]
for result_type in result_types:
self._check_result_type(result_type)
def test_parse_qs_encoding(self):
result = urllib.parse.parse_qs("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, {'key': ['\u0141\xE9']})
result = urllib.parse.parse_qs("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd\ufffd']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, {'key': ['\u0141\ufffd-']})
result = urllib.parse.parse_qs("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, {'key': ['\u0141-']})
def test_parse_qsl_encoding(self):
result = urllib.parse.parse_qsl("key=\u0141%E9", encoding="latin-1")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="utf-8")
self.assertEqual(result, [('key', '\u0141\xE9')])
result = urllib.parse.parse_qsl("key=\u0141%C3%A9", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd\ufffd')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii")
self.assertEqual(result, [('key', '\u0141\ufffd-')])
result = urllib.parse.parse_qsl("key=\u0141%E9-", encoding="ascii",
errors="ignore")
self.assertEqual(result, [('key', '\u0141-')])
def test_urlencode_sequences(self):
# Other tests incidentally urlencode things; test non-covered cases:
# Sequence and object values.
result = urllib.parse.urlencode({'a': [1, 2], 'b': (3, 4, 5)}, True)
# we cannot rely on ordering here
assert set(result.split('&')) == {'a=1', 'a=2', 'b=3', 'b=4', 'b=5'}
class Trivial:
def __str__(self):
return 'trivial'
result = urllib.parse.urlencode({'a': Trivial()}, True)
self.assertEqual(result, 'a=trivial')
def test_urlencode_quote_via(self):
result = urllib.parse.urlencode({'a': 'some value'})
self.assertEqual(result, "a=some+value")
result = urllib.parse.urlencode({'a': 'some value/another'},
quote_via=urllib.parse.quote)
self.assertEqual(result, "a=some%20value%2Fanother")
result = urllib.parse.urlencode({'a': 'some value/another'},
safe='/', quote_via=urllib.parse.quote)
self.assertEqual(result, "a=some%20value/another")
def test_quote_from_bytes(self):
self.assertRaises(TypeError, urllib.parse.quote_from_bytes, 'foo')
result = urllib.parse.quote_from_bytes(b'archaeological arcana')
self.assertEqual(result, 'archaeological%20arcana')
result = urllib.parse.quote_from_bytes(b'')
self.assertEqual(result, '')
def test_unquote_to_bytes(self):
result = urllib.parse.unquote_to_bytes('abc%20def')
self.assertEqual(result, b'abc def')
result = urllib.parse.unquote_to_bytes('')
self.assertEqual(result, b'')
def test_quote_errors(self):
self.assertRaises(TypeError, urllib.parse.quote, b'foo',
encoding='utf-8')
self.assertRaises(TypeError, urllib.parse.quote, b'foo', errors='strict')
def test_issue14072(self):
p1 = urllib.parse.urlsplit('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urllib.parse.urlsplit('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
# assert the behavior for urlparse
p1 = urllib.parse.urlparse('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urllib.parse.urlparse('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
def test_port_casting_failure_message(self):
message = "Port could not be cast to integer value as 'oracle'"
p1 = urllib.parse.urlparse('http://Server=sde; Service=sde:oracle')
with self.assertRaisesRegex(ValueError, message):
p1.port
p2 = urllib.parse.urlsplit('http://Server=sde; Service=sde:oracle')
with self.assertRaisesRegex(ValueError, message):
p2.port
def test_telurl_params(self):
p1 = urllib.parse.urlparse('tel:123-4;phone-context=+1-650-516')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '123-4')
self.assertEqual(p1.params, 'phone-context=+1-650-516')
p1 = urllib.parse.urlparse('tel:+1-201-555-0123')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+1-201-555-0123')
self.assertEqual(p1.params, '')
p1 = urllib.parse.urlparse('tel:7042;phone-context=example.com')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '7042')
self.assertEqual(p1.params, 'phone-context=example.com')
p1 = urllib.parse.urlparse('tel:863-1234;phone-context=+1-914-555')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '863-1234')
self.assertEqual(p1.params, 'phone-context=+1-914-555')
def test_Quoter_repr(self):
quoter = urllib.parse.Quoter(urllib.parse._ALWAYS_SAFE)
self.assertIn('Quoter', repr(quoter))
def test_all(self):
expected = []
undocumented = {
'splitattr', 'splithost', 'splitnport', 'splitpasswd',
'splitport', 'splitquery', 'splittag', 'splittype', 'splituser',
'splitvalue',
'Quoter', 'ResultBase', 'clear_cache', 'to_bytes', 'unwrap',
}
for name in dir(urllib.parse):
if name.startswith('_') or name in undocumented:
continue
object = getattr(urllib.parse, name)
if getattr(object, '__module__', None) == 'urllib.parse':
expected.append(name)
self.assertCountEqual(urllib.parse.__all__, expected)
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
# In Python 2 this test class was in test_urllib.
def test_splittype(self):
splittype = urllib.parse.splittype
self.assertEqual(splittype('type:opaquestring'), ('type', 'opaquestring'))
self.assertEqual(splittype('opaquestring'), (None, 'opaquestring'))
self.assertEqual(splittype(':opaquestring'), (None, ':opaquestring'))
self.assertEqual(splittype('type:'), ('type', ''))
self.assertEqual(splittype('type:opaque:string'), ('type', 'opaque:string'))
def test_splithost(self):
splithost = urllib.parse.splithost
self.assertEqual(splithost('//www.example.org:80/foo/bar/baz.html'),
('www.example.org:80', '/foo/bar/baz.html'))
self.assertEqual(splithost('//www.example.org:80'),
('www.example.org:80', ''))
self.assertEqual(splithost('/foo/bar/baz.html'),
(None, '/foo/bar/baz.html'))
# bpo-30500: # starts a fragment.
self.assertEqual(splithost('//127.0.0.1#@host.com'),
('127.0.0.1', '/#@host.com'))
self.assertEqual(splithost('//127.0.0.1#@host.com:80'),
('127.0.0.1', '/#@host.com:80'))
self.assertEqual(splithost('//127.0.0.1:80#@host.com'),
('127.0.0.1:80', '/#@host.com'))
# Empty host is returned as empty string.
self.assertEqual(splithost("///file"),
('', '/file'))
# Trailing semicolon, question mark and hash symbol are kept.
self.assertEqual(splithost("//example.net/file;"),
('example.net', '/file;'))
self.assertEqual(splithost("//example.net/file?"),
('example.net', '/file?'))
self.assertEqual(splithost("//example.net/file#"),
('example.net', '/file#'))
def test_splituser(self):
splituser = urllib.parse.splituser
self.assertEqual(splituser('User:Pass@www.python.org:080'),
('User:Pass', 'www.python.org:080'))
self.assertEqual(splituser('@www.python.org:080'),
('', 'www.python.org:080'))
self.assertEqual(splituser('www.python.org:080'),
(None, 'www.python.org:080'))
self.assertEqual(splituser('User:Pass@'),
('User:Pass', ''))
self.assertEqual(splituser('User@example.com:Pass@www.python.org:080'),
('User@example.com:Pass', 'www.python.org:080'))
def test_splitpasswd(self):
        # Some of the password examples are not sensible, but they are included
        # to conform to RFC 2617 and to address issue 4675.
splitpasswd = urllib.parse.splitpasswd
self.assertEqual(splitpasswd('user:ab'), ('user', 'ab'))
self.assertEqual(splitpasswd('user:a\nb'), ('user', 'a\nb'))
self.assertEqual(splitpasswd('user:a\tb'), ('user', 'a\tb'))
self.assertEqual(splitpasswd('user:a\rb'), ('user', 'a\rb'))
self.assertEqual(splitpasswd('user:a\fb'), ('user', 'a\fb'))
self.assertEqual(splitpasswd('user:a\vb'), ('user', 'a\vb'))
self.assertEqual(splitpasswd('user:a:b'), ('user', 'a:b'))
self.assertEqual(splitpasswd('user:a b'), ('user', 'a b'))
self.assertEqual(splitpasswd('user 2:ab'), ('user 2', 'ab'))
self.assertEqual(splitpasswd('user+1:a+b'), ('user+1', 'a+b'))
self.assertEqual(splitpasswd('user:'), ('user', ''))
self.assertEqual(splitpasswd('user'), ('user', None))
self.assertEqual(splitpasswd(':ab'), ('', 'ab'))
def test_splitport(self):
splitport = urllib.parse.splitport
self.assertEqual(splitport('parrot:88'), ('parrot', '88'))
self.assertEqual(splitport('parrot'), ('parrot', None))
self.assertEqual(splitport('parrot:'), ('parrot', None))
self.assertEqual(splitport('127.0.0.1'), ('127.0.0.1', None))
self.assertEqual(splitport('parrot:cheese'), ('parrot:cheese', None))
self.assertEqual(splitport('[::1]:88'), ('[::1]', '88'))
self.assertEqual(splitport('[::1]'), ('[::1]', None))
self.assertEqual(splitport(':88'), ('', '88'))
def test_splitnport(self):
splitnport = urllib.parse.splitnport
self.assertEqual(splitnport('parrot:88'), ('parrot', 88))
self.assertEqual(splitnport('parrot'), ('parrot', -1))
self.assertEqual(splitnport('parrot', 55), ('parrot', 55))
self.assertEqual(splitnport('parrot:'), ('parrot', -1))
self.assertEqual(splitnport('parrot:', 55), ('parrot', 55))
self.assertEqual(splitnport('127.0.0.1'), ('127.0.0.1', -1))
self.assertEqual(splitnport('127.0.0.1', 55), ('127.0.0.1', 55))
self.assertEqual(splitnport('parrot:cheese'), ('parrot', None))
self.assertEqual(splitnport('parrot:cheese', 55), ('parrot', None))
def test_splitquery(self):
        # Normal cases are exercised by other tests; ensure that we also
        # catch cases with no query specified (testcase ensuring coverage)
splitquery = urllib.parse.splitquery
self.assertEqual(splitquery('http://python.org/fake?foo=bar'),
('http://python.org/fake', 'foo=bar'))
self.assertEqual(splitquery('http://python.org/fake?foo=bar?'),
('http://python.org/fake?foo=bar', ''))
self.assertEqual(splitquery('http://python.org/fake'),
('http://python.org/fake', None))
self.assertEqual(splitquery('?foo=bar'), ('', 'foo=bar'))
def test_splittag(self):
splittag = urllib.parse.splittag
self.assertEqual(splittag('http://example.com?foo=bar#baz'),
('http://example.com?foo=bar', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar#'),
('http://example.com?foo=bar', ''))
self.assertEqual(splittag('#baz'), ('', 'baz'))
self.assertEqual(splittag('http://example.com?foo=bar'),
('http://example.com?foo=bar', None))
self.assertEqual(splittag('http://example.com?foo=bar#baz#boo'),
('http://example.com?foo=bar#baz', 'boo'))
def test_splitattr(self):
splitattr = urllib.parse.splitattr
self.assertEqual(splitattr('/path;attr1=value1;attr2=value2'),
('/path', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path;'), ('/path', ['']))
self.assertEqual(splitattr(';attr1=value1;attr2=value2'),
('', ['attr1=value1', 'attr2=value2']))
self.assertEqual(splitattr('/path'), ('/path', []))
def test_splitvalue(self):
# Normal cases are exercised by other tests; test pathological cases
# with no key/value pairs. (testcase ensuring coverage)
splitvalue = urllib.parse.splitvalue
self.assertEqual(splitvalue('foo=bar'), ('foo', 'bar'))
self.assertEqual(splitvalue('foo='), ('foo', ''))
self.assertEqual(splitvalue('=bar'), ('', 'bar'))
self.assertEqual(splitvalue('foobar'), ('foobar', None))
self.assertEqual(splitvalue('foo=bar=baz'), ('foo', 'bar=baz'))
def test_to_bytes(self):
result = urllib.parse.to_bytes('http://www.python.org')
self.assertEqual(result, 'http://www.python.org')
self.assertRaises(UnicodeError, urllib.parse._to_bytes,
'http://www.python.org/medi\u00e6val')
def test_unwrap(self):
url = urllib.parse.unwrap('<URL:type://host/path>')
self.assertEqual(url, 'type://host/path')
class DeprecationTest(unittest.TestCase):
def test_splittype_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splittype('')
self.assertEqual(str(cm.warning),
'urllib.parse.splittype() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splithost_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splithost('')
self.assertEqual(str(cm.warning),
'urllib.parse.splithost() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splituser_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splituser('')
self.assertEqual(str(cm.warning),
'urllib.parse.splituser() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitpasswd_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitpasswd('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitpasswd() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitport_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitport('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitport() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitnport_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitnport('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitnport() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitquery_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitquery('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitquery() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splittag_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splittag('')
self.assertEqual(str(cm.warning),
'urllib.parse.splittag() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitattr_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitattr('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitattr() is deprecated as of 3.8, '
'use urllib.parse.urlparse() instead')
def test_splitvalue_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.splitvalue('')
self.assertEqual(str(cm.warning),
'urllib.parse.splitvalue() is deprecated as of 3.8, '
'use urllib.parse.parse_qsl() instead')
def test_to_bytes_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.to_bytes('')
self.assertEqual(str(cm.warning),
'urllib.parse.to_bytes() is deprecated as of 3.8')
def test_unwrap(self):
with self.assertWarns(DeprecationWarning) as cm:
urllib.parse.unwrap('')
self.assertEqual(str(cm.warning),
'urllib.parse.unwrap() is deprecated as of 3.8')
if __name__ == "__main__":
unittest.main()
| 49.289088 | 98 | 0.558131 |
994c8458a90546958fe7bc5df72ab9f2da733d06 | 7,441 | py | Python | plugins/modules/oci_network_ip_sec_connection_tunnel_cpe_device_config.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null | plugins/modules/oci_network_ip_sec_connection_tunnel_cpe_device_config.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null | plugins/modules/oci_network_ip_sec_connection_tunnel_cpe_device_config.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_ip_sec_connection_tunnel_cpe_device_config
short_description: Manage an IpSecConnectionTunnelCpeDeviceConfig resource in Oracle Cloud Infrastructure
description:
- This module allows the user to update an IpSecConnectionTunnelCpeDeviceConfig resource in Oracle Cloud Infrastructure
version_added: "2.9.0"
author: Oracle (@oracle)
options:
ipsc_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the IPSec connection.
type: str
required: true
tunnel_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the tunnel.
type: str
aliases: ["id"]
required: true
tunnel_cpe_device_config:
description:
- The set of configuration answers for a CPE device.
- This parameter is updatable.
type: list
elements: dict
suboptions:
key:
description:
- A string that identifies the question to be answered. See the `key` attribute in
L(CpeDeviceConfigQuestion,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/datatypes/CpeDeviceConfigQuestion).
- This parameter is updatable.
type: str
value:
description:
- The answer to the question.
- This parameter is updatable.
type: str
state:
description:
- The state of the IpSecConnectionTunnelCpeDeviceConfig.
            - Use I(state=present) to update an existing IpSecConnectionTunnelCpeDeviceConfig.
type: str
required: false
default: 'present'
choices: ["present"]
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Update ip_sec_connection_tunnel_cpe_device_config
oci_network_ip_sec_connection_tunnel_cpe_device_config:
ipsc_id: "ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx"
tunnel_id: "ocid1.tunnel.oc1..xxxxxxEXAMPLExxxxxx"
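# A sketch of the same task with the optional tunnel_cpe_device_config answers
# filled in; the key/value strings are placeholders taken from the RETURN
# samples, not values for any real CPE device.
- name: Update ip_sec_connection_tunnel_cpe_device_config with config answers
  oci_network_ip_sec_connection_tunnel_cpe_device_config:
    ipsc_id: "ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx"
    tunnel_id: "ocid1.tunnel.oc1..xxxxxxEXAMPLExxxxxx"
    tunnel_cpe_device_config:
      - key: "key_example"
        value: "value_example"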
"""
RETURN = """
ip_sec_connection_tunnel_cpe_device_config:
description:
- Details of the IpSecConnectionTunnelCpeDeviceConfig resource acted upon by the current operation
returned: on success
type: complex
contains:
tunnel_cpe_device_config_parameter:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A string that identifies the question to be answered. See the `key` attribute in
L(CpeDeviceConfigQuestion,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/latest/datatypes/CpeDeviceConfigQuestion).
returned: on success
type: str
sample: key_example
value:
description:
- The answer to the question.
returned: on success
type: str
sample: value_example
sample: {
"tunnel_cpe_device_config_parameter": [{
"key": "key_example",
"value": "value_example"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
from oci.core.models import UpdateTunnelCpeDeviceConfigDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class IpSecConnectionTunnelCpeDeviceConfigHelperGen(OCIResourceHelperBase):
"""Supported operations: update and get"""
def get_module_resource_id_param(self):
return "tunnel_id"
def get_module_resource_id(self):
return self.module.params.get("tunnel_id")
def get_get_fn(self):
return self.client.get_tunnel_cpe_device_config
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_tunnel_cpe_device_config,
ipsc_id=self.module.params.get("ipsc_id"),
tunnel_id=self.module.params.get("tunnel_id"),
)
def get_update_model_class(self):
return UpdateTunnelCpeDeviceConfigDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_tunnel_cpe_device_config,
call_fn_args=(),
call_fn_kwargs=dict(
ipsc_id=self.module.params.get("ipsc_id"),
tunnel_id=self.module.params.get("tunnel_id"),
update_tunnel_cpe_device_config_details=update_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
IpSecConnectionTunnelCpeDeviceConfigHelperCustom = get_custom_class(
"IpSecConnectionTunnelCpeDeviceConfigHelperCustom"
)
class ResourceHelper(
IpSecConnectionTunnelCpeDeviceConfigHelperCustom,
IpSecConnectionTunnelCpeDeviceConfigHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
ipsc_id=dict(type="str", required=True),
tunnel_id=dict(aliases=["id"], type="str", required=True),
tunnel_cpe_device_config=dict(
type="list",
elements="dict",
options=dict(key=dict(type="str", no_log=True), value=dict(type="str")),
),
state=dict(type="str", default="present", choices=["present"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="ip_sec_connection_tunnel_cpe_device_config",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = dict(changed=False)
if resource_helper.is_update():
result = resource_helper.update()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 33.367713 | 149 | 0.654079 |
ff6c867159430d293d84ed1841199c03d480fe6c | 4,050 | py | Python | lispy.py | programble/lispy | d0a4075f1d74392904068a2800bcf2b9835c511d | ["0BSD"] | 6 | 2015-02-03T19:36:38.000Z | 2018-01-24T19:47:20.000Z | lispy.py | causal-agent/lispy | d0a4075f1d74392904068a2800bcf2b9835c511d | ["0BSD"] | 2 | 2018-01-25T00:03:06.000Z | 2018-02-06T21:01:00.000Z | lispy.py | causal-agent/lispy | d0a4075f1d74392904068a2800bcf2b9835c511d | ["0BSD"] | 1 | 2015-09-10T07:04:01.000Z | 2015-09-10T07:04:01.000Z | #!/usr/bin/env python
# Copyright 2010 Curtis McEnroe <programble@gmail.com>
# Licensed under the GNU GPLv3
import sys
import getopt
import traceback
try:
import readline
except ImportError:
pass
from reader import Reader
import core
def load_lisp_core(filename="core.lisp"):
# Load up and evaluate core.lisp
f = open(filename)
reader = Reader(f.read(), filename)
f.close()
for expr in reader.read():
expr.evaluate(core.scope)
def repl():
    source = ""
while True:
# Get a new source line
try:
if source == "":
source = raw_input("=> ") + '\n'
else:
source += raw_input() + '\n'
        except (KeyboardInterrupt, EOFError):
break
# Read the source line
reader = Reader(source)
try:
exprs = reader.read()
except EOFError:
# Need more input
continue
except Exception, e:
print e
source = ""
continue
# Evaluate the source line
for expr in exprs:
try:
print repr(expr.evaluate(core.scope))
except Exception, e:
traceback.print_exc()
source = ""
def evaluate(expr):
reader = Reader(expr)
try:
exprs = reader.read()
except Exception, e:
print e
return
for expr in exprs:
try:
print repr(expr.evaluate(core.scope))
except Exception, e:
print e
return
def evaluate_file(filename):
try:
f = open(filename)
except IOError:
print "Cannot open file %s" % repr(filename)
return
reader = Reader(f.read())
f.close()
try:
exprs = reader.read()
except Exception, e:
print e
return
for expr in exprs:
try:
expr.evaluate(core.scope)
except Exception, e:
traceback.print_exc()
return
def help():
print "Usage: %s [options] file" % sys.argv[0]
print " %s [options] -r" % sys.argv[0]
print " %s [options] -e expr" % sys.argv[0]
print "Options:"
print " -r, --repl Start an REPL"
print " -e EXPR, --evaluate=EXPR Evaluate a single expression"
print " -n, --no-core Do not load lisp core"
print " -c FILE, --core=FILE Load the core from a different file"
print " --version Print version information and exit"
print
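# Illustrative invocations only (the expression and file names below are
# examples, not part of the original project):
#   python lispy.py --repl                  # start an interactive REPL
#   python lispy.py -e "(+ 1 2)"            # evaluate a single expression
#   python lispy.py --no-core program.lisp  # run a file without loading core.lisp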
def main(argv):
# Parse command line arguments
try:
opts, args = getopt.getopt(argv, "e:rnc:h", ["evaluate=", "repl", "no-core", "core=", "help", "version"])
except getopt.GetoptError, err:
print '%s\n' % err
help()
sys.exit(1)
load_core = True
core_filename = "core.lisp"
for opt, arg in opts:
if opt in ("-n", "--no-core"):
load_core = False
elif opt in ("-c", "--core"):
core_filename = arg
elif opt in ("-e", "--evaluate"):
if load_core:
load_lisp_core(core_filename)
evaluate(arg)
sys.exit()
elif opt in ("-r", "--repl"):
core.scope["*repl*"] = core.t
if load_core:
load_lisp_core(core_filename)
repl()
sys.exit()
elif opt == "--version":
core.scope["*repl*"] = core.t
if load_core:
load_lisp_core(core_filename)
sys.exit()
elif opt in ("-h", "--help"):
help()
sys.exit()
# If not given a file to evaluate, start an REPL
if len(args) == 0:
core.scope["*repl*"] = core.t
if load_core:
load_lisp_core(core_filename)
repl()
else:
if load_core:
load_lisp_core(core_filename)
evaluate_file(args[0])
if __name__ == "__main__":
main(sys.argv[1:])
| 26.821192 | 113 | 0.515062 |
c1f3f086723e538938430730f6213d0bd9c88cfc | 25,025 | py | Python | fedlearner/trainer/trainer_master.py | duanbing/fedlearner | 5cce3c1fe09abe66879274a0ad3dc8e2f25a322d | ["Apache-2.0"] | null | null | null | fedlearner/trainer/trainer_master.py | duanbing/fedlearner | 5cce3c1fe09abe66879274a0ad3dc8e2f25a322d | ["Apache-2.0"] | null | null | null | fedlearner/trainer/trainer_master.py | duanbing/fedlearner | 5cce3c1fe09abe66879274a0ad3dc8e2f25a322d | ["Apache-2.0"] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import os
import signal
import time
from concurrent import futures
import threading
import grpc
import tensorflow.compat.v1 as tf
from fedlearner.common import fl_logging
from fedlearner.common import trainer_master_service_pb2 as tm_pb
from fedlearner.common import trainer_master_service_pb2_grpc as tm_grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.trainer.estimator import FLEstimator
from fedlearner.trainer.sparse_estimator import SparseFLEstimator
from fedlearner.trainer.cluster_server import ClusterServer
class ExportModelHook():
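    """Extension point called after the model is exported: subclass and
    override after_save to post-process the SavedModel in export_dir."""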
def after_save(self, sess, model, export_dir, inputs, outputs):
pass
class _TriggerHook(tf.train.SessionRunHook):
def __init__(self,
trigger_secs=None,
trigger_steps=None,
trigger_fn=None):
self._trigger_secs = trigger_secs
self._trigger_steps = trigger_steps
self._trigger_fn = trigger_fn
def begin(self):
self._global_step_tensor = tf.train.get_or_create_global_step()
self._last_triggered_time = None
self._last_triggered_step = None
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._should_trigger(global_step):
self._trigger(global_step)
def end(self, session):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def _should_trigger(self, global_step):
if self._last_triggered_time is None \
or self._last_triggered_step is None:
return True
if self._trigger_secs is not None:
if time.time() >= self._last_triggered_time + self._trigger_secs:
return True
if self._trigger_steps is not None:
if global_step >= self._last_triggered_step + self._trigger_steps:
return True
return False
def _trigger(self, global_step):
if self._trigger_fn:
self._trigger_fn(global_step)
self._last_triggered_time = time.time()
self._last_triggered_step = global_step
#class _CheckpointSaverHook(tf.train.CheckpointSaverHook):
# def _save(self, session, step):
# if self._timer.last_triggered_step() is None:
# # skip save checkpoint
# fl_logging.info("skip save checkpoint")
# return False
# return super(_CheckpointSaverHook, self)._save(session, step)
class _DataVisitorCheckpointHook(tf.train.SessionRunHook):
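    """Session hook that stores the data visitor's progress in a
    `data_checkpoint` tf.Variable so it is saved with model checkpoints
    and restored when a session is created."""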
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt_plhd = tf.placeholder(tf.string, name="data_checkpoint_plhd")
self._ckpt_var = tf.Variable("", name="data_checkpoint")
self._save_op = self._ckpt_var.assign(self._ckpt_plhd)
def after_create_session(self, session, coord):
data = session.run(self._ckpt_var)
self._visitor.restore(data)
def before_checkpoint_save(self, session, global_step_value):
data = self._visitor.dump()
fl_logging.info("DataVisitor save checkpoint for global step %d, "
"size: %d", global_step_value, len(data))
session.run(
self._save_op,
{self._ckpt_plhd: data},
)
def create_checkpoint_saver_listener(self):
return _DataVisitorCheckpointHook.CheckpointSaverListener(self)
class CheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, hook):
self._hook = hook
def before_save(self, session, global_step_value):
self._hook.before_checkpoint_save(session, global_step_value)
class DataBlockCheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
var_tmp = tf.Variable("", name="data_checkpoint")
self._save_op = var_tmp.assign(self._ckpt)
def before_save(self, session, global_step_value):
session.run(
self._save_op,
{self._ckpt: self._visitor.dump()}
)
#fl_logging.info("data checkpoint saved result: %s", res)
class _FakeBridge():
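    """Bridge stand-in used while building the graph on the master; its
    send/receive ops raise if they are ever actually executed."""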
def send_op(self, name, x):
def func(x):
            raise RuntimeError("Unexpected call send op")
out = tf.py_function(func=func, inp=[x], Tout=[], name='send_' + name)
return out
def receive_op(self, name, dtype):
def func():
            raise RuntimeError("Unexpected call receive op")
return tf.py_function(func=func, inp=[], Tout=[dtype])[0]
def register_data_block_handler(self, handler):
pass
class _FakeTrainerMasterClient():
pass
class _TrainerMaster(tm_grpc.TrainerMasterServiceServicer):
def __init__(self,
cluster_server,
role,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
self._cluster_server = cluster_server
self._role = role
self._model_fn = model_fn
self._input_fn = input_fn
self._serving_input_receiver_fn = serving_input_receiver_fn
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._checkpoint_path = checkpoint_path
self._save_checkpoint_steps = save_checkpoint_steps
self._save_checkpoint_secs = save_checkpoint_secs
self._summary_path = summary_path
self._summary_save_steps = summary_save_steps
self._summary_save_secs = summary_save_secs
self._export_path = export_path
self._sparse_estimator = sparse_estimator
self._export_model_hook = export_model_hook
self._lock = threading.RLock()
self._status = tm_pb.MasterStatus.CREATED
self._checkpoint_listeners = []
self._session_hooks = []
self._running_workers = set() # set(worker_rank)
self._completed_workers = set() # set(worker_rank)
# for compatibility
self._worker0_terminated_at = 0
self._worker0_cluster_def = None
def _check_status(self, callback_fn):
with self._lock:
return callback_fn(self._status)
def _run_grpc_server(self, address):
self._grpc_server = grpc.server(
futures.ThreadPoolExecutor(
max_workers=8,
thread_name_prefix="TrainerMasterServerThreadPoolExecutor"
))
tm_grpc.add_TrainerMasterServiceServicer_to_server(
self, self._grpc_server)
self._grpc_server.add_insecure_port(address)
self._grpc_server.start()
fl_logging.info('Trainer Master Server start on address: %s', address)
def _transfer_status(self, frm, to):
if self._status != frm:
raise RuntimeError(
"Trainer Master status transfer failed, "
"want from %s to %s, but current status: %s"% \
(tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to),
tm_pb.MasterStatus.Name(self._status))
)
self._status = to
fl_logging.info("Trainer Master status transfer, from %s to %s",
tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to))
def run_forever(self, listen_port=None):
with self._lock:
self._transfer_status(tm_pb.MasterStatus.CREATED,
tm_pb.MasterStatus.INITIALING)
if listen_port:
self._run_grpc_server(listen_port)
while self._cluster_server is None:
            # waiting to receive cluster_def from worker_0
with self._lock:
if self._worker0_cluster_def:
fl_logging.info("received worker_0 cluster_def: %s",
self._worker0_cluster_def)
self._cluster_server = ClusterServer(
tf.train.ClusterSpec(self._worker0_cluster_def),
"master")
break
            fl_logging.info("still waiting to receive cluster_def from worker_0")
time.sleep(2)
self._run()
sig = signal.sigwait([signal.SIGHUP, signal.SIGINT, signal.SIGTERM])
fl_logging.info("Server shutdown by signal: %s",
signal.Signals(sig).name)
def _add_checkpoint_listener(self, listener):
with self._lock:
self._checkpoint_listeners.append(listener)
def _add_session_hook(self, hook):
with self._lock:
self._session_hooks.append(hook)
def _create_estimator(self):
estimator_factory = SparseFLEstimator \
if self._sparse_estimator else FLEstimator
return estimator_factory(
cluster_server=self._cluster_server,
bridge=_FakeBridge(),
trainer_master=_FakeTrainerMasterClient(),
role=self._role,
model_fn=self._model_fn)
def _run(self):
fl_logging.info("create estimator")
estimator = self._create_estimator()
fl_logging.info("start session_run")
self._session_run(estimator)
fl_logging.info("session_run done")
fl_logging.info("start export_model")
self._export_model(estimator)
fl_logging.info("export_model done")
self._transfer_status(tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED)
def _session_run(self, estimator):
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
features, labels = estimator. \
_get_features_and_labels_from_input_fn(
self._input_fn, tf.estimator.ModeKeys.TRAIN)
# only for create graph
spec, _ = estimator._get_model_spec(
features, labels, tf.estimator.ModeKeys.TRAIN)
session_creator = tf.train.ChiefSessionCreator(
master=self._cluster_server.target,
config=self._cluster_server.cluster_config,
checkpoint_filename_with_path= \
self._checkpoint_filename_with_path
)
hooks = self._session_hooks
# saver hook
if self._checkpoint_path and \
(self._save_checkpoint_secs or self._save_checkpoint_steps):
hooks.append(
tf.train.CheckpointSaverHook(
checkpoint_dir=self._checkpoint_path,
save_secs=self._save_checkpoint_secs,
save_steps=self._save_checkpoint_steps,
listeners=self._checkpoint_listeners,
)
)
# summary hook
if self._summary_save_secs or self._summary_save_steps:
if not self._summary_path:
self._summary_path = self._checkpoint_path
if self._summary_path:
hooks.append(
tf.train.SummarySaverHook(
output_dir=self._summary_path,
save_secs=self._summary_save_secs,
save_steps=self._summary_save_steps,
)
)
noop = tf.no_op()
with tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks) as sess:
with self._lock:
# ready, set status to running
self._transfer_status(tm_pb.MasterStatus.INITIALING,
tm_pb.MasterStatus.RUNNING)
while True:
sess.run(noop)
with self._lock:
if self._status == tm_pb.MasterStatus.WORKER_COMPLETED:
break
time.sleep(0.2)
def _export_model(self, estimator):
if self._export_path:
export_path = os.path.join(
self._export_path, str(self._worker0_terminated_at))
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
receiver = self._serving_input_receiver_fn()
spec, model = estimator._get_model_spec(
receiver.features, None, tf.estimator.ModeKeys.PREDICT)
assert not model.sends, "Exported model cannot send"
assert not model.recvs, "Exported model cannot receive"
with tf.Session(
target=self._cluster_server.target,
config=self._cluster_server.cluster_config) as sess:
tf.saved_model.simple_save(sess, export_path,
receiver.receiver_tensors,
spec.predictions, None)
if self._export_model_hook:
self._export_model_hook.after_save(
sess, model, export_path,
receiver.receiver_tensors, spec.predictions)
def _request_data_block(self, request):
"""override by subclass"""
        raise RuntimeError("Unimplemented")
def RequestDataBlock(self, request, context):
if request.worker_rank not in self._running_workers:
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
if request.worker_rank in self._completed_workers:
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="worker has completed")
)
return self._request_data_block(request)
def WorkerRegister(self, request, context):
with self._lock:
            # for compatibility; for more information see:
            # protocal/fedlearner/common/trainer_master_service.proto
if self._worker0_cluster_def is None and request.worker_rank == 0:
self._worker0_cluster_def = request.cluster_def
if self._status in (tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED):
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED
))
if self._status != tm_pb.MasterStatus.RUNNING:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode. \
STATUS_WAIT_FOR_SYNCING_CHECKPOINT
))
if request.worker_rank in self._running_workers:
fl_logging.warning("worker_%d:%s repeat registration",
request.worker_rank, request.hostname)
else:
fl_logging.info("worker_%d:%s registration",
request.worker_rank, request.hostname)
self._running_workers.add(request.worker_rank)
if request.worker_rank in self._completed_workers:
self._completed_workers.remove(request.worker_rank)
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def WorkerComplete(self, request, context):
with self._lock:
if request.worker_rank not in self._running_workers:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
fl_logging.info("worker_%d completed", request.worker_rank)
self._completed_workers.add(request.worker_rank)
if request.worker_rank == 0:
self._worker0_terminated_at = request.timestamp
if len(self._running_workers) == len(self._completed_workers) \
and 0 in self._running_workers:
# worker 0 completed and all datablock has finished
self._transfer_status(tm_pb.MasterStatus.RUNNING,
tm_pb.MasterStatus.WORKER_COMPLETED)
return tm_pb.WorkerCompleteResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def IsCompleted(self, request, context):
with self._lock:
return tm_pb.IsCompletedResponse(
completed=(self._status == tm_pb.MasterStatus.COMPLETED)
)
class LeaderTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
super(LeaderTrainerMaster, self).__init__(
cluster_server,
"leader",
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook)
self._data_visitor = data_visitor
self._last_trigger_time = 0
self._last_global_step = -1
hook = _DataVisitorCheckpointHook(self._data_visitor)
self._add_checkpoint_listener(
hook.create_checkpoint_saver_listener())
self._add_session_hook(hook)
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_logging)
)
def _trigger_logging(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
epoch, data_index = self._data_visitor.summary()
total_epoch, total_data_size = \
self._data_visitor.epoch_num, \
self._data_visitor.datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"epoch: %d/%d, datablock allocated: %d/%d, "
"worker: %d/%d(running/completed)",
global_step, speed,
epoch, total_epoch,
data_index, total_data_size,
len(self._running_workers),
len(self._completed_workers))
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
try:
data_block = next(self._data_visitor)
except StopIteration:
data_block = None
response = tm_pb.DataBlockResponse()
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED,
error_message="data block finished")
)
return response
class FollowerTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
super(FollowerTrainerMaster, self).__init__(
cluster_server,
"follower",
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook)
self._data_visitor = data_visitor
self._last_trigger_time = 0
self._last_global_step = -1
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_logging)
)
def _trigger_logging(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
total_data_size = self._data_visitor.datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"datablock size: %d, "
"worker: %d/%d(running/completed)",
global_step, speed,
total_data_size,
len(self._running_workers),
len(self._completed_workers))
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
data_block = self._data_visitor.get_datablock_by_id(request.block_id)
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
fl_logging.error("invalid data block id: %s", request.block_id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_DATA_BLOCK,
error_message="invalid data block")
)
return response
| 38.618827 | 80 | 0.591608 |
b353306056576c6c6a208d3d5554fbbf3172ada5 | 15,714 | py | Python | integration-test/398-airport-iata-codes.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | ["MIT"] | null | null | null | integration-test/398-airport-iata-codes.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | ["MIT"] | null | null | null | integration-test/398-airport-iata-codes.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | ["MIT"] | null | null | null | # -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class AirportIataCodes(FixtureTest):
def test_sfo(self):
# San Francisco International
self.generate_fixtures(dsl.way(23718192, wkt_loads('POLYGON ((-122.402782773269 37.63531941394439, -122.40266823807 37.63591797330289, -122.402642995411 37.63627586880459, -122.400825793423 37.63777992163308, -122.400541476635 37.6380545077576, -122.400242517309 37.63819677996181, -122.399856241737 37.6383059736938, -122.399504910629 37.6383616019698, -122.398018917486 37.63836757738308, -122.397126531083 37.63836700829618, -122.396226059842 37.6386334827911, -122.395922968265 37.63869978115548, -122.395041092151 37.6386999945622, -122.394648708034 37.63862437743169, -122.394254167962 37.63858532396358, -122.392438313446 37.63858546623489, -122.391235918439 37.6385816960447, -122.390817393348 37.63864899035377, -122.389157396534 37.63901967704601, -122.388567922045 37.6391058929143, -122.388302919036 37.63913456037959, -122.388062529866 37.6390834141992, -122.387878285401 37.63897038030411, -122.387772913018 37.63880221628041, -122.387668528782 37.63847058205218, -122.387559113981 37.63819279634378, -122.386109861933 37.6361934199638, -122.385986523244 37.63597168263642, -122.385954633052 37.63578217032489, -122.385994338587 37.63559571648658, -122.386109592438 37.63542768710219, -122.386339022162 37.63525887481049, -122.386630794966 37.63517770534189, -122.387070430466 37.6351242088515, -122.387272910731 37.63505619995949, -122.38816152421 37.63456149742279, -122.388740129085 37.63412156936317, -122.389776874754 37.63391569010429, -122.391048170544 37.6335037164537, -122.391386206586 37.6332148142114, -122.391498765491 37.6329047108011, -122.391448819161 37.63268018914851, -122.3909392049 37.63174545752518, -122.392199810738 37.63131220020119, -122.391559311941 37.62998359980139, -122.391427439257 37.6298805122232, -122.388171226015 37.62852100917631, -122.387996773187 37.62878630951541, -122.385765178358 37.62809285554551, -122.379756976244 37.63013221925351, -122.37423197792 37.62792552065459, -122.372338688627 37.62743632006278, -122.371877763055 37.62812010434631, -122.368255665998 37.62669262369908, -122.36714849241 37.62843961891189, -122.365768141145 37.62789087260929, -122.365253136992 37.62676932017637, -122.368741924061 37.62124049667588, -122.367821330558 37.62020429901018, -122.367308033205 37.62043946015379, -122.366929483144 37.62051801305999, -122.366536470207 37.6205380781912, -122.366328240724 37.62050271517259, -122.365617314008 37.62026477925429, -122.365174444573 37.61998422165949, -122.365064221288 37.61986567988729, -122.364742714248 37.61903317832368, -122.364662584524 37.6189115042374, -122.364569519061 37.61884938633769, -122.355255426869 37.61492880935528, -122.355170895401 37.61486868056239, -122.355135142453 37.61479915880869, -122.355136310262 37.6147250116876, -122.355179609059 37.6146547070554, -122.358802963758 37.60916860603569, -122.366974758234 37.6124694664738, -122.367632684348 37.61287835449848, -122.368775431221 37.61338750447399, -122.371457171839 37.61520340875107, -122.378584674798 37.60572142198278, -122.378192021187 37.6052981187722, -122.378828297903 37.60493409601148, -122.381680808256 37.60455576665628, -122.381876820651 37.6044554905509, -122.382042290326 37.60436631658349, -122.382557923299 37.60463568853699, -122.383020825165 37.60492406134129, -122.383427672157 37.60520289656819, -122.384410159584 37.60584226364769, -122.384490109644 37.60594787540128, -122.385525687504 37.60664039727529, -122.385564943881 37.6070947211435, -122.385595486601 37.60767436434308, -122.386246405856 37.6079477807216, -122.386327703389 37.6078152002346, 
-122.386698707602 37.60797396943178, -122.386608516747 37.60810868458069, -122.386847468613 37.60820916925559, -122.386492364581 37.608780264068, -122.385977719755 37.6085681233169, -122.385745145927 37.60891967458439, -122.385749727335 37.60929072304688, -122.385947895687 37.60937825434759, -122.385847913196 37.6095336043617, -122.386355281668 37.60975648801139, -122.386575818071 37.60953737602549, -122.386752606519 37.60946372180269, -122.386992366868 37.60946770696059, -122.387235361152 37.6095741675278, -122.387984825594 37.60966746272569, -122.388313608988 37.60994386062659, -122.389838229688 37.6095805722362, -122.391743107248 37.6107278601031, -122.392167740883 37.61101065864849, -122.392897172894 37.6129271704265, -122.393137831558 37.6133756207896, -122.393493025421 37.61333456157809, -122.393827737696 37.61327258133899, -122.394060760681 37.61321415904558, -122.394076032041 37.61315096898599, -122.394739078552 37.61303611664558, -122.394902661765 37.61344201284619, -122.39614071989 37.61453935892668, -122.39703409444 37.61493919846718, -122.397241874765 37.6151045701634, -122.397419561528 37.61531825774418, -122.39777170112 37.61584418416938, -122.398200017847 37.61692939821548, -122.398558984635 37.61787840495269, -122.399218258222 37.61967776304128, -122.399837826273 37.6214926598776, -122.400493416768 37.62341132060519, -122.401401433857 37.62610950021288, -122.401612807443 37.62729530789251, -122.401933865326 37.62929079730368, -122.40191239559 37.6295417237437, -122.401836038791 37.629813423405, -122.401691859188 37.63007658481649, -122.400390469836 37.6320202082337, -122.400645860871 37.63246797430131, -122.401998813521 37.6331083874929, -122.402180632534 37.6332533013763, -122.4022967847 37.63343378528259, -122.402344844568 37.63359136153137, -122.402782773269 37.63531941394439))'), {
u'gnis:county_name': u'San Mateo', u'internet_access:ssid': u'#SFO FREE WIFI', u'wikidata': u'Q8688', u'owner': u'San Francisco Airport Commission', u'name:de': u'Internationaler Flughafen San Francisco', u'is_in': u'San Mateo County', u'addr:housenumber': u'780', u'gnis:feature_type': u'Airport', u'way_area': u'1.24398e+07', u'wikipedia': u'en:San Francisco International Airport', u'addr:state': u'CA', u'ele': u'4', u'source': u'openstreetmap.org', u'gnis:feature_id': u'1653945', u'addr:street': u'S Airport Blvd', u'ref': u'KSFO', u'website': u'http://www.flysfo.com/', u'city_served': u'San Francisco, California', u'name:ja': u'\u30b5\u30f3\u30d5\u30e9\u30f3\u30b7\u30b9\u30b3\u56fd\u969b\u7a7a\u6e2f', u'short_name': u'San Francisco Airport', u'passengers': u'47155100', u'iata': u'SFO', u'aerodrome:type': u'public', u'icao': u'KSFO', u'gnis:created': u'03/01/1994', u'aerodrome': u'international', u'name:el': u'\u0394\u03b9\u03b5\u03b8\u03bd\u03ae\u03c2 \u0391\u03b5\u03c1\u03bf\u03bb\u03b9\u03bc\u03ad\u03bd\u03b1\u03c2 \u03a3\u03b1\u03bd \u03a6\u03c1\u03b1\u03bd\u03c3\u03af\u03c3\u03ba\u03bf', u'name:en': u'San Francisco International Airport', u'name': u'San Francisco International Airport', u'addr:postcode': u'94128', u'addr:city': u'San Francisco', u'internet_access:fee': u'no', u'aeroway': u'aerodrome', u'internet_access': u'wlan', u'is_in:iso_3166_2': u'US-CA', u'source_ref': u'geonames.usgs.gov'}))
self.assert_has_feature(
13, 1311, 3170, 'pois',
{'kind': 'aerodrome', 'iata': 'SFO'})
def test_oak(self):
# Oakland airport
self.generate_fixtures(dsl.way(54363486, wkt_loads('POLYGON ((-122.251293129543 37.72490617803489, -122.251293129543 37.72528631025018, -122.250709404272 37.72582261309319, -122.250271745065 37.7262367743562, -122.250091363356 37.72640651656879, -122.249069978878 37.72646079988989, -122.248855101862 37.72649689401339, -122.248675438806 37.72635479106338, -122.248666635316 37.72625027416969, -122.248469185616 37.7260669607028, -122.248469185616 37.72593807259049, -122.248623695845 37.72582261309319, -122.248752514257 37.72576825825217, -122.249044287061 37.72558501464461, -122.248331923041 37.72504871008109, -122.2479457373 37.7249128570103, -122.247782603245 37.72500117574517, -122.247542303906 37.7250079968177, -122.247327696385 37.72510981546048, -122.247233283448 37.72502838897859, -122.246898481342 37.72512338653218, -122.246718279296 37.72511656547029, -122.245619639704 37.72529988128949, -122.245447971653 37.72529988128949, -122.244804328752 37.72495371243168, -122.24467551034 37.72485182252129, -122.244512376284 37.7248246802761, -122.244323640243 37.72472968233939, -122.244109032722 37.72456668634359, -122.243937364671 37.72443765351368, -122.243646220687 37.72425156447159, -122.239630212378 37.7209887130929, -122.236737188006 37.71960011022599, -122.236621934155 37.71958952257982, -122.236554830003 37.71960011022599, -122.236205385357 37.71989478726049, -122.234797186318 37.72126107288809, -122.233032266279 37.72297109718351, -122.232549601477 37.7234314597321, -122.232078614774 37.72388876410138, -122.231358615074 37.72458778915839, -122.228689900027 37.72721628601359, -122.228556500208 37.72728861560778, -122.228344587632 37.72728229209988, -122.228231759233 37.7272540849852, -122.226920398581 37.7269259011653, -122.226794275115 37.72716768730739, -122.226740645692 37.7271634953123, -122.226721870903 37.72736499501418, -122.226727170963 37.72824956937759, -122.227655579809 37.72842811712219, -122.229165378307 37.72871849608017, -122.229052729571 37.72902819922441, -122.227660610375 37.73160566920351, -122.226756725536 37.73164808395509, -122.226756725536 37.73330060628359, -122.22662530201 37.7335700079751, -122.226531428063 37.73386271123349, -122.226469713803 37.7341723928629, -122.226453633959 37.7345414657444, -122.226445189796 37.73578500402448, -122.22645956284 37.73899030097969, -122.226462706944 37.7396773910561, -122.226480493586 37.74363052145789, -122.223849238287 37.74366227408098, -122.223878703029 37.7449985001975, -122.224981115545 37.74510881537909, -122.224986505437 37.7452614663153, -122.22449297102 37.7453335651822, -122.224001862054 37.74538996562618, -122.223736859045 37.74540999700779, -122.223386875411 37.74543301177997, -122.222657892558 37.74542995735078, -122.222356867106 37.74542399055839, -122.222115938947 37.74539699792041, -122.22201092589 37.7453789554674, -122.221959901582 37.74536496190831, -122.221738916022 37.74532497014858, -122.221533920474 37.74528497836727, -122.221283919331 37.74521600492469, -122.220993943157 37.74511300635818, -122.220868897669 37.74506498766729, -122.22042692655 37.74486396233569, -122.220167942253 37.74472700907621, -122.219941926128 37.74458302319249, -122.219780858197 37.74445601419819, -122.219598859521 37.7443109622726, -122.219506872036 37.74422898862967, -122.219180873419 37.74388901724902, -122.219129938942 37.7438140045485, -122.219053941469 37.743729970344, -122.218955935272 37.74360999234979, -122.218702879856 37.7433220159562, -122.218492943574 37.74306799342191, -122.218350919928 37.74288301673258, 
-122.218234857593 37.74274698327319, -122.218044863911 37.74250901291288, -122.217887928231 37.74232801285489, -122.217629932081 37.7420009613922, -122.217460869144 37.741798009747, -122.216767908734 37.74091295789887, -122.215603781958 37.73950455355099, -122.214659562762 37.73837829385128, -122.214093713965 37.73765929445129, -122.213559935023 37.73698049748229, -122.213345417333 37.73667501905528, -122.213176444228 37.73639930593229, -122.212881257826 37.73591351978678, -122.212755314023 37.73567595371398, -122.212487077079 37.73509901451231, -122.212256389714 37.73459631145159, -122.211846039293 37.73373291311091, -122.211615262096 37.73327900863718, -122.211462368835 37.73295646408938, -122.211014468834 37.73200615727129, -122.210864270519 37.73174527545498, -122.210687302408 37.73149071586859, -122.210440535199 37.73123395295428, -122.209893371359 37.7307884877894, -122.209311263055 37.73039609257019, -122.208659535317 37.73001421038119, -122.207375124124 37.72930607125718, -122.205283037658 37.72815628102949, -122.20212869337 37.72638278536621, -122.201608389157 37.72624281374678, -122.200337003535 37.72580151063029, -122.199832689335 37.72561059346148, -122.19938748428 37.72538990482221, -122.199671531573 37.72489580430601, -122.199730281393 37.724835054015, -122.199822179046 37.72476115885698, -122.199902668096 37.72467397668627, -122.199995464064 37.72452959653339, -122.200099938132 37.7243327786471, -122.200176025437 37.72413617339889, -122.200230373511 37.7239380044439, -122.20026558747 37.7236803626934, -122.200231631153 37.72315278405669, -122.200230014185 37.72269810418028, -122.200249597458 37.72236002347581, -122.200313557507 37.72196459925629, -122.200420636688 37.72159105821197, -122.200535261719 37.72123890327881, -122.20072579439 37.72080268644267, -122.200918842345 37.7204400112942, -122.201175940179 37.72006447294958, -122.201490260697 37.7196851666305, -122.201784099627 37.7193575177389, -122.202200917919 37.71896406713338, -122.202626809195 37.71857871516488, -122.202868905164 37.7183700856397, -122.202909419183 37.71816571909098, -122.203313481398 37.71785959456909, -122.20244256473 37.71708177109411, -122.202021973514 37.71674920749609, -122.201678727244 37.71650106291599, -122.201399710517 37.71624225831889, -122.201104703777 37.7159580127994, -122.200854702634 37.7156763954754, -122.200552060215 37.71538930530248, -122.200210969901 37.71511280230468, -122.199884791621 37.71485477454648, -122.199507229708 37.7144473716878, -122.200841766894 37.71374277903888, -122.200927645835 37.7136578579394, -122.200970585305 37.7135475669603, -122.200976604018 37.7134330119809, -122.200948217255 37.71330658912748, -122.202133723935 37.71302645407658, -122.204107771772 37.71257242269488, -122.20510562039 37.712349563262, -122.205883471594 37.71216500728681, -122.206845746927 37.71190796417878, -122.206905844219 37.71185367019291, -122.207086046265 37.71180619845529, -122.207163211548 37.71183327440519, -122.20728089085 37.7118383200536, -122.208766794162 37.71149450452489, -122.209276408422 37.7113692864847, -122.209703826835 37.7112697230678, -122.209849803068 37.71120860627168, -122.210004223466 37.71108637252841, -122.210313243923 37.71080800574517, -122.210699519495 37.71041415440249, -122.211128644707 37.70999315328689, -122.211440988931 37.7097396560193, -122.211824479726 37.7093514133591, -122.211969377981 37.70919222119888, -122.211996237608 37.70905221709679, -122.211170146873 37.70203241348079, -122.211172841819 37.7019283597972, -122.211215691458 37.7018370994791, 
-122.212551396454 37.700546791649, -122.212803643385 37.70028160551789, -122.21358679465 37.69954937129089, -122.213712828285 37.69945178216058, -122.2139094695 37.6992977573942, -122.214029664085 37.69926385342248, -122.214167016492 37.69927067686459, -122.215772126242 37.6995422635655, -122.216407235148 37.6996441882836, -122.216730718482 37.6997436962414, -122.21699572149 37.6998624659219, -122.231144277047 37.70987098860599, -122.244036898004 37.71908799328549, -122.249210924546 37.72267501128519, -122.250975485259 37.7240642670484, -122.251198626776 37.72428837020699, -122.251258724068 37.7244648669541, -122.251293129543 37.72490617803489))'), {
u'gnis:county_name': u'Alameda', u'source_ref': u'geonames.usgs.gov', u'iata': u'OAK', u'name:ja': u'\u30aa\u30fc\u30af\u30e9\u30f3\u30c9\u56fd\u969b\u7a7a\u6e2f', u'gnis:feature_type': u'Airport', u'way_area': u'1.54832e+07', u'wikipedia': u'en:Oakland International Airport', u'addr:state': u'CA', u'ele': u'2', u'icao': u'KOAK', u'source': u'openstreetmap.org', u'aeroway': u'aerodrome', u'wikidata': u'Q1165584', u'gnis:created': u'03/01/1994', u'gnis:feature_id': u'1653772', u'aerodrome': u'international', u'name': u'Metropolitan Oakland International Airport'}))
self.assert_has_feature(
13, 1314, 3167, 'pois',
{'kind': 'aerodrome', 'iata': 'OAK'})
| 604.384615 | 7,766 | 0.80826 |
a1907facc5a5c18ae0a1f933544a173587712a86 | 6,093 | py | Python | light_gbm.py | raph-m/safe_driver_prediction | a4963c59b0941e23fcc90f5db8e0dbf10bfcc3f4 | ["MIT"] | null | null | null | light_gbm.py | raph-m/safe_driver_prediction | a4963c59b0941e23fcc90f5db8e0dbf10bfcc3f4 | ["MIT"] | 15 | 2020-01-28T22:21:40.000Z | 2022-03-11T23:19:31.000Z | light_gbm.py | raph-m/safe_driver_prediction | a4963c59b0941e23fcc90f5db8e0dbf10bfcc3f4 | ["MIT"] | 1 | 2018-02-28T15:10:43.000Z | 2018-02-28T15:10:43.000Z | import json
import time
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from util import gini_normalized
from feature_selection_1 import get_cached_features, continuous_values, categorical_features
feature_selection = "none"
number_of_features = 10
alpha = 32
max_depth = 4
n_estimators = 100
loss = "rank:pairwise"
subsample = 0.8
learning_rate = 0.05
colsample_bytree = 0.8
gamma = 9
parameters = {
"feature_selection": {
"name": feature_selection,
"number_of_features": number_of_features
},
"classifier": {
"name": "lightgbm",
"loss":
{
"name": loss,
"alpha": alpha
},
"max_depth": max_depth,
"n_estimators": n_estimators
}
}
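
# Note: loss/alpha/max_depth/n_estimators above feed the parameters dict used for
# result logging below; subsample, learning_rate, colsample_bytree and gamma are
# defined but not referenced again, and the LightGBM settings actually used for
# training are the params_* dicts further down (params_2 is the one passed to lgb.train).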
# Part 1 - Data Preprocessing
# Importing the dataset
dataset = pd.read_csv('train.csv')
categorical_features_count = len(categorical_features)
selected_features = categorical_features + continuous_values
X = dataset.iloc[:, selected_features].values
y = dataset.iloc[:, 1].values
column_ranges = []
print("replacing missing values")
t0 = time.time()
print("number of examples: "+str(len(X[:, 0])))
for i in range(len(X[0, :])):
if i <= categorical_features_count:
        # for a categorical variable, the strategy is to replace missing values
        # with the most frequent value
(values, counts) = np.unique(X[:, i], return_counts=True)
counts = [counts[i] if values[i] >= 0 else 0 for i in range(len(values))]
ind = np.argmax(counts)
column_ranges.append(max(values))
replacement_value = values[ind]
else:
        # otherwise simply use the mean
replacement_value = np.mean(X[:, i])
for j in range(len(X[:, i])):
if X[j, i] < -0.5:
X[j, i] = replacement_value
t1 = time.time()
print(t1-t0)
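

# Illustrative sketch only (not called anywhere in this script): the same imputation
# strategy written as a standalone helper, assuming missing values are encoded as
# negative numbers as in this dataset. The explicit loop above is kept unchanged.
def impute_missing(features, n_categorical):
    features = features.copy()
    for col in range(features.shape[1]):
        missing = features[:, col] < -0.5
        observed = features[~missing, col]
        if col < n_categorical:
            # categorical column: fill with the most frequent observed value
            vals, counts = np.unique(observed, return_counts=True)
            fill = vals[np.argmax(counts)]
        else:
            # continuous column: fill with the mean of the observed values
            fill = observed.mean()
        features[missing, col] = fill
    return features
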
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'max_depth': 6,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.9,
'bagging_freq': 5,
'verbose': 0
}
params_1 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 3,
'learning_rate': 0.05,
'feature_fraction': 1,
'bagging_fraction': 1,
'bagging_freq': 10,
'verbose': 0
}
params_2 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 4,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.9,
'bagging_freq': 2,
'verbose': 0
}
params_3 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 5,
'learning_rate': 0.05,
'feature_fraction': 0.3,
'bagging_fraction': 0.7,
'bagging_freq': 10,
'verbose': 0
}
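
# Note: only params_2 is passed to lgb.train below; params, params_1 and params_3 are
# alternative configurations that are not used in this run.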
print('Start training...')
# train
t2 = time.time()
gbm = lgb.train(
params_2,
lgb_train,
num_boost_round=1000,
valid_sets=lgb_eval,
early_stopping_rounds=100,
verbose_eval=50
)
t3 = time.time()
print(t3-t2)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
y_pred_train = gbm.predict(X_train, num_iteration=gbm.best_iteration)
print("gini normalized score (train): ")
gini_score = gini_normalized(y_train, y_pred_train)
print(gini_score)
print("gini normalized score (test): ")
gini_score = gini_normalized(y_test, y_pred)
print(gini_score)
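

# For reference, a minimal sketch of a normalized Gini metric (the project imports its
# own gini_normalized from util); for a binary target the normalized Gini is equivalent
# to 2 * AUC - 1, which is what this illustrative helper computes.
def gini_normalized_sketch(y_true, y_score):
    from sklearn.metrics import roc_auc_score
    return 2 * roc_auc_score(y_true, y_score) - 1
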
import numpy as np
np.savetxt("y_test", y_test)
np.savetxt("y_pred", y_pred)
np.savetxt("y_train", y_test)
np.savetxt("y_pred_train", y_pred)
print("mean de y pred")
print(np.mean(y_pred))
parameters.update({
"result": {
"gini_score": gini_score
}})
f = open("results.json", "r")
results_txt = f.read()
f.close()
results = json.loads(results_txt)
# uncomment this line if you want to save the results
# results.append(parameters)
f = open("results.json", "w")
f.write(json.dumps(results))
f.close()
def make_submission():
submission_dataset = pd.read_csv('test.csv')
X_submission = submission_dataset.iloc[:, [i-1 for i in selected_features]].values
ids = submission_dataset.iloc[:, 0].values
print("replacing missing values")
print("number of examples in test: "+str(len(X_submission[:, 0])))
for i in range(len(X[0, :])):
if i <= categorical_features_count:
            # for a categorical variable, the strategy is to replace missing values
            # with the most frequent value
(values, counts) = np.unique(X[:, i], return_counts=True)
counts = [counts[i] if values[i] >= 0 else 0 for i in range(len(values))]
ind = np.argmax(counts)
column_ranges.append(max(values))
replacement_value = values[ind]
else:
            # otherwise simply use the mean
replacement_value = np.mean(X[:, i])
for j in range(len(X_submission[:, i])):
if X_submission[j, i] < -0.5:
X_submission[j, i] = replacement_value
y_submission = gbm.predict(X_submission, num_iteration=gbm.best_iteration)
from tools import to_csv
minimum = 1
maximum = 0
epsilon = 0.01
for y_i in y_submission:
if y_i < minimum:
minimum = y_i
if y_i > maximum:
maximum = y_i
y_submission = y_submission - minimum + epsilon
y_submission = y_submission/(maximum - minimum)
y_submission = y_submission/2
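    # Note: the three steps above shift predictions to be strictly positive and squeeze
    # them into roughly (0, 0.5] before writing the submission file.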
to_csv(y_submission, ids)
make_submission()
| 25.282158 | 95 | 0.645167 |
05fa93f2fd4b62a9501df98c1930e1070eefbd51 | 3,380 | py | Python | src/python/txtai/ann/faiss.py | neuml/txtai | b25173a8650a73dbcae43caa278f32020fef0236 | ["Apache-2.0"] | 1,893 | 2020-08-09T19:55:46.000Z | 2022-03-31T22:03:13.000Z | src/python/txtai/ann/faiss.py | neuml/txtai | b25173a8650a73dbcae43caa278f32020fef0236 | ["Apache-2.0"] | 243 | 2020-08-12T15:22:31.000Z | 2022-03-31T17:16:28.000Z | src/python/txtai/ann/faiss.py | neuml/txtai | b25173a8650a73dbcae43caa278f32020fef0236 | ["Apache-2.0"] | 189 | 2020-08-12T20:42:53.000Z | 2022-03-30T13:32:46.000Z | """
Faiss module
"""
import math
import numpy as np
from faiss import index_factory, METRIC_INNER_PRODUCT, read_index, write_index
from .base import ANN
class Faiss(ANN):
"""
Builds an ANN model using the Faiss library.
"""
def load(self, path):
# Load index
self.model = read_index(path)
def index(self, embeddings):
# Configure embeddings index. Inner product is equal to cosine similarity on normalized vectors.
params = self.configure(embeddings.shape[0])
self.model = index_factory(embeddings.shape[1], params, METRIC_INNER_PRODUCT)
# Train model
self.model.train(embeddings)
# Add embeddings - position in embeddings is used as the id
self.model.add_with_ids(embeddings, np.arange(embeddings.shape[0], dtype=np.int64))
# Add id offset and index build metadata
self.config["offset"] = embeddings.shape[0]
self.metadata({"components": params})
def append(self, embeddings):
new = embeddings.shape[0]
# Append new ids - position in embeddings + existing offset is used as the id
self.model.add_with_ids(embeddings, np.arange(self.config["offset"], self.config["offset"] + new, dtype=np.int64))
# Update id offset
self.config["offset"] += new
def delete(self, ids):
# Remove specified ids
self.model.remove_ids(np.array(ids, dtype=np.int64))
def search(self, queries, limit):
# Run the query
self.model.nprobe = self.nprobe()
scores, ids = self.model.search(queries, limit)
# Map results to [(id, score)]
results = []
for x, score in enumerate(scores):
results.append(list(zip(ids[x].tolist(), score.tolist())))
return results
def count(self):
return self.model.ntotal
def save(self, path):
# Write index
write_index(self.model, path)
def configure(self, count):
"""
Configures settings for a new index.
Args:
count: initial number of embeddings rows
Returns:
user-specified or generated components setting
"""
# Lookup components setting
components = self.setting("components")
if components:
return components
# Get storage setting
storage = "SQ8" if self.config.get("quantize") else "Flat"
# Small index, use storage directly with IDMap
if count <= 5000:
return f"IDMap,{storage}"
x = self.cells(count)
components = f"IVF{x},{storage}"
return components
def cells(self, count):
"""
Calculates the number of IVF cells for an IVF index.
Args:
count: number of embeddings rows
Returns:
number of IVF cells
"""
# Calculate number of IVF cells where x = min(4 * sqrt(embeddings count), embeddings count / 39)
# Faiss requires at least 39 * x data points
return min(round(4 * math.sqrt(count)), int(count / 39))
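
    # Worked example (illustrative): for count = 100000, 4 * sqrt(count) ~= 1265 and
    # count / 39 ~= 2564, so cells() returns 1265 and configure() would produce
    # "IVF1265,Flat" (or "IVF1265,SQ8" when the quantize setting is enabled).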
def nprobe(self):
"""
Gets or derives the nprobe search parameter
"""
# Get size of embeddings index
count = self.count()
default = 6 if count <= 5000 else round(self.cells(count) / 16)
return self.setting("nprobe", default)
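
        # Worked example (illustrative): continuing the 100000-row case, cells(count) is
        # 1265, so the default nprobe is round(1265 / 16) = 79 unless a "nprobe" setting
        # overrides it; indexes with <= 5000 rows default to nprobe = 6.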
| 27.258065 | 122 | 0.607101 |
feb3c10a729ae9cc605d2dd3588eae61375e5363 | 209,381 | py | Python | pandas/tests/test_groupby.py | Remiremi/pandas | 05a8badce4a786eed17d6a4fc1287c84023024da | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | pandas/tests/test_groupby.py | Remiremi/pandas | 05a8badce4a786eed17d6a4fc1287c84023024da | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | pandas/tests/test_groupby.py | Remiremi/pandas | 05a8badce4a786eed17d6a4fc1287c84023024da | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy.testing.decorators import slow
from datetime import datetime
from numpy import nan
from pandas import date_range,bdate_range, Timestamp
from pandas.core.index import Index, MultiIndex, Int64Index, CategoricalIndex
from pandas.core.api import Categorical, DataFrame
from pandas.core.groupby import (SpecificationError, DataError,
_nargsort, _lexsort_indexer)
from pandas.core.series import Series
from pandas.core.config import option_context
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_index_equal, assertRaisesRegexp)
from pandas.compat import(
range, long, lrange, StringIO, lmap, lzip, map,
zip, builtins, OrderedDict, product as cart_product
)
from pandas import compat
from pandas.core.panel import Panel
from pandas.tools.merge import concat
from collections import defaultdict
from functools import partial
import pandas.core.common as com
import numpy as np
import pandas.core.nanops as nanops
import pandas.util.testing as tm
import pandas as pd
from numpy.testing import assert_equal
def commonSetUp(self):
self.dateRange = bdate_range('1/1/2005', periods=250)
self.stringIndex = Index([rands(8).upper() for x in range(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in compat.iteritems(self.groupId))
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class TestGroupBy(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(np.random.randn(8),
dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
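
        # Fixture summary: self.df is a small frame keyed by string columns A/B with
        # float columns C/D, self.mframe is indexed by a two-level MultiIndex
        # (first/second), and self.three_group has three string key columns (A/B/C)
        # plus three float columns (D/E/F).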
def test_basic(self):
def checkit(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one': np.mean,
'two': np.std})
group_constants = {
0: 10,
1: 20,
2: 30
}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
for dtype in ['int64', 'int32', 'float64', 'float32']:
checkit(dtype)
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.groupby('A')
self.assertRaises(KeyError, g.__getitem__, ['C']) # g[['C']]
self.assertRaises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
with assertRaisesRegexp(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.ix[[1, 0], ['B','C','D']]
expected.index = Index(['bar', 'foo'],name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.ix[[5, 7], ['B','C','D']]
expected.index = Index(['bar', 'foo'],name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.ix[[2, 3],['B','C','D']].copy()
expected.index = Index(['foo', 'bar'],name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
self.assertTrue(com.isnull(grouped['B'].first()['foo']))
self.assertTrue(com.isnull(grouped['B'].last()['foo']))
self.assertTrue(com.isnull(grouped['B'].nth(0)[0])) # not sure what this is testing
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1,2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1,2]].set_index('A')
result = g.nth(0,dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.ix[[3, 2],['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
self.assertEqual(s.dtype, 'int64')
f = s.groupby(level=0).first()
self.assertEqual(f.dtype, 'int64')
def test_nth(self):
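        # nth(n) selects the nth row of each group by position, keeping NaN rows;
        # with dropna='any'/'all' it instead behaves like first()/last() over non-null rows.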
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[],['B']])
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[],['B']])
assert_series_equal(g.B.nth(0), df.B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0), df.ix[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green', 1: 'green', 2: 'red', 3: 'red', 4: 'red'},
'food': {0: 'ham', 1: 'eggs', 2: 'eggs', 3: 'ham', 4: 'pork'},
'two': {0: 1.5456590000000001, 1: -0.070345000000000005, 2: -2.4004539999999999, 3: 0.46206000000000003, 4: 0.52350799999999997},
'one': {0: 0.56573799999999996, 1: -0.9742360000000001, 2: 1.033801, 3: -0.78543499999999999, 4: 0.70422799999999997}}).set_index(['color', 'food'])
result = df.groupby(level=0).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result,expected)
result = df.groupby(level=0).nth(3)
expected = df.loc[[]]
assert_frame_equal(result,expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)),dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
self.assertTrue(expected.name, 0)
self.assertEqual(expected.name, 1)
# validate first
v = s[g==1].iloc[0]
self.assertEqual(expected.iloc[0],v)
self.assertEqual(expected2.iloc[0],v)
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g,sort=False).first()
expected.index = pd.Index(range(1,10), name=0)
result = s.groupby(g).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result,expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[],['B']])
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
result = df.groupby((df.index.year, df.index.month)).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30',
'2014/5/1', '2014/5/6', '2014/5/29', '2014/5/30',
'2014/6/2', '2014/6/5', '2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame({'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'], ['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_grouper_index_types(self):
# related GH5375
# groupby misbehaving when using a Floatlike index
df = DataFrame(np.arange(10).reshape(5,2),columns=list('AB'))
for index in [ tm.makeFloatIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
df.index = index(len(df))
df.groupby(list('abcde')).apply(lambda x: x)
df.index = list(reversed(df.index.tolist()))
df.groupby(list('abcde')).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
# GH 7885
# with level and freq specified in a pd.Grouper
from datetime import date, timedelta
d0 = date.today() - timedelta(days=14)
dates = date_range(d0, date.today())
date_index = pd.MultiIndex.from_product([dates, dates], names=['foo', 'bar'])
df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)
# Check string level
expected = df.reset_index().groupby([pd.Grouper(key='foo', freq='W'),
pd.Grouper(key='bar', freq='W')]).sum()
result = df.groupby([pd.Grouper(level='foo', freq='W'),
pd.Grouper(level='bar', freq='W')]).sum()
assert_frame_equal(result, expected)
# Check integer level
result = df.groupby([pd.Grouper(level=0, freq='W'),
pd.Grouper(level=1, freq='W')]).sum()
assert_frame_equal(result, expected)
def test_grouper_creation_bug(self):
# GH 8795
df = DataFrame({'A':[0,0,1,1,2,2], 'B':[1,2,3,4,5,6]})
g = df.groupby('A')
expected = g.sum()
g = df.groupby(pd.Grouper(key='A'))
result = g.sum()
assert_frame_equal(result, expected)
result = g.apply(lambda x: x.sum())
assert_frame_equal(result, expected)
g = df.groupby(pd.Grouper(key='A',axis=0))
result = g.sum()
assert_frame_equal(result, expected)
# GH8866
s = Series(np.arange(8,dtype='int64'),
index=pd.MultiIndex.from_product([list('ab'),
range(2),
date_range('20130101',periods=2)],
names=['one','two','three']))
result = s.groupby(pd.Grouper(level='three',freq='M')).sum()
expected = Series([28],index=Index([Timestamp('2013-01-31')],freq='M',name='three'))
assert_series_equal(result, expected)
# just specifying a level breaks
result = s.groupby(pd.Grouper(level='one')).sum()
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
def test_grouper_getting_correct_binner(self):
# GH 10063
# using a non-time-based grouper and a time-based grouper
# and specifying levels
df = DataFrame({'A' : 1 },
index=pd.MultiIndex.from_product([list('ab'),
date_range('20130101',periods=80)],
names=['one','two']))
result = df.groupby([pd.Grouper(level='one'),pd.Grouper(level='two',freq='M')]).sum()
expected = DataFrame({'A' : [31,28,21,31,28,21]},
index=MultiIndex.from_product([list('ab'),date_range('20130101',freq='M',periods=3)],
names=['one','two']))
assert_frame_equal(result, expected)
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
def test_empty_groups(self):
# GH # 1048
self.assertRaises(ValueError, self.df.groupby, [])
def test_groupby_grouper(self):
grouped = self.df.groupby('A')
result = self.df.groupby(grouped.grouper).mean()
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_duplicated_column_errormsg(self):
# GH7511
df = DataFrame(columns=['A','B','A','C'], \
data=[range(4), range(2,6), range(0, 8, 2)])
self.assertRaises(ValueError, df.groupby, 'A')
self.assertRaises(ValueError, df.groupby, ['A', 'B'])
grouped = df.groupby('B')
c = grouped.count()
self.assertTrue(c.columns.nlevels == 1)
self.assertTrue(c.columns.size == 3)
def test_groupby_dict_mapping(self):
# GH #679
from pandas import Series
s = Series({'T1': 5})
result = s.groupby({'T1': 'T2'}).agg(sum)
expected = s.groupby(['T2']).agg(sum)
assert_series_equal(result, expected)
s = Series([1., 2., 3., 4.], index=list('abcd'))
mapping = {'a': 0, 'b': 0, 'c': 1, 'd': 1}
result = s.groupby(mapping).mean()
result2 = s.groupby(mapping).agg(np.mean)
expected = s.groupby([0, 0, 1, 1]).mean()
expected2 = s.groupby([0, 0, 1, 1]).mean()
assert_series_equal(result, expected)
assert_series_equal(result, result2)
assert_series_equal(result, expected2)
def test_groupby_bounds_check(self):
import pandas as pd
        # groupby_X is code-generated, so if one variant
        # does, the rest probably do too
a = np.array([1,2],dtype='object')
b = np.array([1,2,3],dtype='object')
self.assertRaises(AssertionError, pd.algos.groupby_object,a, b)
def test_groupby_grouper_f_sanity_checked(self):
import pandas as pd
dates = date_range('01-Jan-2013', periods=12, freq='MS')
ts = pd.TimeSeries(np.random.randn(12), index=dates)
# GH3035
# index.map is used to apply grouper to the index
# if it fails on the elements, map tries it on the entire index as
# a sequence. That can yield invalid results that cause trouble
# down the line.
        # the surprise comes from using key[0:6] rather than str(key)[0:6]
# when the elements are Timestamp.
# the result is Index[0:6], very confusing.
self.assertRaises(AssertionError, ts.groupby,lambda key: key[0:6])
def test_groupby_nonobject_dtype(self):
key = self.mframe.index.labels[0]
grouped = self.mframe.groupby(key)
result = grouped.sum()
expected = self.mframe.groupby(key.astype('O')).sum()
assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = self.df_mixed_floats.copy()
df['value'] = lrange(len(df))
def max_value(group):
return group.ix[group['value'].idxmax()]
applied = df.groupby('A').apply(max_value)
result = applied.get_dtype_counts()
result.sort()
expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 })
expected.sort()
assert_series_equal(result,expected)
def test_groupby_return_type(self):
# GH2893, return a reduced type
df1 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
{"val1":2, "val2": 27}, {"val1":2, "val2": 12}])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df1.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result,Series)
df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
{"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df2.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result,Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
result = df.groupby('X',squeeze=False).count()
tm.assertIsInstance(result,DataFrame)
# GH5592
        # inconsistent return type
df = DataFrame(dict(A = [ 'Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb', 'Pony', 'Pony' ],
B = Series(np.arange(7),dtype='int64'),
C = date_range('20130101',periods=7)))
def f(grp):
return grp.iloc[0]
expected = df.groupby('A').first()[['B']]
result = df.groupby('A').apply(f)[['B']]
assert_frame_equal(result,expected)
def f(grp):
if grp.name == 'Tiger':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Tiger'] = np.nan
assert_frame_equal(result,e)
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Pony'] = np.nan
assert_frame_equal(result,e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['C']]
e = df.groupby('A').first()[['C']]
e.loc['Pony'] = pd.NaT
assert_frame_equal(result,e)
# scalar outputs
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0].loc['C']
result = df.groupby('A').apply(f)
e = df.groupby('A').first()['C'].copy()
e.loc['Pony'] = np.nan
e.name = None
assert_series_equal(result,e)
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1':np.random.randn(5),
'data2':np.random.randn(5),
'key1':['a','a','b','b','a'],
'key2':['one','two','one','two','one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns=['data1','data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result,expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0],
[2, '2012-01-02', 2.0],
[3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date()
if row[1] else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert(len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
tm.assertIsInstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2',s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
self.assertRaises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
ser = self.df.C
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_get_group(self):
wp = tm.makePanel()
grouped = wp.groupby(lambda x: x.month, axis='major')
gp = grouped.get_group(1)
expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1])
assert_panel_equal(gp, expected)
# GH 5267
# be datelike friendly
df = DataFrame({'DATE' : pd.to_datetime(['10-Oct-2013', '10-Oct-2013', '10-Oct-2013',
'11-Oct-2013', '11-Oct-2013', '11-Oct-2013']),
'label' : ['foo','foo','bar','foo','foo','bar'],
'VAL' : [1,2,3,4,5,6]})
g = df.groupby('DATE')
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group(Timestamp(key).to_datetime())
result3 = g.get_group(str(Timestamp(key)))
assert_frame_equal(result1,result2)
assert_frame_equal(result1,result3)
g = df.groupby(['DATE','label'])
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group((Timestamp(key[0]).to_datetime(),key[1]))
result3 = g.get_group((str(Timestamp(key[0])),key[1]))
assert_frame_equal(result1,result2)
assert_frame_equal(result1,result3)
# must pass a same-length tuple with multiple keys
self.assertRaises(ValueError, lambda : g.get_group('foo'))
self.assertRaises(ValueError, lambda : g.get_group(('foo')))
self.assertRaises(ValueError, lambda : g.get_group(('foo','bar','baz')))
def test_get_group_grouped_by_tuple(self):
# GH 8121
df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]],
index=['ids']).T
gr = df.groupby('ids')
expected = DataFrame({'ids': [(1,), (1,)]}, index=[0, 2])
result = gr.get_group((1,))
assert_frame_equal(result, expected)
dt = pd.to_datetime(['2010-01-01', '2010-01-02', '2010-01-01',
'2010-01-02'])
df = DataFrame({'ids': [(x,) for x in dt]})
gr = df.groupby('ids')
result = gr.get_group(('2010-01-01',))
expected = DataFrame({'ids': [(dt[0],), (dt[0],)]}, index=[0, 2])
assert_frame_equal(result, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
assert_series_equal(grouped.sum(), Series([]))
assert_series_equal(grouped.agg(np.sum), Series([]))
assert_series_equal(grouped.apply(np.sum), Series([]))
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float)
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), DataFrame({}, dtype=float))
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_grouping_error_on_multidim_input(self):
from pandas.core.groupby import Grouping
self.assertRaises(ValueError, \
Grouping, self.df.index, self.df[['A','A']])
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
result = grouped.describe() # it works!
def test_apply_issues(self):
# GH 5788
s="""2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'], parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
assert_frame_equal(result,expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(['2011.05.16','2011.05.17','2011.05.18'], dtype=object, name='date')
expected = Series(['00:00','02:00','02:00'], index=exp_idx)
result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()])
assert_series_equal(result, expected)
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day])
self.assertEqual(len(grouped), len(df))
grouped = df.groupby([lambda x: x.year,
lambda x: x.month])
expected = len(set([(x.year, x.month) for x in df.index]))
self.assertEqual(len(grouped), expected)
def test_groups(self):
grouped = self.df.groupby(['A'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k).all())
grouped = self.df.groupby(['A', 'B'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k[0]).all())
self.assertTrue((self.df.ix[v]['B'] == k[1]).all())
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'],
['B', 'std'],
['C', 'mean'],
['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var()],
['B', grouped['B'].std()],
['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_aggregate_item_by_item(self):
df = self.df.copy()
df['E'] = ['a'] * len(self.df)
grouped = self.df.groupby('A')
# API change in 0.11
# def aggfun(ser):
# return len(ser + 'a')
# result = grouped.agg(aggfun)
# self.assertEqual(len(result.columns), 1)
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
# GH5782
        # odd comparisons can result here, so cast to make the comparison easy
assert_almost_equal(result.xs('foo'), np.array([foo] * K).astype('float64'))
assert_almost_equal(result.xs('bar'), np.array([bar] * K).astype('float64'))
def aggfun(ser):
return ser.size
result = DataFrame().groupby(self.df.A).agg(aggfun)
tm.assertIsInstance(result, DataFrame)
self.assertEqual(len(result), 0)
def test_agg_item_by_item_raise_typeerror(self):
from numpy.random import randint
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
com.pprint_thing('----------------------------------------')
com.pprint_thing(df.to_string())
raise TypeError
self.assertRaises(TypeError, df.groupby(0).agg,
raiseException)
def test_basic_regression(self):
# regression
T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
result = Series(T, lrange(0, len(T)))
groupings = np.random.random((1100,))
groupings = Series(groupings, lrange(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
# GH 8046
# make sure that we preserve the input order
df = DataFrame(np.arange(6,dtype='int64').reshape(3,2), columns=["a","b"], index=[0,2,1])
key = [0,0,1]
expected = df.sort_index().groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x-x.mean()).groupby(key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.TimeGrouper('M'))
g.transform(lambda x: x-1)
# GH 9700
df = DataFrame({'a' : range(5, 10), 'b' : range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b' : range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast(self):
df = DataFrame( { 'id' : np.arange( 100000 ) / 3,
'val': np.random.randn( 100000) } )
grp=df.groupby('id')['val']
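        # 'id' is non-decreasing along the index, so repeating each (sorted) group mean
        # by its group size lines up element-wise with the transform output.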
values = np.repeat(grp.mean().values, com._ensure_platform_int(grp.count().values))
expected = pd.Series(values,index=df.index)
result = grp.transform(np.mean)
assert_series_equal(result,expected)
result = grp.transform('mean')
assert_series_equal(result,expected)
def test_transform_broadcast(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.ts.index))
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = self.tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in self.tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
self.assertTrue(result.columns.equals(self.tsframe.columns))
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_dtype(self):
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug(self):
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A = Timestamp('20130101'), B = np.arange(5)))
result = df.groupby('A')['B'].transform(lambda x: x.rank(ascending=False))
expected = Series(np.arange(5,0,step=-1),name='B')
assert_series_equal(result,expected)
def test_transform_multiple(self):
grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
transformed = grouped.transform(lambda x: x * 2)
broadcasted = grouped.transform(np.mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(self):
f = lambda x: x.mean()
result = self.df.groupby('A')['C', 'D'].transform(f)
selection = self.df[['C', 'D']]
expected = selection.groupby(self.df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(self):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = self.df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(self):
result = self.df.groupby('A').transform('mean')
expected = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = self.df.groupby('A')['C'].transform('mean')
expected = self.df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_transform_length(self):
# GH 9697
df = pd.DataFrame({'col1':[1,1,2,2], 'col2':[1,2,3,np.nan]})
expected = pd.Series([3.0]*4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_with_na(self):
index = Index(np.arange(10))
for dtype in ['float64','float32','int64','int32','int16','int8']:
values = Series(np.ones(10), index, dtype=dtype)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
#self.assertTrue(issubclass(agged.dtype.type, np.integer))
            # explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
self.assertTrue(issubclass(agged.dtype.type, np.dtype(dtype).type))
def test_groupby_transform_with_int(self):
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A = [1,1,1,2,2,2], B = Series(1,dtype='float64'), C = Series([1,2,3,1,2,3],dtype='float64'), D = 'foo'))
result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
expected = DataFrame(dict(B = np.nan, C = Series([-1,0,1,-1,0,1],dtype='float64')))
assert_frame_equal(result,expected)
# int case
df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = [1,2,3,1,2,3], D = 'foo'))
result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
expected = DataFrame(dict(B = np.nan, C = [-1,0,1,-1,0,1]))
assert_frame_equal(result,expected)
# int that needs float conversion
s = Series([2,3,4,10,5,-1])
df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = s, D = 'foo'))
result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
s1 = s.iloc[0:3]
s1 = (s1-s1.mean())/s1.std()
s2 = s.iloc[3:6]
s2 = (s2-s2.mean())/s2.std()
expected = DataFrame(dict(B = np.nan, C = concat([s1,s2])))
assert_frame_equal(result,expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x*2/2)
expected = DataFrame(dict(B = 1, C = [2,3,4,10,5,-1]))
assert_frame_equal(result,expected)
def test_indices_concatenation_order(self):
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1]**2
if y.empty:
multiindex = MultiIndex(
levels = [[]]*2,
labels = [[]]*2,
names = ['b', 'c']
)
res = DataFrame(None,
columns=['a'],
index=multiindex)
return res
else:
y = y.set_index(['b','c'])
return y
def f2(x):
y = x[(x.b % 2) == 1]**2
if y.empty:
return DataFrame()
else:
y = y.set_index(['b','c'])
return y
def f3(x):
y = x[(x.b % 2) == 1]**2
if y.empty:
multiindex = MultiIndex(
levels = [[]]*2,
labels = [[]]*2,
names = ['foo', 'bar']
)
res = DataFrame(None,
columns=['a','b'],
index=multiindex)
return res
else:
return y
df = DataFrame({'a':[1,2,2,2],
'b':lrange(4),
'c':lrange(5,9)})
df2 = DataFrame({'a':[3,2,2,2],
'b':lrange(4),
'c':lrange(5,9)})
# correct result
result1 = df.groupby('a').apply(f1)
result2 = df2.groupby('a').apply(f1)
assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
self.assertRaises(AssertionError, df.groupby('a').apply, f2)
self.assertRaises(AssertionError, df2.groupby('a').apply, f2)
# should fail (incorrect shape)
self.assertRaises(AssertionError, df.groupby('a').apply, f3)
self.assertRaises(AssertionError, df2.groupby('a').apply, f3)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result.unstack(), expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
self.assertRaises(AttributeError, getattr, grouped, 'foo')
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe().unstack()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe()
assert_series_equal(result, expected)
def test_series_agg_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
def test_series_agg_multi_pure_python(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert(len(x.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
assert_frame_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.ix[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
self.assertEqual(result.index.name, 'A')
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year,
lambda x: x.month])
result = grouped.describe()
for col in self.tsframe:
expected = grouped[col].describe()
assert_series_equal(result[col], expected, check_names=False)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
for name, group in groupedT:
assert_frame_equal(result[name], group.describe())
def test_frame_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.columns), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = self.tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.columns), 4)
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
assert_almost_equal(transformed.xs(idx), mean)
# iterate
for weekday, group in grouped:
self.assertEqual(group.index[0].weekday(), weekday)
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in compat.iteritems(groups):
samething = self.tsframe.index.take(indices[k])
self.assertTrue((samething == v).all())
def test_grouping_is_iterable(self):
# this code path isn't used anywhere else
# not sure it's useful
grouped = self.tsframe.groupby([lambda x: x.weekday(),
lambda x: x.year])
# test it works
for g in grouped.grouper.groupings[0]:
pass
def test_frame_groupby_columns(self):
mapping = {
'A': 0, 'B': 0, 'C': 1, 'D': 1
}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.columns), 2)
# transform
tf = lambda x: x - x.mean()
groupedT = self.tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
self.assertEqual(len(v.columns), 2)
def test_frame_set_name_single(self):
grouped = self.df.groupby('A')
result = grouped.mean()
self.assertEqual(result.index.name, 'A')
result = self.df.groupby('A', as_index=False).mean()
self.assertNotEqual(result.index.name, 'A')
result = grouped.agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped.agg({'C': np.mean, 'D': np.std})
self.assertEqual(result.index.name, 'A')
result = grouped['C'].mean()
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg([np.mean, np.std])
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
self.assertEqual(result.index.name, 'A')
def test_multi_iter(self):
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
grouped = s.groupby([k1, k2])
iterated = list(grouped)
expected = [('a', '1', s[[0, 2]]),
('a', '2', s[[1]]),
('b', '1', s[[4]]),
('b', '2', s[[3, 5]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_series_equal(three, e3)
def test_multi_iter_frame(self):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': k1, 'k2': k2},
index=['one', 'two', 'three', 'four', 'five', 'six'])
grouped = df.groupby(['k1', 'k2'])
# things get sorted!
iterated = list(grouped)
idx = df.index
expected = [('a', '1', df.ix[idx[[4]]]),
('a', '2', df.ix[idx[[3, 5]]]),
('b', '1', df.ix[idx[[0, 2]]]),
('b', '2', df.ix[idx[[1]]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_frame_equal(three, e3)
# don't iterate through groups with no data
df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
grouped = df.groupby(['k1', 'k2'])
groups = {}
for key, gp in grouped:
groups[key] = gp
self.assertEqual(len(groups), 2)
# axis = 1
three_levels = self.three_group.groupby(['A', 'B', 'C']).mean()
grouped = three_levels.T.groupby(axis=1, level=(1, 2))
for key, group in grouped:
pass
def test_multi_iter_panel(self):
wp = tm.makePanel()
grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()],
axis=1)
for (month, wd), group in grouped:
exp_axis = [x for x in wp.major_axis
if x.month == month and x.weekday() == wd]
expected = wp.reindex(major=exp_axis)
assert_panel_equal(group, expected)
def test_multi_func(self):
col1 = self.df['A']
col2 = self.df['B']
grouped = self.df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = self.df.groupby(['A', 'B']).mean()
assert_frame_equal(agged.ix[:, ['C', 'D']],
expected.ix[:, ['C', 'D']],
check_names=False) # TODO groupby get drops names
# some "groups" with no data
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(self):
grouped = self.df.groupby(['A', 'B'])['C']
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({'mean': grouped.agg(np.mean),
'std': grouped.agg(np.std)})
assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
grouped = data.groupby(['A', 'B'])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
grouped['F'].agg(funcs)],
keys=['D', 'E', 'F'], axis=1)
assert(isinstance(agged.index, MultiIndex))
assert(isinstance(expected.index, MultiIndex))
assert_frame_equal(agged, expected)
def test_groupby_multiple_columns(self):
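# cross-check multi-key aggregations against nested single-key
# groupbys assembled into a Panel, for both sum and mean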
data = self.df
grouped = data.groupby(['A', 'B'])
def _check_op(op):
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])
expected = dict((k, DataFrame(v)) for k, v in compat.iteritems(expected))
expected = Panel.fromDict(expected).swapaxes(0, 1)
expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
_check_op(lambda x: x.sum())
_check_op(lambda x: x.mean())
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_as_index_agg(self):
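# with as_index=False, agg(func) and dict-based agg should match the
# equivalent .mean()/.sum() shortcuts for single and multi keys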
grouped = self.df.groupby('A', as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
grouped = self.df.groupby('A', as_index=True)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# multi-key
grouped = self.df.groupby(['A', 'B'], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)),
columns=['jim', 'joe', 'jolie'])
ts = Series(np.random.randint(5, 10, 50), name='jim')
gr = df.groupby(ts)
_ = gr.nth(0) # invokes _set_selection_from_grouper internally
assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
assert_frame_equal(left, right)
def test_mulitindex_passthru(self):
# GH 7997
# regression from 0.14.1
df = pd.DataFrame([[1,2,3],[4,5,6],[7,8,9]])
df.columns = pd.MultiIndex.from_tuples([(0,1),(1,1),(2,1)])
result = df.groupby(axis=1, level=[0,1]).first()
assert_frame_equal(result, df)
def test_multifunc_select_col_integer_cols(self):
df = self.df
df.columns = np.arange(len(df.columns))
# it works!
result = df.groupby(1, as_index=False)[2].agg({'Q': np.mean})
def test_as_index_series_return_frame(self):
grouped = self.df.groupby('A', as_index=False)
grouped2 = self.df.groupby(['A', 'B'], as_index=False)
result = grouped['C'].agg(np.sum)
expected = grouped.agg(np.sum).ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].agg(np.sum)
expected2 = grouped2.agg(np.sum).ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
result = grouped['C'].sum()
expected = grouped.sum().ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].sum()
expected2 = grouped2.sum().ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
# corner case
self.assertRaises(Exception, grouped['C'].__getitem__,
'D')
def test_groupby_as_index_cython(self):
data = self.df
# single-key
grouped = data.groupby('A', as_index=False)
result = grouped.mean()
expected = data.groupby(['A']).mean()
expected.insert(0, 'A', expected.index)
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(['A', 'B'], as_index=False)
result = grouped.mean()
expected = data.groupby(['A', 'B']).mean()
arrays = lzip(*expected.index._tuple_index)
expected.insert(0, 'A', arrays[0])
expected.insert(1, 'B', arrays[1])
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(self):
grouped = self.df.groupby(['A', 'B'], as_index=False)
# GH #421
result = grouped['C'].agg(len)
expected = grouped.agg(len).ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_groupby_as_index_corner(self):
self.assertRaises(TypeError, self.ts.groupby,
lambda x: x.weekday(), as_index=False)
self.assertRaises(ValueError, self.df.groupby,
lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_as_index_apply(self):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1,2,1,1,3,1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
assert_index_equal(res_as, exp)
assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610: with as_index=False a MultiIndex is returned here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
assert_index_equal(res_as_apply, exp_as_apply)
assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
assert_index_equal(res, ind)
def test_groupby_head_tail(self):
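# head/tail act as filters: rows come back in original order for both
# as_index settings, and head(0)/tail(0) as well as head(-1)/tail(-1)
# yield an empty frame with the original dtypes preserved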
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
g_as = df.groupby('A', as_index=True)
g_not_as = df.groupby('A', as_index=False)
# as_index=False, much easier
assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
empty_not_as = DataFrame(columns=df.columns)
empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_not_as, g_not_as.head(0))
assert_frame_equal(empty_not_as, g_not_as.tail(0))
assert_frame_equal(empty_not_as, g_not_as.head(-1))
assert_frame_equal(empty_not_as, g_not_as.tail(-1))
assert_frame_equal(df, g_not_as.head(7)) # contains all
assert_frame_equal(df, g_not_as.tail(7))
# as_index=True, (used to be different)
df_as = df
assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_as, g_as.head(0))
assert_frame_equal(empty_as, g_as.tail(0))
assert_frame_equal(empty_as, g_as.head(-1))
assert_frame_equal(empty_as, g_as.tail(-1))
assert_frame_equal(df_as, g_as.head(7)) # contains all
assert_frame_equal(df_as, g_as.tail(7))
# test with selection
assert_frame_equal(g_as[[]].head(1), df_as.loc[[0,2], []])
assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0,2], ['A']])
assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0,2], ['B']])
assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0,2]])
assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0,2], []])
assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0,2], ['A']])
assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0,2], ['B']])
assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0,2]])
def test_groupby_multiple_key(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum())
self.assertTrue(agged.index.equals(df.columns))
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(self):
# test that having an all-NA column doesn't mess you up
df = self.df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = self.df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(self):
grouped = self.df.groupby('A')
result = grouped.mean()
expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
df = self.df.ix[:, ['A', 'C', 'D']]
df['E'] = datetime.now()
grouped = df.groupby('A')
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
result = self.assertRaises(TypeError, grouped.agg,
lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(self):
grouped = self.three_group.groupby(['A', 'B'])
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
def test_empty_groups_corner(self):
# handle empty groups
df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2']),
'k3': ['foo', 'bar'] * 3,
'v1': np.random.randn(6),
'v2': np.random.randn(6)})
grouped = df.groupby(['k1', 'k2'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
grouped = self.mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped['A'].apply(np.mean)
assert_series_equal(agged['A'], agged_A)
self.assertEqual(agged.index.name, 'first')
def test_apply_concat_preserve_names(self):
grouped = self.three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
self.assertEqual(result.index.names, ('A', 'B', 'stat'))
result2 = grouped.apply(desc2)
self.assertEqual(result2.index.names, ('A', 'B', 'stat'))
result3 = grouped.apply(desc3)
self.assertEqual(result3.index.names, ('A', 'B', None))
def test_nonsense_func(self):
df = DataFrame([0])
self.assertRaises(Exception, df.groupby, lambda x: x + 'foo')
def test_builtins_apply(self): # GH8155
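# passing the builtins max/min/sum to apply should give the same result
# as the numpy equivalents and (except for sum) the named agg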
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=['jim', 'joe'])
df['jolie'] = np.random.randn(1000)
for keys in ['jim', ['jim', 'joe']]: # single key & multi-key
if keys == 'jim': continue  # note: the single-key case is skipped here
for f in [max, min, sum]:
fname = f.__name__
result = df.groupby(keys).apply(f)
_shape = result.shape
ngroups = len(df.drop_duplicates(subset=keys))
assert result.shape == (ngroups, 3), 'invalid frame shape: '\
'{} (expected ({}, 3))'.format(result.shape, ngroups)
assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
assert_frame_equal(result, expected, check_dtype=False)
assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_cythonized_aggers(self):
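# each cythonized reduction is checked against a per-group pure-python
# computation, for a single grouping key and for a multi-key grouping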
data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
'B': ['A', 'B'] * 6,
'C': np.random.randn(12)}
df = DataFrame(data)
df.loc[2:10:2,'C'] = nan
def _testit(op):
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
for cat, group in grouped:
exp[cat] = op(group['C'])
exp = DataFrame({'C': exp})
exp.index.name = 'A'
result = op(grouped)
assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(['A', 'B'])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group['C'])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ['A', 'B']
exp.name = 'C'
result = op(grouped)['C']
assert_series_equal(result, exp)
_testit(lambda x: x.count())
_testit(lambda x: x.sum())
_testit(lambda x: x.std())
_testit(lambda x: x.var())
_testit(lambda x: x.sem())
_testit(lambda x: x.mean())
_testit(lambda x: x.median())
_testit(lambda x: x.prod())
_testit(lambda x: x.min())
_testit(lambda x: x.max())
def test_max_min_non_numeric(self):
# #2700
aa = DataFrame({'nn':[11,11,22,22],'ii':[1,2,3,4],'ss':4*['mama']})
result = aa.groupby('nn').max()
self.assertTrue('ss' in result)
result = aa.groupby('nn').min()
self.assertTrue('ss' in result)
def test_cython_agg_boolean(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': np.random.randint(0, 2, 50).astype('bool')})
result = frame.groupby('a')['b'].mean()
expected = frame.groupby('a')['b'].agg(np.mean)
assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame.groupby('a')['b'].mean)
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame[['b']].groupby(frame['a']).mean)
def test_cython_agg_nothing_to_agg_with_dates(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25,
'dates': pd.date_range('now', periods=50,
freq='T')})
with tm.assertRaisesRegexp(DataError, "No numeric types to aggregate"):
frame.groupby('b').dates.mean()
def test_groupby_timedelta_cython_count(self):
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([2, 2], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_cython_agg_frame_columns(self):
# #2113
df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})
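# repeated calls of the same cython aggregation should keep working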
result = df.groupby(level=0, axis='columns').mean()
result = df.groupby(level=0, axis='columns').mean()
result = df.groupby(level=0, axis='columns').mean()
_ = df.groupby(level=0, axis='columns').mean()
def test_wrap_aggregated_output_multindex(self):
df = self.mframe.T
df['baz', 'two'] = 'peekaboo'
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
tm.assertIsInstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ('foo', 'one'):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
self.assertEqual(len(agged2.columns) + 1, len(df.columns))
def test_groupby_level(self):
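# grouping by level number or level name should match grouping by the
# underlying level values and preserve the level name; axis=1 on the
# transpose mirrors the axis=0 results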
frame = self.mframe
deleveled = frame.reset_index()
result0 = frame.groupby(level=0).sum()
result1 = frame.groupby(level=1).sum()
expected0 = frame.groupby(deleveled['first'].values).sum()
expected1 = frame.groupby(deleveled['second'].values).sum()
expected0 = expected0.reindex(frame.index.levels[0])
expected1 = expected1.reindex(frame.index.levels[1])
self.assertEqual(result0.index.name, 'first')
self.assertEqual(result1.index.name, 'second')
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
self.assertEqual(result0.index.name, frame.index.names[0])
self.assertEqual(result1.index.name, frame.index.names[1])
# groupby level name
result0 = frame.groupby(level='first').sum()
result1 = frame.groupby(level='second').sum()
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
# axis=1
result0 = frame.T.groupby(level=0, axis=1).sum()
result1 = frame.T.groupby(level=1, axis=1).sum()
assert_frame_equal(result0, expected0.T)
assert_frame_equal(result1, expected1.T)
# raise exception for non-MultiIndex
self.assertRaises(ValueError, self.df.groupby, level=1)
def test_groupby_level_index_names(self):
## GH4014 this used to raise ValueError since 'exp'>1 (in py2)
df = DataFrame({'exp' : ['A']*3 + ['B']*3, 'var1' : lrange(6),}).set_index('exp')
df.groupby(level='exp')
self.assertRaises(ValueError, df.groupby, level='foo')
def test_groupby_level_with_nas(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([22., 6.], index=[1, 0])
assert_series_equal(result, expected)
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([18., 6.], index=[1, 0])
assert_series_equal(result, expected)
def test_groupby_level_apply(self):
frame = self.mframe
result = frame.groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
result = frame.groupby(level=1).count()
self.assertEqual(result.index.name, 'second')
result = frame['A'].groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
def test_groupby_args(self):
# PR8618 and issue 8015
frame = self.mframe
def j():
frame.groupby()
self.assertRaisesRegexp(TypeError, "You have to supply one of 'by' and 'level'", j)
def k():
frame.groupby(by=None, level=None)
self.assertRaisesRegexp(TypeError, "You have to supply one of 'by' and 'level'", k)
def test_groupby_level_mapper(self):
frame = self.mframe
deleveled = frame.reset_index()
mapper0 = {'foo': 0, 'bar': 0,
'baz': 1, 'qux': 1}
mapper1 = {'one': 0, 'two': 0, 'three': 1}
result0 = frame.groupby(mapper0, level=0).sum()
result1 = frame.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
expected0 = frame.groupby(mapped_level0).sum()
expected1 = frame.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = 'first', 'second'
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
def test_groupby_level_0_nonmulti(self):
# #1313
a = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1,
4, 5, 2, 6], name='foo'))
result = a.groupby(level=0).sum()
self.assertEqual(result.index.name, a.index.name)
def test_level_preserve_order(self):
grouped = self.mframe.groupby(level=0)
exp_labels = np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_grouping_labels(self):
grouped = self.mframe.groupby(self.mframe.index.get_level_values(0))
exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_cython_fail_agg(self):
dr = bdate_range('1/1/2000', periods=50)
ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
assert_series_equal(summed, expected)
def test_apply_series_to_frame(self):
def f(piece):
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': np.log(piece)})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
tm.assertIsInstance(result, DataFrame)
self.assertTrue(result.index.equals(ts.index))
def test_apply_series_yield_constant(self):
result = self.df.groupby(['A', 'B'])['C'].apply(len)
self.assertEqual(result.index.names[:2], ('A', 'B'))
def test_apply_frame_to_series(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
self.assertTrue(result.index.equals(expected.index))
self.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series(self):
def trans(group):
return group.groupby('B')['C'].sum().order()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().order()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
assert_series_equal(result, exp, check_names=False)
self.assertEqual(result.name, 'C')
def test_apply_transform(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(result, expected)
def test_apply_multikey_corner(self):
grouped = self.tsframe.groupby([lambda x: x.year,
lambda x: x.month])
def f(group):
return group.sort('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
assert_frame_equal(result.ix[key], f(group))
def test_mutate_groups(self):
# GH3380
mydf = DataFrame({
'cat1' : ['a'] * 8 + ['b'] * 6,
'cat2' : ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 + ['d'] * 2 + ['e'] * 2,
'cat3' : lmap(lambda x: 'g%s' % x, lrange(1,15)),
'val' : np.random.randint(100, size=14),
})
def f_copy(x):
x = x.copy()
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
def f_no_copy(x):
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
grpby_copy = mydf.groupby('cat1').apply(f_copy)
grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy)
assert_series_equal(grpby_copy,grpby_no_copy)
def test_no_mutate_but_looks_like(self):
# GH 8467
# the first shows the mutation indicator
# second does not, but should yield the same results
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': range(9)})
result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
assert_series_equal(result1, result2)
def test_apply_chunk_view(self):
# low-level tinkering could be unsafe; make sure it is not here
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict(self):
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort('value'))
def test_groupby_series_indexed_differently(self):
s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
assert_series_equal(agged, exp)
def test_groupby_with_hier_columns(self):
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz',
'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two',
'one', 'two', 'one', 'two']]))
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'),
('B', 'cat'), ('A', 'dog')])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=columns)
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).mean()
self.assertTrue(result.index.equals(df.index))
result = df.groupby(level=0).agg(np.mean)
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0).apply(lambda x: x.mean())
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
self.assertTrue(result.columns.equals(Index(['A', 'B'])))
self.assertTrue(result.index.equals(df.index))
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df['A', 'foo'] = 'bar'
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(df.columns[:-1]))
def test_pass_args_kwargs(self):
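# positional and keyword arguments should be forwarded unchanged through
# agg/apply/transform, e.g. agg(percentile, 80, axis=0) is expected to
# match quantile(.8) below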
from numpy import percentile
def f(x, q=None, axis=0):
return percentile(x, q, axis=axis)
g = lambda x: percentile(x, 80, axis=0)
# Series
ts_grouped = self.ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(percentile, 80, axis=0)
apply_result = ts_grouped.apply(percentile, 80, axis=0)
trans_result = ts_grouped.transform(percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(.8)
trans_expected = ts_grouped.transform(g)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = self.tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, .8)
expected = df_grouped.quantile(.8)
assert_frame_equal(apply_result, expected)
assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
assert_frame_equal(agg_result, expected)
assert_frame_equal(apply_result, expected)
# def test_cython_na_bug(self):
# values = np.random.randn(10)
# shape = (5, 5)
# label_list = [np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2], dtype=np.int32),
# np.array([1, 2, 3, 4, 0, 1, 2, 3, 3, 4], dtype=np.int32)]
# lib.group_aggregate(values, label_list, shape)
def test_size(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('A')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('B')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
def test_count(self):
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'], index=[1,3])
expected.index.name='A'
assert_frame_equal(count_not_as, expected.reset_index())
assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
assert_series_equal(count_B, expected['B'])
def test_count_object(self):
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([3, 3], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([1, 3], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type(self): # GH8169
vals = np.hstack((np.random.randint(0,5,(100,2)),
np.random.randint(0,2,(100,2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df==2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_non_cython_api(self):
# GH5610
# non-cython calls should not include the grouper
df = DataFrame([[1, 2, 'foo'], [1, nan, 'bar',], [3, nan, 'baz']], columns=['A', 'B','C'])
g = df.groupby('A')
gni = df.groupby('A',as_index=False)
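# g keeps the grouper 'A' in the index; gni (as_index=False) keeps it
# as a regular column, so the expected frames below differ in shape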
# mad
expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3])
expected.index.name = 'A'
result = g.mad()
assert_frame_equal(result,expected)
expected = DataFrame([[0.,0.],[0,nan]],columns=['A','B'],index=[0,1])
result = gni.mad()
assert_frame_equal(result,expected)
# describe
expected = DataFrame(dict(B = concat([df.loc[[0,1],'B'].describe(),df.loc[[2],'B'].describe()],keys=[1,3])))
expected.index.names = ['A',None]
result = g.describe()
assert_frame_equal(result,expected)
expected = concat([df.loc[[0,1],['A','B']].describe(),df.loc[[2],['A','B']].describe()],keys=[0,1])
result = gni.describe()
assert_frame_equal(result,expected)
# any
expected = DataFrame([[True, True],[False, True]],columns=['B','C'],index=[1,3])
expected.index.name = 'A'
result = g.any()
assert_frame_equal(result,expected)
# idxmax
expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3])
expected.index.name = 'A'
result = g.idxmax()
assert_frame_equal(result,expected)
def test_cython_api2(self):
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C'])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
assert_frame_equal(result,expected)
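# with as_index=False the grouping column 'A' is retained in the cumsum
# result (and the frame is upcast to float64)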
expected = DataFrame([[1, 2, np.nan], [2, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C']).astype('float64')
result = df.groupby('A', as_index=False).cumsum()
assert_frame_equal(result,expected)
def test_grouping_ndarray(self):
grouped = self.df.groupby(self.df['A'].values)
result = grouped.sum()
expected = self.df.groupby('A').sum()
assert_frame_equal(result, expected, check_names=False) # Note: no names when grouping by value
def test_agg_consistency(self):
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except:
return np.nan
import datetime as dt
df = DataFrame({'col1':[1,2,3,4],
'col2':[10,25,26,31],
'date':[dt.date(2013,2,10),dt.date(2013,2,10),dt.date(2013,2,11),dt.date(2013,2,11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
assert_frame_equal(result, expected)
def test_apply_typecast_fail(self):
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_multiindex_fail(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_corner(self):
result = self.tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = self.tsframe * 2
assert_frame_equal(result, expected)
def test_apply_without_copy(self):
# GH 5545
# returning a non-copy in an applied function fails
data = DataFrame({'id_field' : [100, 100, 200, 300], 'category' : ['a','b','c','c'], 'value' : [1,2,3,4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
assert_frame_equal(result,expected)
def test_apply_use_categorical_name(self):
from pandas import qcut
cats = qcut(self.df.C, 4)
def get_stats(group):
return {'min': group.min(), 'max': group.max(),
'count': group.count(), 'mean': group.mean()}
result = self.df.groupby(cats).D.apply(get_stats)
self.assertEqual(result.index.names[0], 'C')
def test_apply_categorical_data(self):
# GH 10138
for ordered in [True, False]:
dense = Categorical(list('abc'), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list('aaa'), categories=['a', 'b'], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({'missing': missing,
'dense': dense,
'values': values})
grouped = df.groupby(['missing', 'dense'])
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_product([['a', 'b'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
index=idx,
columns=['values'])
assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
assert_frame_equal(grouped.mean(), expected)
assert_frame_equal(grouped.agg(np.mean), expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = Series(1, index=idx)
assert_series_equal(grouped.apply(lambda x: 1), expected)
def test_apply_corner_cases(self):
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
self.assertTrue('value3' in result)
def test_transform_mixed_type(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
self.assertEqual(result['d'].dtype, np.float64)
# this is by definition a mutating operation!
with option_context('mode.chained_assignment',None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.ix[key])
def test_groupby_wrong_multi_labels(self):
from pandas import read_csv
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_series_with_name(self):
result = self.df.groupby(self.df['A']).mean()
result2 = self.df.groupby(self.df['A'], as_index=False).mean()
self.assertEqual(result.index.name, 'A')
self.assertIn('A', result2)
result = self.df.groupby([self.df['A'], self.df['B']]).mean()
result2 = self.df.groupby([self.df['A'], self.df['B']],
as_index=False).mean()
self.assertEqual(result.index.names, ('A', 'B'))
self.assertIn('A', result2)
self.assertIn('B', result2)
def test_seriesgroupby_name_attr(self):
# GH 6265
result = self.df.groupby('A')['C']
self.assertEqual(result.count().name, 'C')
self.assertEqual(result.mean().name, 'C')
testFunc = lambda x: np.sum(x)*2
self.assertEqual(result.agg(testFunc).name, 'C')
def test_groupby_name_propagation(self):
# GH 6124
def summarize(df, name=None):
return Series({
'count': 1,
'mean': 2,
'omissions': 3,
}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({
'count': 1,
'mean': 2,
'omissions': 3,
}, name=df.iloc[0]['A'])
metrics = self.df.groupby('A').apply(summarize)
self.assertEqual(metrics.columns.name, None)
metrics = self.df.groupby('A').apply(summarize, 'metrics')
self.assertEqual(metrics.columns.name, 'metrics')
metrics = self.df.groupby('A').apply(summarize_random_name)
self.assertEqual(metrics.columns.name, None)
def test_groupby_nonstring_columns(self):
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
assert_frame_equal(result, expected)
def test_cython_grouper_series_bug_noncontig(self):
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0], index=lrange(100))
inds = np.tile(lrange(10), 10)
result = obj.groupby(inds).agg(Series.median)
self.assertTrue(result.isnull().all())
def test_series_grouper_noncontig_index(self):
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements used to cause a segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone(self):
from decimal import Decimal
s = Series(lrange(5))
labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# the chunk passed in should be a view on a larger base array here
assert(len(x.base) > 0)
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
def test_fast_apply(self):
# make sure that fast apply is correctly called
# rather than raising any kind of error
# otherwise the python path will be called
# which slows things down
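# the internal splitter is driven directly below; fast_apply should
# report mutated=False for a function that does not modify its group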
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
self.assertFalse(mutated)
def test_apply_with_mixed_dtype(self):
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1' : ['one', 'two', 'two', 'three', 'one', 'two'],
'foo2' : np.random.randn(6)})
result = df.apply(lambda x: x, axis=1)
assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1" : [1,2,6,6,8]})
df["c2"] = df.c1/2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
assert_series_equal(result1,result2)
def test_groupby_aggregation_mixed_dtype(self):
# GH 6212
expected = DataFrame({
'v1': [5,5,7,np.nan,3,3,4,1],
'v2': [55,55,77,np.nan,33,33,44,11]},
index=MultiIndex.from_tuples([(1,95),(1,99),(2,95),(2,99),('big','damp'),
('blue','dry'),('red','red'),('red','wet')],
names=['by1','by2']))
df = DataFrame({
'v1': [1,3,5,7,8,3,5,np.nan,4,5,7,9],
'v2': [11,33,55,77,88,33,55,np.nan,44,55,77,99],
'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan,
np.nan]
})
g = df.groupby(['by1','by2'])
result = g[['v1','v2']].mean()
assert_frame_equal(result,expected)
def test_groupby_dtype_inference_empty(self):
# GH 6733
df = DataFrame({'x': [], 'range': np.arange(0,dtype='int64')})
result = df.groupby('x').first()
expected = DataFrame({'range' : Series([],index=Index([],name='x'),dtype='int64') })
assert_frame_equal(result,expected,by_blocks=True)
def test_groupby_list_infer_array_like(self):
result = self.df.groupby(list(self.df['A'])).mean()
expected = self.df.groupby(self.df['A']).mean()
assert_frame_equal(result, expected, check_names=False)
self.assertRaises(Exception, self.df.groupby, list(self.df['A'][:-1]))
# pathological case of ambiguity
df = DataFrame({'foo': [0, 1], 'bar': [3, 4],
'val': np.random.randn(2)})
result = df.groupby(['foo', 'bar']).mean()
expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
def test_groupby_nat_exclude(self):
# GH 6992
df = pd.DataFrame({'values': np.random.randn(8),
'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp('2013-02-01'),
np.nan, pd.Timestamp('2013-02-01'), np.nan, pd.Timestamp('2013-01-01')],
'str': [np.nan, 'a', np.nan, 'a',
np.nan, 'a', np.nan, 'b']})
grouped = df.groupby('dt')
expected = [[1, 7], [3, 5]]
keys = sorted(grouped.groups.keys())
self.assertEqual(len(keys), 2)
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# compare only the values so the check is not affected by tz
self.assertEqual(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
self.assertEqual(grouped.ngroups, 2)
expected = {Timestamp('2013-01-01 00:00:00'): np.array([1, 7]),
Timestamp('2013-02-01 00:00:00'): np.array([3, 5])}
for k in grouped.indices:
self.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
'nat': [pd.NaT, pd.NaT, pd.NaT]})
self.assertEqual(nan_df['nan'].dtype, 'float64')
self.assertEqual(nan_df['nat'].dtype, 'datetime64[ns]')
for key in ['nan', 'nat']:
grouped = nan_df.groupby(key)
self.assertEqual(grouped.groups, {})
self.assertEqual(grouped.ngroups, 0)
self.assertEqual(grouped.indices, {})
self.assertRaises(KeyError, grouped.get_group, np.nan)
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
def test_dictify(self):
dict(iter(self.df.groupby('A')))
dict(iter(self.df.groupby(['A', 'B'])))
dict(iter(self.df['C'].groupby(self.df['A'])))
dict(iter(self.df['C'].groupby([self.df['A'], self.df['B']])))
dict(iter(self.df.groupby('A')['C']))
dict(iter(self.df.groupby(['A', 'B'])['C']))
def test_sparse_friendly(self):
sdf = self.df[['C', 'D']].to_sparse()
panel = tm.makePanel()
tm.add_nans(panel)
def _check_work(gp):
gp.mean()
gp.agg(np.mean)
dict(iter(gp))
# it works!
_check_work(sdf.groupby(lambda x: x // 2))
_check_work(sdf['C'].groupby(lambda x: x // 2))
_check_work(sdf.groupby(self.df['A']))
# do this someday
# _check_work(panel.groupby(lambda x: x.month, axis=1))
def test_panel_groupby(self):
self.panel = tm.makePanel()
tm.add_nans(self.panel)
grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
axis='items')
agged = grouped.mean()
agged2 = grouped.agg(lambda x: x.mean('items'))
tm.assert_panel_equal(agged, agged2)
self.assert_numpy_array_equal(agged.items, [0, 1])
grouped = self.panel.groupby(lambda x: x.month, axis='major')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.major_axis, sorted(list(set(self.panel.major_axis.month))))
grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis='minor')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.minor_axis, [0, 1])
def test_numpy_groupby(self):
from pandas.core.groupby import numpy_groupby
data = np.random.randn(100, 100)
labels = np.random.randint(0, 10, size=100)
df = DataFrame(data)
result = df.groupby(labels).sum().values
expected = numpy_groupby(data, labels)
assert_almost_equal(result, expected)
result = df.groupby(labels, axis=1).sum().values
expected = numpy_groupby(data, labels, axis=1)
assert_almost_equal(result, expected)
def test_groupby_2d_malformed(self):
d = DataFrame(index=lrange(2))
d['group'] = ['g1', 'g2']
d['zeros'] = [0, 0]
d['ones'] = [1, 1]
d['label'] = ['l1', 'l2']
tmp = d.groupby(['group']).mean()
res_values = np.array([[0., 1.], [0., 1.]])
self.assert_numpy_array_equal(tmp.columns, ['zeros', 'ones'])
self.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow(self):
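# 25000 distinct 4-part keys; both key orderings must produce the same
# number of groups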
B = np.concatenate((np.arange(10000), np.arange(10000),
np.arange(5000)))
A = np.arange(25000)
df = DataFrame({'A': A, 'B': B,
'C': A, 'D': B,
'E': np.random.randn(25000)})
left = df.groupby(['A', 'B', 'C', 'D']).sum()
right = df.groupby(['D', 'C', 'B', 'A']).sum()
self.assertEqual(len(left), len(right))
def test_int64_overflow(self):
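# group on eight integer columns so that the combined key space can
# overflow int64; results must match both a tuple-key groupby and a
# manual python computation of the groups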
from pandas.core.groupby import _int64_overflow_possible
B = np.concatenate((np.arange(1000), np.arange(1000),
np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A, 'B': B,
'C': A, 'D': B,
'E': A, 'F': B,
'G': A, 'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel(0)
self.assertTrue(left.index.equals(exp_index))
exp_index, _ = right.index.sortlevel(0)
self.assertTrue(right.index.equals(exp_index))
tups = list(map(tuple, df[['A', 'B', 'C', 'D',
'E', 'F', 'G', 'H']].values))
tups = com._asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
self.assertEqual(left[k], right[k[::-1]])
self.assertEqual(left[k], v)
self.assertEqual(len(left), len(right))
# GH9096
values = range(55109)
data = pd.DataFrame.from_dict({'a': values, 'b': values,
'c': values, 'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
self.assertEqual(len(grouped), len(values))
arr = np.random.randint(- 1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
self.assertTrue(_int64_overflow_possible(gr.grouper.shape))
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
self.assertEqual(len(gr), len(jim))
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_groupby_sort_multi(self):
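# with sort=True the result index should come out in sorted key order;
# the expected orderings are expressed by taking from the tuple array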
df = DataFrame({'a': ['foo', 'bar', 'baz'],
'b': [3, 2, 1],
'c': [0, 1, 2],
'd': np.random.randn(3)})
tups = lmap(tuple, df[['a', 'b', 'c']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['a', 'b', 'c'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values,
tups[[1, 2, 0]])
tups = lmap(tuple, df[['c', 'a', 'b']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['c', 'a', 'b'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups)
tups = lmap(tuple, df[['b', 'c', 'a']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['b', 'c', 'a'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values,
tups[[2, 1, 0]])
df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
'b': [0, 0, 0, 1, 1, 1],
'd': np.random.randn(6)})
grouped = df.groupby(['a', 'b'])['d']
result = grouped.sum()
_check_groupby(df, result, ['a', 'b'], 'd')
def test_intercept_builtin_sum(self):
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_column_select_via_attr(self):
result = self.df.groupby('A').C.sum()
expected = self.df.groupby('A')['C'].sum()
assert_series_equal(result, expected)
self.df['mean'] = 1.5
result = self.df.groupby('A').mean()
expected = self.df.groupby('A').agg(np.mean)
assert_frame_equal(result, expected)
def test_rank_apply(self):
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame({'value': np.random.randn(500),
'key1': lev1.take(lab1),
'key2': lev2.take(lab2)})
result = df.groupby(['key1', 'key2']).value.rank()
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank())
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected, check_names=False)
self.assertTrue(result.name is None)
result = df.groupby(['key1', 'key2']).value.rank(pct=True)
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank(pct=True))
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected, check_names=False)
self.assertTrue(result.name is None)
def test_dont_clobber_name_column(self):
df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
'name': ['foo', 'bar', 'baz'] * 2})
result = df.groupby('key').apply(lambda x: x)
assert_frame_equal(result, df)
def test_skip_group_keys(self):
from pandas import concat
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_index(by='A')[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_index(by='A')[:3])
expected = concat(pieces)
assert_frame_equal(result, expected)
grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.order()[:3])
pieces = []
for key, group in grouped:
pieces.append(group.order()[:3])
expected = concat(pieces)
assert_series_equal(result, expected, check_names=False)
self.assertTrue(result.name is None)
def test_no_nonsense_name(self):
# GH #995
s = self.frame['C'].copy()
s.name = None
result = s.groupby(self.frame['A']).agg(np.sum)
self.assertIsNone(result.name)
def test_wrap_agg_out(self):
grouped = self.three_group.groupby(['A', 'B'])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = self.three_group.ix[:, self.three_group.columns != 'C']
expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
assert_frame_equal(result, expected)
def test_multifunc_sum_bug(self):
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x['test'] = 0
x['fl'] = [1.3, 1.5, 1.6]
grouped = x.groupby('test')
result = grouped.agg({'fl': 'sum', 2: 'size'})
self.assertEqual(result['fl'].dtype, np.float64)
def test_handle_dict_return_value(self):
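        # a dict returned from the applied function should be handled like an
        # equivalent Series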
def f(group):
return {'min': group.min(), 'max': group.max()}
def g(group):
return Series({'min': group.min(), 'max': group.max()})
result = self.df.groupby('A')['C'].apply(f)
expected = self.df.groupby('A')['C'].apply(g)
tm.assertIsInstance(result, Series)
assert_series_equal(result, expected)
def test_getitem_list_of_columns(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': np.random.randn(8)})
result = df.groupby('A')[['C', 'D']].mean()
result2 = df.groupby('A')['C', 'D'].mean()
result3 = df.groupby('A')[df.columns[2:4]].mean()
expected = df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
def test_agg_multiple_functions_maintain_order(self):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = self.df.groupby('A')['C'].agg(funcs)
exp_cols = ['mean', 'max', 'min']
self.assert_numpy_array_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(self):
# #1359
funcs = [('foo', 'mean'), 'std']
ex_funcs = [('foo', 'mean'), ('std', 'std')]
result = self.df.groupby('A')['C'].agg(funcs)
expected = self.df.groupby('A')['C'].agg(ex_funcs)
assert_frame_equal(result, expected)
result = self.df.groupby('A').agg(funcs)
expected = self.df.groupby('A').agg(ex_funcs)
assert_frame_equal(result, expected)
def test_agg_multiple_functions_too_many_lambdas(self):
grouped = self.df.groupby('A')
funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]
self.assertRaises(SpecificationError, grouped.agg, funcs)
def test_more_flexible_frame_multi_function(self):
from pandas import concat
grouped = self.df.groupby('A')
exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))
exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))
expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sortlevel(0, axis=1)
d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])
result = grouped.aggregate(d)
assert_frame_equal(result, expected)
        # be careful: a dict mixing a single function and a list of functions
        # should still aggregate correctly
result = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
expected = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
d = OrderedDict([['C', np.mean],
['D', OrderedDict([['foo', np.mean],
['bar', np.std]])]])
result = grouped.aggregate(d)
d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
expected = grouped.aggregate(d)
assert_frame_equal(result, expected)
def test_multi_function_flexible_mix(self):
# GH #1268
grouped = self.df.groupby('A')
d = OrderedDict([['C', OrderedDict([['foo', 'mean'],
[
'bar', 'std']])],
['D', 'sum']])
result = grouped.aggregate(d)
d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'],
[
'bar', 'std']])],
['D', ['sum']]])
result2 = grouped.aggregate(d2)
d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'],
[
'bar', 'std']])],
['D', {'sum': 'sum'}]])
expected = grouped.aggregate(d3)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_agg_callables(self):
# GH 7929
df = DataFrame({'foo' : [1,2], 'bar' :[3,4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum, np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum), fn_class()]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
assert_frame_equal(result, expected)
def test_set_group_name(self):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
def _check_all(grouped):
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({'C': freduce, 'D': freduce})
grouped.transform(f)
grouped['C'].apply(f)
grouped['C'].aggregate(freduce)
grouped['C'].aggregate([freduce, foo])
grouped['C'].transform(f)
_check_all(self.df.groupby('A'))
_check_all(self.df.groupby(['A', 'B']))
def test_no_dummy_key_names(self):
# GH #1291
result = self.df.groupby(self.df['A'].values).sum()
self.assertIsNone(result.index.name)
result = self.df.groupby([self.df['A'].values,
self.df['B'].values]).sum()
self.assertEqual(result.index.names, (None, None))
def test_groupby_sort_categorical(self):
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'],ordered=True)
index = Index(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], dtype='object')
index.name = 'range'
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = index
index = Index(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'], dtype='object')
index.name = 'range'
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=['foo', 'bar'])
result_nosort.index = index
col = 'range'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
df['range'] = Categorical(df['range'],ordered=False)
index = Index(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], dtype='object')
index.name = 'range'
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = index
index = Index(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'], dtype='object')
index.name = 'range'
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=['foo', 'bar'])
result_nosort.index = index
col = 'range'
#### this is an unordered categorical, but we allow this ####
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_categorical_datetimelike(self):
# GH10505
        # use the same data as test_groupby_sort_categorical, whose categories
        # correspond to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index, name='dt')
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_multiindex_series(self):
# series multiindex groupby sort argument was not being passed through _compress_group_index
# GH 9444
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=['a', 'b'])
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 1], [1, 0, 0]],
names=['a', 'b'])
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=['a', 'b'], sort=False).first()
assert_series_equal(result, mseries_result)
result = mseries.groupby(level=['a', 'b'], sort=True).first()
assert_series_equal(result, mseries_result.sort_index())
def test_groupby_categorical(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, sort=False).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index, categories=expected.index,
ordered=True)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(desc_result.index.get_level_values(0), expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_categorical_index(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, name='myfactor', ordered=True)
df = DataFrame(np.repeat(np.arange(20),4).reshape(-1,4), columns=list('abcd'))
df['cats'] = cats
# with a cat index
result = df.set_index('cats').groupby(level=0).sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(Categorical.from_codes([0,1,2,3], levels, ordered=True),name='cats')
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby('cats').sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(Categorical.from_codes([0,1,2,3], levels, ordered=True),name='cats')
assert_frame_equal(result, expected)
def test_groupby_groups_datetimeindex(self):
# #1430
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(periods),
'low': np.arange(periods)}, index=ind)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
# it works!
groups = grouped.groups
tm.assertIsInstance(list(groups.keys())[0], datetime)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': dates,
'value1': np.arange(6,dtype='int64'),
'value2': [1, 2] * 3})
df['datetime'] = df['datetime'].apply(lambda d: Timestamp(d, tz='US/Pacific'))
exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 09:00:00'],
tz='US/Pacific', name='datetime')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5], 'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['datetime', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')
df = DataFrame({'value1': np.arange(6,dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='Asia/Tokyo')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_reindex_inside_function(self):
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(
periods), 'low': np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.select(lambda x: x.hour < 11).dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({'high': agg_before(11, np.max)})
closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
assert_frame_equal(closure_bad, closure_good)
def test_multiindex_columns_empty_level(self):
l = [['count', 'values'], ['to filter', '']]
midx = MultiIndex.from_tuples(l)
df = DataFrame([[long(1), 'A']], columns=midx)
grouped = df.groupby('to filter').groups
self.assert_numpy_array_equal(grouped['A'], [0])
grouped = df.groupby([('to filter', '')]).groups
self.assert_numpy_array_equal(grouped['A'], [0])
df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
def test_cython_median(self):
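        # the cython median should agree with a nanmedian aggregation,
        # including NaN values and NaN group labels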
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
assert_frame_equal(rs, xp)
def test_groupby_categorical_no_compress(self):
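        # grouping by a Categorical should keep every category in the result
        # index, including categories that never appear in the data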
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean()
exp.index = CategoricalIndex(exp.index,categories=cats.categories,ordered=cats.ordered)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean().reindex(cats.categories)
exp.index = CategoricalIndex(exp.index,categories=cats.categories,ordered=cats.ordered)
assert_series_equal(result, exp)
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a","b","c","d"], ordered=True)
data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats})
result = data.groupby("b").mean()
result = result["a"].values
exp = np.array([1,2,4,np.nan])
self.assert_numpy_array_equal(result, exp)
def test_groupby_non_arithmetic_agg_types(self):
# GH9311, GH6620
df = pd.DataFrame([{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
dtypes = ['int8', 'int16', 'int32', 'int64',
'float32', 'float64']
grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
'last': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]},
'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]},
'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]},
'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'}}
for dtype in dtypes:
df_in = df.copy()
df_in['b'] = df_in.b.astype(dtype)
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df_in.groupby('a')
t = getattr(grpd, method)(*data['args'])
assert_frame_equal(t, df_out)
def test_groupby_non_arithmetic_agg_intlike_precision(self):
# GH9311, GH6620
c = 24650000000000000
inputs = ((Timestamp('2011-01-15 12:50:28.502376'),
Timestamp('2011-01-20 12:50:28.593448')),
(1 + c, 2 + c))
for i in inputs:
df = pd.DataFrame([{'a': 1,
'b': i[0]},
{'a': 1,
'b': i[1]}])
grp_exp = {'first': {'expected': i[0]},
'last': {'expected': i[1]},
'min': {'expected': i[0]},
'max': {'expected': i[1]},
'nth': {'expected': i[1], 'args': [1]},
'count': {'expected': 2}}
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
grpd = df.groupby('a')
res = getattr(grpd, method)(*data['args'])
self.assertEqual(res.iloc[0].b, data['expected'])
def test_groupby_first_datetime64(self):
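        # first() should preserve the datetime64 dtype of the grouped column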
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
self.assertTrue(issubclass(df[1].dtype.type, np.datetime64))
result = df.groupby(level=0).first()
got_dt = result[1].dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
result = df[1].groupby(level=0).first()
got_dt = result.dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
df = DataFrame(dict(A = Timestamp('20130101'), B = np.arange(5)))
expected = df.groupby('A')['A'].apply(lambda x: x.max())
result = df.groupby('A')['A'].max()
assert_series_equal(result,expected)
def test_groupby_datetime64_32_bit(self):
# GH 6410 / numpy 4328
# 32-bit under 1.9-dev indexing issue
df = DataFrame({"A": range(2), "B": [pd.Timestamp('2000-01-1')]*2})
result = df.groupby("A")["B"].transform(min)
expected = Series([pd.Timestamp('2000-01-1')]*2)
assert_series_equal(result,expected)
def test_groupby_categorical_unequal_len(self):
import pandas as pd
#GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
        # the error is only raised with a Categorical grouper, not with a
        # Series of dtype 'category'
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
self.assertRaises(ValueError,lambda : series.groupby(bins).mean())
def test_groupby_multiindex_missing_pair(self):
# GH9049
df = DataFrame({'group1': ['a','a','a','b'],
'group2': ['c','c','d','c'],
'value': [1,1,1,5]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1','group2'], sort=True)
res = df_grouped.agg('sum')
idx = MultiIndex.from_tuples([('a','c'), ('a','d'), ('b','c')], names=['group1', 'group2'])
exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
tm.assert_frame_equal(res, exp)
def test_groupby_levels_and_columns(self):
# GH9344, GH9049
idx_names = ['x', 'y']
idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx)
by_levels = df.groupby(level=idx_names).mean()
by_columns = df.reset_index().groupby(idx_names).mean()
tm.assert_frame_equal(by_levels, by_columns)
def test_gb_apply_list_of_unequal_len_arrays(self):
# GH1738
df = DataFrame({'group1': ['a','a','a','b','b','b','a','a','a','b','b','b'],
'group2': ['c','c','d','d','d','e','c','c','d','d','d','e'],
'weight': [1.1,2,3,4,5,6,2,4,6,8,1,2],
'value': [7.1,8,9,10,11,12,8,7,6,5,4,3]
})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1','group2'], sort=True)
def noddy(value, weight):
out = np.array( value * weight ).repeat(3)
return out
# the kernel function returns arrays of unequal length
        # pandas sniffs the first one, sees it's an array and not a list,
        # assumes the rest are of equal length and so tries a vstack;
        # the apply should not die on this input
no_toes = df_grouped.apply(lambda x: noddy(x.value, x.weight ))
def test_groupby_with_empty(self):
import pandas as pd
index = pd.DatetimeIndex(())
data = ()
series = pd.Series(data, index)
grouper = pd.tseries.resample.TimeGrouper('D')
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
def test_groupby_with_timegrouper(self):
# GH 4161
# TimeGrouper requires a sorted index
# also verifies that the resultant index has the correct name
import datetime as DT
df_original = DataFrame({
'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),
'Quantity': [18,3,5,1,9,3],
'Date' : [
DT.datetime(2013,9,1,13,0),
DT.datetime(2013,9,1,13,5),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,3,10,0),
DT.datetime(2013,12,2,12,0),
DT.datetime(2013,9,2,14,0),
]})
# GH 6908 change target column's order
df_reordered = df_original.sort(columns='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
expected = DataFrame({ 'Quantity' : np.nan },
index=date_range('20130901 13:00:00','20131205 13:00:00',
freq='5D',name='Date',closed='left'))
expected.iloc[[0,6,18],0] = np.array([24.,6.,9.],dtype='float64')
result1 = df.resample('5D',how=sum)
assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result2, expected)
result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result3, expected)
def test_groupby_with_timegrouper_methods(self):
# GH 3881
# make sure API of timegrouper conforms
import datetime as DT
df_original = pd.DataFrame({
'Branch' : 'A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),
'Quantity': [1,3,5,8,9,3],
'Date' : [
DT.datetime(2013,1,1,13,0),
DT.datetime(2013,1,1,13,5),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,2,10,0),
DT.datetime(2013,12,2,12,0),
DT.datetime(2013,12,2,14,0),
]})
df_sorted = df_original.sort(columns='Quantity', ascending=False)
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
g = df.groupby(pd.TimeGrouper('6M'))
self.assertTrue(g.group_keys)
self.assertTrue(isinstance(g.grouper,pd.core.groupby.BinGrouper))
groups = g.groups
self.assertTrue(isinstance(groups,dict))
self.assertTrue(len(groups) == 3)
def test_timegrouper_with_reg_groups(self):
# GH 3794
        # allow a combination of a TimeGrouper and regular groupers
import datetime as DT
df_original = DataFrame({
'Branch' : 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1,3,5,1,8,1,9,3],
'Date' : [
DT.datetime(2013,1,1,13,0),
DT.datetime(2013,1,1,13,5),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,2,10,0),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,2,10,0),
DT.datetime(2013,12,2,12,0),
DT.datetime(2013,12,2,14,0),
]}).set_index('Date')
df_sorted = df_original.sort(columns='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10,18,3],
'Date' : [
DT.datetime(2013,12,31,0,0),
DT.datetime(2013,12,31,0,0),
DT.datetime(2013,12,31,0,0),
]}).set_index(['Date','Buyer'])
result = df.groupby([pd.Grouper(freq='A'),'Buyer']).sum()
assert_frame_equal(result,expected)
expected = DataFrame({
'Buyer': 'Carl Mark Carl Joe'.split(),
'Quantity': [1,3,9,18],
'Date' : [
DT.datetime(2013,1,1,0,0),
DT.datetime(2013,1,1,0,0),
DT.datetime(2013,7,1,0,0),
DT.datetime(2013,7,1,0,0),
]}).set_index(['Date','Buyer'])
result = df.groupby([pd.Grouper(freq='6MS'),'Buyer']).sum()
assert_frame_equal(result,expected)
df_original = DataFrame({
'Branch' : 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1,3,5,1,8,1,9,3],
'Date' : [
DT.datetime(2013,10,1,13,0),
DT.datetime(2013,10,1,13,5),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,2,10,0),
DT.datetime(2013,10,1,20,0),
DT.datetime(2013,10,2,10,0),
DT.datetime(2013,10,2,12,0),
DT.datetime(2013,10,2,14,0),
]}).set_index('Date')
df_sorted = df_original.sort(columns='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark Carl Joe'.split(),
'Quantity': [6,8,3,4,10],
'Date' : [
DT.datetime(2013,10,1,0,0),
DT.datetime(2013,10,1,0,0),
DT.datetime(2013,10,1,0,0),
DT.datetime(2013,10,2,0,0),
DT.datetime(2013,10,2,0,0),
]}).set_index(['Date','Buyer'])
result = df.groupby([pd.Grouper(freq='1D'),'Buyer']).sum()
assert_frame_equal(result,expected)
result = df.groupby([pd.Grouper(freq='1M'),'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10,18,3],
'Date' : [
DT.datetime(2013,10,31,0,0),
DT.datetime(2013,10,31,0,0),
DT.datetime(2013,10,31,0,0),
]}).set_index(['Date','Buyer'])
assert_frame_equal(result,expected)
# passing the name
df = df.reset_index()
result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum()
assert_frame_equal(result,expected)
self.assertRaises(KeyError, lambda : df.groupby([pd.Grouper(freq='1M',key='foo'),'Buyer']).sum())
# passing the level
df = df.set_index('Date')
result = df.groupby([pd.Grouper(freq='1M',level='Date'),'Buyer']).sum()
assert_frame_equal(result,expected)
result = df.groupby([pd.Grouper(freq='1M',level=0),'Buyer']).sum()
assert_frame_equal(result,expected)
self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',level='foo'),'Buyer']).sum())
# multi names
df = df.copy()
df['Date'] = df.index + pd.offsets.MonthEnd(2)
result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10,18,3],
'Date' : [
DT.datetime(2013,11,30,0,0),
DT.datetime(2013,11,30,0,0),
DT.datetime(2013,11,30,0,0),
]}).set_index(['Date','Buyer'])
assert_frame_equal(result,expected)
# error as we have both a level and a name!
self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',key='Date',level='Date'),'Buyer']).sum())
# single groupers
expected = DataFrame({ 'Quantity' : [31],
'Date' : [DT.datetime(2013,10,31,0,0)] }).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M')]).sum()
assert_frame_equal(result, expected)
expected = DataFrame({ 'Quantity' : [31],
'Date' : [DT.datetime(2013,11,30,0,0)] }).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M',key='Date')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M',key='Date')]).sum()
assert_frame_equal(result, expected)
# GH 6764 multiple grouping with/without sort
df = DataFrame({
'date' : pd.to_datetime([
'20121002','20121007','20130130','20130202','20130305','20121002',
'20121207','20130130','20130202','20130305','20130202','20130305']),
'user_id' : [1,1,1,1,1,3,3,3,5,5,5,5],
'whole_cost' : [1790,364,280,259,201,623,90,312,359,301,359,801],
'cost1' : [12,15,10,24,39,1,0,90,45,34,1,12] }).set_index('date')
for freq in ['D', 'M', 'A', 'Q-APR']:
expected = df.groupby('user_id')['whole_cost'].resample(
freq, how='sum').dropna().reorder_levels(
['date','user_id']).sortlevel().astype('int64')
expected.name = 'whole_cost'
result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum()
assert_series_equal(result1, expected)
result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum()
assert_series_equal(result2, expected)
def test_timegrouper_get_group(self):
# GH 6914
df_original = DataFrame({
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),
'Quantity': [18,3,5,1,9,3],
'Date' : [datetime(2013,9,1,13,0), datetime(2013,9,1,13,5),
datetime(2013,10,1,20,0), datetime(2013,10,3,10,0),
datetime(2013,12,2,12,0), datetime(2013,9,2,14,0),]})
df_reordered = df_original.sort(columns='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M', key='Date'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
# multiple grouping
expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],
df_original.iloc[[4]]]
g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'), ('Joe', '2013-12-31')]
for df in [df_original, df_reordered]:
grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])
for (b, t), expected in zip(g_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group((b, dt))
assert_frame_equal(result, expected)
# with index
df_original = df_original.set_index('Date')
df_reordered = df_original.sort(columns='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
def test_cumcount(self):
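        # cumcount numbers the rows within each group in order of appearance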
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3])
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series().groupby(level=0)
e = Series(dtype='int64') # edge case, as this is usually considered float
assert_series_equal(e, ge.cumcount())
assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=[0] * 5)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=mi)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'], index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_filter_series(self):
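        # filter keeps whole groups for which the predicate is True; with
        # dropna=False the dropped rows become NaN at their original positions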
import pandas as pd
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
import pandas as pd
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
import pandas as pd
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10), expected)
def test_filter_mixed_df(self):
import pandas as pd
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']},
index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
import pandas as pd
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])
def test_filter_out_no_groups(self):
import pandas as pd
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_condition_raises(self):
import pandas as pd
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1,0,1,2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
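        # filter should select the same rows as the older workaround of
        # boolean-masking with a transform result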
# Series of ints
s = Series(np.random.randint(0,100,1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.order(), old_way.order())
# Series of floats
s = 100*Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.order(), old_way.order())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N/10*Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.\
transform(lambda x: x.mean() > N/20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N/20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.\
transform(lambda x: len(x) < N/10).astype('bool')]
new_way = grouped.filter(
lambda x: len(x.letters) < N/10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.\
transform(lambda x: x.mean() > N/20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N/20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8), 'B': list('aabbbbcc'), 'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame({'A': np.arange(2, 6), 'B': list('bbbb'), 'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.ix[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4*['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A' : np.arange(5,dtype='int64'),
'B' : ['foo','bar','foo','bar','bar'],
'C' : Timestamp('20130101') })
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A' : [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A' : [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
        expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
    def test_filter_and_transform_with_multiple_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 0, 0, 0, 1], dtype=float)
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid' : [1,1,1,2,2,3,3,3],
'tag' : [23,45,62,24,45,34,25,62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
        # previously the filter function didn't have access to the grouped column 'A'
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best','d', 'y'],
['worst','d', 'y'],
['worst','d', 'y'],
['best','d', 'z'],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best','d', 1],
['worst','d', 1],
['worst','d', 1],
['best','d', 1],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
    def test_fill_consistency(self):
# GH9221
        # pass-thru keyword arguments to the generated wrapper
        # are only set if the passed kw is None
df = DataFrame(index=pd.MultiIndex.from_product([['value1','value2'],
date_range('2014-01-01','2014-01-06')]),
columns=Index(['1','2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan, np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan, np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
assert_frame_equal(result, expected)
def test_index_label_overlaps_location(self):
        # check that we don't have any label/location confusion in the
        # wake of GH5375
df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
def test_groupby_selection_with_methods(self):
# some methods which require DatetimeIndex
rng = pd.date_range('2014', periods=len(self.df))
self.df.index = rng
g = self.df.groupby(['A'])[['C']]
g_exp = self.df[['C']].groupby(self.df['A'])
# TODO check groupby with > 1 col ?
# methods which are called as .foo()
methods = ['count',
'corr',
'cummax', 'cummin', 'cumprod',
'describe', 'rank',
'quantile',
'diff', 'shift',
'all', 'any',
'idxmin', 'idxmax',
'ffill', 'bfill',
'pct_change',
'tshift',
#'ohlc'
]
for m in methods:
res = getattr(g, m)()
exp = getattr(g_exp, m)()
assert_frame_equal(res, exp) # should always be frames!
# methods which aren't just .foo()
assert_frame_equal(g.fillna(0), g_exp.fillna(0))
assert_frame_equal(g.dtypes, g_exp.dtypes)
assert_frame_equal(g.apply(lambda x: x.sum()),
g_exp.apply(lambda x: x.sum()))
assert_frame_equal(g.resample('D'), g_exp.resample('D'))
assert_frame_equal(g.resample('D', how='ohlc'),
g_exp.resample('D', how='ohlc'))
assert_frame_equal(g.filter(lambda x: len(x) == 3),
g_exp.filter(lambda x: len(x) == 3))
def test_groupby_whitelist(self):
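        # the methods exposed on the frame and series groupby objects via
        # attribute access should match the whitelists exactly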
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
df_whitelist = frozenset([
'last', 'first',
'mean', 'sum', 'min', 'max',
'head', 'tail',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'plot', 'boxplot', 'hist',
'median', 'dtypes',
'corrwith', 'corr', 'cov',
'diff',
])
s_whitelist = frozenset([
'last', 'first',
'mean', 'sum', 'min', 'max',
'head', 'tail',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'plot', 'hist',
'median', 'dtype',
'corr', 'cov',
'value_counts',
'diff',
'unique', 'nunique',
'nlargest', 'nsmallest',
])
for obj, whitelist in zip((df, s),
(df_whitelist, s_whitelist)):
gb = obj.groupby(df.letters)
self.assertEqual(whitelist, gb._apply_whitelist)
for m in whitelist:
getattr(type(gb), m)
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']
def test_groupby_whitelist_deprecations(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# 10711 deprecated
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').irow(0)
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').floats.irow(0)
    def test_regression_whitelist_methods(self):
# GH6944
        # explicitly test the whitelisted methods
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
raw_frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
raw_frame.ix[1, [1, 2]] = np.nan
raw_frame.ix[7, [0, 1]] = np.nan
        for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
                                                    lrange(2), lrange(2),
                                                    [True, False]):
            if axis == 0:
                frame = raw_frame
            else:
                frame = raw_frame.T
            if op in self.AGG_FUNCTIONS_WITH_SKIPNA:
                grouped = frame.groupby(level=level, axis=axis)
                result = getattr(grouped, op)(skipna=skipna)
                expected = getattr(frame, op)(level=level, axis=axis, skipna=skipna)
                assert_frame_equal(result, expected)
            else:
                grouped = frame.groupby(level=level, axis=axis)
                result = getattr(grouped, op)()
                expected = getattr(frame, op)(level=level, axis=axis)
                assert_frame_equal(result, expected)
def test_groupby_blacklist(self):
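        # blacklisted frame/series methods should raise AttributeError when
        # accessed on a GroupBy object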
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
blacklist = [
'eval', 'query', 'abs', 'where',
'mask', 'align', 'groupby', 'clip', 'astype',
'at', 'combine', 'consolidate', 'convert_objects',
]
to_methods = [method for method in dir(df) if method.startswith('to_')]
blacklist.extend(to_methods)
# e.g., to_csv
defined_but_not_allowed = ("(?:^Cannot.+{0!r}.+{1!r}.+try using the "
"'apply' method$)")
# e.g., query, eval
not_defined = "(?:^{1!r} object has no attribute {0!r}$)"
fmt = defined_but_not_allowed + '|' + not_defined
for bl in blacklist:
for obj in (df, s):
gb = obj.groupby(df.letters)
msg = fmt.format(bl, type(gb).__name__)
with tm.assertRaisesRegexp(AttributeError, msg):
getattr(gb, bl)
def test_tab_completion(self):
grp = self.mframe.groupby(level='second')
results = set([v for v in dir(grp) if not v.startswith('_')])
expected = set(['A','B','C',
'agg','aggregate','apply','boxplot','filter','first','get_group',
'groups','hist','indices','last','max','mean','median',
'min','name','ngroups','nth','ohlc','plot', 'prod',
'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head',
'irow',
'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail',
'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
'all', 'shift', 'skew', 'bfill', 'ffill',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
])
self.assertEqual(results, expected)
def test_lexsort_indexer(self):
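        # _lexsort_indexer should respect the requested order and place NaNs
        # according to na_position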
keys = [[nan]*5 + list(range(100)) + [nan]*5]
# orders=True, na_position='last'
result = _lexsort_indexer(keys, orders=True, na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# orders=True, na_position='first'
result = _lexsort_indexer(keys, orders=True, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# orders=False, na_position='last'
result = _lexsort_indexer(keys, orders=False, na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# orders=False, na_position='first'
result = _lexsort_indexer(keys, orders=False, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
assert_equal(result, expected)
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan]*5 + list(range(100)) + [nan]*5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
# GH 2785; due to a regression in NumPy 1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError as err:
raise nose.SkipTest('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be stable.
# According to numpy/core/tests/test_multiarray, """The number
# of sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = _nargsort(
items, kind='mergesort', ascending=True, na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(
items, kind='mergesort', ascending=True, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(
items, kind='mergesort', ascending=False, na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(
items, kind='mergesort', ascending=False, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='last'
result = _nargsort(
items2, kind='mergesort', ascending=True, na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(
items2, kind='mergesort', ascending=True, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(
items2, kind='mergesort', ascending=False, na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(
items2, kind='mergesort', ascending=False, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
assert_equal(result, expected)
def test_datetime_count(self):
df = DataFrame({'a': [1,2,3] * 2,
'dates': pd.date_range('now', periods=6, freq='T')})
result = df.groupby('a').dates.count()
expected = Series([2, 2, 2], index=Index([1, 2, 3], name='a'),
name='dates')
tm.assert_series_equal(result, expected)
def test_lower_int_prec_count(self):
df = DataFrame({'a': np.array([0, 1, 2, 100], np.int8),
'b': np.array([1, 2, 3, 6], np.uint32),
'c': np.array([4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception(self):
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test__cython_agg_general(self):
ops = [('mean', np.mean),
('median', np.median),
('var', np.var),
('add', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size),
]
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = df.groupby(labels)._cython_agg_general(op)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op,)
raise
def test_ops_general(self):
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size),
]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op,)
raise
def test_max_nan_bug(self):
raw = """,Date,app,File
2013-04-23,2013-04-23 00:00:00,,log080001.log
2013-05-06,2013-05-06 00:00:00,,log.log
2013-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
self.assertFalse(r['File'].isnull().any())
def test_nlargest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list('aaabbb'),
[3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
def test_nsmallest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list('aaabbb'),
[0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
def test_transform_doesnt_clobber_ints(self):
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
gb = df.groupby('a')
result = gb.transform('mean')
gb2 = df2.groupby('a')
expected = gb2.transform('mean')
tm.assert_frame_equal(result, expected)
def test_groupby_categorical_two_columns(self):
# https://github.com/pydata/pandas/issues/8138
d = {'cat': pd.Categorical(["a","b","a","b"], categories=["a", "b", "c"], ordered=True),
'ints': [1, 1, 2, 2],'val': [10, 20, 30, 40]}
test = pd.DataFrame(d)
# Grouping on a single column
groups_single_key = test.groupby("cat")
res = groups_single_key.agg('mean')
exp = DataFrame({"ints":[1.5,1.5,np.nan], "val":[20,30,np.nan]},
index=pd.Index(["a", "b", "c"], name="cat"))
tm.assert_frame_equal(res, exp)
# Grouping on two columns
groups_double_key = test.groupby(["cat","ints"])
res = groups_double_key.agg('mean')
exp = DataFrame({"val":[10,30,20,40,np.nan,np.nan],
"cat": ["a","a","b","b","c","c"],
"ints": [1,2,1,2,1,2]}).set_index(["cat","ints"])
tm.assert_frame_equal(res, exp)
# GH 10132
for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = test[(test.cat == c) & (test.ints == i)]
assert_frame_equal(result, expected)
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
test = pd.DataFrame(d)
values = pd.cut(test['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = test.groupby([values,'C2'])
res = groups_double_key.agg('mean')
nan = np.nan
idx = MultiIndex.from_product([["(1, 2]", "(2, 3]", "(3, 6]"],[1,2,3,4]],
names=["cat", "C2"])
exp = DataFrame({"C1":[nan,nan,nan,nan, 3, 3,nan,nan, nan,nan, 4, 5],
"C3":[nan,nan,nan,nan, 10,100,nan,nan, nan,nan,200,34]}, index=idx)
tm.assert_frame_equal(res, exp)
def test_groupby_apply_all_none(self):
# Tests to make sure no errors if apply function returns all None
# values. Issue 9684.
test_df = DataFrame({'groups': [0,0,1,1], 'random_vars': [8,7,4,5]})
def test_func(x):
pass
result = test_df.groupby('groups').apply(test_func)
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert(result[k] == v)
def test_decons():
from pandas.core.groupby import decons_group_index, get_group_index
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
assert(np.array_equal(a, b))
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000), 5),
np.tile(np.arange(10000), 5)]
testit(label_list, shape)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure',
'-s'], exit=False)
| 39.107396 | 172 | 0.532885 |
6d62c8e70bead6736ecf2acccb7d745472e64ab6 | 39 | py | Python | setup/brython/__init__.py | ennec-e/brython | 7b6b713d6d9638da096166ba099ec054230f853e | [
"BSD-3-Clause"
] | 1 | 2022-03-26T10:18:48.000Z | 2022-03-26T10:18:48.000Z | setup/brython/__init__.py | ennec-e/brython | 7b6b713d6d9638da096166ba099ec054230f853e | [
"BSD-3-Clause"
] | null | null | null | setup/brython/__init__.py | ennec-e/brython | 7b6b713d6d9638da096166ba099ec054230f853e | [
"BSD-3-Clause"
] | null | null | null | __version__ = implementation = "3.10.5" | 39 | 39 | 0.74359 |
81775ad34231138d7ad9bf5507677fd7835bd9de | 13,748 | py | Python | notebooks/P-value-exercise.py | ReproNim/stat-repronim-module | ccef0fa1d23d0023db4cbbfc1e3091037df77f3b | [
"CC-BY-4.0"
] | 3 | 2020-02-27T19:04:46.000Z | 2020-02-27T19:13:30.000Z | notebooks/P-value-exercise.py | ReproNim/stat-repronim-module | ccef0fa1d23d0023db4cbbfc1e3091037df77f3b | [
"CC-BY-4.0"
] | 6 | 2016-11-12T02:07:16.000Z | 2020-06-11T10:47:46.000Z | notebooks/P-value-exercise.py | ReproNim/stat-repronim-module | ccef0fa1d23d0023db4cbbfc1e3091037df77f3b | [
"CC-BY-4.0"
] | 12 | 2016-11-03T18:03:01.000Z | 2021-06-04T06:53:07.000Z | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Playing with P-values
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recall some definitions
# + [markdown] slideshow={"slide_type": "subslide"}
# * $H_0$ : null hypothesis: The hypotheis that the effect we are testing for is null
#
# * $H_A$ : alternative hypothesis : Not $H_0$, so there is some signal
#
# * $T$ : The random variable that takes value "significant" or "not significant"
#
# * $T_S$ : Value of T when test is significant (eg $T = T_S$)
#
# * $T_N$ : Value of T when test is not significant (eg $T = T_N$)
#
# * $\alpha$ : false positive rate - probability to reject $H_0$ when $H_0$ is true (therefore $H_A$ is false)
#
# * $\beta$ : false negative rate - probability to accept $H_0$ when $H_A$ is true (i.e. $H_0$ is false)
#
# -
# Plainly:
# - a false positive is saying that something is true when it is false.
# - a false negative is saying that something is false when it is true.
#
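# As a quick illustration of $\alpha$ and $\beta$, we can estimate both by simulation (a minimal sketch: the alternative mean of 0.5, the sample size of 30 and the 2000 simulated experiments below are arbitrary choices):
# +
import numpy as np
import scipy.stats as sst
def one_sided_pvalue(distrib, N=30):
    # one-sided p-value (testing "mean > 0") for one simulated sample
    sample = distrib.rvs(size=N)
    t, p = sst.ttest_1samp(sample, 0.0)
    return p / 2 if t > 0 else 1 - p / 2
n_experiments = 2000
# alpha: how often we reject H0 when H0 is true (the data really have mean 0)
alpha_hat = np.mean([one_sided_pvalue(sst.norm(0, 1)) < 0.05
                     for _ in range(n_experiments)])
# beta: how often we accept H0 when HA is true (the data really have mean 0.5)
beta_hat = np.mean([one_sided_pvalue(sst.norm(0.5, 1)) >= 0.05
                    for _ in range(n_experiments)])
print("estimated alpha: {:.3f} (should be close to 0.05)".format(alpha_hat))
print("estimated beta : {:.3f}".format(beta_hat))
# -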
# ## What is P-hacking?
# P-hacking is finding incidental variations in either the data or the model and reporting them as significant effects. The result is a positive report that is not reproducible or does not generalize. It is often unintentional. In this notebook, we'll go through a few examples of how we can find (and subsequently report) statistically-significant results.
# ## Exercise 1: Random selection
# In this exercise, we'll sample data points from a zero-mean Gaussian distribution and test whether the sample mean is significantly different from 0.
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import scipy.stats as sst
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Step 1: Generate N samples from the normal distribution
# + slideshow={"slide_type": "skip"}
# define the Normal 0,1 object
norm01 = sst.norm(0,1)
# Let's fix the seed of the random generator
#np.random.seed(42) # 42 is arbitrary
# Draw from the normal:
# norm01 has a "random variables" function drawing from this distribution
# and returns a numpy array of a given size
sample = norm01.rvs(size=(30,))
plt.plot(sample,'+')
sample = norm01.rvs(size=(30,))
plt.plot(sample,'+')
# -
# #### Is the mean significant?
# +
# Compute sample mean and corrected standard deviation
sample_mean = sample.mean()
sample_std = sample.std()
N = len(sample)
std_corrected = np.sqrt((sample**2 - sample_mean**2).sum()/(N-1))
# take into account the "N-1" since 1 degree of freedom has been used
# to estimate the mean
# assert not np.isclose(std_corrected, sample_std)
assert np.isclose(std_corrected, np.sqrt(np.var(sample,ddof=1)))
# t-test: compute t statistics
t_value = sample_mean / (std_corrected/np.sqrt(N))
# +
# test it against the null : make a central t distribution
central_t = sst.t(loc=0,scale=1,df=N-1)
significance_thr = 0.05
# use the survival function
pvalue = central_t.sf(t_value)
print("This is our p-value : {}".format(pvalue))
if pvalue < significance_thr:
print("Significant p-value!")
else:
print("Not good enough, continue p-hacking")
# -
# #### Estimate the chance that our p-value is significant under the null:
print(norm01.rvs(size=(2,)))
# +
# create a function that generates a p-value when data are from N(0,1)
def yield_a_pvalue(distrib, N=30):
"""
Parameters
----------
    distrib: distribution object (e.g. norm(0, 1))
a scipy.stats distribution
Returns
-------
A p-value
"""
sample = distrib.rvs(size=(N,))
sample_mean = sample.mean()
std_corrected = np.sqrt(np.var(sample, ddof=1))
# compute t statistics
t_value = sample_mean / (std_corrected/np.sqrt(N))
return sst.t.sf(t_value, df=N-1)
# -
# Under the null, we expect to need about 20 draws on average (1 / 0.05) before we get a significant test:
nb_of_test_needed = 0
while yield_a_pvalue(norm01) > significance_thr:
nb_of_test_needed += 1
print(nb_of_test_needed)
N_pval = 100
pvalues = np.asarray([yield_a_pvalue(sst.norm(0,1)) for i in range(N_pval)])
number_significant = (pvalues <= significance_thr).sum()
print("We have {} tests significant over {} trials, ie {}%"
.format(number_significant, N_pval, 100*number_significant/N_pval))
# #### Sample from non-zero mean:
# We'll run tests that are similar to those above, but this time we'll examine the effect of the effect size versus the signal-to-noise ratio (SNR).
# As above, we are trying to determine whether a sample mean is significantly different from zero. Compare modifying our effect size (the mean, 'm') against modifying the noise ('sigma'). What do you notice if you keep the ratio m/sigma constant?
# +
def launch_a_bunch_of_tests(distrib, N_pval=1000, N=30):
"""
launches a series of sampling and then t tests on these (testing if the mean is > 0)
Parameters:
-----------
distrib: the sampling distribution
N_pval: number of p-value to compute
N : the sample size
"""
pvalues = np.asarray([yield_a_pvalue(distrib) for i in range(N_pval)])
number_significant = (pvalues <= significance_thr).sum()
print("We have {} tests significant over {} trials, ie {}%"
.format(number_significant, N_pval, 100*number_significant/N_pval))
# Demonstrate that the test depends only on the signal to noise ratio, not the effect size
# case 1: m=1.65, sigma=1
#---------------------------
loc, scale = (1.65/np.sqrt(N), 1)
distrib = sst.norm(loc, scale)
launch_a_bunch_of_tests(distrib)
# case 2: m=0.165, sigma=0.1
#---------------------------
loc, scale = (0.165/np.sqrt(N), 0.1)
distrib = sst.norm(loc, scale)
launch_a_bunch_of_tests(distrib)
#---------------------------
# case 3: Modify 'm'; SNR is kept constant
m = 50; sigma = m/1.65
loc, scale = (m/np.sqrt(N), sigma)
distrib = sst.norm(loc, scale)
launch_a_bunch_of_tests(distrib)
# -
# ### Exercise 2: Overfitting
# Overfitting refers to a model (e.g. a GLM) that fits too closely to the data on which it is trained, so that its predictions can't be reproduced on new data.
# Overfitting can occur when there is very little data or if your model has too many parameters. For example, a GLM with 1000 explanatory variables might fit well to the time series of 4 subjects, but is unlikely to do well on a different set of 4 subjects.
# In this exercise, we'll create some data and use regression to fit a model. The important part of the code is the "Modify" section below, where you can change values and examine the resulting changes.
# The number of samples, "n_samples", is the amount of data you have.
# The number of model parameters, "degrees", is the number of degrees of freedom given to your model: the code fits a polynomial of the given degree to our generated data using linear regression.
# Taken from: https://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html#sphx-glr-auto-examples-model-selection-plot-underfitting-overfitting-py
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
# Define a function for us to fit; here we'll take a simple cosine
def true_fun(X):
return np.cos(1.5 * np.pi * X)
###
# Modify
###
n_samples = 30 # Number of data points
degrees = [1, 4, 15] # Number of parameters; change the values to see the effect on out of sample error
# For degree 4, the model will fit Y = b0 + b1*x + b2*x^2 + b3*x^3 + b4*x^4
###
test_max_value=1
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
X_test = np.linspace(0, test_max_value, 100)
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
train_score = mean_squared_error(y, pipeline.predict(X[:, np.newaxis]))
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, np.max(X_test)))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nSample Error: {:.2e}\nOut of sample err. = {:.2e}(+/- {:.1e})".format(
degrees[i], train_score.mean(), -scores.mean(), scores.std()))
plt.show()
# -
# Examine the three previous plots.
# The "Sample error" is the error for the data in your sample; it represents the error you would have if you evaluated your model on data it had already seen.
# The "Out of sample error" is the error when your model is evaluated on data it has not seen. It represents how well your model generalizes.
# In both cases, a lower error is better.
#
# 1) The left-most plot presents a plausible model.
#
# 2) The polynomial of degree 4 has the lowest out of sample error, indicating that it is the best-performing model.
#
# 3) The high-degree polynomial has the lowest sample error, but a very large out of sample error, indicating that it does not generalize well.
# If we were to select our high-degree polynomial and report those results, we can immediately see that our results could not be reproduced.
# ### Exercise 3: Real Data
# The previous exercises examined toy examples; here we'll repeat the same overfitting analysis on a small real dataset.
#
# +
import pandas as pd
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
data = pd.read_csv('brain_size.csv', delimiter=';', header=0, index_col=0)
linear_regression = LinearRegression()
# -
# Let's first look at our data:
data.describe()
# The dataset is relatively simple, and contains 38 subjects with Full Scale, Verbal, and Perceptual IQ, as well as weight, height, and brain volume ("MRI_Count"). We'll repeat the previous exercise and we'll see a similar pattern.
# +
###
# Modify
###
num_subject = 30 # Number of subjects for in-sample data (must be less than 38)
degrees = [1, 4, 15] # For degree 4, the model will fit Y = b0 + b1*x + b2*x^2 + b3*x^3 + b4*x^4
input_variable='Weight'
predicted_variable='Height'
###
test_max_value=1
X = np.array(data[input_variable][:num_subject]).T[:, np.newaxis]
y = np.array(data[predicted_variable][:num_subject]).T[:, np.newaxis]
X_test = np.array(data[input_variable][num_subject:]).T
xind = np.argsort(X_test)
X_test = np.take_along_axis(X_test, xind, 0)[:, np.newaxis]
y_test = np.array(data[predicted_variable][num_subject:]).T
y_test = np.take_along_axis(y_test, xind, 0)[:, np.newaxis]
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X, y)
# Evaluate the models using crossvalidation
train_score = mean_squared_error(y, pipeline.predict(X))
scores = cross_val_score(pipeline, X, y,
scoring="neg_mean_squared_error", cv=10)
m_range = np.linspace(np.min(X), np.max(X), 100)[:, np.newaxis]
plt.plot(m_range, pipeline.predict(m_range), label="Model")
plt.scatter(X_test, y_test, label="Test data")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((np.min(X), np.max(X_test)))
plt.ylim((np.min(y)*0.9, np.max(y)*1.1))
plt.legend(loc="best")
plt.title("Degree {}\nSample Error: {:.2e}\nOut of sample err. = {:.2e}(+/- {:.1e})".format(
degrees[i], train_score.mean(), -scores.mean(), scores.std()))
plt.show()
# -
# ### Avoiding overfitting
# Overfitting is caused by having a model that is too complex for the amount of available data. Such situations are sometimes unavoidable; however, there are ways of reducing the risk of overfitting:
# 1) Acquire more data.
#
# The solution to a lot of modeling problems is 'more data'; however, that's rarely possible.
# 2) Reduce the degrees of freedom; use smaller models.
#
# Use the minimum number of explanatory variables.
# 3) Use regularization
#
# Regularization penalizes model parameters that don't sufficiently contribute to the explained variance. l1 regularization (aka Lasso) sets unimportant parameters to 0 (a minimal sketch is shown after this list).
# 4) Share your model.
#
# Ultimately, the generalizability of a method/claim will be determined by people being able to reproduce it.
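# As a minimal sketch of point 3 (reusing the polynomial setup from Exercise 2, but with an l1 penalty; the degree of 15 and the penalty strength alpha=0.01 are arbitrary choices here, not tuned values):
# +
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
X_reg = np.sort(np.random.rand(30))[:, np.newaxis]
y_reg = np.cos(1.5 * np.pi * X_reg).ravel() + np.random.randn(30) * 0.1
# Same kind of high-degree polynomial as in Exercise 2, but the l1 penalty can
# push many of the 15 coefficients exactly to zero, reducing model complexity.
regularized = Pipeline([("polynomial_features", PolynomialFeatures(degree=15, include_bias=False)),
                        ("lasso", Lasso(alpha=0.01, max_iter=100000))])
regularized.fit(X_reg, y_reg)
n_zero = int(np.sum(regularized.named_steps["lasso"].coef_ == 0))
print("coefficients set exactly to zero by the l1 penalty: {} of 15".format(n_zero))
# -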
# ### Closing Remarks
# Adopting some of the practices discussed today helps with producing good science: false positive results obtained from incidental variations in data/methods can be quickly verified and rejected. Similarly, true positive results can be verified and trusted more easily.
| 36.857909 | 344 | 0.695665 |
16c23d72f1a0aa1bc5063aed081134ad2b0d86c3 | 475 | py | Python | Lib/site-packages/plotly/validators/scattergl/_uid.py | tytanya/my-first-blog | 2b40adb0816c3546e90ad6ca1e7fb50d924c1536 | [
"bzip2-1.0.6"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scattergl/_uid.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2021-03-18T22:27:08.000Z | 2022-03-11T23:40:50.000Z | plotly/validators/scattergl/_uid.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name='uid', parent_name='scattergl', **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 31.666667 | 77 | 0.629474 |
18b53947c67541f078ba33cb2d3cd784ecd4a640 | 14,325 | py | Python | redcache/local.py | tomekwojcik/redcache | 3d353dd9e4a988ede9edc0e1222d4d3e816af05a | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-10-17T16:41:42.000Z | 2017-03-26T22:00:34.000Z | redcache/local.py | tomekwojcik/redcache | 3d353dd9e4a988ede9edc0e1222d4d3e816af05a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | redcache/local.py | tomekwojcik/redcache | 3d353dd9e4a988ede9edc0e1222d4d3e816af05a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# Since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # noqa
try:
from thread import get_ident # noqa
except ImportError: # noqa
from dummy_thread import get_ident # noqa
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`StackLocal` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
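    """Stores attributes separately per context (thread or greenlet).
    Attribute access is keyed on the value returned by ``__ident_func__``
    (the current greenlet or thread ident), so each context only sees the
    values it has set itself. Use :func:`release_local` or a
    :class:`LocalManager` to release the storage for the current context.
    """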
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value): # noqa
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
def __len__(self):
stack = getattr(self._local, 'stack', None)
if stack is None:
return 0
return len(stack)
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
by appending them to `manager.locals`. Everytime the manager cleans up
it, will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
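    A minimal usage sketch (assuming a single :class:`Local` instance)::
        loc = Local()
        manager = LocalManager([loc])
        loc.user = 'someone'   # bound to the current context only
        manager.cleanup()      # releases the data left for this context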
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
        The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
| 35.19656 | 78 | 0.644817 |
511354e8f0f0b35eb487f8e15c32620c79295612 | 593 | py | Python | examples/ch06/fig06_02.py | edson-gomes/Intro-to-Python | 00a2f549916616b0f2036401573e35d66317f998 | [
"MIT"
] | null | null | null | examples/ch06/fig06_02.py | edson-gomes/Intro-to-Python | 00a2f549916616b0f2036401573e35d66317f998 | [
"MIT"
] | null | null | null | examples/ch06/fig06_02.py | edson-gomes/Intro-to-Python | 00a2f549916616b0f2036401573e35d66317f998 | [
"MIT"
] | null | null | null | # fig06_02.py
"""Tokenizing a string and counting unique words."""
text = ('this is sample text with several words '
'this is more sample text with some different words')
word_counts = {}
# count occurrences of each unique word
for word in text.split():
if word in word_counts:
word_counts[word] += 1 # update existing key-value pair
else:
word_counts[word] = 1 # insert new key-value pair
print(f'{"WORD":<12}COUNT')
for word, count in sorted(word_counts.items()):
print(f'{word:<12}{count}')
print('\nNumber of unique words:', len(word_counts))
| 26.954545 | 64 | 0.67285 |
e5eede49a04d8533c001b7a50b6c496c21b49d72 | 571 | py | Python | nuke_stubs/nuke/nuke_classes/ChannelMask_Knob.py | sisoe24/Nuke-Python-Stubs | 79c53cf5cb7b38e15a34fd04f672b143d9d7dc85 | [
"MIT"
] | 1 | 2022-01-12T01:29:16.000Z | 2022-01-12T01:29:16.000Z | nuke_stubs/nuke/nuke_classes/ChannelMask_Knob.py | sisoe24/Nuke-Python-Stubs | 79c53cf5cb7b38e15a34fd04f672b143d9d7dc85 | [
"MIT"
] | null | null | null | nuke_stubs/nuke/nuke_classes/ChannelMask_Knob.py | sisoe24/Nuke-Python-Stubs | 79c53cf5cb7b38e15a34fd04f672b143d9d7dc85 | [
"MIT"
] | null | null | null | from numbers import Number
from typing import *
import nuke
from . import *
class ChannelMask_Knob(Channel_Knob):
"""
ChannelMask_Knob
"""
def __hash__(self, ):
"""
Return hash(self).
"""
return None
def __init__(self, *args, **kwargs):
"""
Initialize self. See help(type(self)) for accurate signature.
"""
return None
def __new__(self,*args, **kwargs):
"""
Create and return a new object. See help(type) for accurate signature.
"""
return None | 21.148148 | 79 | 0.562172 |
c3fc4a8a22d5f7aa5e6e069df7b4edb60f32a1ba | 23,661 | py | Python | Lib/pagebot/fonttoolbox/analyzers/apointcontext.py | bghryct/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
] | 68 | 2018-10-22T22:42:58.000Z | 2022-03-19T11:07:31.000Z | Lib/pagebot/fonttoolbox/analyzers/apointcontext.py | TypeNetwork/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
] | 97 | 2017-07-10T23:49:30.000Z | 2018-10-03T08:17:55.000Z | Lib/pagebot/fonttoolbox/analyzers/apointcontext.py | TypeNetwork/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
] | 9 | 2017-07-11T09:59:00.000Z | 2018-09-12T11:59:30.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# pointcontext.py
#
# Naming of "context" here has not relation with DrawBotContext /
#     Naming of "context" here has no relation with DrawBotContext /
#     FlatContext. It is a glyph contour point with its +3 and -3 neighbors,
#     combined into one point. This way users can easily see the progression
#     of lines through a point and test on vertical/horizontal directions.
# instances.
#
import math
from pagebot.mathematics import *
from pagebot.fonttoolbox.analyzers.apoint import APoint
def calculateAngle(p1, p2, inDegrees=True):
"""Calculate the angle between points p1 and p2. Points can be either 2D or
3D point tuples or Point instances. In the case of Point3D, only the 2D
projection in (x, y) plane is calculated.
>>> p1 = (0, 0)
>>> p2 = (100, 100)
>>> angle = calculateAngle(p1, p2)
>>> angle
45.0
>>> p1 = (0, 0)
>>> p2 = (0, 30)
>>> angle = calculateAngle(p1, p2, inDegrees=False)
>>> angle == math.radians(90)
True
"""
xDiff = p2[0] - p1[0]
yDiff = p2[1] - p1[1]
angle = math.atan2(yDiff, xDiff)
if inDegrees:
angle = math.degrees(angle)
return angle
def angleOfLines(p1, p2, q1, q2, inDegrees=True):
    """Answers the angle difference (radians or default degrees) between
p1-->p2 and q1-->q2. Points can be either 2D or 3D point tuples or Point
instances. In the case of Point3D, only the 2D projection in (x, y) plane
is calculated.
>>> p1 = (0, 0)
>>> p2 = (100, 100)
>>> q1 = (100, 0)
>>> q2 = (0, 100)
>>> angleOfLines(p1, p2, q1, q1)
-45.0
>>> angle = angleOfLines(q1, q2, p1, p1, inDegrees=False)
>>> angle == math.radians(-135.0)
True
"""
angle1 = calculateAngle(p1, p2, inDegrees)
angle2 = calculateAngle(q1, q2, inDegrees)
angle = angle2 - angle1
if angle < -180:
angle += 360
elif angle > 180:
angle -= 360
return angle
class APointContext:
"""The PointContext instance is a Point wrapper, that also takes the 3
points previous and next 3 points on the contour. The instance behaves like
a normal point p, but additional information is available as interpreted
from the point context in relation to the neighbor points. The total of 7
points is derived from the average construction of a serif, so it is
possible to hold (and interpret) an entire serif sequence inside one point
context.
"""
PARALLEL_TOLERANCE = 2 # Difference tolerance angle in degrees to take point contexts as parallel
def __init__(self, points, index=None, contourIndex=None, clockwise=None, glyphName=None):
"""Points context for Point instances, three before, one in the middle,
and three after. We need the extra storage, e.g. for point type that
Point holds.
>>> b1 = APoint((0, 0))
>>> b2 = APoint((0, 100))
>>> b3 = APoint((40, 200))
>>> p1 = APoint((100, 200))
>>> a1 = APoint((200, 200))
>>> a2 = APoint((140, 100))
>>> a3 = APoint((40, 80))
>>> pc = APointContext((b1, b2, b3, p1, a1, a2, a3))
>>> pc[0]
100
>>> pc[1]
200
>>> # TODO: find some positive results.
>>> pc.isInflection()
False
>>> pc.isDiagonal()
False
>>> pc.isParallel(pc)
True
>>> #pc.getProjectedPoint(a3)
>>> pc.nextOnCurvePoint
APoint(200,200,On)
>>> pc.prevOnCurvePoint
APoint(40,200,On)
"""
assert len(points) == 7
self.p_3, self.p_2, self.p_1, self.p, self.p1, self.p2, self.p3 = points
self.contourIndex = contourIndex
self.index = index
#self.clockwise = clockwise # TODO: Add clockwise attribute from calling function.
self.glyphName = glyphName
self._direction = None # Cache direction once calculated.
self._angle = None # Cache axis once calculated.
def __getitem__(self, index):
"""
Gets the main, middle point within the point context.
"""
return self.p[index]
def __lt__(self, p):
"""Compare the points.
>>> o = APoint((0, 0))
>>> pc1 = APointContext((o, o, o, APoint((100, 200)), o, o, o))
>>> pc2 = APointContext((o, o, o, APoint((200, 200)), o, o, o))
>>> pc3 = APointContext((o, o, o, APoint((200, 200)), o, o, o))
>>> pc1 < pc2
True
>>> pc2 < pc3
False
"""
return self.p < p.p
def __le__(self, p):
"""Compare the points.
>>> o = APoint((0, 0))
>>> pc1 = APointContext((o, o, o, APoint((100, 200)), o, o, o))
>>> pc2 = APointContext((o, o, o, APoint((200, 200)), o, o, o))
>>> pc3 = APointContext((o, o, o, APoint((200, 199)), o, o, o))
>>> pc1 < pc2
True
>>> pc2 <= pc3
False
"""
return self.p <= p.p
def __gt__(self, p):
"""Compare the points.
>>> o = APoint((0, 0))
>>> pc1 = APointContext((o, o, o, APoint((100, 200)), o, o, o))
>>> pc2 = APointContext((o, o, o, APoint((200, 200)), o, o, o))
>>> pc3 = APointContext((o, o, o, APoint((200, 199)), o, o, o))
>>> pc1 > pc2
False
>>> pc2 > pc3
True
"""
return self.p > p.p
def __ge__(self, p):
"""Compare the points.
>>> o = APoint((0, 0))
>>> pc1 = APointContext((o, o, o, APoint((100, 200)), o, o, o))
>>> pc2 = APointContext((o, o, o, APoint((200, 200)), o, o, o))
>>> pc3 = APointContext((o, o, o, APoint((200, 199)), o, o, o))
>>> pc1 >= pc2
False
>>> pc2 >= pc3
True
"""
return self.p >= p.p
'''
# These seem to be based on APoint instead of APointContext. Does it make
# sense to do math operations on point contexts?
def __sub__(self, p):
"""Subtract the points. Result is a point3D tuple.
>>> APoint((200, 500)) - APoint((100, 300))
(100, 200, 0)
>>> APoint((200, 500, 10)) - APoint((100, 300))
(100, 200, 10)
>>> APoint((200, 500, 10)) - APoint((-100, -300, -100))
(300, 800, 110)
"""
return self.p[0] - p[0], self.p[1] - p[1], self.p[2] - p[2]
def __add__(self, p):
"""Add the points. Result is a point3D tuple.
>>> APoint((200, 500)) + APoint((100, 300))
(300, 800, 0)
>>> APoint((200, 500, 10)) + APoint((100, 300))
(300, 800, 10)
>>> APoint((200, 500, 10)) + APoint((-100, -300, -100))
(100, 200, -90)
"""
return self.p[0] + p[0], self.p[1] + p[1], self.p[2] + p[2]
def __mul__(self, v):
"""Multiple the point with a scalar. Result is a point3D tuple.
>>> APoint((200, 500)) * 2
(400, 1000, 0)
>>> APoint((200, 500, 10)) * 2
(400, 1000, 20)
"""
assert isinstance(v, (int, float))
return self.p[0] * v, self.p[1] * v, self.p[2] * v
def __div__(self, v):
"""Divide the point by a scalar. Result is a point3D tuple.
>>> APoint((200, 500)) / 2
(100, 250, 0)
>>> APoint((200, 500, 10)) / 2
(100, 250, 5)
"""
assert isinstance(v, (int, float))
return self.p[0] / v, self.p[1] / v, self.p[2] / v
'''
def _get_x(self):
return self.p.x
x = property(_get_x)
def _get_y(self):
return self.p.y
y = property(_get_y)
def _get_rx(self):
# Get rounded value.
return int(round(self.x))
rx = property(_get_x)
def _get_ry(self):
# Get rounded value
return int(round(self.y))
ry = property(_get_ry)
# self.angle Answer angle of the point context
def _get_angle(self):
if self._angle is None:
xDiff = self.p1[0] - self.p[0]
yDiff = self.p1[1] - self.p[1]
self._angle = round(math.atan2(yDiff, xDiff) * 180 / math.pi, 3)
return self._angle
angle = property(_get_angle)
# self.normalizedAngle Answer the normalized angle of self, -90 <= angle <= 90
def _get_normalizedAngle(self):
angle = self.angle
while angle < 0:
angle += 180
while angle > 360:
angle -= 180
return angle
normalizedAngle = property(_get_normalizedAngle)
def __repr__(self):
"""
>>> o = APoint((0, 0))
>>> pc1 = APointContext((o, o, o, APoint((100, 200)), o, o, o), index=1, contourIndex=2, clockwise=True, glyphName='bla')
>>> pc1[1]
200
>>> print(pc1)
pc[index:1](100,200) (bla)
"""
s = 'pc'
if self.index is not None:
s += '[index:%s]' % self.index
s += '(%s,%s)' % (self.p[0], self.p[1])
if self.isNextVertical():
s += ' vertical'
elif self.isNextHorizontal():
s += ' horizontal'
if self.isRoundStemExtreme():
s += ' roundstem'
elif self.isRoundBarExtreme():
s += ' roundbar'
if self.isTerminal():
s += ' terminal'
# TODO: Add clocksize info at initialization
#if self.clockwise:
# s += ' c%d CW' % self.contourIndex
#else:
# s += ' c%s CCW' % self.contourIndex
if self.glyphName is not None:
s += ' (%s)' % self.glyphName
return s
def isOffCurve(self, p=None):
if p is None:
p = self.p
if isinstance(p, APoint):
return not p.onCurve
return False # Point2D type is always on-curve by definition.
def isOnCurve(self, p=None):
if p is None:
p = self.p
if isinstance(p, APoint):
return p.onCurve
return True # Point2D type is always on-curve by definition.
def isUp(self):
return self.p[1] < self.p1[1]
def isDown(self):
return self.p[1] > self.p1[1]
def isLeft(self):
return self.p[0] < self.p1[0]
def isRight(self):
return self.p[0] > self.p1[0]
def isHorizontalExtreme(self, tolerance=0):
"""The `isHorizontalExtreme` method answers if the point context is an
extreme (such as the side of an O)."""
# Is the point context a vertical and extreme in x-direction?
# @@@ Also test on non-inflection point.
return self.isNextVertical(tolerance)\
and self.isPrevVertical(tolerance)\
and self.isOffCurve(self.p1)\
and self.isOffCurve(self.p_1)
def isLeftRoundExtreme(self, tolerance=0):
"""Answers if the point context is a left round extreme. x/y selection
by index, as these can be APoint or point2D tuple instances."""
nextP = self.nextOnCurvePoint
prevP = self.prevOnCurvePoint
return nextP is not None and prevP is not None\
and self.p[0] < (nextP[0] - tolerance)\
and self.p[0] < (prevP[0] - tolerance)
def isRightRoundExtreme(self, tolerance=0):
"""Answers if the point context is a right round extreme. x/y selection
by index, as these can be APoint or point2D tuple instances."""
nextP = self.nextOnCurvePoint
prevP = self.prevOnCurvePoint
return nextP is not None and prevP is not None\
and self.p[0] > (nextP[0] + tolerance)\
and self.p[0] > (prevP[0] + tolerance)
def isTopRoundExtreme(self, tolerance=0):
"""Answers if the point context is a top round extreme. x/y selection
by index, as these can be APoint or point2D tuple instances."""
nextP = self.nextOnCurvePoint
prevP = self.prevOnCurvePoint
return nextP is not None and prevP is not None\
and self.p[1] > (nextP[1] + tolerance)\
and self.p[1] > (prevP[1] + tolerance)
def isBottomRoundExtreme(self, tolerance=0):
"""Answers if the point context is a bottom round extreme. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
nextP = self.nextOnCurvePoint
prevP = self.prevOnCurvePoint
return nextP is not None and prevP is not None\
and self.p[1] < (nextP[1] - tolerance)\
and self.p[1] < (prevP[1] - tolerance)
def isVerticalRoundExtreme(self, tolerance=0):
"""Answers if the point context is a vertical round extreme. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return self.isTopRoundExtreme(tolerance) or self.isBottomRoundExtreme(tolerance)
def isHorizontalRoundExtreme(self, tolerance=0):
"""Answers if the point context is a horizontal round extreme. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return self.isLeftRoundExtreme(tolerance) or self.isRightRoundExtreme(tolerance)
def isNextVertical(self, tolerance=0):
"""Answers if the point context next point is vertical. x/y selection
by index, as these can be APoint or point2D tuple instances."""
return abs(self.p[0] - self.p1[0]) <= tolerance
isVertical = isNextVertical
def isPrevVertical(self, tolerance=0):
"""Answers if the point context prev point is vertical. x/y selection
by index, as these can be APoint or point2D tuple instances."""
return abs(self.p[0] - self.p_1[0]) <= tolerance
def isVerticalExtreme(self, tolerance=0):
"""Is the point context a horizontal and extreme in y-direction?"""
return self.isNextHorizontal(tolerance)\
and self.isPrevHorizontal(tolerance)\
and self.isOffCurve(self.p1)\
and self.isOffCurve(self.p_1)
def isNextHorizontal(self, tolerance=0):
"""Answers if the point context prev point is horizontal. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return abs(self.p[1] - self.p1[1]) <= tolerance
isHorizontal = isNextHorizontal
def isPrevHorizontal(self, tolerance=0):
"""Answers if the point context prev point is horizontal. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return abs(self.p[1] - self.p_1[1]) <= tolerance
def isInflection(self):
valid = self.isOnCurve(self.p) and self.isOffCurve(self.p_1) and self.isOffCurve(self.p1)
nextP = self.nextOnCurvePoint
prevP = self.prevOnCurvePoint
if valid and nextP is not None and prevP is not None:
a1 = angleOfLines(self.p, self.p1, self.p, nextP)
a2 = angleOfLines(self.p, self.p_1, self.p, prevP)
if a1 * a2 > 0:
return True
return False
def isDiagonal(self):
return not (self.isVertical() or self.isHorizontal())
def isParallel(self, pc, tolerance=None):
"""Answers if self is parallel to pc point context. Optional attribute
tolerance is the margin to interpret point context lines to be
parallel. Default is self.PARALLEL_TOLERANCE."""
if tolerance is None:
tolerance = self.PARALLEL_TOLERANCE
return abs(self.normalizedAngle - pc.normalizedAngle) <= tolerance
def isRoundStemExtreme(self, tolerance=0):
return self.isHorizontalExtreme(tolerance)\
and (self.isLeftRoundExtreme(tolerance) or self.isRightRoundExtreme(tolerance))\
and self.isOffCurve(self.p1)\
and self.isOffCurve(self.p_1)
def isRoundBarExtreme(self, tolerance=0):
return self.isVerticalExtreme(tolerance)\
and (self.isTopRoundExtreme(tolerance) or self.isBottomRoundExtreme(tolerance))\
and self.isOffCurve(self.p1)\
and self.isOffCurve(self.p_1)
def isTerminal(self):
return False
    def isRoundStem(self, pc0, pc1, tolerance=0):
        """The isRoundStem method answers if the pc0 and pc1 define a round
        stem. This is True if one or both of the point contexts are extremes;
        if both are, they must "bend" in the same direction. Also there should
        be overlap in horizontal direction and the point context should span
        black only."""
return pc0.isHorizontalRoundExtreme(tolerance)\
and pc1.isHorizontalRoundExtreme(tolerance)\
and self.spanRoundsOnBlack(pc0, pc1)
def inVerticalWindow(self, pc):
"""The `inVerticalWindow` method checks if there is any overlap in
X-direction to make the vertical comparison optically define as a
"bar".
True self.minx-------------self.maxx
p.minx----------------p.maxx
True self.minx----------------------p.maxx
p.minx---------------p.maxx
True self.minx------------self.maxx
p.minx-------------------------------p.maxx
False self.minx----------self.maxx
p.minx--------p.maxx
False self.minx--------self.maxx
p.minx------------------------p.maxx
"""
return pc.minx() < self.maxx() and self.minx() < pc.maxx()
def inHorizontalWindow(self, pc):
"""The `inHorizontalWindow` method checks if there is any overlap in
X-direction to make the vertical comparison optically define as a
"stem".
True self.miny-------------self.maxy
p.miny----------------p.maxy
True self.miny----------------------p.maxy
p.miny---------------p.maxy
True self.miny------------self.maxy
p.miny-------------------------------p.maxy
False self.miny----------self.maxy
p.miny--------p.maxy
False self.miny--------self.maxy
p.miny------------------------p.maxy
"""
return pc.miny() < self.maxy() and self.miny() < pc.maxy()
def inDiagonalWindow(self, pc):
"""Answers if pc fits in the diagonal window of self."""
return not None in self.getProjectedWindowLine(pc)
    def getProjectedPoint(self, p):
        """Answers the perpendicular projection of point p in the line segment
        of self. If the projection is not within the range of the line
        segment, then answer None."""
pp = self.projectedOnLine(p)
        if self.inBoundingBox(pp): # Is the projected point inside the line segment?
return pp
return None # Projection not within the line segment window.
    def getProjectedWindowLines(self, pc):
        """Answers all 4 projected window lines. Note that some of them can be
        None if the projection falls outside the window (the overlapping area of
        a perpendicular line that intersects with both line segments). This
method is different from self.getProjectedWindowLine as that one only
answers one of the projected points that is not None. For efficiency
reasons only one of the projections is made there. For almost parallel
lines all projects are more or less identical."""
return (
(pc.p, self.getProjectedPoint(pc.p)),
(pc.p1, self.getProjectedPoint(pc.p1)),
(self.p, pc.getProjectedPoint(self.p)),
(self.p1, pc.getProjectedPoint(self.p1))
)
def getProjectedWindowLine(self, pc):
"""Answers a tuple of one of the 4 points of `(self.p, self.p1, pc.p,
pc.p1)` that has a projection on the other line and its projection
point. If no projection exists in the window of the two line segments,
then answer (None, None)."""
pp = self.getProjectedPoint(pc.p)
if pp is not None:
return pc.p, pp
pp = self.getProjectedPoint(pc.p1)
if pp is not None:
return pc.p1, pp
pp = pc.getProjectedPoint(self.p)
if pp is not None:
return self.p, pp
pp = pc.getProjectedPoint(self.p1)
if pp is not None:
return self.p1, pp
return None, None
def inBoundingBox(self, p):
"""Answers is p is inside the bounding box of the glyph.
p can be a point-typles or a Point instance."""
return (self.p[0] <= p[0] <= self.p1[0] or self.p1[0] <= p[0] <= self.p[0]) and \
(self.p[1] <= p[1] <= self.p1[1] or self.p1[1] <= p[1] <= self.p[1])
def minx(self):
"""Answers the minimum x, compared with the two neighbor points. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return min(self.p_1[0], self.p[0], self.p1[0])
def maxx(self):
"""Answers the maximum x, compared with the two neighbor points. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return max(self.p_1[0], self.p[0], self.p1[0])
def miny(self):
"""Answers the minimum y, compared with the two neighbor points. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return min(self.p_1[1], self.p[1], self.p1[1])
def maxy(self):
"""Answers the maximum y, compared with the two neighbor points. x/y
selection by index, as these can be APoint or point2D tuple
instances."""
return max(self.p_1[1], self.p[1], self.p1[1])
def middle(self, p1=None):
"""Answers the Point instance of the middle between the optional
attribute points p0 and p1. If the points are omitted, then use
respectively self.p and self.p1. x/y selection by index, as these can
be APoint or point2D tuple instances."""
if p1 is None:
p1 = self.p1
return int(round((self.p[0] + p1[0])/2)), int(round((self.p[1] + p1[1])/2))
def distanceTo(self, p):
"""Answers the distance of point p to the line of self."""
return point2Line(self.p, self.p1, p)
def projectedOnLine(self, p):
"""Answers the point context pc projects on the line of self."""
xy = pointProjectedOnLine(self.p.p, self.p1.p, p.p)
return APoint(xy)
# self.nextOnCurvePoint
def _get_nextOnCurvePoint(self):
if self.isOnCurve(self.p1):
return self.p1
if self.isOnCurve(self.p2):
return self.p2
if self.isOnCurve(self.p3):
return self.p3
return None
nextOnCurvePoint = property(_get_nextOnCurvePoint)
# self.prevOnCurvePoint
def _get_prevOnCurvePoint(self):
if self.isOnCurve(self.p_1):
return self.p_1
if self.isOnCurve(self.p_2):
return self.p_2
if self.isOnCurve(self.p_3):
return self.p_3
return None
prevOnCurvePoint = property(_get_prevOnCurvePoint)
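# Hypothetical usage sketch (added; the enclosing point-context class and its
# constructor are defined earlier in this module, so the variable name `pc`
# below is assumed): the two properties above are typically used to walk from
# a control point to its bounding on-curve points, e.g.
#
#     prev_on = pc.prevOnCurvePoint
#     next_on = pc.nextOnCurvePoint
#     if prev_on is not None and next_on is not None:
#         segment_mid = pc.middle(next_on)  # midpoint of the current segment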
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
| 36.626935 | 129 | 0.569334 |
f4dee14da32fed961c2b1bf83b8a3af93bdc0314 | 1,013 | py | Python | pymtl3/passes/mamba/test/HeuTopoUnrollSim_test.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T06:22:11.000Z | 2022-01-03T06:22:11.000Z | pymtl3/passes/mamba/test/HeuTopoUnrollSim_test.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | null | null | null | pymtl3/passes/mamba/test/HeuTopoUnrollSim_test.py | mondO/pymtl3 | 9869dda28c01926cee6da94ebdeac2a210150c62 | [
"BSD-3-Clause"
] | null | null | null | from pymtl3.datatypes import Bits32
from pymtl3.dsl import *
from ..PassGroups import HeuTopoUnrollSim
def test_very_deep_dag():
class Inner(Component):
def construct( s ):
s.in_ = InPort(Bits32)
s.out = OutPort(Bits32)
@s.update
def up():
s.out = s.in_ + 1
def done( s ):
return True
def line_trace( s ):
return "{} > {}".format( s.a, s.b, s.c, s.d )
class Top(Component):
def construct( s, N=2000 ):
s.inners = [ Inner() for i in range(N) ]
for i in range(N-1):
s.inners[i].out //= s.inners[i+1].in_
s.out = OutPort(Bits32)
@s.update_ff
def ff():
if s.reset:
s.out <<= 0
else:
s.out <<= s.out + s.inners[N-1].out
def line_trace( s ):
return str(s.inners[-1].out) + " " + str(s.out)
N = 2000
A = Top( N )
A.apply( HeuTopoUnrollSim() )
T = 0
while T < 5:
A.tick()
print(A.line_trace())
assert A.out == T * N
T += 1
return A
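# Minimal runner sketch (added, not part of the original test): pytest collects
# test_very_deep_dag() by name, but the module can also be executed directly.
if __name__ == "__main__":
    test_very_deep_dag()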
| 19.113208 | 53 | 0.52616 |
6595c4063fa8a395cf375c5516bca3f126cd5f05 | 3,314 | py | Python | generate_codecs.py | igutekunst/binary-codec | 83216f4bf452bf45eeb7deaec46a6c7bb15c1bcf | [
"MIT"
] | null | null | null | generate_codecs.py | igutekunst/binary-codec | 83216f4bf452bf45eeb7deaec46a6c7bb15c1bcf | [
"MIT"
] | null | null | null | generate_codecs.py | igutekunst/binary-codec | 83216f4bf452bf45eeb7deaec46a6c7bb15c1bcf | [
"MIT"
] | null | null | null | import argparse
import os
import string
import mako
from generate_headers import process_yaml
def fpath(f):
path = os.path.dirname(os.path.abspath(__file__))
return os.path.realpath(os.path.join(path, f))
def get_template(template_name):
return fpath(os.path.join('templates', template_name))
def substitute_from_env(s, env):
return string.Template(s).safe_substitute(env)
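# Illustrative sketch (added): substitute_from_env() uses safe_substitute(), so
# unknown placeholders are left untouched instead of raising. Paths below are made up.
#
#     substitute_from_env('$base/c/core/slip.c', {'base': '/opt/templates'})
#     # -> '/opt/templates/c/core/slip.c'
#     substitute_from_env('$base/$missing.c', {'base': '/opt/templates'})
#     # -> '/opt/templates/$missing.c'   ($missing survives, no KeyError)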
class GeneratorPlugin(object):
def __init__(self, plugin_dir):
self.plugin_dir = os.path.realpath(plugin_dir)
self.data_yaml = os.path.join(self.plugin_dir, 'data.yaml')
if not os.path.exists(self.data_yaml):
raise Exception("Please create data.yaml")
self._get_templates()
def _get_templates(self):
# TODO load yaml plugin descriptor
self.templates = [
('$base/c/core/slip.c', 'src/slip.c'),
('$base/c/core/BinaryCodec.c.mako', 'src/BinaryCodec.c'),
]
env = {
'base': fpath('templates')
}
if hasattr(self, 'get_templates'):
self.templates.extend(self.get_templates())
self.templates = [(substitute_from_env(s, env),
substitute_from_env(d, env)) for s, d in self.templates]
self.templates = [(s,
os.path.join(self.plugin_dir, d)) for s, d in self.templates]
print(self.templates)
def generate(self):
template_context = process_yaml(self.data_yaml)
for (src, dst) in self.templates:
if not os.path.exists(src):
raise Exception('Template does not exist: {}'.format(os.path.relpath(src)))
update_needed = True
if src.endswith('.mako'):
print("Rendering {}".format(src))
template = mako.template.Template(filename=src)
rendered = template.render(**template_context)
else:
print("Copying {}".format(src))
with open(src) as f:
rendered = f.read()
dest_dir = os.path.dirname(dst)
# Ensure destination directory exists
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
# Don't overwrite the destination file of the content is identical
if os.path.exists(dst):
with open(dst) as f:
old_content = f.read()
if old_content == rendered:
update_needed = False
if update_needed:
with open(dst, 'w') as f:
f.write(rendered)
def main():
parser = argparse.ArgumentParser()
# This function runs the codec template processor
# The codec template processor looks for a binary codec
# description file in the current directory, and uses it to generate
# codec source files for the enabled targets
# Targets can be specified on the command line, or via a configuration file
# Each target/enabled plugin will create its own folder in the output folder
plugins = [GeneratorPlugin('.')]
for plugin in plugins:
plugin.generate()
# TODO need a good name for the output folder
if __name__ == '__main__':
main()
| 32.174757 | 91 | 0.588111 |
70ac6c9493422e112c4d55241f0be3a59d79c912 | 39,930 | py | Python | tensorflow_probability/python/distributions/vector_diffeomixture.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | 1 | 2018-08-27T14:37:40.000Z | 2018-08-27T14:37:40.000Z | tensorflow_probability/python/distributions/vector_diffeomixture.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/vector_diffeomixture.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | 1 | 2018-08-27T14:37:44.000Z | 2018-08-27T14:37:44.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The VectorDiffeomixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors import affine_linear_operator as affine_linear_operator_bijector
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import seed_stream
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow.python.ops.linalg import linear_operator_addition as linop_add_lib
__all__ = [
"VectorDiffeomixture",
"quadrature_scheme_softmaxnormal_gauss_hermite",
"quadrature_scheme_softmaxnormal_quantiles",
]
def quadrature_scheme_softmaxnormal_gauss_hermite(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_softmaxnormal_quantiles`.
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
weight associated with each grid point.
"""
with tf.name_scope(name, "quadrature_scheme_softmaxnormal_gauss_hermite",
[normal_loc, normal_scale]):
normal_loc = tf.convert_to_tensor(value=normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = tf.convert_to_tensor(
value=normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(dt.as_numpy_dtype)
probs = probs.astype(dt.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=dt)
grid = softmax(
-distribution_util.pad(
(normal_loc[..., tf.newaxis] +
np.sqrt(2.) * normal_scale[..., tf.newaxis] * grid),
axis=-2,
front=True),
axis=-2) # shape: [B, components, deg]
return grid, probs
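# Hypothetical usage sketch (added; `_demo_gauss_hermite_grid` is an assumed
# helper name, not part of the TFP API). It builds the quadrature for a
# two-component mixture (K=2), whose SoftmaxNormal parameters live in R^{K-1}.
def _demo_gauss_hermite_grid(quadrature_size=8):
  grid, probs = quadrature_scheme_softmaxnormal_gauss_hermite(
      normal_loc=[0.], normal_scale=[1.], quadrature_size=quadrature_size)
  # grid: shape [K=2, quadrature_size]; probs: shape [quadrature_size], sums to 1.
  return grid, probs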
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use SoftmaxNormal quantiles to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
weight associated with each grid point.
"""
with tf.name_scope(name, "softmax_normal_grid_and_probs",
[normal_loc, normal_scale]):
normal_loc = tf.convert_to_tensor(value=normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = tf.convert_to_tensor(
value=normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get dist.batch_shape.ndims, statically if possible."""
ndims = dist.batch_shape.ndims
if ndims is None:
ndims = tf.shape(input=dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = dist.batch_shape.with_rank_at_least(1)
num_components = tf.compat.dimension_value(bs[-1])
if num_components is not None:
num_components += 1
tail = tf.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so its broadcast across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = softmax_centered_bijector.SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
quantiles.set_shape(_get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(_get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
class VectorDiffeomixture(distribution_lib.Distribution):
"""VectorDiffeomixture distribution.
A vector diffeomixture (VDM) is a distribution parameterized by a convex
combination of `K` component `loc` vectors, `loc[k], k = 0,...,K-1`, and `K`
`scale` matrices `scale[k], k = 0,..., K-1`. It approximates the following
[compound distribution]
(https://en.wikipedia.org/wiki/Compound_probability_distribution)
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
The integral `int p(x | z) p(z) dz` is approximated with a quadrature scheme
adapted to the mixture density `p(z)`. The `N` quadrature points `z_{N, n}`
and weights `w_{N, n}` (which are non-negative and sum to 1) are chosen
such that
```q_N(x) := sum_{n=1}^N w_{n, N} p(x | z_{N, n}) --> p(x)```
as `N --> infinity`.
Since `q_N(x)` is in fact a mixture (of `N` points), we may sample from
`q_N` exactly. It is important to note that the VDM is *defined* as `q_N`
above, and *not* `p(x)`. Therefore, sampling and pdf may be implemented as
exact (up to floating point error) methods.
A common choice for the conditional `p(x | z)` is a multivariate Normal.
The implemented marginal `p(z)` is the `SoftmaxNormal`, which is a
`K-1` dimensional Normal transformed by a `SoftmaxCentered` bijector, making
it a density on the `K`-simplex. That is,
```
Z = SoftmaxCentered(X),
X = Normal(mix_loc / temperature, 1 / temperature)
```
The default quadrature scheme chooses `z_{N, n}` as `N` midpoints of
the quantiles of `p(z)` (generalized quantiles if `K > 2`).
See [Dillon and Langmore (2018)][1] for more details.
#### About `Vector` distributions in TensorFlow.
The `VectorDiffeomixture` is a non-standard distribution that has properties
particularly useful in [variational Bayesian
methods](https://en.wikipedia.org/wiki/Variational_Bayesian_methods).
Conditioned on a draw from the SoftmaxNormal, `X|z` is a vector whose
components are linear combinations of affine transformations, thus is itself
an affine transformation.
Note: The marginals `X_1|v, ..., X_d|v` are *not* generally identical to some
parameterization of `distribution`. This is due to the fact that the sum of
draws from `distribution` are not generally itself the same `distribution`.
#### About `Diffeomixture`s and reparameterization.
The `VectorDiffeomixture` is designed to be reparameterized, i.e., its
parameters are only used to transform samples from a distribution which has no
trainable parameters. This property is important because backprop stops at
sources of stochasticity. That is, as long as the parameters are used *after*
the underlying source of stochasticity, the computed gradient is accurate.
Reparametrization means that we can use gradient-descent (via backprop) to
optimize Monte-Carlo objectives. Such objectives are a finite-sample
approximation of an expectation and arise throughout scientific computing.
WARNING: If you backprop through a VectorDiffeomixture sample and the "base"
distribution is both: not `FULLY_REPARAMETERIZED` and a function of trainable
variables, then the gradient is not guaranteed correct!
#### Examples
```python
tfd = tfp.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
# transformations involve:
# k=0: loc=zeros(dims) scale=LinearOperatorScaledIdentity
# k=1: loc=[2.]*dims scale=LinOpDiag
dims = 5
vdm = tfd.VectorDiffeomixture(
mix_loc=[[0.], [1]],
temperature=[1.],
distribution=tfd.Normal(loc=0., scale=1.),
loc=[
None, # Equivalent to `np.zeros(dims, dtype=np.float32)`.
np.float32([2.]*dims),
],
scale=[
tf.linalg.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
tf.linalg.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
validate_args=True)
```
#### References
[1]: Joshua Dillon and Ian Langmore. Quadrature Compound: An approximating
family of distributions. _arXiv preprint arXiv:1801.03080_, 2018.
https://arxiv.org/abs/1801.03080
"""
def __init__(self,
mix_loc,
temperature,
distribution,
loc=None,
scale=None,
quadrature_size=8,
quadrature_fn=quadrature_scheme_softmaxnormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="VectorDiffeomixture"):
"""Constructs the VectorDiffeomixture on `R^d`.
The vector diffeomixture (VDM) approximates the compound distribution
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
Args:
mix_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`.
In terms of samples, larger `mix_loc[..., k]` ==>
`Z` is more likely to put more weight on its `kth` component.
temperature: `float`-like `Tensor`. Broadcastable with `mix_loc`.
In terms of samples, smaller `temperature` means one component is more
likely to dominate. I.e., smaller `temperature` makes the VDM look more
like a standard mixture of `K` components.
distribution: `tfp.distributions.Distribution`-like instance. Distribution
from which `d` iid samples are used as input to the selected affine
transformation. Must be a scalar-batch, scalar-event distribution.
Typically `distribution.reparameterization_type = FULLY_REPARAMETERIZED`
or it is a function of non-trainable parameters. WARNING: If you
backprop through a VectorDiffeomixture sample and the `distribution`
is not `FULLY_REPARAMETERIZED` yet is a function of trainable variables,
then the gradient will be incorrect!
loc: Length-`K` list of `float`-type `Tensor`s. The `k`-th element
represents the `shift` used for the `k`-th affine transformation. If
the `k`-th item is `None`, `loc` is implicitly `0`. When specified,
must have shape `[B1, ..., Bb, d]` where `b >= 0` and `d` is the event
size.
scale: Length-`K` list of `LinearOperator`s. Each should be
positive-definite and operate on a `d`-dimensional vector space. The
`k`-th element represents the `scale` used for the `k`-th affine
transformation. `LinearOperator`s must have shape `[B1, ..., Bb, d, d]`,
`b >= 0`, i.e., characterizes `b`-batches of `d x d` matrices
quadrature_size: Python `int` scalar representing number of
quadrature points. Larger `quadrature_size` means `q_N(x)` better
approximates `p(x)`.
quadrature_fn: Python callable taking `normal_loc`, `normal_scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
representing the SoftmaxNormal grid and corresponding normalized weight.
Default value: `quadrature_scheme_softmaxnormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if `not scale or len(scale) < 2`.
ValueError: if `len(loc) != len(scale)`
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
ValueError: if `validate_args` and any not scale.is_positive_definite.
TypeError: if any scale.dtype != scale[0].dtype.
TypeError: if any loc.dtype != scale[0].dtype.
NotImplementedError: if `len(scale) != 2`.
ValueError: if `not distribution.is_scalar_batch`.
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = dict(locals())
with tf.name_scope(name, values=[mix_loc, temperature]) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
"num_component >= 2.")
if loc is None:
loc = [None]*len(scale)
if len(loc) != len(scale):
raise ValueError("loc/scale must be same-length lists "
"(or same-length list-like objects).")
dtype = scale[0].dtype.base_dtype
loc = [
tf.convert_to_tensor(value=loc_, dtype=dtype, name="loc{}".format(k))
if loc_ is not None else None for k, loc_ in enumerate(loc)
]
for k, scale_ in enumerate(scale):
if validate_args and not scale_.is_positive_definite:
raise ValueError("scale[{}].is_positive_definite = {} != True".format(
k, scale_.is_positive_definite))
if scale_.dtype.base_dtype != dtype:
raise TypeError(
"dtype mismatch; scale[{}].base_dtype=\"{}\" != \"{}\"".format(
k, scale_.dtype.base_dtype.name, dtype.name))
self._endpoint_affine = [
affine_linear_operator_bijector.AffineLinearOperator(
shift=loc_, scale=scale_,
validate_args=validate_args,
name="endpoint_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(loc, scale))]
# TODO(jvdillon): Remove once we support k-mixtures.
# We make this assertion here because otherwise `grid` would need to be a
# vector not a scalar.
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
mix_loc = tf.convert_to_tensor(value=mix_loc, dtype=dtype, name="mix_loc")
temperature = tf.convert_to_tensor(
value=temperature, dtype=dtype, name="temperature")
self._grid, probs = tuple(quadrature_fn(
mix_loc / temperature,
1. / temperature,
quadrature_size,
validate_args))
# Note: by creating the logits as `log(prob)` we ensure that
# `self.mixture_distribution.logits` is equivalent to
# `math_ops.log(self.mixture_distribution.probs)`.
self._mixture_distribution = categorical.Categorical(
logits=tf.math.log(probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
self._grid = distribution_util.with_dependencies(
asserts, self._grid)
self._distribution = distribution
self._interpolated_affine = [
affine_linear_operator_bijector.AffineLinearOperator(
shift=loc_, scale=scale_,
validate_args=validate_args,
name="interpolated_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(
interpolate_loc(self._grid, loc),
interpolate_scale(self._grid, scale)))]
[
self._batch_shape_,
self._batch_shape_tensor_,
self._event_shape_,
self._event_shape_tensor_,
] = determine_batch_event_shapes(self._grid,
self._endpoint_affine)
super(VectorDiffeomixture, self).__init__(
dtype=dtype,
# We hard-code `FULLY_REPARAMETERIZED` because when
# `validate_args=True` we verify that indeed
# `distribution.reparameterization_type == FULLY_REPARAMETERIZED`. A
# distribution which is a function of only non-trainable parameters
# also implies we can use `FULLY_REPARAMETERIZED`. However, we cannot
# easily test for that possibility thus we use `validate_args=False`
# as a "back-door" to allow users a way to use non
# `FULLY_REPARAMETERIZED` distribution. In such cases IT IS THE USERS
# RESPONSIBILITY to verify that the base distribution is a function of
# non-trainable parameters.
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
distribution._graph_parents # pylint: disable=protected-access
+ [loc_ for loc_ in loc if loc_ is not None] +
[p for scale_ in scale for p in scale_.graph_parents]),
name=name)
@property
def mixture_distribution(self):
"""Distribution used to select a convex combination of affine transforms."""
return self._mixture_distribution
@property
def distribution(self):
"""Base scalar-event, scalar-batch distribution."""
return self._distribution
@property
def grid(self):
"""Grid of mixing probabilities, one for each grid point."""
return self._grid
@property
def endpoint_affine(self):
"""Affine transformation for each of `K` components."""
return self._endpoint_affine
@property
def interpolated_affine(self):
"""Affine transformation for each convex combination of `K` components."""
return self._interpolated_affine
def _batch_shape_tensor(self):
return self._batch_shape_tensor_
def _batch_shape(self):
return self._batch_shape_
def _event_shape_tensor(self):
return self._event_shape_tensor_
def _event_shape(self):
return self._event_shape_
def _sample_n(self, n, seed=None):
stream = seed_stream.SeedStream(seed, salt="VectorDiffeomixture")
x = self.distribution.sample(
sample_shape=concat_vectors(
[n],
self.batch_shape_tensor(),
self.event_shape_tensor()),
seed=stream()) # shape: [n, B, e]
x = [aff.forward(x) for aff in self.endpoint_affine]
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())
mix_batch_size = self.mixture_distribution.batch_shape.num_elements()
if mix_batch_size is None:
mix_batch_size = tf.reduce_prod(
input_tensor=self.mixture_distribution.batch_shape_tensor())
ids = self.mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size // mix_batch_size])),
seed=stream())
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = tf.reshape(
ids,
shape=concat_vectors([n],
distribution_util.pick_vector(
self.is_scalar_batch(), np.int32([]),
np.int32([-1]))))
# Stride `components * quadrature_size` for `batch_size` number of times.
stride = self.grid.shape.with_rank_at_least(
2)[-2:].num_elements()
if stride is None:
stride = tf.reduce_prod(input_tensor=tf.shape(input=self.grid)[-2:])
offset = tf.range(
start=0, limit=batch_size * stride, delta=stride, dtype=ids.dtype)
weight = tf.gather(tf.reshape(self.grid, shape=[-1]), ids + offset)
# At this point, weight flattened all batch dims into one.
# We also need to append a singleton to broadcast with event dims.
if self.batch_shape.is_fully_defined():
new_shape = [-1] + self.batch_shape.as_list() + [1]
else:
new_shape = tf.concat(([-1], self.batch_shape_tensor(), [1]), axis=0)
weight = tf.reshape(weight, shape=new_shape)
if len(x) != 2:
# We actually should have already triggered this exception. However as a
# policy we're putting this exception wherever we exploit the bimixture
# assumption.
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(x)))
# Alternatively:
# x = weight * x[0] + (1. - weight) * x[1]
x = weight * (x[0] - x[1]) + x[1]
return x
def _log_prob(self, x):
# By convention, we always put the grid points right-most.
y = tf.stack([aff.inverse(x) for aff in self.interpolated_affine], axis=-1)
log_prob = tf.reduce_sum(
input_tensor=self.distribution.log_prob(y), axis=-2)
# Because the affine transformation has a constant Jacobian, it is the case
# that `affine.fldj(x) = -affine.ildj(x)`. This is not true in general.
fldj = tf.stack(
[
aff.forward_log_det_jacobian(
x, event_ndims=tf.rank(self.event_shape_tensor()))
for aff in self.interpolated_affine
],
axis=-1)
return tf.reduce_logsumexp(
input_tensor=self.mixture_distribution.logits - fldj + log_prob,
axis=-1)
def _mean(self):
p = self._expand_mix_distribution_probs()
m = self._expand_base_distribution_mean()
mean = None
for k, aff in enumerate(self.interpolated_affine):
# aff.forward is going to do this:
# y = tf.squeeze(aff.scale.matmul(m), axis=[-1])
# if aff.shift is not None:
# y += aff.shift
mean = add(mean, p[..., k] * aff.forward(m))
return mean
def _covariance(self):
# Law of total variance:
#
# Cov[Z] = E[Cov[Z | V]] + Cov[E[Z | V]]
#
# where,
#
# E[Cov[Z | V]] = sum_i mix_prob[i] Scale[i]
# Cov[E[Z | V]] = sum_i mix_prob[i] osquare(loc[i])
# - osquare(sum_i mix_prob[i] loc[i])
#
# osquare(x) = x.transpose @ x
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=False),
self._covariance_of_mean_given_quadrature_component(diag_only=False))
def _variance(self):
# Equivalent to: tf.diag_part(self._covariance()),
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=True),
self._covariance_of_mean_given_quadrature_component(diag_only=True))
def _mean_of_covariance_given_quadrature_component(self, diag_only):
p = self.mixture_distribution.probs
# To compute E[Cov(Z|V)], we'll add matrices within three categories:
# scaled-identity, diagonal, and full. Then we'll combine these at the end.
scale_identity_multiplier = None
diag = None
full = None
for k, aff in enumerate(self.interpolated_affine):
s = aff.scale # Just in case aff.scale has side-effects, we'll call once.
if (s is None or isinstance(s, tf.linalg.LinearOperatorIdentity)):
scale_identity_multiplier = add(scale_identity_multiplier,
p[..., k, tf.newaxis])
elif isinstance(s, tf.linalg.LinearOperatorScaledIdentity):
scale_identity_multiplier = add(
scale_identity_multiplier,
(p[..., k, tf.newaxis] * tf.square(s.multiplier)))
elif isinstance(s, tf.linalg.LinearOperatorDiag):
diag = add(diag, (p[..., k, tf.newaxis] * tf.square(s.diag_part())))
else:
x = (
p[..., k, tf.newaxis, tf.newaxis] * s.matmul(
s.to_dense(), adjoint_arg=True))
if diag_only:
x = tf.linalg.diag_part(x)
full = add(full, x)
# We must now account for the fact that the base distribution might have a
# non-unity variance. Recall that, since X ~ iid Law(X_0),
# `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
# We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
# samples from a scalar-event distribution.
v = self.distribution.variance()
if scale_identity_multiplier is not None:
scale_identity_multiplier *= v
if diag is not None:
diag *= v[..., tf.newaxis]
if full is not None:
full *= v[..., tf.newaxis]
if diag_only:
# Apparently we don't need the full matrix, just the diagonal.
r = add(diag, full)
if r is None and scale_identity_multiplier is not None:
ones = tf.ones(self.event_shape_tensor(), dtype=self.dtype)
return scale_identity_multiplier[..., tf.newaxis] * ones
return add(r, scale_identity_multiplier)
# `None` indicates we don't know if the result is positive-definite.
is_positive_definite = (True if all(aff.scale.is_positive_definite
for aff in self.endpoint_affine)
else None)
to_add = []
if diag is not None:
to_add.append(
tf.linalg.LinearOperatorDiag(
diag=diag, is_positive_definite=is_positive_definite))
if full is not None:
to_add.append(
tf.linalg.LinearOperatorFullMatrix(
matrix=full, is_positive_definite=is_positive_definite))
if scale_identity_multiplier is not None:
to_add.append(
tf.linalg.LinearOperatorScaledIdentity(
num_rows=self.event_shape_tensor()[0],
multiplier=scale_identity_multiplier,
is_positive_definite=is_positive_definite))
return (linop_add_lib.add_operators(to_add)[0].to_dense()
if to_add else None)
def _covariance_of_mean_given_quadrature_component(self, diag_only):
square = tf.square if diag_only else vec_osquare
p = self._expand_mix_distribution_probs()
if not diag_only:
p = p[..., tf.newaxis, :] # Assuming event.ndims=1.
m = self._expand_base_distribution_mean()
cov_e_z_given_v = None
e_z_given_v = self._mean()
for k, aff in enumerate(self.interpolated_affine):
y = aff.forward(m)
cov_e_z_given_v = add(cov_e_z_given_v,
p[..., k] * square(y - e_z_given_v))
return cov_e_z_given_v
def _expand_base_distribution_mean(self):
"""Ensures `self.distribution.mean()` has `[batch, event]` shape."""
single_draw_shape = concat_vectors(self.batch_shape_tensor(),
self.event_shape_tensor())
m = tf.reshape(
self.distribution.mean(), # A scalar.
shape=tf.ones_like(single_draw_shape, dtype=tf.int32))
m = tf.tile(m, multiples=single_draw_shape)
m.set_shape(self.batch_shape.concatenate(self.event_shape))
return m
def _expand_mix_distribution_probs(self):
p = self.mixture_distribution.probs # [B, deg]
deg = tf.compat.dimension_value(p.shape.with_rank_at_least(1)[-1])
if deg is None:
deg = tf.shape(input=p)[-1]
event_ndims = self.event_shape.ndims
if event_ndims is None:
event_ndims = tf.shape(input=self.event_shape_tensor())[0]
expand_shape = tf.concat(
[
self.mixture_distribution.batch_shape_tensor(),
tf.ones([event_ndims], dtype=tf.int32),
[deg],
],
axis=0)
return tf.reshape(p, shape=expand_shape)
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with tf.name_scope(name="check_" + name, values=[param]):
assertions = []
if param.shape.ndims is not None:
if param.shape.ndims == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, param.shape.ndims))
elif validate_args:
assertions.append(
tf.compat.v1.assert_rank_at_least(
param,
1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(name))))
# TODO(jvdillon): Remove once we support k-mixtures.
if param.shape.with_rank_at_least(1)[-1] is not None:
if tf.compat.dimension_value(param.shape[-1]) != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name,
tf.compat.dimension_value(
param.shape[-1])))
elif validate_args:
assertions.append(
tf.compat.v1.assert_equal(
tf.shape(input=param)[-1],
1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return distribution_util.with_dependencies(assertions, param)
return param
def determine_batch_event_shapes(grid, endpoint_affine):
"""Helper to infer batch_shape and event_shape."""
with tf.name_scope(name="determine_batch_event_shapes"):
# grid # shape: [B, k, q]
# endpoint_affine # len=k, shape: [B, d, d]
batch_shape = grid.shape[:-2]
batch_shape_tensor = tf.shape(input=grid)[:-2]
event_shape = None
event_shape_tensor = None
def _set_event_shape(shape, shape_tensor):
if event_shape is None:
return shape, shape_tensor
return (tf.broadcast_static_shape(event_shape, shape),
tf.broadcast_dynamic_shape(event_shape_tensor, shape_tensor))
for aff in endpoint_affine:
if aff.shift is not None:
batch_shape = tf.broadcast_static_shape(batch_shape,
aff.shift.shape[:-1])
batch_shape_tensor = tf.broadcast_dynamic_shape(
batch_shape_tensor,
tf.shape(input=aff.shift)[:-1])
event_shape, event_shape_tensor = _set_event_shape(
aff.shift.shape[-1:],
tf.shape(input=aff.shift)[-1:])
if aff.scale is not None:
batch_shape = tf.broadcast_static_shape(batch_shape,
aff.scale.batch_shape)
batch_shape_tensor = tf.broadcast_dynamic_shape(
batch_shape_tensor, aff.scale.batch_shape_tensor())
event_shape, event_shape_tensor = _set_event_shape(
tf.TensorShape([aff.scale.range_dimension]),
aff.scale.range_dimension_tensor()[tf.newaxis])
return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
def interpolate_loc(grid, loc):
"""Helper which interpolates between two locs."""
if len(loc) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(loc)))
deg = tf.compat.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with tf.name_scope("interpolate_loc", values=[grid, loc]):
if loc is None or loc[0] is None and loc[1] is None:
return [None]*deg
# shape: [B, 1, k, deg]
w = grid[..., tf.newaxis, :, :]
loc = [
x[..., tf.newaxis] # shape: [B, e, 1]
if x is not None else None for x in loc
]
if loc[0] is None:
x = w[..., 1, :] * loc[1] # shape: [B, e, deg]
elif loc[1] is None:
x = w[..., 0, :] * loc[0] # shape: [B, e, deg]
else:
delta = loc[0] - loc[1]
x = w[..., 0, :] * delta + loc[1] # shape: [B, e, deg]
return [x[..., k] for k in range(deg)] # list(shape:[B, e])
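# Note (added): the delta form above is the usual convex combination,
#   w * loc[0] + (1 - w) * loc[1] == w * (loc[0] - loc[1]) + loc[1],
# arranged so only one multiply-add per grid point is needed. For scalar locs
# 2. and 4. with weight w = 0.25 this gives 0.25 * (2. - 4.) + 4. = 3.5.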
def interpolate_scale(grid, scale):
"""Helper which interpolates between two scales."""
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
deg = tf.compat.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with tf.name_scope("interpolate_scale", values=[grid]):
return [linop_add_lib.add_operators([
linop_scale(grid[..., k, q], s)
for k, s in enumerate(scale)
])[0] for q in range(deg)]
def linop_scale(w, op):
"""Creates weighted `LinOp` from existing `LinOp`."""
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with tf.name_scope("linop_scale", values=[w]):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, tf.linalg.LinearOperatorDiag):
return tf.linalg.LinearOperatorDiag(
diag=w[..., tf.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
return tf.linalg.LinearOperatorLowerTriangular(
tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return tf.concat(args, axis=0)
return [val for vec in args_ for val in vec]
def add(x, y):
"""Adds inputs; interprets `None` as zero."""
if x is None:
return y
if y is None:
return x
return x + y
def vec_osquare(x):
"""Computes the outer-product of a (batch of) vector, i.e., x.T x."""
return x[..., :, tf.newaxis] * x[..., tf.newaxis, :]
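# Illustrative example (added): for x = [a, b], vec_osquare(x) is the 2x2 outer
# product [[a*a, a*b], [b*a, b*b]]; e.g. x = [1., 2.] gives [[1., 2.], [2., 4.]].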
def softmax(x, axis, name=None):
"""Equivalent to tf.nn.softmax but works around b/70297725."""
with tf.name_scope(name, "softmax", [x, axis]):
x = tf.convert_to_tensor(value=x, name="x")
ndims = (
x.shape.ndims
if x.shape.ndims is not None else tf.rank(x, name="ndims"))
axis = tf.convert_to_tensor(value=axis, dtype=tf.int32, name="axis")
axis_ = tf.get_static_value(axis)
if axis_ is not None:
axis = int(ndims + axis_ if axis_ < 0 else axis_)
else:
axis = tf.where(axis < 0, ndims + axis, axis)
return tf.nn.softmax(x, axis=axis)
| 41.421162 | 109 | 0.653769 |
bd7c6393ad83930d862a61fc662a7c2f5d428c04 | 5,877 | py | Python | lib/BiochemistryAPI/utils.py | samseaver/BiochemistryAPI | 443b0e86c222f095f7d53dfaa0bbda2726413f8b | [
"MIT"
] | null | null | null | lib/BiochemistryAPI/utils.py | samseaver/BiochemistryAPI | 443b0e86c222f095f7d53dfaa0bbda2726413f8b | [
"MIT"
] | 3 | 2017-12-13T18:15:30.000Z | 2020-06-30T20:48:45.000Z | lib/BiochemistryAPI/utils.py | samseaver/BiochemistryAPI | 443b0e86c222f095f7d53dfaa0bbda2726413f8b | [
"MIT"
] | 4 | 2017-08-30T23:11:48.000Z | 2020-06-30T20:38:43.000Z | import csv
import logging
import os
import re
from collections import defaultdict, namedtuple, OrderedDict
from boltons.setutils import IndexedSet
from rdkit import RDLogger
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.DataStructs import FingerprintSimilarity
rdk_lg = RDLogger.logger()
rdk_lg.setLevel(RDLogger.CRITICAL)
logging.basicConfig(level=logging.INFO)
ttable = str.maketrans("_-;", " ", "()[]")
def check_param(in_params, req_param, opt_param=list()):
"""
Check if each of the params in the list are in the input params
"""
for param in req_param:
if param not in in_params:
raise ValueError('{} parameter is required'.format(param))
defined_param = set(req_param + opt_param)
for param in in_params:
if param not in defined_param:
logging.warning("Received unexpected parameter {}".format(param))
def _tokenize_string(raw_string):
if not raw_string:
return []
normed_str = raw_string.strip().lower().translate(ttable)
return normed_str.split() + [normed_str]
def dict_from_file(path, key='id', dialect='excel-tab'):
"""
Build a dictionary from an object array in a file
:param path: local path to object
:param key: what field should be used as the key
:param dialect: excel-tab for TSV or excel for CSV
:return:
"""
if not os.path.exists(path):
raise ValueError("File not found: {}".format(path))
reader = csv.DictReader(open(path), dialect=dialect)
id_dict = OrderedDict()
alias_dict = defaultdict(IndexedSet)
for line in reader:
id_dict[line[key]] = line
for tok in _tokenize_string(line[key]):
alias_dict[tok].add(line[key])
for tok in _tokenize_string(line.get('name')):
alias_dict[tok].add(line[key])
for tok in _tokenize_string(line.get('abbreviation')):
alias_dict[tok].add(line[key])
for tok in _tokenize_string(line.get('ec_number')):
alias_dict[tok].add(line[key])
if line.get('aliases'):
for match in re.findall(r'"\S+?:(\S+?)"', line['aliases']):
for tok in _tokenize_string(match):
alias_dict[tok].add(line[key])
return id_dict, alias_dict
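# Hypothetical usage sketch (added; 'compounds.tsv' and its columns are made up):
#
#     compounds, aliases = dict_from_file('compounds.tsv')   # keyed on 'id'
#     compounds['cpd00001']['name']                          # full row for one id
#     aliases['h2o']                                         # IndexedSet of ids whose id,
#                                                            # name or aliases tokenize to 'h2o'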
def alias_dict_from_file(path, dialect='excel-tab'):
"""
Build a dictionary from an object array in a file
:param path: local path to object
:param dialect: excel-tab for TSV or excel for CSV
:return:
"""
alias_mappings = defaultdict(list)
with open(path) as infile:
r = csv.DictReader(infile, dialect=dialect)
for line in r:
for seed_id in line['MS ID'].split('|'):
if line['Source'] == 'Enzyme Class':
alias_mappings[seed_id].append(line['External ID'])
else:
alias_mappings[seed_id].append('%s:%s' % (
line['Source'].strip(), line['External ID']))
return alias_mappings
def _get_mol(structure):
if "InChI" in structure:
mol = AllChem.MolFromInchi(structure)
else:
mol = AllChem.MolFromSmiles(structure)
return mol
def make_mol_tuples(compound_dict, id_key="id", struct_key='structure', struct_type='inchi'):
"""
Creates named tuples with (compound_id, RDKit Mol Object) from a dict with SMILES or InChI
"""
MolTuple = namedtuple("MolTuple", "id mol maccs_fp rdkit_fp")
tups = []
for comp in compound_dict:
mol = _get_mol(comp[struct_key])
if mol:
tups.append(MolTuple(comp[id_key],
mol,
AllChem.GetMACCSKeysFingerprint(mol),
AllChem.RDKFingerprint(mol)))
return tups
def substructure_search(query, structures):
"""Performs substructure search on 'query' in 'structures'"""
pattern = AllChem.MolFromSmarts(query)
return [x.id for x in structures if x.mol and x.mol.HasSubstructMatch(pattern)]
def similarity_search(query, structures, fp_type='maccs', min_similarity=0.8):
"""Perform return compound ids where tanimoto similarity of fingerprint 'fp_type is greater
than 'min_similarity'"""
if not isinstance(min_similarity, float) or not 0 <= min_similarity <= 1.0:
raise ValueError('Invalid min_similarity. Value must be a float between 0 and 1.')
if fp_type.lower() == 'maccs':
fp1 = AllChem.GetMACCSKeysFingerprint(_get_mol(query))
return [x.id for x in structures
if FingerprintSimilarity(fp1, x.maccs_fp) >= min_similarity]
elif fp_type.lower() == 'rdkit':
fp1 = AllChem.RDKFingerprint(_get_mol(query))
return [x.id for x in structures
if FingerprintSimilarity(fp1, x.rdkit_fp) >= min_similarity]
else:
fp_types = ", ".join(('maccs', 'rdkit'))
raise ValueError('Invalid fingerprint type: choose one of {}'.format(fp_types))
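# Hypothetical usage sketch (added; compound ids, SMILES and the helper name
# `_demo_structure_search` are made up and not part of this module's API).
def _demo_structure_search():
    tuples = make_mol_tuples([{'id': 'cpd1', 'structure': 'CCO'},
                              {'id': 'cpd2', 'structure': 'CCN'}])
    hits_sub = substructure_search('CO', tuples)  # ids whose mol contains the C-O pattern
    hits_sim = similarity_search('CCO', tuples, fp_type='maccs',
                                 min_similarity=0.5)  # ids with MACCS Tanimoto >= 0.5
    return hits_sub, hits_sim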
def depict_compound(structure, size=(300, 300)):
"""
Generate a SVG depiction of a chemical structure
:param structure: SMILES or InChI string
:param size: Tuple of (Height, Width)
:return:
"""
mol = _get_mol(structure)
if not mol:
return ""
AllChem.Compute2DCoords(mol)
dwr = rdMolDraw2D.MolDraw2DSVG(*size)
dwr.DrawMolecule(mol)
dwr.FinishDrawing()
return dwr.GetDrawingText().replace('svg:', '')
def get_3d_mol(structure, output='mol', optimize=False):
mol = _get_mol(structure)
if not mol:
return ""
mol = AllChem.AddHs(mol)  # AddHs returns a new Mol; rebind so the added hydrogens are used
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if optimize:
AllChem.MMFFOptimizeMolecule(mol)
mol = AllChem.RemoveHs(mol)  # likewise, RemoveHs returns a new Mol
if output == 'mol':
return AllChem.MolToMolBlock(mol)
if output == 'pdb':
return AllChem.MolToPDBBlock(mol)
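# Hypothetical usage sketch (added): build 3D blocks for ethanol.
#
#     mol_block = get_3d_mol('CCO', output='mol', optimize=True)
#     pdb_block = get_3d_mol('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3', output='pdb')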
| 35.403614 | 95 | 0.644376 |
bddfaba1861324984879a40e1853eae244eb5dc5 | 85 | py | Python | opac/admin/models/__init__.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | 1 | 2020-11-26T05:25:46.000Z | 2020-11-26T05:25:46.000Z | opac/admin/models/__init__.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | opac/admin/models/__init__.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | [
"MIT"
] | null | null | null | import opac.admin.models.masters
import opac.admin.models.transactions # noqa: F401
| 28.333333 | 51 | 0.811765 |
d5e15e0d4997ec2ee162cdecf851d3feead7cb6d | 9,207 | py | Python | salt/modules/splunk_search.py | Rafflecopter/salt | 08bbfcd4d9b93351d7d5d25b097e892026b6f1cd | [
"Apache-2.0"
] | 3 | 2015-04-16T18:42:35.000Z | 2017-10-30T16:57:49.000Z | salt/salt/modules/splunk_search.py | smallyear/linuxLearn | 342e5020bf24b5fac732c4275a512087b47e578d | [
"Apache-2.0"
] | 16 | 2015-11-18T00:44:03.000Z | 2018-10-29T20:48:27.000Z | salt/salt/modules/splunk_search.py | smallyear/linuxLearn | 342e5020bf24b5fac732c4275a512087b47e578d | [
"Apache-2.0"
] | 1 | 2018-04-19T16:57:27.000Z | 2018-04-19T16:57:27.000Z | # -*- coding: utf-8 -*-
'''
Module for interop with the Splunk API
.. versionadded:: 2015.5.0
:depends: - splunk-sdk python module
:configuration: Configure this module by specifying the name of a configuration
profile in the minion config, minion pillar, or master config. The module
will use the 'splunk' key by default, if defined.
For example:
.. code-block:: yaml
splunk:
username: alice
password: abc123
host: example.splunkcloud.com
port: 8080
'''
# Import python libs
from __future__ import absolute_import
import logging
import yaml
import urllib
# Import third party libs
import salt.ext.six as six
HAS_LIBS = False
try:
import splunklib.client
import requests
HAS_LIBS = True
except ImportError:
pass
# Import salt libs
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
'list_': 'list'
}
__virtualname__ = 'splunk_search'
def __virtual__():
'''
Only load this module if splunk is installed on this minion.
'''
if HAS_LIBS:
return __virtualname__
return False
def _get_splunk(profile):
'''
Return the splunk client, cached into __context__ for performance
'''
config = __salt__['config.option'](profile)
key = "splunk_search.{0}:{1}:{2}:{3}".format(
config.get('host'),
config.get('port'),
config.get('username'),
config.get('password')
)
if key not in __context__:
__context__[key] = splunklib.client.connect(
host=config.get('host'),
port=config.get('port'),
username=config.get('username'),
password=config.get('password'))
return __context__[key]
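# Note (added): the cache key is derived purely from the connection settings, so
# any two profiles that resolve to the same host/port/username/password share a
# single cached client. For the hypothetical profile in the module docstring the
# key would be "splunk_search.example.splunkcloud.com:8080:alice:abc123".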
def _get_splunk_search_props(search):
'''
Get splunk search properties from an object
'''
props = search.content
props["app"] = search.access.app
props["sharing"] = search.access.sharing
return props
def get(name, profile="splunk"):
'''
Get a splunk search
CLI Example:
splunk_search.get 'my search name'
'''
client = _get_splunk(profile)
search = None
# uglyness of splunk lib
try:
search = client.saved_searches[name]
except KeyError:
pass
return search
def update(name, profile="splunk", **kwargs):
'''
Update a splunk search
CLI Example:
splunk_search.update 'my search name' sharing=app
'''
client = _get_splunk(profile)
search = client.saved_searches[name]
props = _get_splunk_search_props(search)
updates = kwargs
update_needed = False
update_set = dict()
diffs = []
for key in sorted(kwargs):
old_value = props.get(key, None)
new_value = updates.get(key, None)
if isinstance(old_value, six.string_types):
old_value = old_value.strip()
if isinstance(new_value, six.string_types):
new_value = new_value.strip()
if old_value != new_value:
update_set[key] = new_value
update_needed = True
diffs.append("{0}: '{1}' => '{2}'".format(
key, old_value, new_value
))
if update_needed:
search.update(**update_set).refresh()
return update_set, diffs
return False
def create(name, profile="splunk", **kwargs):
'''
Create a splunk search
CLI Example:
splunk_search.create 'my search name' search='error msg'
'''
client = _get_splunk(profile)
search = client.saved_searches.create(name, **kwargs)
# use the REST API to set owner and permissions
# this is hard-coded for now; all managed searches are app scope and
# readable by all
config = __salt__['config.option'](profile)
url = "https://{0}:{1}".format(config.get('host'), config.get('port'))
auth = (config.get('username'), config.get('password'))
data = {
"owner": config.get("username"),
"sharing": "app",
"perms.read": "*",
}
_req_url = "{0}/servicesNS/{1}/search/saved/searches/{2}/acl".format(
url, config.get("username"), urllib.quote(name)
)
requests.post(_req_url, auth=auth, verify=True, data=data)
return _get_splunk_search_props(search)
def delete(name, profile="splunk"):
'''
Delete a splunk search
CLI Example:
splunk_search.delete 'my search name'
'''
client = _get_splunk(profile)
try:
client.saved_searches.delete(name)
return True
except KeyError:
return None
def list_(profile="splunk"):
'''
List splunk searches (names only)
CLI Example:
splunk_search.list
'''
client = _get_splunk(profile)
searches = [x['name'] for x in client.saved_searches]
return searches
def list_all(prefix=None, app=None, owner=None, description_contains=None,
name_not_contains=None, profile="splunk"):
'''
Get all splunk search details. Produces results that can be used to create
an sls file.
if app or owner are specified, results will be limited to matching saved
searches.
if description_contains is specified, results will be limited to those
where "description_contains in description" is true if name_not_contains is
specified, results will be limited to those where "name_not_contains not in
name" is true.
If prefix parameter is given, alarm names in the output will be prepended
with the prefix; alarms that have the prefix will be skipped. This can be
used to convert existing alarms to be managed by salt, as follows:
CLI example:
1. Make a "backup" of all existing searches
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
2. Get all searches with new prefixed names
$ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
3. Insert the managed searches into splunk
$ salt-call state.sls managed_searches.sls
4. Manually verify that the new searches look right
5. Delete the original searches
$ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
$ salt-call state.sls remove_legacy_searches.sls
6. Get all searches again, verify no changes
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
$ diff final_searches.sls managed_searches.sls
'''
client = _get_splunk(profile)
# yaml magic to output OrderedDict
def ordered_dict_presenter(dumper, data):
return dumper.represent_dict(six.iteritems(data))
yaml.add_representer(
OrderedDict, ordered_dict_presenter, Dumper=yaml.dumper.SafeDumper)
# splunklib doesn't provide the default settings for saved searches.
# so, in order to get the defaults, we create a search with no
# configuration, get that search, and then delete it. We use its contents
# as the default settings
name = "splunk_search.list_all get defaults"
try:
client.saved_searches.delete(name)
except Exception:
pass
search = client.saved_searches.create(name, search="nothing")
defaults = dict(search.content)
client.saved_searches.delete(name)
# stuff that splunk returns but that you should not attempt to set.
# cf http://dev.splunk.com/view/python-sdk/SP-CAAAEK2
readonly_keys = set([
"triggered_alert_count",
"action.email",
"action.populate_lookup",
"action.rss",
"action.script",
"action.summary_index",
"qualifiedSearch",
"next_scheduled_time"
])
results = OrderedDict()
# sort the splunk searches by name, so we get consistent output
searches = sorted([(s.name, s) for s in client.saved_searches])
for name, search in searches:
if app and search.access.app != app:
continue
if owner and search.access.owner != owner:
continue
if name_not_contains and name_not_contains in name:
continue
if prefix:
if name.startswith(prefix):
continue
name = prefix + name
# put name in the OrderedDict first
d = []
d.append({"name": name})
# add the rest of the splunk settings, ignoring any defaults
description = ''
for (k, v) in sorted(search.content.items()):
if k in readonly_keys:
continue
if k.startswith("display."):
continue
if not v:
continue
if k in defaults and defaults[k] == v:
continue
d.append({k: v})
if k == 'description':
description = v
if description_contains and description_contains not in description:
continue
results["manage splunk search " + name] = {"splunk_search.present": d}
return yaml.safe_dump(results, default_flow_style=False, width=120)
| 29.796117 | 133 | 0.631259 |
1b7ebe58c0e798b4be2daa0fec0f5a4fd71830d1 | 24,186 | py | Python | utm_simulator/layer_environment.py | colineRamee/UTM_simulator | fe0cb59b1a3a64f279fbc12f90c33455a2522d50 | [
"MIT"
] | null | null | null | utm_simulator/layer_environment.py | colineRamee/UTM_simulator | fe0cb59b1a3a64f279fbc12f90c33455a2522d50 | [
"MIT"
] | null | null | null | utm_simulator/layer_environment.py | colineRamee/UTM_simulator | fe0cb59b1a3a64f279fbc12f90c33455a2522d50 | [
"MIT"
] | 1 | 2022-02-01T15:57:05.000Z | 2022-02-01T15:57:05.000Z | import numpy as np
import random
import math
import json
from utm_simulator import agent
from utm_simulator.my_utils import MyLogger
from utm_simulator import layer
class LayerEnvironment:
def __init__(self, size_map, min_distance, agent_speed, sensing_radius, tolerance, utm_on, desired_number_of_agents=1, centralized_manager=None,
multiple_planning_agents=False, structure=None, demand=None, static_obstacles=None, priority_agents=0, simulation_type='reactive',
algo_type='A_star_8', log_type='short', log_density=False, n_valid_agents_for_simulation_end=None, logger=MyLogger(), n_layers=1):
self.my_logger = logger
self.min_distance = min_distance
self.max_speed = agent_speed
self.size_map = size_map
self.tolerance = tolerance
self.layers = []
self.n_layers = n_layers
self.default_planning_time_step = 2 * min_distance / agent_speed
for i in range(0, n_layers):
self.layers.append(layer.Layer(size_map, min_distance, agent_speed, sensing_radius, tolerance, utm_on, static_obstacles=static_obstacles, demand=demand,
default_planning_time_step=self.default_planning_time_step, multiple_planning_agents=multiple_planning_agents,
simulation_type=simulation_type, algo_type=algo_type, log_type=log_type, log_density=log_density, logger=self.my_logger))
self.desired_number_of_agents = desired_number_of_agents
self.centralized_manager = centralized_manager # Just for generating start/end pairs
self.multiple_planning_agents = multiple_planning_agents
self.structure = structure
# If structure is None layer assignment if n_layers>1 will be at random
# If structure is not None, the number of layers must match the structure
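        # Hypothetical example (not taken from the repository's configuration files):
        # structure = {'type': 'layer', 'parameters': [[-180.0, 0.0], [0.0, 180.0]]}
        # with n_layers=2 would assign flights whose start-to-goal heading lies in
        # [-180 deg, 0 deg] to layer 0 and the remaining headings to layer 1.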
if structure is not None:
if structure['type'] == 'layer':
parameters = np.array(structure['parameters'])
if len(parameters.shape) == 1:
n_parameters = 1
else:
n_parameters = parameters.shape[0]
if n_parameters != n_layers:
logger.log('The number of parameters for the layer structure should match the number of layers requested in the simulation')
else:
logger.log('The Layer environment is not setup to work with this structure type ' + str(structure))
self.lz_coordinates = None
self.demand = demand
self.static_obstacles = static_obstacles
self.n_priority_agents = priority_agents
self.n_min_valid_agents = n_valid_agents_for_simulation_end
self.n_valid_density_agents = 0
self.n_controlled_agents_current = 0
self.agent_added_time = 0
self.priority_agent_added_time = 0
self.total_number_of_agents = 0
self.simulation_type = simulation_type
self.algo_type = algo_type
self.log_type = log_type
self.log_density = log_density
self.t_density = None
self.t0 = None
self.ramping_up = True
if priority_agents != 0:
self.ramping_up_priority = True
else:
self.ramping_up_priority = False
def run(self, time, sim_dt):
for layer in self.layers:
layer.run(time, sim_dt)
# Add agents (based on sim phase)
self.add_agents(time + sim_dt)
        # Remove outsiders (and add agents to compensate for those that left)
agents_to_add = []
valid_agents = []
found_old_agents_in_one_layer = False
for layer in self.layers:
found_old_agents, n_valid_agents_finished, agents_removed_to_compensate = layer.remove_outsiders(time + sim_dt, self.t_density, self.t0)
agents_to_add = agents_to_add + agents_removed_to_compensate
valid_agents.append(n_valid_agents_finished)
if found_old_agents:
found_old_agents_in_one_layer = True
if self.t_density is not None and self.t0 is None and not found_old_agents_in_one_layer:
self.t0 = time + sim_dt
self.n_valid_density_agents += sum(valid_agents)
self.add_agents(time + sim_dt, agent_types=agents_to_add)
# Build KDTree and check loss of separation
for layer in self.layers:
layer.build_kd_tree()
layer.check_collisions(time + sim_dt)
# Checking and updating simulation status
self.check_simulation_phase(time + sim_dt)
if not self.multiple_planning_agents and self.n_controlled_agents_current == 0:
finished = True
elif self.n_min_valid_agents is not None and self.n_valid_density_agents >= self.n_min_valid_agents:
finished = True
else:
finished = False
# Add agents to the appropriate layer
return finished
def check_simulation_phase(self, time):
controlled_agents = 0
n_priority_agents = 0
for layer in self.layers:
controlled_agents += len(layer.smart_agents) + len(layer.waiting_agent)
n_priority_agents += len(layer.dumb_agents)
if self.ramping_up:
if controlled_agents >= self.desired_number_of_agents:
self.ramping_up = False
self.t_density = time
if self.ramping_up_priority:
if n_priority_agents >= self.n_priority_agents:
self.ramping_up_priority = False
self.n_controlled_agents_current = controlled_agents
def get_random_start_and_end(self, heading_range=None, protected_area_start=None):
if self.demand is None:
return self.get_random_start_and_end_on_edge(heading_range=heading_range, protected_area_start=protected_area_start)
else:
if self.demand['type'] == 'hub_and_spoke':
return self.get_start_and_end_with_hub_and_spoke(heading_range=heading_range, protected_area_start=protected_area_start)
elif self.demand['type'] == 'population_density':
return self.get_start_and_end_with_pop_density(heading_range=heading_range, protected_area_start=protected_area_start)
else:
self.my_logger.log('This type of demand is not implemented ' + self.demand['type'])
def get_random_start_and_end_outside(self):
a = random.randint(1000, self.size_map - 1000)
b = random.randint(1000, self.size_map - 1000)
side_a = random.randint(0, 3)
side_b = (side_a + random.randint(1, 3)) % 4
if side_a % 2 == 0:
start_x = a
if side_a == 0:
start_y = -500
else:
start_y = self.size_map + 500
else:
start_y = a
if side_a == 1:
start_x = self.size_map + 500
else:
start_x = -500
if side_b % 2 == 0:
end_x = b
if side_b == 0:
end_y = -500
else:
end_y = self.size_map + 500
else:
end_y = b
if side_b == 1:
end_x = self.size_map + 500
else:
end_x = -500
return np.array([start_x, start_y]), np.array([end_x, end_y])
def get_random_start_and_end_on_edge(self, heading_range=None, protected_area_start=None):
if heading_range is None:
valid_structure = True
else:
valid_structure = False
valid_start_and_end = False
n_trials_structure = 0
max_trials_structure = 100
while (not valid_structure or not valid_start_and_end) and n_trials_structure < max_trials_structure:
a = random.randint(1, self.size_map - 1) # +1, -1 to avoid corner cases
side_a = random.randint(0, 3)
# depending on the side picked build the start vector
start_x = a * (-(side_a % 2) + 1) + (side_a == 1) * self.size_map
start_y = a * (side_a % 2) + (side_a == 2) * self.size_map
start = np.array([float(start_x), float(start_y)])
if self.algo_type == 'SIPP':
i, j = self.centralized_manager.get_coordinates(start)
start = self.centralized_manager.get_position(i, j)
# Check if there is an issue with a protected area
if protected_area_start is not None:
# This prevents a dumb agent from being initialized too close to a reactive agent
number_of_trials = 0
max_number_of_trials = 10
while np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius'] and number_of_trials < max_number_of_trials:
a = random.randint(0, self.size_map)
# b = random.randint(0, self.size_map)
start_x = a * (-(side_a % 2) + 1) + (side_a == 1) * self.size_map
start_y = a * (side_a % 2) + (side_a == 2) * self.size_map
number_of_trials += 1
start = np.array([float(start_x), float(start_y)])
if np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius']:
print(start)
print(protected_area_start['center'])
self.my_logger.log('getRandomStartAndEnd failed to place the random agent in a conflict free zone')
max_number_of_trials = 10
number_of_trials = 0
valid_start_and_end = False
while not valid_start_and_end and number_of_trials < max_number_of_trials:
angle = random.random() * math.pi
                end_x = None
                end_y = None
                my_side = None  # referenced by the debug print below even if no intersecting side is found
if angle == 0: # probability 0
# Returns a corner of the line where it started
end_x = (side_a == 1) * self.size_map
end_y = (side_a == 2) * self.size_map
elif angle == (math.pi / 2):
side_b = (side_a + 2) % 4
end_x = a * (-(side_b % 2) + 1) + (side_b == 1) * self.size_map
end_y = a * (side_b % 2) + (side_b == 2) * self.size_map
else:
# compute the intersection with all three other sides (catch exception if angle is pi/2)
# Also exception if we are exactly at the corner
for i in range(1, 4):
side_b = (side_a + i) % 4
if (side_b % 2) == 1:
x = (side_b == 1) * self.size_map
y = start_y + math.tan(angle) * (x - start_x)
if 0 <= y <= self.size_map and x != start_x:
my_side = i
end_x = x
end_y = y
else:
y = (side_b == 2) * self.size_map
x = start_x + (1 / math.tan(angle)) * (y - start_y)
if 0 <= x <= self.size_map and y != start_y:
my_side = i
end_x = x
end_y = y
if end_x is None or end_y is None:
print('environment random start and end bug')
print(angle)
print(start_x)
print(start_y)
print('side a, ', side_a)
print('side b, ', my_side)
end = np.array([float(end_x), float(end_y)])
if self.algo_type == 'SIPP':
i, j = self.centralized_manager.get_coordinates(end)
end = self.centralized_manager.get_position(i, j)
# Is the pair valid ?
if np.linalg.norm(end - start) > self.tolerance:
valid_start_and_end = True
number_of_trials += 1
if number_of_trials >= max_number_of_trials:
print('get random start and end failed to find a valid pair')
if heading_range is not None:
heading = math.atan2(end[1] - start[1], end[0] - start[0]) * 180 / math.pi
if heading_range[1] > heading >= heading_range[0]:
valid_structure = True
n_trials_structure += 1
if n_trials_structure >= max_trials_structure:
print('get random start and end Failed to find a pair valid for the structure ')
return start, end
def get_start_and_end_with_pop_density(self, heading_range=None, protected_area_start=None):
if self.lz_coordinates is None:
if self.demand['type'] == 'population_density':
with open(self.demand['parameters']) as f:
data = json.load(f)
self.lz_coordinates = np.array(data['coordinates_xy'], dtype=np.float64)
self.cumulative_densities = data['cumulative_distribution']
else:
print('this demand type is not implemented ' + str(self.demand))
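        # The population-density demand file is expected to look roughly like
        # (hypothetical values):
        # {"coordinates_xy": [[1200.0, 3400.0], [5600.0, 780.0]],
        #  "cumulative_distribution": [0.4, 1.0]}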
if heading_range is None:
valid_structure = True
else:
valid_structure = False
valid_start_and_end = False
n_trials_structure = 0
max_trials_structure = 100
while (not valid_structure or not valid_start_and_end) and n_trials_structure < max_trials_structure:
# Select start
val1 = random.random()
index1 = np.searchsorted(self.cumulative_densities, val1)
start = self.lz_coordinates[index1]
if protected_area_start is not None:
number_of_trials = 0
max_number_of_trials = 10
while np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius'] and number_of_trials < max_number_of_trials:
number_of_trials += 1
val1 = random.random()
index1 = np.searchsorted(self.cumulative_densities, val1)
start = self.lz_coordinates[index1]
if np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius']:
print(start)
print(protected_area_start['center'])
self.my_logger.log('getRandomStartAndEnd failed to place the random agent in a conflict free zone')
# Select goal
valid_start_and_end = False
max_number_of_trials = 10
number_of_trials = 0
while not valid_start_and_end and number_of_trials < max_number_of_trials:
number_of_trials += 1
val2 = random.random()
index2 = np.searchsorted(self.cumulative_densities, val2)
goal = self.lz_coordinates[index2]
if np.linalg.norm(start - goal) > self.tolerance:
valid_start_and_end = True
            # Check the structure constraint: the start-to-goal heading must fall within the required heading range
if heading_range is not None:
heading = math.atan2(goal[1] - start[1], goal[0] - start[0]) * 180 / math.pi
if heading_range[1] > heading >= heading_range[0]:
valid_structure = True
n_trials_structure += 1
if n_trials_structure >= max_trials_structure:
print('get_start_and_end_with_demand failed to find a pair valid for the structure')
return start, goal
def get_start_and_end_with_hub_and_spoke(self, heading_range=None, protected_area_start=None):
if self.lz_coordinates is None:
if self.demand['type'] == 'hub_and_spoke':
with open(self.demand['parameters']) as f:
data = json.load(f)
self.lz_coordinates = {}
for k in data:
self.lz_coordinates[int(k)] = {'distribution_center': np.array(data[k]['distribution_center'], dtype=np.float64),
'customers': np.array(data[k]['customers'], dtype=np.float64)}
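        # The hub-and-spoke demand file is expected to look roughly like
        # (hypothetical coordinates):
        # {"0": {"distribution_center": [1200.0, 3400.0],
        #        "customers": [[800.0, 900.0], [4500.0, 2100.0]]}}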
if heading_range is not None:
self.my_logger.log('Hub and spoke not really compatible with single layer study')
valid_start_and_end = False
max_number_of_trials = 100
n_trials = 0
while not valid_start_and_end and n_trials < max_number_of_trials:
n_trials += 1
index_start = random.randrange(0, len(self.lz_coordinates))
start = self.lz_coordinates[index_start]['distribution_center']
if protected_area_start is not None:
number_of_trials = 0
max_number_of_trials = 10
while np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius'] and number_of_trials < max_number_of_trials:
number_of_trials += 1
index_start = random.randrange(0, len(self.lz_coordinates))
start = self.lz_coordinates[index_start]['distribution_center']
if np.linalg.norm(protected_area_start['center'] - start) <= protected_area_start['radius']:
print(start)
print(protected_area_start['center'])
self.my_logger.log('getRandomStartAndEnd failed to place the random agent in a conflict free zone')
index_end = random.randrange(0, len(self.lz_coordinates[index_start]['customers']))
goal = self.lz_coordinates[index_start]['customers'][index_end]
if np.linalg.norm(start - goal) > self.tolerance:
valid_start_and_end = True
if n_trials >= max_number_of_trials:
self.my_logger.log('exceeded max number of trials to place random start and end for hub and spoke')
return start, goal
def terminate(self):
# Go through all the conflicts and agents that are leftovers
combined_log = {'conflicts': [], 'agents': [], 'times': {'time_density_is_reached': self.t_density, 'time_all_started_after_t_density': self.t0}}
if self.log_density:
combined_log['density_map'] = []
for layer in self.layers:
log_data = layer.terminate()
combined_log['conflicts'].append(log_data['conflicts'])
combined_log['agents'].append(log_data['agents'])
if self.log_density:
combined_log['density_map'].append(log_data['density_map'])
return combined_log
def add_agents(self, time, agent_types=None):
if self.multiple_planning_agents:
if agent_types is None:
# Depending on the phase, add agents to the simulation to increase the total number of agents
if self.ramping_up:
time_interval = 1570.0 / self.desired_number_of_agents
if time - self.agent_added_time > time_interval:
self.agent_added_time = time
a, layer_id = self.create_agent(time)
self.layers[layer_id].add_agent(time, a)
if self.n_priority_agents != 0 and self.ramping_up_priority:
time_interval = 1570.0 / self.n_priority_agents
if time - self.priority_agent_added_time > time_interval:
self.priority_agent_added_time = time
a, layer_id = self.create_agent(time, priority=True)
self.layers[layer_id].add_agent(time, a)
else:
for agent_type in agent_types:
if agent_type['type'] == 'dumb':
priority = True
else:
priority = False
if agent_type['parameters'] is not None:
                        # Only use case: return-to-base legs for hub-and-spoke demand
flight_leg = agent_type['parameters']['flight_leg']
start = agent_type['parameters']['start']
end = agent_type['parameters']['goal']
a, layer_id = self.create_agent(time, priority=priority, flight_leg=flight_leg, my_start=start, my_end=end)
else:
a, layer_id = self.create_agent(time, priority=priority)
self.layers[layer_id].add_agent(time, a)
def create_agent(self, time, priority=False, flight_leg='initial', my_start=None, my_end=None):
if priority:
agent_logic = 'dumb'
algo_type = None
else:
agent_logic = self.simulation_type
algo_type = self.algo_type
if self.structure is None:
# Assign layer randomly
layer_id = random.randint(0, self.n_layers - 1)
# Create random start and end based on demand if there is one
if my_start is None:
if not priority:
start, goal = self.get_random_start_and_end()
else:
start, goal = self.get_random_start_and_end_outside()
else:
start = my_start
goal = my_end
else:
# There is a structure
layer_ranges = np.array(self.structure['parameters'])
if len(layer_ranges.shape) == 1:
                # There is only one heading range: create a random start and end within it (not compatible with non-uniform demand)
self.my_logger.log('There is only one range in the structure')
if my_start is not None:
self.my_logger.log('structure and demand are incompatible')
layer_id = 0
if not priority:
start, goal = self.get_random_start_and_end(heading_range=layer_ranges) # TODO enforce the range
else:
start, goal = self.get_random_start_and_end_outside()
else:
# There are multiple layers
if priority:
# ignore rules
if my_start is None:
start, goal = self.get_random_start_and_end_outside()
else:
start = my_start
goal = my_end
layer_id = random.randint(0, self.n_layers - 1)
else:
if my_start is None:
# Create a random start and end
start, goal = self.get_random_start_and_end()
else:
start = my_start
goal = my_end
# Figure out in which layer it belongs
heading = math.atan2(goal[1] - start[1], goal[0] - start[0]) * 180.0 / math.pi
i = 0
for l_range in layer_ranges:
if l_range[0] <= heading <= l_range[1]:
layer_id = i
break
i += 1
if i >= len(layer_ranges):
print('The heading is: ' + str(heading))
self.my_logger.log('There is an issue with the structure and the layers, the structure does not cover all potential headings')
layer_id = None
# Create the agent
a = agent.Agent(self, self.min_distance, self.max_speed, start=start, end=goal, start_time=time, agent_logic=agent_logic, flight_leg=flight_leg,
algo_type=algo_type, id=self.total_number_of_agents)
self.total_number_of_agents += 1
return a, layer_id
def get_all_agents(self):
agents_dic = {}
for i in range(0, self.n_layers):
agents_dic[i] = self.layers[i].get_all_agents()
return agents_dic
def get_protected_area(self):
return None
| 51.350318 | 164 | 0.574713 |
846ad2eb92ec9c97485585d29eff7678aed4ec9f | 3,053 | py | Python | book/migrations/0001_initial.py | Bongkot-Kladklaen/Python-Django-Bookshop | 99ec92ea3aaac0bd8e41ba7825834ee159833697 | [
"MIT"
] | null | null | null | book/migrations/0001_initial.py | Bongkot-Kladklaen/Python-Django-Bookshop | 99ec92ea3aaac0bd8e41ba7825834ee159833697 | [
"MIT"
] | null | null | null | book/migrations/0001_initial.py | Bongkot-Kladklaen/Python-Django-Bookshop | 99ec92ea3aaac0bd8e41ba7825834ee159833697 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-11 05:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10, unique=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
('price', models.FloatField(default=0)),
('image', models.FileField(blank=True, null=True, upload_to='upload/')),
('level', models.CharField(blank=True, choices=[('B', 'Basic'), ('M', 'Medium'), ('A', 'Advance')], max_length=20, null=True)),
('published', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('author', models.ManyToManyField(blank=True, to='book.Author')),
],
options={
'verbose_name_plural': 'Book',
'ordering': ['-created'],
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='BookComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=100)),
('rating', models.FloatField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_comment', to='book.book')),
],
options={
'verbose_name_plural': 'Book Comment',
'ordering': ['id'],
},
),
migrations.AddField(
model_name='book',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='book.category'),
),
]
| 41.256757 | 143 | 0.542745 |
3cbce8ad4f2ce04348a1c796e9f89ffdb3b7e99b | 677 | py | Python | packages/python/yap_kernel/yap_ipython/config.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 90 | 2015-03-09T01:24:15.000Z | 2022-02-24T13:56:25.000Z | packages/python/yap_kernel/yap_ipython/config.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 52 | 2016-02-14T08:59:37.000Z | 2022-03-14T16:39:35.000Z | packages/python/yap_kernel/yap_ipython/config.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 27 | 2015-11-19T02:45:49.000Z | 2021-11-25T19:47:58.000Z | """
Shim to maintain backwards compatibility with old yap_ipython.config imports.
"""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from warnings import warn
from yap_ipython.utils.shimmodule import ShimModule, ShimWarning
warn("The `yap_ipython.config` package has been deprecated since yap_ipython 4.0. "
"You should import from traitlets.config instead.", ShimWarning)
# Unconditionally insert the shim into sys.modules so that further import calls
# trigger the custom attribute access above
sys.modules['yap_ipython.config'] = ShimModule(src='yap_ipython.config', mirror='traitlets.config')
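# Illustrative effect of the shim (comment only, not part of the original file):
# after `import yap_ipython.config` a ShimWarning is emitted once, and
# `from yap_ipython.config import Application` resolves to `traitlets.config.Application`.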
| 33.85 | 99 | 0.791728 |