import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, kaiming_init
import numpy as np

from mmdet.ops import ModulatedDeformConvPack, RoIAlign, soft_nms
from mmdet.core import multi_apply, bbox_areas, force_fp32
from mmdet.core.utils.summary import write_txt
from mmdet.core.anchor.guided_anchor_target import calc_region
from mmdet.models.losses import ct_focal_loss, giou_loss, diou_loss, ciou_loss
from mmdet.models.utils import (build_norm_layer, bias_init_with_prob, ConvModule,
                                simple_nms, build_conv_layer, SEBlock)
from .anchor_head import AnchorHead
from ..registry import HEADS


@HEADS.register_module
class TTFHead(AnchorHead):

    def __init__(self,
                 inplanes=(64, 128, 256, 512),
                 planes=(256, 128, 64),
                 down_ratio=4,
                 head_conv=256,
                 wh_conv=64,
                 hm_head_conv_num=2,
                 wh_head_conv_num=2,
                 num_classes=81,
                 shortcut_kernel=3,
                 conv_cfg=None,
                 head_conv_size=3,
                 use_trident=False,
                 use_dla=False,
                 wh_sym=False,
                 upsample_vanilla_conv=False,
                 upsample_multiscale_conv=False,
                 up_conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 shortcut_cfg=(1, 2, 3),
                 wh_offset_base=16.,
                 wh_area_process='log',
                 wh_agnostic=True,
                 wh_gaussian=True,
                 box_size_range=None,
                 two_stage=False,
                 alpha=0.54,
                 beta=0.54,
                 hm_weight=1.,
                 dcn_mean=False,
                 iou_type='giou',
                 use_simple_nms=True,
                 aug_reg=False,
                 hm_last_3x3=False,
                 hm_last_3x3_d2=False,
                 hm_last_se3x3=False,
                 hm_last_5x5=False,
                 hm_last_7x7=False,
                 no_wh_se=False,
                 wh_weight=5.,
                 max_objs=128):
        super(AnchorHead, self).__init__()
        assert len(planes) in [2, 3, 4]
        shortcut_num = min(len(inplanes) - 1, len(planes))
        assert shortcut_num == len(shortcut_cfg)
        assert wh_area_process in [None, 'norm', 'log', 'sqrt']

        self.planes = planes
        self.head_conv = head_conv
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.head_conv_size = head_conv_size
        self.use_trident = use_trident
        self.use_dla = use_dla
        self.wh_sym = wh_sym
        self.upsample_vanilla_conv = upsample_vanilla_conv
        self.upsample_multiscale_conv = upsample_multiscale_conv
        self.up_conv_cfg = up_conv_cfg
        self.wh_offset_base = wh_offset_base
        self.wh_area_process = wh_area_process
        self.wh_agnostic = wh_agnostic
        self.wh_gaussian = wh_gaussian
        self.box_size_range = box_size_range
        self.two_stage = two_stage
        self.alpha = alpha
        self.beta = beta
        self.hm_weight = hm_weight
        self.dcn_mean = dcn_mean
        self.iou_loss = eval(iou_type + '_loss')
        self.use_simple_nms = use_simple_nms
        self.aug_reg = aug_reg
        self.hm_last_3x3 = hm_last_3x3
        self.hm_last_3x3_d2 = hm_last_3x3_d2
        self.hm_last_se3x3 = hm_last_se3x3
        self.no_wh_se = no_wh_se
        self.hm_last_5x5 = hm_last_5x5
        self.hm_last_7x7 = hm_last_7x7
        self.wh_weight = wh_weight
        self.max_objs = max_objs
        self.fp16_enabled = False

        self.down_ratio = down_ratio
        self.num_fg = num_classes - 1
        self.wh_planes = 4 if wh_agnostic else 4 * self.num_fg
        self.base_loc = None

        # repeat upsampling n times. 32x to 4x by default.
        self.deconv_layers = nn.ModuleList([
            self.build_upsample(inplanes[-1], planes[0], norm_cfg=norm_cfg),
            self.build_upsample(planes[0], planes[1], norm_cfg=norm_cfg)
        ])
        for i in range(2, len(planes)):
            self.deconv_layers.append(
                self.build_upsample(planes[i - 1], planes[i],
                                    norm_cfg=norm_cfg, no_upsample=(down_ratio == 8)))

        padding = (shortcut_kernel - 1) // 2
        self.shortcut_layers = self.build_shortcut(
            inplanes[:-1][::-1][:shortcut_num], planes[:shortcut_num], shortcut_cfg,
            kernel_size=shortcut_kernel, padding=padding)

        # heads
        self.wh = self.build_head(self.wh_planes, wh_head_conv_num,
                                  head_conv_plane=wh_conv, use_sym_conv=wh_sym)
        self.hm = self.build_head(self.num_fg, hm_head_conv_num)
        if two_stage:
            assert wh_agnostic
            self.align = RoIAlign(7, spatial_scale=1 / 4., sample_num=2)
            self.wh2 = nn.Sequential(ConvModule(self.planes[-1], 32, 5, norm_cfg=norm_cfg),  # 3x3
                                     ConvModule(32, 32, 3, norm_cfg=norm_cfg),
                                     ConvModule(32, 32, 1),
                                     ConvModule(32, 4, 1, activation=None))

    def build_shortcut(self,
                       inplanes,
                       planes,
                       shortcut_cfg,
                       kernel_size=3,
                       padding=1):
        assert len(inplanes) == len(planes) == len(shortcut_cfg)

        shortcut_layers = nn.ModuleList()
        for i, (inp, outp, layer_num) in enumerate(zip(
                inplanes, planes, shortcut_cfg)):
            assert layer_num > 0
            layer = ShortcutConv2d(
                inp, outp, [kernel_size] * layer_num, [padding] * layer_num,
                down=(self.down_ratio == 8 and i == len(inplanes) - 1))
            shortcut_layers.append(layer)
        return shortcut_layers

    def build_upsample(self, inplanes, planes, norm_cfg=None, no_upsample=False):
        if self.upsample_vanilla_conv:
            if isinstance(self.upsample_vanilla_conv, int):
                padding = int((self.upsample_vanilla_conv - 1) / 2)
                dila = padding
                mdcn = nn.Conv2d(inplanes, planes, 3, stride=1, padding=padding, dilation=dila)
            else:
                mdcn = nn.Conv2d(inplanes, planes, 3, stride=1, padding=1)
        elif self.upsample_multiscale_conv:
            mdcn = build_conv_layer(dict(type='MultiScaleConv'), inplanes, planes)
        elif self.use_trident:
            mdcn = build_conv_layer(dict(type='TriConv'), inplanes, planes)
        elif self.up_conv_cfg:
            mdcn = build_conv_layer(self.up_conv_cfg, inplanes, planes)
        else:
            mdcn = ModulatedDeformConvPack(inplanes, planes, 3, offset_mean=self.dcn_mean, stride=1,
                                           padding=1, dilation=1, deformable_groups=1)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        if not no_upsample:
            up = nn.UpsamplingBilinear2d(scale_factor=2)
            layers.append(up)

        return nn.Sequential(*layers)

    def build_head(self, out_channel, conv_num=1, head_conv_plane=None, use_sym_conv=False):
        head_convs = []
        head_conv_plane = self.head_conv if not head_conv_plane else head_conv_plane
        for i in range(conv_num):
            inp = self.planes[-1] if i == 0 else head_conv_plane
            head_convs.append(ConvModule(inp, head_conv_plane,
                                         self.head_conv_size, conv_cfg=self.conv_cfg, padding=1))

        inp = self.planes[-1] if conv_num <= 0 else head_conv_plane
        if use_sym_conv:
            assert out_channel == 4
            head_convs.append(nn.Conv2d(inp, out_channel, 3, padding=1))
            # head_convs.append(ConvModule(inp, out_channel, 3, conv_cfg=dict(type='WHSymConv')))
        else:
            if self.hm_last_3x3:
                head_convs.append(nn.Conv2d(inp, out_channel, 3, padding=1))
            elif self.hm_last_3x3_d2:
                head_convs.append(nn.Conv2d(inp, out_channel, 3, padding=2, dilation=2))
            elif self.hm_last_5x5:
                head_convs.append(nn.Conv2d(inp, out_channel, 5, padding=2))
            elif self.hm_last_7x7:
                head_convs.append(nn.Conv2d(inp, out_channel, 7, padding=3))
            elif self.hm_last_se3x3:
                head_convs.append(nn.Conv2d(inp, out_channel, 3, padding=1))
                if not self.no_wh_se or out_channel != 4:
                    head_convs.append(SEBlock(out_channel, compress_ratio=4))
            else:
                head_convs.append(nn.Conv2d(inp, out_channel, 1))
        return nn.Sequential(*head_convs)

    def init_weights(self):
        for _, m in self.shortcut_layers.named_modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)

        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        for _, m in self.hm.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.01)

        bias_cls = bias_init_with_prob(0.01)
        if self.hm_last_se3x3:
            normal_init(self.hm[-2], std=0.01, bias=bias_cls)
        else:
            normal_init(self.hm[-1], std=0.01, bias=bias_cls)

        for _, m in self.wh.named_modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001)

        if self.two_stage:
            for _, m in self.wh2.named_modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)

    def forward(self, feats):
        """

        Args:
            feats: list(tensor).

        Returns:
            hm: tensor, (batch, 80, h, w).
            wh: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
        """
        x = feats[-1]
        if not self.use_dla:
            for i, upsample_layer in enumerate(self.deconv_layers):
                x = upsample_layer(x)
                if i < len(self.shortcut_layers):
                    shortcut = self.shortcut_layers[i](feats[-i - 2])
                    x = x + shortcut
        hm = self.hm(x)
        wh = F.relu(self.wh(x)) * self.wh_offset_base
        return x, hm, wh

    @force_fp32(apply_to=('pred_feat', 'pred_heatmap', 'pred_wh'))
    def get_bboxes(self,
                   pred_feat,
                   pred_heatmap,
                   pred_wh,
                   img_metas,
                   cfg,
                   rescale=False):
        batch, cat, height, width = pred_heatmap.size()
        pred_heatmap = pred_heatmap.detach().sigmoid_()
        wh = pred_wh.detach()
        # write_txt(pred_heatmap, filename='pred_hm', thre=0.001)
        # perform nms on heatmaps
        if self.use_simple_nms and not getattr(cfg, 'debug', False):
            heat = simple_nms(pred_heatmap)  # used maxpool to filter the max score
        else:
            heat = pred_heatmap
            kernel = 3
            pad = (kernel - 1) // 2
            hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
            keep = (hmax == heat).float()
            keep_pad = keep.new_zeros(batch, cat, height + 2, width + 2)
            keep_pad[..., 1:-1, 1:-1] = keep
            keep = keep_pad
            # keep = ((keep[..., :-2, :-2] + keep[..., :-2, 1:-1] + keep[..., :-2, 2:] +
            #         keep[..., 1:-1, :-2] + keep[..., 1:-1, 1:-1] + keep[..., 1:-1, 2:] +
            #         keep[..., 2:, :-2] + keep[..., 2:, 1:-1] + keep[..., 2:, 2:]) > 0).float()
            keep = ((keep[..., :-2, 1:-1] +
                     keep[..., 1:-1, :-2] + keep[..., 1:-1, 1:-1] + keep[..., 1:-1, 2:] +
                     keep[..., 2:, 1:-1]) > 0).float()
            heat = heat * keep

        topk = getattr(cfg, 'max_per_img', 100)
        # (batch, topk)
        scores, inds, clses, ys, xs = self._topk(heat, topk=topk)
        xs = xs.view(batch, topk, -1, 1) * self.down_ratio
        ys = ys.view(batch, topk, -1, 1) * self.down_ratio

        wh = wh.permute(0, 2, 3, 1).contiguous()
        wh = wh.view(wh.size(0), -1, wh.size(3))
        inds = inds.view(batch, -1, 1)
        wh_inds = inds.expand(*inds.shape[:-1], wh.size(2))
        wh = wh.gather(1, wh_inds)

        if not self.wh_agnostic:
            wh = wh.view(-1, topk, self.num_fg, 4)
            wh = torch.gather(wh, 2, clses[..., None, None].expand(
                clses.size(0), clses.size(1), 1, 4).long())

        wh = wh.view(batch, topk, -1, 4)
        clses = clses.view(batch, topk, 1).long()
        scores = scores.view(batch, topk, 1)

        bboxes = torch.cat([xs - wh[..., [0]], ys - wh[..., [1]],
                            xs + wh[..., [2]], ys + wh[..., [3]]], dim=-1)
        if self.aug_reg:
            heat = pred_heatmap.permute(0, 2, 3, 1).contiguous()
            heat = heat.view(heat.size(0), -1, heat.size(3))
            score_inds = inds.expand(*inds.shape[:-1], heat.size(2))
            area_scores = heat.gather(1, score_inds).view(batch, topk, -1, self.num_fg)
            area_scores = area_scores.gather(-1, clses.expand(
                *clses.shape[:-1], area_scores.size(-2)).unsqueeze(-1)).squeeze(-1)

            bbox_weight = torch.cat([bboxes.new_ones((*bboxes.shape[:-2], 1)),
                                     torch.exp(-1 / (2 * (wh[..., 0, :] / 24) ** 2))],
                                    dim=-1) * area_scores
            # print(bbox_weight)
            bboxes = (bboxes * bbox_weight.unsqueeze(-1)).sum(-2) / bbox_weight.sum(-1,
                                                                                    keepdims=True)
        else:
            bboxes = bboxes.squeeze(-2)

        clses = clses.float()
        roi_boxes = bboxes.new_tensor([])
        if self.two_stage:
            for batch_i in range(bboxes.shape[0]):
                valid_pre_boxes_i = bboxes[batch_i]  # (xx, 4)
                roi_boxes = torch.cat([
                    roi_boxes, torch.cat([
                        valid_pre_boxes_i.new_ones([valid_pre_boxes_i.size(0), 1]) * batch_i,
                        valid_pre_boxes_i], dim=1)], dim=0)

            if roi_boxes.size(0) > 0:
                rois = self.align(pred_feat, roi_boxes)  # (n, cha, 7, 7)
                pred_wh2 = self.wh2(rois).view(-1, 4)
                bboxes = bboxes.view(-1, 4)
                bboxes[:, [0, 1]] = bboxes[:, [0, 1]] - pred_wh2[:, [0, 1]] * 16
                bboxes[:, [2, 3]] = bboxes[:, [2, 3]] + pred_wh2[:, [2, 3]] * 16
                bboxes = bboxes.view(batch, topk, 4)

        result_list = []
        score_thr = getattr(cfg, 'score_thr', 0.01)
        for batch_i in range(bboxes.shape[0]):
            scores_per_img = scores[batch_i]
            scores_keep = (scores_per_img > score_thr).squeeze(-1)

            scores_per_img = scores_per_img[scores_keep]
            bboxes_per_img = bboxes[batch_i][scores_keep]
            labels_per_img = clses[batch_i][scores_keep].squeeze(-1)
            img_shape = img_metas[batch_i]['pad_shape']
            bboxes_per_img[:, 0::2] = bboxes_per_img[:, 0::2].clamp(min=0, max=img_shape[1] - 1)
            bboxes_per_img[:, 1::2] = bboxes_per_img[:, 1::2].clamp(min=0, max=img_shape[0] - 1)

            if rescale:
                scale_factor = img_metas[batch_i]['scale_factor']
                bboxes_per_img /= bboxes_per_img.new_tensor(scale_factor)

            if self.use_simple_nms:
                bboxes_per_img = torch.cat([bboxes_per_img, scores_per_img], dim=1)
            else:
                labels_int_flatten = labels_per_img.int()
                unique_cls_ids = list(set(list(labels_int_flatten.cpu().numpy())))
                bboxes_per_img_per_cls = bboxes_per_img.new_zeros((0, 5))
                labels_per_img_per_cls = labels_int_flatten.new_zeros((0,))
                for cls_id in unique_cls_ids:
                    cls_id_idx = (labels_int_flatten == cls_id)
                    soft_bboxes, ori_idx = soft_nms(torch.cat((
                        bboxes_per_img[cls_id_idx], scores_per_img[cls_id_idx]), dim=1),
                        iou_thr=0.6)
                    unique_labels = labels_int_flatten[cls_id_idx][ori_idx]
                    bboxes_per_img_per_cls = torch.cat((bboxes_per_img_per_cls, soft_bboxes), dim=0)
                    labels_per_img_per_cls = torch.cat((labels_per_img_per_cls, unique_labels))
                bboxes_per_img = bboxes_per_img_per_cls
                labels_per_img = labels_per_img_per_cls.float()

            result_list.append((bboxes_per_img, labels_per_img))

        return result_list

    @force_fp32(apply_to=('pred_feat', 'pred_heatmap', 'pred_wh'))
    def loss(self,
             pred_feat,
             pred_heatmap,
             pred_wh,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        all_targets = self.target_generator(gt_bboxes, gt_labels, img_metas)
        hm_loss, wh_loss, wh2_loss = self.loss_calc(pred_feat, pred_heatmap, pred_wh, *all_targets)
        return {'losses/ttfnet_loss_heatmap': hm_loss, 'losses/ttfnet_loss_wh': wh_loss,
                'losses/ttfnet_loss_wh2': wh2_loss}

    def _topk(self, scores, topk):
        batch, cat, height, width = scores.size()

        # both are (batch, 80, topk)
        topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), topk)

        topk_inds = topk_inds % (height * width)
        topk_ys = (topk_inds / width).int().float()
        topk_xs = (topk_inds % width).int().float()

        # both are (batch, topk). select topk from 80*topk
        topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), topk)
        topk_clses = (topk_ind / topk).int()
        topk_ind = topk_ind.unsqueeze(2)
        topk_inds = topk_inds.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_ys = topk_ys.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_xs = topk_xs.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)

        if self.aug_reg:
            expand_topk_inds = topk_inds.unsqueeze(-1).expand(*topk_inds.shape, 5)
            expand_topk_ys = topk_ys.unsqueeze(-1).expand(*topk_ys.shape, 5)
            expand_topk_xs = topk_xs.unsqueeze(-1).expand(*topk_xs.shape, 5)
            topk_inds = torch.stack((topk_inds, topk_inds - 1, topk_inds - width,
                                     topk_inds + 1, topk_inds + width), dim=2)
            topk_ys = torch.stack((topk_ys, topk_ys, topk_ys - 1, topk_ys, topk_ys + 1), dim=2)
            topk_xs = torch.stack((topk_xs, topk_xs - 1, topk_xs, topk_xs + 1, topk_xs), dim=2)
            aug_err_ys = (topk_ys >= height) | (topk_ys < 0)
            aug_err_xs = (topk_xs >= width) | (topk_xs < 0)
            aug_err_inds = (topk_inds >= (height * width)) | (topk_inds < 0)
            aug_err = aug_err_ys | aug_err_xs | aug_err_inds
            topk_ys[aug_err] = expand_topk_ys[aug_err]
            topk_xs[aug_err] = expand_topk_xs[aug_err]
            topk_inds[aug_err] = expand_topk_inds[aug_err]

        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def gaussian_2d(self, shape, sigma_x=1, sigma_y=1):
        m, n = [(ss - 1.) / 2. for ss in shape]
        y, x = np.ogrid[-m:m + 1, -n:n + 1]

        h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y * sigma_y)))
        h[h < np.finfo(h.dtype).eps * h.max()] = 0
        return h

    def draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius, k=1):
        h, w = 2 * h_radius + 1, 2 * w_radius + 1
        sigma_x = w / 6
        sigma_y = h / 6
        gaussian = self.gaussian_2d((h, w), sigma_x=sigma_x, sigma_y=sigma_y)
        gaussian = heatmap.new_tensor(gaussian)

        x, y = int(center[0]), int(center[1])

        height, width = heatmap.shape[0:2]

        left, right = min(x, w_radius), min(width - x, w_radius + 1)
        top, bottom = min(y, h_radius), min(height - y, h_radius + 1)

        masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
        masked_gaussian = gaussian[h_radius - top:h_radius + bottom,
                                   w_radius - left:w_radius + right]
        if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
            torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
        return heatmap

    def target_single_image(self, gt_boxes, gt_labels, feat_shape):
        """

        Args:
            gt_boxes: tensor, tensor <=> img, (num_gt, 4).
            gt_labels: tensor, tensor <=> img, (num_gt,).
            feat_shape: tuple.

        Returns:
            heatmap: tensor, tensor <=> img, (80, h, w).
            box_target: tensor, tensor <=> img, (4, h, w) or (80 * 4, h, w).
            reg_weight: tensor, same as box_target
        """
        output_h, output_w = feat_shape
        heatmap_channel = self.num_fg

        heatmap = gt_boxes.new_zeros((heatmap_channel, output_h, output_w))
        fake_heatmap = gt_boxes.new_zeros((output_h, output_w))
        box_target = gt_boxes.new_ones((self.wh_planes, output_h, output_w)) * -1
        reg_weight = gt_boxes.new_zeros((self.wh_planes // 4, output_h, output_w))

        boxes_areas_log = bbox_areas(gt_boxes)
        if self.box_size_range:
            keep_idx = (self.box_size_range[1] ** 2 >= boxes_areas_log) & \
                       (boxes_areas_log >= self.box_size_range[0] ** 2)
            boxes_areas_log = boxes_areas_log[keep_idx]
            gt_boxes = gt_boxes[keep_idx]
            gt_labels = gt_labels[keep_idx]
        if self.wh_area_process == 'log':
            boxes_areas_log = boxes_areas_log.log()
        elif self.wh_area_process == 'sqrt':
            boxes_areas_log = boxes_areas_log.sqrt()
        boxes_area_topk_log, boxes_ind = torch.topk(boxes_areas_log, boxes_areas_log.size(0))

        if self.wh_area_process == 'norm':
            boxes_area_topk_log[:] = 1.

        gt_boxes = gt_boxes[boxes_ind]
        gt_labels = gt_labels[boxes_ind]

        feat_gt_boxes = gt_boxes / self.down_ratio
        feat_gt_boxes[:, [0, 2]] = torch.clamp(feat_gt_boxes[:, [0, 2]], min=0,
                                               max=output_w - 1)
        feat_gt_boxes[:, [1, 3]] = torch.clamp(feat_gt_boxes[:, [1, 3]], min=0,
                                               max=output_h - 1)
        feat_hs, feat_ws = (feat_gt_boxes[:, 3] - feat_gt_boxes[:, 1],
                            feat_gt_boxes[:, 2] - feat_gt_boxes[:, 0])

        # we compute the center and ignore area based on the gt-boxes of the original scale,
        # so no peak will fall between pixels
        ct_ints = (torch.stack([(gt_boxes[:, 0] + gt_boxes[:, 2]) / 2,
                                (gt_boxes[:, 1] + gt_boxes[:, 3]) / 2],
                               dim=1) / self.down_ratio).to(torch.int)

        h_radiuses_alpha = (feat_hs / 2. * self.alpha).int()
        w_radiuses_alpha = (feat_ws / 2. * self.alpha).int()
        if self.wh_gaussian and self.alpha != self.beta:
            h_radiuses_beta = (feat_hs / 2. * self.beta).int()
            w_radiuses_beta = (feat_ws / 2. * self.beta).int()

        if not self.wh_gaussian:
            # calculate positive (center) regions
            r1 = (1 - self.beta) / 2
            ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s = calc_region(gt_boxes.transpose(0, 1), r1)
            ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s = [torch.round(x.float() / self.down_ratio).int()
                                                  for x in [ctr_x1s, ctr_y1s, ctr_x2s, ctr_y2s]]
            ctr_x1s, ctr_x2s = [torch.clamp(x, max=output_w - 1) for x in [ctr_x1s, ctr_x2s]]
            ctr_y1s, ctr_y2s = [torch.clamp(y, max=output_h - 1) for y in [ctr_y1s, ctr_y2s]]

        # larger boxes have lower priority than small boxes.
        for k in range(boxes_ind.shape[0]):
            cls_id = gt_labels[k] - 1

            fake_heatmap = fake_heatmap.zero_()
            self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                        h_radiuses_alpha[k].item(), w_radiuses_alpha[k].item())
            heatmap[cls_id] = torch.max(heatmap[cls_id], fake_heatmap)

            if self.wh_gaussian:
                if self.alpha != self.beta:
                    fake_heatmap = fake_heatmap.zero_()
                    self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                                h_radiuses_beta[k].item(),
                                                w_radiuses_beta[k].item())
                box_target_inds = fake_heatmap > 0
            else:
                ctr_x1, ctr_y1, ctr_x2, ctr_y2 = ctr_x1s[k], ctr_y1s[k], ctr_x2s[k], ctr_y2s[k]
                box_target_inds = torch.zeros_like(fake_heatmap, dtype=torch.uint8)
                box_target_inds[ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1

            if self.wh_agnostic:
                box_target[:, box_target_inds] = gt_boxes[k][:, None]
                cls_id = 0
            else:
                box_target[(cls_id * 4):((cls_id + 1) * 4), box_target_inds] = gt_boxes[k][:, None]

            if self.wh_gaussian:
                local_heatmap = fake_heatmap[box_target_inds]
                ct_div = local_heatmap.sum()
                local_heatmap *= boxes_area_topk_log[k]
                reg_weight[cls_id, box_target_inds] = local_heatmap / ct_div
            else:
                reg_weight[cls_id, box_target_inds] = \
                    boxes_area_topk_log[k] / box_target_inds.sum().float()

        return heatmap, box_target, reg_weight

    def target_generator(self, gt_boxes, gt_labels, img_metas):
        """

        Args:
            gt_boxes: list(tensor). tensor <=> image, (gt_num, 4).
            gt_labels: list(tensor). tensor <=> image, (gt_num,).
            img_metas: list(dict).

        Returns:
            heatmap: tensor, (batch, 80, h, w).
            box_target: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
            reg_weight: tensor, same as box_target.
        """
        with torch.no_grad():
            feat_shape = (img_metas[0]['pad_shape'][0] // self.down_ratio,
                          img_metas[0]['pad_shape'][1] // self.down_ratio)
            heatmap, box_target, reg_weight = multi_apply(
                self.target_single_image,
                gt_boxes,
                gt_labels,
                feat_shape=feat_shape
            )

            heatmap, box_target = [torch.stack(t, dim=0).detach() for t in [heatmap, box_target]]
            reg_weight = torch.stack(reg_weight, dim=0).detach()

            return heatmap, box_target, reg_weight

    def loss_calc(self,
                  pred_feat,
                  pred_hm,
                  pred_wh,
                  heatmap,
                  box_target,
                  wh_weight):
        """

        Args:
            pred_hm: tensor, (batch, 80, h, w).
            pred_wh: tensor, (batch, 4, h, w) or (batch, 80 * 4, h, w).
            heatmap: tensor, same as pred_hm.
            box_target: tensor, same as pred_wh.
            wh_weight: tensor, same as pred_wh.

        Returns:
            hm_loss
            wh_loss
        """
        H, W = pred_hm.shape[2:]
        pred_hm = torch.clamp(pred_hm.sigmoid_(), min=1e-4, max=1 - 1e-4)
        hm_loss = ct_focal_loss(pred_hm, heatmap) * self.hm_weight

        mask = wh_weight.view(-1, H, W)
        avg_factor = mask.sum() + 1e-4

        if self.base_loc is None or H != self.base_loc.shape[1] or W != self.base_loc.shape[2]:
            base_step = self.down_ratio
            shifts_x = torch.arange(0, (W - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shifts_y = torch.arange(0, (H - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            self.base_loc = torch.stack((shift_x, shift_y), dim=0)  # (2, h, w)

        # (batch, h, w, 4)
        pred_boxes = torch.cat((self.base_loc - pred_wh[:, [0, 1]],
                                self.base_loc + pred_wh[:, [2, 3]]), dim=1).permute(0, 2, 3, 1)
        # (batch, h, w, 4)
        boxes = box_target.permute(0, 2, 3, 1)
        wh_loss = self.iou_loss(pred_boxes, boxes, mask,
                                avg_factor=avg_factor) * self.wh_weight

        wh2_loss = wh_loss.new_zeros([1])
        if self.two_stage:
            heat = simple_nms(pred_hm)
            scores, inds, clses, ys, xs = self._topk(heat, topk=100)

            pred_boxes_2 = pred_boxes.view(pred_boxes.size(0), -1, pred_boxes.size(3))
            boxes_2 = boxes.view(*pred_boxes_2.shape)
            inds = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), pred_boxes_2.size(2))
            pred_boxes_2 = pred_boxes_2.gather(1, inds)  # (batch, 100, 4)
            boxes_2 = boxes_2.gather(1, inds)

            score_thr = 0.01
            scores_keep = scores > score_thr  # (batch, topk)

            batch_idx = pred_boxes_2.new_tensor(torch.arange(0., pred_boxes_2.shape[0], 1.)).view(
                -1, 1, 1).expand(pred_boxes_2.shape[0], pred_boxes_2.shape[1], 1)[scores_keep]
            pred_boxes_2 = pred_boxes_2[scores_keep]
            boxes_2 = boxes_2[scores_keep].detach()

            valid_boxes = (boxes_2 >= 0).min(1)[0]
            batch_idx = batch_idx[valid_boxes]  # (n, 1)
            pred_boxes_2 = pred_boxes_2[valid_boxes]  # (n, 4)
            boxes_2 = boxes_2[valid_boxes]  # (n, 4)
            roi_boxes = torch.cat((batch_idx, pred_boxes_2), dim=1).detach()

            if roi_boxes.size(0) > 0:
                rois = self.align(pred_feat, roi_boxes)  # (n, cha, 7, 7)
                pred_wh2 = self.wh2(rois).view(-1, 4)
                pred_boxes_2[:, [0, 1]] = pred_boxes_2[:, [0, 1]].detach() - \
                    pred_wh2[:, [0, 1]] * 16
                pred_boxes_2[:, [2, 3]] = pred_boxes_2[:, [2, 3]].detach() + \
                    pred_wh2[:, [2, 3]] * 16
                wh2_loss = giou_loss(pred_boxes_2, boxes_2,
                                     boxes_2.new_ones(boxes_2.size(0)))

        return hm_loss, wh_loss, wh2_loss


class ShortcutConv2d(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 paddings,
                 activation_last=False,
                 down=False):
        super(ShortcutConv2d, self).__init__()
        assert len(kernel_sizes) == len(paddings)

        layers = []
        for i, (kernel_size, padding) in enumerate(zip(kernel_sizes, paddings)):
            inc = in_channels if i == 0 else out_channels
            if i == 0 and down:
                layers.append(nn.Conv2d(inc, out_channels, kernel_size,
                                        padding=padding, stride=2))
            else:
                layers.append(nn.Conv2d(inc, out_channels, kernel_size, padding=padding))
            if i < len(kernel_sizes) - 1 or activation_last:
                layers.append(nn.ReLU(inplace=True))

        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        if isinstance(x, tuple):
            x = torch.cat([x[0], F.upsample(x[1], scale_factor=2)], dim=1)
        y = self.layers(x)
        return y
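
# --- Illustrative sketch (not part of the original file) ---
# Mirrors the heatmap target construction above with plain numpy: gaussian_2d builds a
# kernel of size (2*h_radius+1, 2*w_radius+1) with sigma = kernel_size / 6, exactly as
# draw_truncate_gaussian does before pasting it onto the per-class heatmap.
# The toy radii below are made up for illustration only.
if __name__ == '__main__':
    h_radius, w_radius = 2, 3
    h, w = 2 * h_radius + 1, 2 * w_radius + 1
    sigma_y, sigma_x = h / 6, w / 6
    m, n = (h - 1.) / 2., (w - 1.) / 2.
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    g = np.exp(-(x * x / (2 * sigma_x ** 2) + y * y / (2 * sigma_y ** 2)))
    print(g.shape, g.max())  # (5, 7), peak value 1.0 at the box centre
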
# coding: utf-8

"""
    Isilon SDK

    Isilon SDK - Language bindings for the OneFS API  # noqa: E501

    OpenAPI spec version: 4
    Contact: sdk@isilon.com
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


from __future__ import absolute_import

import unittest

import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.event_channel_parameters import EventChannelParameters  # noqa: E501
from isi_sdk_8_0_1.rest import ApiException


class TestEventChannelParameters(unittest.TestCase):
    """EventChannelParameters unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testEventChannelParameters(self):
        """Test EventChannelParameters"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_0_1.models.event_channel_parameters.EventChannelParameters()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
from os import path, environ
from os.path import join
import json


class Config:
    def __init__(self):
        _dirpath = path.dirname(path.realpath(__file__))
        self.config_path = join(_dirpath, '../config.json')

        if path.isfile(self.config_path):
            with open(self.config_path, 'r') as file:
                self.data = file.read()
                self.parse = json.loads(self.data)
        else:
            with open(self.config_path, 'w') as file:
                self.parse = {
                    "config":
                    {
                        "default_dir": f"/home/{environ['USER']}/Pictures/Screenshots/",
                        "filename_format": "%Y-%m-%d-%H-%M-%S",
                        "default_delay": 0.5,
                        "icon": "colored",

                        "canvas":
                        {
                            "last_size": 6,
                            "last_cap": "round",
                            "last_joint": "round",
                            "last_style": "solid",
                            "last_pen_color": "",
                            "last_brush_color": "",
                            "pen_color": "196 31 31 255",
                            "pen_hsv": "0 0 100",
                            "pen_sel": "114 27",
                            "brush_color": "0 0 0 0",
                            "brush_hsv": "0 0 0",
                            "brush_sel": "-5 135",
                            "outline": 'disabled',
                            "upload_service": "Imgur",
                            "upload_confirmation": 0,
                            "magnifier": 1,
                            "save_action": "dir",
                            "img_clip": 1
                        },
                        "upload":
                        {
                            "clipboard_state": 0,
                            "random_fname_state": 0,
                            "last_service": "Imgur"
                        },
                        "imgur":
                        {
                            "client_id": "25b4ba1ecc97502",
                            "link": "https://api.imgur.com/3/image"
                        },
                        "palette":
                        {
                            "red": "#C41F1F",
                            "magenta": "#F230A5",
                            "yellow": "#DBB126",
                            "green": "#1DC129",
                            "blue": "#2288E6",
                            "white": "#FFFFFF",
                            "black": "#000000"
                        },
                    },
                }
                file.write(str(json.dumps(self.parse, indent=4)))

    def change_config(self, section, undersection=None, value=None, save_changes=True):
        if undersection:
            self.parse["config"][section][undersection] = value
        else:
            self.parse["config"][section] = value
        if save_changes:
            with open(self.config_path, 'w') as file:
                new_config = str(json.dumps(self.parse, indent=4))
                file.write(new_config)
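
# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes the Config class above is importable; on first use it writes config.json
# next to the package, and change_config persists a nested value back to disk.
if __name__ == '__main__':
    cfg = Config()
    print(cfg.parse["config"]["canvas"]["last_size"])
    cfg.change_config("canvas", undersection="last_size", value=8)  # rewrites config.json
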
"""DSM 7 SYNO.Core.* datas."""NEWLINE |
import logging
import re
import time
import hashlib

from bs4 import BeautifulSoup
import requests
from requests.exceptions import ReadTimeout


class BackEnd:

    @staticmethod
    def help():
        return [
            "url: ex https://rtblivedemo.herokuapp.com/scoreboard",
            "poll-interval: seconds",
        ]

    @staticmethod
    def supports(conf, url):
        # Return True if the url seems like a system we support
        resp = requests.get(url)

        lcase = resp.text.lower()
        return "abs0lut3pwn4g3" in lcase

    def __init__(self, conf, middleend):
        self.conf = conf
        self.middle = middleend
        self.log = logging.getLogger(__name__)

        if conf["url"] == "":
            raise RuntimeError("This backend requires a URL")

        # Help the user out a little bit, they can specify various links
        self.URL = self._baseurl(conf["url"])
        self.log.info(f"Attempting to use RTB-CTF instance at {self.URL}")

        self.session = requests.Session()

    def run(self):
        self.running = True
        while self.running:

            scoreboard = self._get_scoreboard()

            if scoreboard is not None:
                self.middle.handle_snapshot(("scoreboard", {"scores": scoreboard}))

            time.sleep(self.conf["poll-interval"])

    def stop(self):
        self.running = False

    def update(self):
        pass

    def _baseurl(self, url):
        url = re.sub("/scoreboard.*", "", url)
        return url

    def _get_scoreboard(self):

        teams = []

        # Sadly, we have to parse the table, but that's all right
        failed = False
        try:
            resp = self.session.get(self.URL + "/scoreboard")
        except:
            failed = True

        if failed or resp.status_code != 200:
            self.log.warning("scoreboard fetch failed:")
            return None

        # BS filters to find the elements that we are interested in
        filt_team_row = lambda tag: tag.name == "tr" and tag.parent.name == "tbody"

        # Accidentally detects ISO-8859 due to some German team names or something.
        # The page is explicitly encoded as utf-8, though.
        resp.encoding = "utf-8"

        soup = BeautifulSoup(resp.text, "html.parser")
        rows = soup.findAll(filt_team_row)

        # Important: BS4 objects have string representations, but they do not
        # belong in the data we pass to the middle-end.
        # Convert strings to proper strings or face the consequences!

        for r in rows:
            heads = r.find_all("th")
            cells = r.find_all("td")

            t = {}
            t["name"] = str(cells[0].string)
            t["team_id"] = t["name"]
            t["place"] = int(heads[0].string)
            t["score"] = str(cells[1].string)

            teams.append(t)

        return teams
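
# --- Illustrative sketch (not part of the original module) ---
# Shows what _get_scoreboard pulls out of a single scoreboard row: the place comes
# from the <th> cell, the name and score from the <td> cells. The HTML snippet and
# values below are made up for illustration.
if __name__ == '__main__':
    _html = "<table><tbody><tr><th>1</th><td>team_a</td><td>500</td></tr></tbody></table>"
    _row = BeautifulSoup(_html, "html.parser").find("tr")
    print(int(_row.find_all("th")[0].string),   # 1
          str(_row.find_all("td")[0].string),   # 'team_a'
          str(_row.find_all("td")[1].string))   # '500'
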
# Try getting setup from setuptools first, then distutils.core.
# http://goo.gl/BC32zk (StackOverflow)
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Topic :: Scientific/Engineering :: Visualization'
]

setup(
    name="quickplot",
    packages=['quickplot'],
    version="0.1.2",
    description="The batteries-included plotting wrapper for matplotlib",
    author="Ken Sheedlo",
    author_email="ovrkenthousand@gmail.com",
    url="https://github.com/ksheedlo/quickplot",
    download_url="https://github.com/ksheedlo/quickplot/archive/master.zip",
    classifiers=classifiers,
    dependency_links=['https://github.com/matplotlib/matplotlib/tarball/v1.3.x#egg=matplotlib-1.3.0'],
    install_requires=[
        "numpy >= 1.5.0",
        "matplotlib >= 1.3.0"
    ]
)
import time

from absl import logging

from icubam.www.handlers import base
from icubam.www.handlers import home
from icubam.www import token
from icubam.www import updater


class UpdateHandler(base.BaseHandler):

    ROUTE = updater.Updater.ROUTE
    QUERY_ARG = 'id'

    def initialize(self, config, db, queue):
        super().initialize(config, db)
        self.queue = queue
        self.updater = updater.Updater(self.config, self.db)
        self.token_encoder = token.TokenEncoder(self.config)

    async def get(self):
        """Serves the page with a form to be filled by the user."""
        user_token = self.get_query_argument(self.QUERY_ARG)
        input_data = self.token_encoder.decode(user_token)

        if input_data is None:
            return self.set_status(404)

        data = self.updater.get_icu_data_by_id(
            input_data['icu_id'], locale=self.get_user_locale())
        data.update(input_data)
        data.update(version=self.config.version)

        self.set_secure_cookie(self.COOKIE, user_token)
        self.render('update_form.html', **data)

    async def post(self):
        """Reads the form and saves the data to DB"""

        def parse(param):
            parts = param.split('=')
            value = int(parts[1]) if parts[1].isnumeric() else 0
            return parts[0], value

        cookie_data = self.token_encoder.decode(self.get_secure_cookie(self.COOKIE))

        params_str = self.request.body.decode()
        data = dict([parse(p) for p in params_str.split('&')])
        data.update(cookie_data)
        await self.queue.put(data)

        self.redirect(home.HomeHandler.ROUTE)
"""Implements get optimizer method."""NEWLINEimport torchNEWLINENEWLINENEWLINEdef get_optimizer(optimizer):NEWLINE """Method to get optimizer from pytorch."""NEWLINE dir_optim = dir(torch.optim)NEWLINE opts = [o.lower() for o in dir_optim]NEWLINENEWLINE if isinstance(optimizer, str):NEWLINENEWLINE try:NEWLINE str_idx = opts.index(optimizer.lower())NEWLINE return getattr(torch.optim, dir_optim[str_idx])NEWLINE except ValueError:NEWLINE raise ValueError("Invalid optimizer string input - must match pytorch optimizer in torch.optim")NEWLINENEWLINE elif hasattr(optimizer, "step") and hasattr(optimizer, "zero_grad"):NEWLINENEWLINE return optimizerNEWLINENEWLINE else:NEWLINENEWLINE raise ValueError("Invalid optimizer input")NEWLINE |
# -*- coding: utf-8 -*-
"""

Script Name: ProFile.py
Author: Do Trinh/Jimmy - 3D artist.

Description:

"""
# -------------------------------------------------------------------------------------------------------------

from pyPLM.Widgets import GroupGrid, LineEdit, Button, Label


class Profile(GroupGrid):

    key = 'ProFile'

    def __init__(self, parent=None):
        super(Profile, self).__init__(parent=parent)

        self.parent = parent

        self.layout.addWidget(Label({'txt': 'First Name'}), 0, 0, 1, 2)
        self.layout.addWidget(Label({'txt': 'Last Name'}), 1, 0, 1, 2)
        self.layout.addWidget(Label({'txt': 'Your Title'}), 2, 0, 1, 2)
        self.layout.addWidget(Label({'txt': 'Email'}), 3, 0, 1, 2)
        self.layout.addWidget(Label({'txt': 'Phone Number'}), 4, 0, 1, 2)

        self.firstnameField = LineEdit()
        self.lastnameField = LineEdit()
        self.titleField = LineEdit()
        self.emailField = LineEdit()
        self.phoneField = LineEdit()

        self.changeBtn = Button({'txt': "Update Profile", 'cl': self.update_profile})

        self.layout.addWidget(self.firstnameField, 0, 2, 1, 4)
        self.layout.addWidget(self.lastnameField, 1, 2, 1, 4)
        self.layout.addWidget(self.titleField, 2, 2, 1, 4)
        self.layout.addWidget(self.emailField, 3, 2, 1, 4)
        self.layout.addWidget(self.phoneField, 4, 2, 1, 4)
        self.layout.addWidget(self.changeBtn, 5, 0, 1, 6)

    def update_profile(self):
        pass

# -------------------------------------------------------------------------------------------------------------
# Created by panda on 28/11/2019 - 7:49 PM
# © 2017 - 2018 DAMGteam. All rights reserved
import pkgutil
import insights
import json

# from insights.client.config import InsightsConfig
from insights.client.collection_rules import InsightsUploadConf
from mock.mock import patch, Mock
from insights.specs.default import DefaultSpecs
from insights.specs.sos_archive import SosSpecs
from insights.client.map_components import (map_rm_conf_to_components,
                                             _search_uploader_json,
                                             _get_component_by_symbolic_name)

uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json")
uploader_json = json.loads(uploader_json_file)
default_specs = vars(DefaultSpecs).keys()
sos_specs = vars(SosSpecs).keys()


@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={'test': 'test'}))
@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'}))
@patch('insights.client.collection_rules.map_rm_conf_to_components')
def test_called_when_core_collection_enabled(map_rm_conf_to_components):
    '''
    Verify that the function is called from get_rm_conf when core_collect=True
    '''
    upload_conf = InsightsUploadConf(Mock(core_collect=True))
    upload_conf.get_rm_conf()
    map_rm_conf_to_components.assert_called_once_with({'test': 'test'})


@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={'test': 'test'}))
@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'}))
@patch('insights.client.collection_rules.map_rm_conf_to_components')
def test_not_called_when_core_collection_disabled(map_rm_conf_to_components):
    '''
    Verify that the function is not called from get_rm_conf when core_collect=False
    '''
    upload_conf = InsightsUploadConf(Mock(core_collect=False))
    upload_conf.get_rm_conf()
    map_rm_conf_to_components.assert_not_called()


def test_get_component_by_symbolic_name():
    '''
    Verify that all symbolic names in uploader.json can be mapped
    to valid components as prescribed in the conversion function
    '''
    # some specs have been removed for the core release because they either
    # A) do not appear in uploader.json, or
    # B) DO appear in uploader.json, but have no associated rules
    # Filter out the (B) specs with this list
    skipped_specs = [
        'ceph_osd_df',
        'gluster_peer_status',
        'gluster_v_status',
        'heat_crontab',
        'httpd_on_nfs',
        'ls_usr_sbin',
        'lvmconfig',
        'nova_migration_uid',
        'ntpq_pn',
        'rabbitmq_queues',
        'rhev_data_center',
        'root_crontab',
        'yum_list_installed',
        'zdump_v',
        'cni_podman_bridge_conf',
        'cobbler_modules_conf',
        'cobbler_settings',
        'cpu_smt_control',
        'cpu_vulns_meltdown',
        'cpu_vulns_spectre_v1',
        'cpu_vulns_spectre_v2',
        'cpu_vulns_spec_store_bypass',
        'docker_storage',
        'freeipa_healthcheck_log',
        'ironic_conf',
        'octavia_conf',
        'rhn_entitlement_cert_xml',
        'rhn_hibernate_conf',
        'rhn_schema_version',
        'rhn_search_daemon_log',
        'rhn_taskomatic_daemon_log',
        'rhosp_release',
        'secure',
        'foreman_tasks_config',
        'ssh_foreman_config',
        'swift_conf',
        'sys_kernel_sched_features',
        'sysconfig_memcached',
        'sysconfig_mongod',
        'systemd_system_origin_accounting',
        'tuned_conf',
        'vdsm_conf',
        'vdsm_id',
        'neutron_ml2_conf',
        'sap_host_profile',
        'sched_rt_runtime_us',
        'libvirtd_qemu_log',
        'mlx4_port',
        'qpid_stat_g',
        'lsinitrd'
    ]

    # first, make sure our list is proper and none of these
    # are in the default specs
    for s in skipped_specs:
        assert s not in default_specs

    for category in ['commands', 'files', 'globs']:
        for entry in uploader_json[category]:
            full_component = _get_component_by_symbolic_name(entry['symbolic_name'])

            if full_component is None:
                # this entry should not be in core, so assert that it's missing
                assert entry['symbolic_name'] not in default_specs
                continue

            module, shortname = full_component.rsplit('.', 1)

            # filter out specs without associated rules
            if shortname in skipped_specs:
                continue

            if module == "insights.specs.default.DefaultSpecs":
                assert shortname in default_specs
            elif module == "insights.specs.sos_archive.SosSpecs":
                assert shortname in sos_specs
            else:
                # invalid module name
                assert False


def test_search_uploader_json():
    '''
    Verify that all valid input from an uploader.json-based remove.conf
    will return a symbolic name
    '''
    for cmd in uploader_json['commands']:
        assert _search_uploader_json(['commands'], cmd['command'])
        assert _search_uploader_json(['commands'], cmd['symbolic_name'])
    for fil in uploader_json['files']:
        assert _search_uploader_json(['files', 'globs'], fil['file'])
        assert _search_uploader_json(['files', 'globs'], fil['symbolic_name'])
    for glb in uploader_json['globs']:
        assert _search_uploader_json(['files', 'globs'], glb['symbolic_name'])


def test_search_uploader_json_invalid():
    '''
    Verify that invalid input will return None
    '''
    assert _search_uploader_json(['commands'], 'random value') is None
    assert _search_uploader_json(['files', 'globs'], 'random value') is None


def test_search_uploader_json_globs_symbolic_only():
    '''
    Verify that globs are matched by symbolic name only
    '''
    for glb in uploader_json['globs']:
        assert _search_uploader_json(['files', 'globs'], glb['glob']) is None


def test_map_rm_conf_to_components_sym_names():
    '''
    Verify that all symbolic names in uploader.json result as
    components in the output
    '''
    # commands
    for cmd in uploader_json['commands']:
        # run each possible command through the function
        sym_name = cmd['symbolic_name']
        rm_conf = {'commands': [sym_name]}
        # figure out what the destination name should be
        spec_name = _get_component_by_symbolic_name(sym_name)
        new_rm_conf = map_rm_conf_to_components(rm_conf)
        # commands should be empty, components should have 1 item
        assert len(new_rm_conf['commands']) == 0
        assert len(new_rm_conf['components']) == 1
        assert new_rm_conf['components'][0] == spec_name

    # files
    for fil in uploader_json['files']:
        # run each possible file through the function
        sym_name = fil['symbolic_name']
        rm_conf = {'files': [sym_name]}
        # figure out what the destination name should be
        spec_name = _get_component_by_symbolic_name(sym_name)
        new_rm_conf = map_rm_conf_to_components(rm_conf)
        # files should be empty, components should have 1 item,
        # except for these which cannot be mapped to specs;
        # in which case, components is empty and these remain in files
        if sym_name in ['grub2_efi_grubenv',
                        'grub2_grubenv',
                        'redhat_access_proactive_log']:
            assert len(new_rm_conf['files']) == 1
            assert new_rm_conf['files'][0] == sym_name
            assert len(new_rm_conf['components']) == 0
        else:
            assert len(new_rm_conf['files']) == 0
            assert len(new_rm_conf['components']) == 1
            assert new_rm_conf['components'][0] == spec_name

    # globs
    for glb in uploader_json['globs']:
        # run each possible glob through the function
        sym_name = glb['symbolic_name']
        rm_conf = {'files': [sym_name]}
        # figure out what the destination name should be
        spec_name = _get_component_by_symbolic_name(sym_name)
        new_rm_conf = map_rm_conf_to_components(rm_conf)
        # files should be empty, components should have 1 item
        assert len(new_rm_conf['files']) == 0
        assert len(new_rm_conf['components']) == 1
        assert new_rm_conf['components'][0] == spec_name


def test_map_rm_conf_to_components_raw_cmds_files():
    '''
    Verify that all raw files/commands in uploader.json result as
    components in the output
    '''
    # commands
    for cmd in uploader_json['commands']:
        # run each possible command through the function
        rm_conf = {'commands': [cmd['command']]}
        sym_name = cmd['symbolic_name']
        # figure out what the destination name should be
        spec_name = _get_component_by_symbolic_name(sym_name)
        new_rm_conf = map_rm_conf_to_components(rm_conf)
        # commands should be empty, components should have 1 item
        assert len(new_rm_conf['commands']) == 0
        assert len(new_rm_conf['components']) == 1
        assert new_rm_conf['components'][0] == spec_name

    # files
    for fil in uploader_json['files']:
        # run each possible file through the function
        rm_conf = {'files': [fil['file']]}
        sym_name = fil['symbolic_name']
        # figure out what the destination name should be
        spec_name = _get_component_by_symbolic_name(sym_name)
        new_rm_conf = map_rm_conf_to_components(rm_conf)
        # files should be empty, components should have 1 item,
        # except for these which cannot be mapped to specs;
        # in which case, components is empty and these remain in files
        if fil['file'] in ['/boot/efi/EFI/redhat/grubenv',
                           '/boot/grub2/grubenv',
                           '/var/log/redhat_access_proactive/redhat_access_proactive.log']:
            assert len(new_rm_conf['files']) == 1
            assert new_rm_conf['files'][0] == fil['file']
            assert len(new_rm_conf['components']) == 0
        else:
            assert len(new_rm_conf['files']) == 0
            assert len(new_rm_conf['components']) == 1
            assert new_rm_conf['components'][0] == spec_name


def test_map_rm_conf_to_components_invalid():
    '''
    Verify that commands/files with no matching spec are left unchanged
    and produce no components
    '''
    rm_conf = {'commands': ['random', 'value'], 'files': ['other', 'invalid', 'data']}
    new_rm_conf = map_rm_conf_to_components(rm_conf)
    # rm_conf should be unchanged
    assert len(new_rm_conf['commands']) == 2
    assert len(new_rm_conf['files']) == 3
    assert len(new_rm_conf['components']) == 0
    assert new_rm_conf['commands'] == rm_conf['commands']
    assert new_rm_conf['files'] == rm_conf['files']


@patch('insights.client.map_components._search_uploader_json')
def test_rm_conf_empty(_search_uploader_json):
    '''
    Verify the function returns rm_conf unchanged if called
    with an empty dict or None
    '''
    rm_conf = {}
    new_rm_conf = map_rm_conf_to_components(rm_conf)
    _search_uploader_json.assert_not_called()
    assert new_rm_conf == {}

    rm_conf = None
    new_rm_conf = map_rm_conf_to_components(rm_conf)
    _search_uploader_json.assert_not_called()
    assert new_rm_conf is None


@patch('insights.client.map_components.logger.warning')
def test_log_long_key(logger_warning):
    '''
    Verify the conversion table is logged with proper
    spacing, wrapping, and unconverted specs are not logged
    '''
    rm_conf = {'commands': ["/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;",
                            "/usr/bin/md5sum /etc/pki/product/69.pem"],
               'files': ["/etc/sysconfig/virt-who",
                         "/etc/yum.repos.d/fedora-cisco-openh264.repo",
                         "krb5_conf_d"]}
    map_rm_conf_to_components(rm_conf)
    logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n  /etc/origin/master /etc/pki /etc/ipa -type f\n  -exec /usr/bin/openssl x509 -noout -enddate -in\n  '{}' \\; -exec echo 'FileName= {}' \\;")
    logger_warning.assert_any_call("- /usr/bin/md5sum /etc/pki/product/69.pem => md5chk_files")
    logger_warning.assert_any_call("- /etc/sysconfig/virt-who => sysconfig_virt_who")
    logger_warning.assert_any_call("- krb5_conf_d => krb5")


@patch('insights.client.map_components.logger.warning')
def test_log_short_key(logger_warning):
    '''
    Verify the conversion table is logged without wrapping or spacing when key
    is short
    '''
    rm_conf = {'commands': ["ss_tupna"]}
    map_rm_conf_to_components(rm_conf)
    logger_warning.assert_any_call("If possible, commands and files specified in the blacklist configuration will be converted to Insights component specs that will be disabled as needed.")


def test_components_added():
    '''
    Verify that the resulting component list is
    an aggregation of the current list and the conversion results
    with no duplicates.
    '''
    rm_conf = {'commands': ["/usr/bin/md5sum /etc/pki/product/69.pem"],
               'components': ["insights.specs.default.DefaultSpecs.sysconfig_virt_who"]}
    results = map_rm_conf_to_components(rm_conf)

    assert results == {'commands': [],
                       'files': [],
                       'components': ["insights.specs.default.DefaultSpecs.sysconfig_virt_who",
                                      "insights.specs.default.DefaultSpecs.md5chk_files"]}
"""NEWLINETurner, Mann, Clandinin:NEWLINENEWLINEhttps://github.com/mhturner/SC-FCNEWLINEmhturner@stanford.eduNEWLINE"""NEWLINEfrom scfc import bridge, anatomical_connectivityNEWLINEimport osNEWLINENEWLINEdata_dir = bridge.getUserConfiguration()['data_dir']NEWLINENEWLINEinclude_inds_branson, name_list_branson = bridge.getBransonNames()NEWLINEBranson_JRC2018 = anatomical_connectivity.getAtlasConnectivity(include_inds_branson, name_list_branson, 'branson')NEWLINENEWLINE# Shortest path distance:NEWLINEshortest_path_dist = bridge.getShortestPathStats(Branson_JRC2018)NEWLINENEWLINE# saveNEWLINEshortest_path_dist.to_pickle(os.path.join(data_dir, 'Branson_ShortestPathDistance.pkl'))NEWLINE |
#!/bin/env python2.7
# encoding: utf-8
import ctypes
import os
import re
import sys
import time

IS_32_BITS_PYTHON = ctypes.sizeof(ctypes.c_voidp) == 4
# 4 for 32 bit or 8 for 64 bit.

from fixtures import *


try:
    FileNotFoundError
except NameError:
    FileNotFoundError = OSError


def test_Version(rawfile):
    assert re.match(r"\d+\.\d+\.\d+\.\d+", rawfile.Version())

def test_GetFileName(rawfile, rawfilename):
    assert rawfile.GetFileName() == os.path.abspath(rawfilename)

def test_GetCreatorID(rawfile):
    assert rawfile.GetCreatorID() == 'Administrator'

def test_GetVersionNumber(rawfile):
    assert rawfile.GetVersionNumber() == 50

def test_GetCreationDate(rawfile):
    creation_date_iso = time.strftime('%Y-%m-%dT%H:%M:%SZ', rawfile.GetCreationDate())
    assert creation_date_iso == '1970-01-01T10:37:55Z'

def test_IsError(rawfile):
    assert rawfile.IsError() == False

def test_IsNewFile(rawfile):
    assert rawfile.IsNewFile() == False

def test_IsThereMSData(rawfile):
    assert rawfile.IsThereMSData() == True

def test_HasExpMethod(rawfile):
    assert rawfile.HasExpMethod() == True

def test_InAcquisition(rawfile):
    assert rawfile.InAcquisition() == False

def test_GetErrorCode(rawfile):
    assert rawfile.GetErrorCode() == False

def test_GetErrorMessage(rawfile):
    assert rawfile.GetErrorMessage() == ''

def test_GetWarningMessage(rawfile):
    assert rawfile.GetWarningMessage() == ''

def test_RefreshViewOfFile(rawfile):
    assert rawfile.RefreshViewOfFile() == None

def test_GetNumberOfControllers(rawfile):
    assert rawfile.GetNumberOfControllers() == 1

def test_GetNumberOfControllersOfType_minus1(rawfile):
    assert rawfile.GetNumberOfControllersOfType(-1) == 0

def test_GetNumberOfControllersOfType_zero(rawfile):
    assert rawfile.GetNumberOfControllersOfType(0) == 1

def test_GetNumberOfControllersOfType_plus1(rawfile):
    assert rawfile.GetNumberOfControllersOfType(1) == 0

def test_GetNumberOfControllersOfType_plus2(rawfile):
    assert rawfile.GetNumberOfControllersOfType(2) == 0

def test_GetNumberOfControllersOfType_plus3(rawfile):
    assert rawfile.GetNumberOfControllersOfType(3) == 0

def test_GetNumberOfControllersOfType_plus4(rawfile):
    assert rawfile.GetNumberOfControllersOfType(4) == 0

def test_GetControllerType_zero(rawfile):
    assert rawfile.GetControllerType(0) == 'MS'

# def test_GetControllerType_one(rawfile):
#     assert rawfile.GetControllerType(1) == 1

def test_GetCurrentController(rawfile):
    assert rawfile.GetCurrentController() == (0, 1)

    # print( 'GetCurrentController()', rawfile.GetCurrentController() )
    # # print( 'SetCurrentController(4,1)', rawfile.SetCurrentController(4,1) )

    # print( 'GetCurrentController()', rawfile.GetCurrentController() )
    # # print( 'SetCurrentController(0,1)', rawfile.SetCurrentController(0,1) )

    # print( 'GetCurrentController()', rawfile.GetCurrentController() )

def test_GetExpectedRunTime(rawfile):
    assert rawfile.GetExpectedRunTime() == 100.0
100.0NEWLINENEWLINEdef test_GetMaxIntegratedIntensity(rawfile):NEWLINE assert rawfile.GetMaxIntegratedIntensity() == 1120672896.0NEWLINENEWLINEdef test_GetMaxIntensity(rawfile):NEWLINE assert rawfile.GetMaxIntensity() == 0NEWLINENEWLINEdef test_GetInletID(rawfile):NEWLINE assert rawfile.GetInletID() == 0NEWLINENEWLINEdef test_GetErrorFlag(rawfile):NEWLINE assert rawfile.GetErrorFlag() == 0NEWLINENEWLINEdef test_GetFlags(rawfile):NEWLINE assert rawfile.GetFlags() == ''NEWLINENEWLINEdef test_GetAcquisitionFileName(rawfile):NEWLINE assert rawfile.GetAcquisitionFileName() == ''NEWLINENEWLINEdef test_GetOperator(rawfile):NEWLINE assert rawfile.GetOperator() == ''NEWLINENEWLINEdef test_GetComment1(rawfile):NEWLINE assert rawfile.GetComment1() == ''NEWLINENEWLINEdef test_GetComment2(rawfile):NEWLINE assert rawfile.GetComment2() == ''NEWLINENEWLINEdef test_GetFilters(rawfile, filters_3_0, filters_3_1):NEWLINE DLL_VERSION = rawfile.VersionAsATuple()NEWLINE if DLL_VERSION.major<=3 and DLL_VERSION.minor<1:NEWLINE # GetFilters results for 3.0-NEWLINE assert rawfile.GetFilters() == filters_3_0NEWLINE assert len(rawfile.GetFilters()) == 1724NEWLINE else:NEWLINE # GetFilters results for 3.1+NEWLINE assert rawfile.GetFilters() == filters_3_1NEWLINE assert len(rawfile.GetFilters()) == 1690NEWLINENEWLINEdef test_GetMassTolerance(rawfile):NEWLINE assert rawfile.GetMassTolerance() == (False, 500.0, 0)NEWLINENEWLINEdef test_SetMassTolerance(rawfile):NEWLINE rawfile.SetMassTolerance(userDefined=True, massTolerance=555.0, units=2)NEWLINE assert rawfile.GetMassTolerance() == (True, 555.0, 2)NEWLINENEWLINEdef test_GetMassResolution(rawfile):NEWLINE assert rawfile.GetMassResolution() == 0.5NEWLINENEWLINEdef test_GetNumTrailerExtra(rawfile):NEWLINE assert rawfile.GetNumTrailerExtra() == 3316NEWLINENEWLINEdef test_GetLowMass(rawfile):NEWLINE assert rawfile.GetLowMass() == 100.0NEWLINENEWLINEdef test_GetHighMass(rawfile):NEWLINE assert rawfile.GetHighMass() == 2000.0NEWLINENEWLINEdef test_GetStartTime(rawfile):NEWLINE assert rawfile.GetStartTime() == 0.005666666666666667NEWLINENEWLINEdef test_GetEndTime(rawfile):NEWLINE assert rawfile.GetEndTime() == 99.97766666666666NEWLINENEWLINEdef test_GetNumSpectra(rawfile):NEWLINE assert rawfile.GetNumSpectra() == 3316NEWLINENEWLINEdef test_GetFirstSpectrumNumber(rawfile):NEWLINE assert rawfile.GetFirstSpectrumNumber() == 1NEWLINENEWLINEdef test_GetLastSpectrumNumber(rawfile):NEWLINE assert rawfile.GetLastSpectrumNumber() == 3316NEWLINENEWLINEdef test_GetAcquisitionDate(rawfile):NEWLINE assert rawfile.GetAcquisitionDate() == ''NEWLINENEWLINEdef test_GetUniqueCompoundNames(rawfile):NEWLINE assert rawfile.GetUniqueCompoundNames() == ('',)NEWLINENEWLINENEWLINE # print( '############################################## INSTRUMENT BEGIN')NEWLINENEWLINEdef test_GetInstrumentDescription(rawfile):NEWLINE assert rawfile.GetInstrumentDescription() == ''NEWLINENEWLINEdef test_GetInstrumentID(rawfile):NEWLINE assert rawfile.GetInstrumentID() == 0NEWLINENEWLINEdef test_GetInstSerialNumber(rawfile):NEWLINE assert rawfile.GetInstSerialNumber() == 'LC000718'NEWLINENEWLINEdef test_GetInstName(rawfile):NEWLINE assert rawfile.GetInstName() == 'LCQ'NEWLINENEWLINEdef test_GetInstModel(rawfile):NEWLINE assert rawfile.GetInstModel() == 'LCQ'NEWLINENEWLINEdef test_GetInstSoftwareVersion(rawfile):NEWLINE assert rawfile.GetInstSoftwareVersion() == '1.3'NEWLINENEWLINEdef test_GetInstHardwareVersion(rawfile):NEWLINE assert rawfile.GetInstHardwareVersion() == ''NEWLINENEWLINEdef 
test_GetInstFlags(rawfile):NEWLINE assert rawfile.GetInstFlags() == ''NEWLINENEWLINEdef test_GetInstNumChannelLabels(rawfile):NEWLINE assert rawfile.GetInstNumChannelLabels() == 0NEWLINENEWLINE# def test_GetInstChannelLabel(rawfile):NEWLINE# assert rawfile.GetInstChannelLabel(0) == 0NEWLINENEWLINEdef test_IsQExactive(rawfile):NEWLINE assert rawfile.IsQExactive() == FalseNEWLINENEWLINE # scanNumber = 1NEWLINE# print( '############################################## XCALIBUR INTERFACE BEGIN')NEWLINENEWLINEdef test_GetScanHeaderInfoForScanNum(rawfile):NEWLINE scanheader = rawfile.GetScanHeaderInfoForScanNum(scanNumber=1)NEWLINENEWLINE assert scanheader['numPackets'] == 0NEWLINE assert scanheader['StartTime'] == 0.005666666666666667NEWLINE assert scanheader['LowMass'] == 300.0NEWLINE assert scanheader['HighMass'] == 2000.0NEWLINE assert scanheader['TIC'] == 0.0NEWLINE assert scanheader['BasePeakMass'] == 0.0NEWLINE assert scanheader['BasePeakIntensity'] == 0.0NEWLINE assert scanheader['numChannels'] == 0NEWLINE assert scanheader['uniformTime'] == 0NEWLINE assert scanheader['Frequency'] == 0.0NEWLINENEWLINEdef test_GetTrailerExtraForScanNum(rawfile):NEWLINE scantrailer = rawfile.GetTrailerExtraForScanNum(scanNumber=1)NEWLINE assert scantrailer['Wideband Activation'] == 'Off'NEWLINE assert scantrailer['Micro Scan Count'] == 3.0NEWLINE assert scantrailer['Ion Injection Time (ms)'] == 49.98NEWLINE assert scantrailer['Scan Segment'] == 1.0NEWLINE assert scantrailer['Scan Event'] == 1.0NEWLINE assert scantrailer['Elapsed Scan Time (sec)'] == 1.38NEWLINE assert scantrailer['API Source CID Energy'] == 0.0NEWLINE assert scantrailer['Resolution'] == 'Low'NEWLINE assert scantrailer['Average Scan by Inst'] == 'No'NEWLINE assert scantrailer['BackGd Subtracted by Inst'] == 'No'NEWLINE assert scantrailer['Charge State'] == 0.0NEWLINENEWLINEdef test_GetNumTuneData(rawfile):NEWLINE assert rawfile.GetNumTuneData() == 2NEWLINENEWLINEdef test_GetTuneData(rawfile):NEWLINE assert rawfile.GetTuneData(0) == 'Capillary Temp (C):200.00\nAPCI Vaporizer Temp (C):450.00\nAGC:On\nAGC Off Ion Time (ms):5.00\nSheath Gas Flow ():0.00\nAux Gas Flow ():0.00\nSource Type:ESI\nInjection Waveforms:Off\n\nPOSITIVE POLARITY\nSource Voltage (kV):0.00\nSource Current (uA):80.00\nCapillary Voltage (V):25.00\nTube Lens Offset (V):10.00\nMultipole RF Amplifier (Vp-p):400.00\nMultipole 1 Offset (V):-7.00\nMultipole 2 Offset (V):-28.50\nInterMultipole Lens Voltage (V):-16.00\nTrap DC Offset Voltage (V):-10.00\nZoom Micro Scans:5\nZoom AGC Target:20000000.00\nZoom Max Ion Time (ms):50.00\nFull Micro Scans:3\nFull AGC Target:50000000.00\nFull Max Ion Time (ms):50.00\nSIM Micro Scans:5\nSIM AGC Target:40000000.00\nSIM Max Ion Time (ms):200.00\nMSn Micro Scans:3\nMSn AGC Target:40000000.00\nMSn Max Ion Time (ms):200.00\n\nNEGATIVE POLARITY\nSource Voltage (kV):4.00\nSource Current (uA):100.00\nCapillary Voltage (V):10.00\nTube Lens Offset (V):-50.00\nMultipole RF Amplifier (Vp-p):400.00\nMultipole 1 Offset (V):3.00\nMultipole 2 Offset (V):7.00\nInterMultipole Lens Voltage (V):16.00\nTrap DC Offset Voltage (V):10.00\nZoom Micro Scans:5\nZoom AGC Target:10000000.00\nZoom Max Ion Time (ms):50.00\nFull Micro Scans:3\nFull AGC Target:10000000.00\nFull Max Ion Time (ms):50.00\nSIM Micro Scans:5\nSIM AGC Target:20000000.00\nSIM Max Ion Time (ms):200.00\nMSn Micro Scans:3\nMSn AGC Target:20000000.00\nMSn Max Ion Time (ms):200.00\n'NEWLINENEWLINEdef test_GetNumInstMethods(rawfile):NEWLINE assert rawfile.GetNumInstMethods() == 1NEWLINENEWLINEdef 
test_GetInstMethodNames(rawfile):NEWLINE assert rawfile.GetInstMethodNames() == ('LCQ',)NEWLINENEWLINEdef test_GetInstMethod(rawfile, instmethod):NEWLINE assert rawfile.GetInstMethod(0) == instmethodNEWLINENEWLINEdef test_ExtractInstMethodFromRaw(rawfile):NEWLINE method_filename = rawfile.filename + '.meth'NEWLINE try:NEWLINE os.remove(method_filename)NEWLINE except FileNotFoundError:NEWLINE passNEWLINE rawfile.ExtractInstMethodFromRaw(method_filename)NEWLINE assert os.path.exists(method_filename)NEWLINENEWLINE# # # # # # # "View/Report/Sample Information" BEGINNEWLINENEWLINEdef test_GetVialNumber(rawfile):NEWLINE assert rawfile.GetVialNumber() == 0NEWLINENEWLINEdef test_GetInjectionVolume(rawfile):NEWLINE assert rawfile.GetInjectionVolume() == 0NEWLINENEWLINEdef test_GetInjectionAmountUnits(rawfile):NEWLINE assert rawfile.GetInjectionAmountUnits() == ''NEWLINENEWLINEdef test_GetSampleVolume(rawfile):NEWLINE assert rawfile.GetSampleVolume() == 0.0NEWLINENEWLINEdef test_GetSampleVolumeUnits(rawfile):NEWLINE assert rawfile.GetSampleVolumeUnits() == ''NEWLINENEWLINEdef test_GetSampleWeight(rawfile):NEWLINE assert rawfile.GetSampleWeight() == 0.0NEWLINENEWLINEdef test_GetSampleAmountUnits(rawfile):NEWLINE assert rawfile.GetSampleAmountUnits() == ''NEWLINENEWLINEdef test_GetSeqRowNumber(rawfile):NEWLINE assert rawfile.GetSeqRowNumber() == 1NEWLINENEWLINEdef test_GetSeqRowSampleType(rawfile):NEWLINE assert rawfile.GetSeqRowSampleType() == 'Unknown'NEWLINENEWLINEdef test_GetSeqRowDataPath(rawfile):NEWLINE assert rawfile.GetSeqRowDataPath() == ''NEWLINENEWLINEdef test_GetSeqRowRawFileName(rawfile):NEWLINE assert rawfile.GetSeqRowRawFileName() == 'Shew_246a_LCQa_15Oct04_Andro_0904-2_4-20.RAW'NEWLINENEWLINEdef test_GetSeqRowSampleName(rawfile):NEWLINE assert rawfile.GetSeqRowSampleName() == ''NEWLINENEWLINEdef test_GetSeqRowSampleID(rawfile):NEWLINE assert rawfile.GetSeqRowSampleID() == ''NEWLINENEWLINEdef test_GetSeqRowComment(rawfile):NEWLINE assert rawfile.GetSeqRowComment() == ''NEWLINENEWLINEdef test_GetSeqRowLevelName(rawfile):NEWLINE assert rawfile.GetSeqRowLevelName() == ''NEWLINENEWLINEdef test_GetSeqRowUserText(rawfile):NEWLINE assert rawfile.GetSeqRowUserText(index=0) == ''NEWLINENEWLINEdef test_GetSeqRowInstrumentMethod(rawfile):NEWLINE assert rawfile.GetSeqRowInstrumentMethod() == r'C:\xcalibur\methods\Std 100 min\4-20ddTop3_100min.meth'NEWLINENEWLINEdef test_GetSeqRowProcessingMethod(rawfile):NEWLINE assert rawfile.GetSeqRowProcessingMethod() == ''NEWLINENEWLINEdef test_GetSeqRowCalibrationFile(rawfile):NEWLINE assert rawfile.GetSeqRowCalibrationFile() == ''NEWLINENEWLINEdef test_GetSeqRowVial(rawfile):NEWLINE assert rawfile.GetSeqRowVial() == ''NEWLINENEWLINEdef test_GetSeqRowInjectionVolume(rawfile):NEWLINE assert rawfile.GetSeqRowInjectionVolume() == 0.0NEWLINENEWLINEdef test_GetSeqRowSampleWeight(rawfile):NEWLINE assert rawfile.GetSeqRowSampleWeight() == 0.0NEWLINENEWLINEdef test_GetSeqRowSampleVolume(rawfile):NEWLINE assert rawfile.GetSeqRowSampleVolume() == 0.0NEWLINENEWLINEdef test_GetSeqRowISTDAmount(rawfile):NEWLINE assert rawfile.GetSeqRowISTDAmount() == 0.0NEWLINENEWLINEdef test_GetSeqRowDilutionFactor(rawfile):NEWLINE assert rawfile.GetSeqRowDilutionFactor() == 1.0NEWLINENEWLINEdef test_GetSeqRowUserLabel0(rawfile):NEWLINE assert rawfile.GetSeqRowUserLabel(index=0) == 'Study'NEWLINENEWLINEdef test_GetSeqRowUserLabel1(rawfile):NEWLINE assert rawfile.GetSeqRowUserLabel(index=1) == 'Client'NEWLINENEWLINEdef test_GetSeqRowUserLabel2(rawfile):NEWLINE assert 
rawfile.GetSeqRowUserLabel(index=2) == 'Laboratory'NEWLINENEWLINEdef test_GetSeqRowUserLabel3(rawfile):NEWLINE assert rawfile.GetSeqRowUserLabel(index=3) == 'Company'NEWLINENEWLINEdef test_GetSeqRowUserLabel4(rawfile):NEWLINE assert rawfile.GetSeqRowUserLabel(index=4) == 'Phone'NEWLINENEWLINEdef test_GetSeqRowUserTextEx0(rawfile):NEWLINE assert rawfile.GetSeqRowUserTextEx(index=0) == ''NEWLINENEWLINEdef test_GetSeqRowUserTextEx1(rawfile):NEWLINE assert rawfile.GetSeqRowUserTextEx(index=1) == ''NEWLINENEWLINEdef test_GetSeqRowUserTextEx2(rawfile):NEWLINE assert rawfile.GetSeqRowUserTextEx(index=2) == ''NEWLINENEWLINEdef test_GetSeqRowUserTextEx3(rawfile):NEWLINE assert rawfile.GetSeqRowUserTextEx(index=3) == ''NEWLINENEWLINEdef test_GetSeqRowUserTextEx4(rawfile):NEWLINE assert rawfile.GetSeqRowUserTextEx(index=4) == ''NEWLINENEWLINEdef test_GetSeqRowBarcode(rawfile):NEWLINE assert rawfile.GetSeqRowBarcode() == ''NEWLINENEWLINEdef test_GetSeqRowBarcodeStatus(rawfile):NEWLINE assert rawfile.GetSeqRowBarcodeStatus() == 0NEWLINENEWLINENEWLINEdef test_GetNumStatusLog(rawfile):NEWLINE assert rawfile.GetNumStatusLog() == 2767NEWLINENEWLINEdef test_GetStatusLogForScanNum(rawfile):NEWLINE assert rawfile.GetStatusLogForScanNum(scanNumber=1) == (0.052666667848825455, [('API SOURCE', ''), ('Source Voltage (kV)', '0.03'), ('Source Current (uA)', '0.10'), ('Vaporizer Thermocouple OK', 'No'), ('Vaporizer Temp (C)', '-0.00'), ('Sheath Gas Flow Rate ()', '-0.20'), ('Aux Gas Flow Rate()', '-0.27'), ('Capillary RTD OK', 'Yes'), ('Capillary Voltage (V)', '25.39'), ('Capillary Temp (C)', '199.50'), ('Tube Lens Voltage (V, set point)', '10.00'), ('8 kV supply at limit', 'No'), ('', ''), ('VACUUM', ''), ('Vacuum OK', 'Yes'), ('Ion Gauge Pressure OK', 'Yes'), ('Ion Gauge Status', 'On'), ('Ion Gauge (x10e-5 Torr)', '1.64'), ('Convectron Pressure OK', 'Yes'), ('Convectron Gauge (Torr)', '0.94'), ('', ''), ('TURBO PUMP', ''), ('Status', 'Running'), ('Life (hours)', '54878'), ('Speed (rpm)', '60000'), ('Power (Watts)', '73'), ('Temperature (C)', '40.00'), ('', ''), ('ION OPTICS', ''), ('Multipole Frequency On', 'Yes'), ('Multipole 1 Offset (V)', '-6.74'), ('Lens Voltage (V)', '-15.15'), ('Multipole 2 Offset (V)', '-28.11'), ('Multipole RF Amplitude (Vp-p, set point)', '400.00'), ('Coarse Trap DC Offset (V)', '-9.88'), ('', ''), ('MAIN RF', ''), ('Reference Sine Wave OK', 'Yes'), ('Standing Wave Ratio OK', 'Yes'), ('Main RF DAC (steps)', '-33.00'), ('Main RF Detected (V)', '-0.00'), ('RF Detector Temp (C)', '37.45'), ('Main RF Modulation (V)', '0.04'), ('Main RF Amplifier (Vp-p)', '8.74'), ('RF Generator Temp (C)', '27.73'), ('', ''), ('ION DETECTION SYSTEM', ''), ('Multiplier Actual (V)', '-1182.88'), ('', ''), ('POWER SUPPLIES', ''), ('+5V Supply Voltage (V)', '5.14'), ('-15V Supply Voltage (V)', '-14.97'), ('+15V Supply Voltage (V)', '14.94'), ('+24V Supply Voltage (V)', '24.13'), ('-28V Supply Voltage (V)', '-28.09'), ('+28V Supply Voltage (V)', '28.29'), ('+28V Supply Current (Amps)', '0.80'), ('+35V Supply Voltage (V)', '35.55'), ('+36V Supply Voltage (V)', '36.22'), ('-150V Supply Voltage (V)', '-148.98'), ('+150V Supply Voltage (V)', '150.86'), ('-205V Supply Voltage (V)', '-203.87'), ('+205V Supply Voltage (V)', '205.34'), ('Ambient Temp (C)', '27.68'), ('', ''), ('INSTRUMENT STATUS', ''), ('Instrument', 'On'), ('Analysis', 'Acquiring'), ('', ''), ('SYRINGE PUMP', ''), ('Status', 'Ready'), ('Flow Rate (uL/min)', '3.00'), ('Infused Volume (uL)', '0.00'), ('Syringe Diameter (mm)', '2.30'), ('', ''), ('DIGITAL 
INPUTS', ''), ('READY IN is active', 'No'), ('START IN is active', 'No'), ('Divert/Inject valve', 'Load')])NEWLINENEWLINEdef test_GetStatusLogForPos(rawfile, statuslogforpos0):NEWLINE assert rawfile.GetStatusLogForPos(position=0) == statuslogforpos0NEWLINENEWLINEdef test_GetStatusLogPlottableIndex(rawfile):NEWLINE assert rawfile.GetStatusLogPlottableIndex() == (('Source Voltage (kV):', 'Source Current (uA):', 'Vaporizer Temp (C):', 'Sheath Gas Flow Rate ():', 'Aux Gas Flow Rate():', 'Capillary Voltage (V):', 'Capillary Temp (C):', 'Tube Lens Voltage (V, set point):', 'Ion Gauge (x10e-5 Torr):', 'Convectron Gauge (Torr):', 'Life (hours):', 'Speed (rpm):', 'Power (Watts):', 'Temperature (C):', 'Multipole 1 Offset (V):', 'Lens Voltage (V):', 'Multipole 2 Offset (V):', 'Multipole RF Amplitude (Vp-p, set point):', 'Coarse Trap DC Offset (V):', 'Main RF DAC (steps):', 'Main RF Detected (V):', 'RF Detector Temp (C):', 'Main RF Modulation (V):', 'Main RF Amplifier (Vp-p):', 'RF Generator Temp (C):', 'Multiplier Actual (V):', '+5V Supply Voltage (V):', '-15V Supply Voltage (V):', '+15V Supply Voltage (V):', '+24V Supply Voltage (V):', '-28V Supply Voltage (V):', '+28V Supply Voltage (V):', '+28V Supply Current (Amps):', '+35V Supply Voltage (V):', '+36V Supply Voltage (V):', '-150V Supply Voltage (V):', '+150V Supply Voltage (V):', '-205V Supply Voltage (V):', '+205V Supply Voltage (V):', 'Ambient Temp (C):', 'Flow Rate (uL/min):', 'Infused Volume (uL):', 'Syringe Diameter (mm):'), (1, 2, 4, 5, 6, 8, 9, 10, 17, 19, 23, 24, 25, 26, 30, 31, 32, 33, 34, 39, 40, 41, 42, 43, 44, 47, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 71, 72, 73))NEWLINENEWLINEdef test_GetNumErrorLog(rawfile):NEWLINE assert rawfile.GetNumErrorLog() == 1289NEWLINENEWLINEdef test_GetErrorLogItem(rawfile):NEWLINE assert rawfile.GetErrorLogItem(0) == ('Dynamic exclusion list is full. 
Mass 1026.89 has been dropped.', 15.657333374023438)NEWLINENEWLINEdef test_GetMassListFromScanNum(rawfile):NEWLINE assert rawfile.GetMassListFromScanNum(scanNumber=1) == (((), ()), None)NEWLINENEWLINEdef test_GetMassListRangeFromScanNum(rawfile):NEWLINE assert rawfile.GetMassListRangeFromScanNum(scanNumber=1) == (((), ()), None)NEWLINENEWLINEdef test_GetSegmentedMassListFromScanNum(rawfile):NEWLINE assert rawfile.GetSegmentedMassListFromScanNum(scanNumber=1) == (((), ()), None, (0,), 1)NEWLINENEWLINEdef test_GetAverageMassList(rawfile):NEWLINE assert rawfile.GetAverageMassList(firstAvgScanNumber=1, lastAvgScanNumber=11) == (((), ()), None)NEWLINENEWLINEdef test_GetAveragedMassSpectrum(rawfile):NEWLINE assert rawfile.GetAveragedMassSpectrum(listOfScanNumbers=[1,2,3]) == (((), ()), None)NEWLINENEWLINEdef test_GetSummedMassSpectrum(rawfile):NEWLINE assert rawfile.GetSummedMassSpectrum(listOfScanNumbers=[1,2,3]) == (((), ()), None)NEWLINENEWLINEdef test_GetLabelData(rawfile):NEWLINE labels, flags = rawfile.GetLabelData(scanNumber=1)NEWLINE assert labels.mass == ()NEWLINE assert labels.intensity == ()NEWLINE assert labels.resolution == ()NEWLINE assert labels.baseline == ()NEWLINE assert labels.noise == ()NEWLINE assert labels.charge == ()NEWLINE assert flags.saturated == ()NEWLINE assert flags.fragmented == ()NEWLINE assert flags.merged == ()NEWLINE assert flags.exception == ()NEWLINE assert flags.reference == ()NEWLINE assert flags.modified == ()NEWLINENEWLINEdef test_GetAveragedLabelData(rawfile):NEWLINE values, flags = rawfile.GetAveragedLabelData(listOfScanNumbers=[1,2,3])NEWLINE assert values == (((), (), (), (), (), ()))NEWLINE assert flags.saturated == ()NEWLINE assert flags.fragmented == ()NEWLINE assert flags.merged == ()NEWLINE assert flags.exception == ()NEWLINE assert flags.reference == ()NEWLINE assert flags.modified == ()NEWLINENEWLINENEWLINEdef test_GetAllMSOrderData(rawfile):NEWLINE labels, flags, numberOfMSOrders = rawfile.GetAllMSOrderData(scanNumber=1)NEWLINE assert labels.mass == ()NEWLINE assert labels.intensity == ()NEWLINE assert labels.resolution == ()NEWLINE assert labels.baseline == ()NEWLINE assert labels.noise == ()NEWLINE assert labels.charge == ()NEWLINE assert flags.activation_type == ()NEWLINE assert flags.is_precursor_range_valid == ()NEWLINE assert numberOfMSOrders == 0NEWLINENEWLINEdef test_GetChroData(rawfile, chrodata):NEWLINE chroData, peakFlags = rawfile.GetChroData(startTime=rawfile.StartTime,NEWLINE endTime=rawfile.EndTime,NEWLINE massRange1="{}-{}".format(rawfile.LowMass, rawfile.HighMass),NEWLINE scanFilter="Full ms ")NEWLINE assert chroData == chrodataNEWLINE assert peakFlags == NoneNEWLINENEWLINE# def test_GetChroByCompoundName(rawfile):NEWLINE# assert rawfile.GetChroByCompoundName(["methyltestosterone"]) == ''NEWLINENEWLINE# def test_GetMassPrecisionEstimate(rawfile):NEWLINE# assert rawfile.GetMassPrecisionEstimate(scanNumber=1) == ''NEWLINENEWLINEdef test_GetFullMSOrderPrecursorDataFromScanNum(rawfile):NEWLINE precursorData = rawfile.GetFullMSOrderPrecursorDataFromScanNum(scanNumber=1, MSOrder=0)NEWLINE assert precursorData.precursorMass == 50.0NEWLINE assert precursorData.isolationWidth == 1.0NEWLINE assert precursorData.collisionEnergy == 25.0NEWLINENEWLINE if (sys.version_info.major, sys.version_info.minor) == (2, 7) and IS_32_BITS_PYTHON:NEWLINE assert precursorData.collisionEnergyValid >= 1e+100NEWLINE else:NEWLINE assert precursorData.collisionEnergyValid <= 1e-200NEWLINENEWLINE assert precursorData.rangeIsValid == 0.0NEWLINE assert 
precursorData.firstPrecursorMass == 0.0NEWLINE assert precursorData.lastPrecursorMass == 0.0NEWLINENEWLINEdef test_GetPrecursorInfoFromScanNum(rawfile):NEWLINE assert rawfile.GetPrecursorInfoFromScanNum(scanNumber=1) == NoneNEWLINE |
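The long run of "call a getter, compare against a constant" tests above could be collapsed with `pytest.mark.parametrize`. A minimal sketch, reusing the `rawfile` fixture imported from the project's `fixtures` module, with expected values taken from the tests in this file:

```python
import pytest

@pytest.mark.parametrize("method, expected", [
    ("GetCreatorID", "Administrator"),
    ("GetVersionNumber", 50),
    ("GetNumSpectra", 3316),
    ("GetLowMass", 100.0),
    ("GetHighMass", 2000.0),
    ("GetInstName", "LCQ"),
])
def test_simple_getters(rawfile, method, expected):
    # Each case calls one no-argument getter and compares it to the known value.
    assert getattr(rawfile, method)() == expected
```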
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from ray.rllib.models.model import Model
from ray.rllib.models.fcnet import FullyConnectedNetwork
from ray.rllib.models.action_dist import Reshaper


class MultiAgentFullyConnectedNetwork(Model):
    """Multiagent fully connected network."""

    def _build_layers(self, inputs, num_outputs, options):
        # Split the input and output tensors
        input_shapes = options["custom_options"]["multiagent_obs_shapes"]
        output_shapes = options["custom_options"]["multiagent_act_shapes"]
        input_reshaper = Reshaper(input_shapes)
        output_reshaper = Reshaper(output_shapes)
        split_inputs = input_reshaper.split_tensor(inputs)
        num_actions = output_reshaper.split_number(num_outputs)

        custom_options = options["custom_options"]
        hiddens = custom_options.get("multiagent_fcnet_hiddens",
                                     [[256, 256]] * 1)

        # check for a shared model
        shared_model = custom_options.get("multiagent_shared_model", 0)
        reuse = tf.AUTO_REUSE if shared_model else False
        outputs = []
        for i in range(len(hiddens)):
            scope = "multi" if shared_model else "multi{}".format(i)
            with tf.variable_scope(scope, reuse=reuse):
                sub_options = options.copy()
                sub_options.update({"fcnet_hiddens": hiddens[i]})
                # TODO(ev) make this support arbitrary networks
                fcnet = FullyConnectedNetwork(split_inputs[i],
                                              int(num_actions[i]), sub_options)
                output = fcnet.outputs
                outputs.append(output)
        overall_output = tf.concat(outputs, axis=1)
        return overall_output, outputs
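An illustrative `options` dict for this model, assuming two agents. The keys mirror what `_build_layers` reads from `custom_options`, but the exact shape format expected by `Reshaper` (flat sizes vs. shape lists) depends on its implementation, so treat the values as placeholders rather than RLlib-documented settings.

```python
# Hypothetical configuration for two agents, each with a 4-dim observation
# and a 2-dim action; one hidden-layer spec per agent.
options = {
    "custom_options": {
        "multiagent_obs_shapes": [4, 4],
        "multiagent_act_shapes": [2, 2],
        "multiagent_fcnet_hiddens": [[256, 256], [256, 256]],
        "multiagent_shared_model": 0,  # 1 -> all agents share weights via tf.AUTO_REUSE
    },
}
```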
import re
import unittest
import markdown
from biovis_media_extension.extension import BioVisPluginExtension


class TestUtils(unittest.TestCase):
    plugin = BioVisPluginExtension(configs={})

    def test_invalid_plugin(self):
        text = """
        @invalid-plugin()
        """
        output = markdown.markdown(text, extensions=[self.plugin])
        matched = re.match("<div class='alert alert-danger' role='alert'>", output) is not None
        self.assertTrue(matched)


if __name__ == '__main__':
    unittest.main()
import jsonNEWLINEimport configparserNEWLINEimport argparseNEWLINEimport numpy as npNEWLINEimport pyodbcNEWLINEimport pandas as pdNEWLINENEWLINEfrom pyapacheatlas.auth import ServicePrincipalAuthenticationNEWLINEfrom pyapacheatlas.core.typedef import EntityTypeDefNEWLINEfrom pyapacheatlas.core import (NEWLINE AtlasAttributeDef,NEWLINE AtlasEntity,NEWLINE PurviewClient,NEWLINE RelationshipTypeDefNEWLINE)NEWLINEfrom pyapacheatlas.core.util import GuidTrackerNEWLINENEWLINEDB_ENTITY_NAME = "custom_snowflake_db"NEWLINESCHEMA_ENTITY_NAME = "custom_snowflake_schema"NEWLINETABLE_ENTITY_NAME = "custom_snowflake_table"NEWLINECOLUMN_ENTITY_NAME = "custom_snowflake_column"NEWLINEDB_SCHEMA_RELATIONSHIP_NAME = "custom_snowflake_db_schema"NEWLINESCHEMA_TABLE_RELATIONSHIP_NAME = "custom_snowflake_schema_table"NEWLINETABLE_COLUMN_RELATIONSHIP_NAME = "custom_snowflake_table_column"NEWLINETABLE_CATALOG = "TABLE_CATALOG"NEWLINETABLE_SCHEMA = "TABLE_SCHEMA"NEWLINETABLE_NAME = "TABLE_NAME"NEWLINECOLUMN_NAME = "COLUMN_NAME"NEWLINEDATA_TYPE = "DATA_TYPE"NEWLINENEWLINEdef createEntityDefinitions(client):NEWLINE # Add DBNEWLINE entityDefs = []NEWLINE entityDefs.append(EntityTypeDef(NEWLINE name=DB_ENTITY_NAME,NEWLINE superTypes=["azure_resource"],NEWLINE description=DB_ENTITY_NAME,NEWLINE typeVersion="1.0",NEWLINE serviceType="Snowflake Database"NEWLINE ))NEWLINE # Add SchemaNEWLINE entityDefs.append(EntityTypeDef(NEWLINE name=SCHEMA_ENTITY_NAME,NEWLINE superTypes=["Asset"],NEWLINE description=SCHEMA_ENTITY_NAME,NEWLINE typeVersion="1.0",NEWLINE serviceType="Snowflake Database"NEWLINE ))NEWLINE # Add TableNEWLINE entityDefs.append(EntityTypeDef(NEWLINE name=TABLE_ENTITY_NAME,NEWLINE superTypes=["DataSet"],NEWLINE description=TABLE_ENTITY_NAME,NEWLINE typeVersion="1.0",NEWLINE serviceType="Snowflake Database"NEWLINE ))NEWLINE # Add ColumnNEWLINE columnDef = EntityTypeDef(NEWLINE name=COLUMN_ENTITY_NAME,NEWLINE superTypes=["DataSet"],NEWLINE description=COLUMN_ENTITY_NAME,NEWLINE serviceType="Snowflake Database"NEWLINE )NEWLINE columnDef.addAttributeDef(NEWLINE AtlasAttributeDef(name="comment", typeName="string", isOptional=True))NEWLINE entityDefs.append(columnDef)NEWLINENEWLINE relationshipDefs = []NEWLINE # Add RelationshipsNEWLINE relationshipDefs.append(RelationshipTypeDef(NEWLINE name=DB_SCHEMA_RELATIONSHIP_NAME,NEWLINE attributeDefs=[],NEWLINE relationshipCategory="COMPOSITION", # Means the child can't exist without the parentNEWLINE endDef1={ # endDef1 decribes what the parent will have as an attributeNEWLINE "type":DB_ENTITY_NAME, # Type of the parentNEWLINE "name":"schemas", # What the parent will haveNEWLINE "isContainer": True,NEWLINE "cardinality":"SET", # This is related to the cardinality, in this case the parent Server will have a SET of Models.NEWLINE "isLegacyAttribute":FalseNEWLINE },NEWLINE endDef2={ # endDef2 decribes what the child will have as an attributeNEWLINE "type":SCHEMA_ENTITY_NAME, # Type of the childNEWLINE "name":"db", # What the child will haveNEWLINE "isContainer":False,NEWLINE "cardinality":"SINGLE",NEWLINE "isLegacyAttribute":FalseNEWLINE }NEWLINE ))NEWLINE relationshipDefs.append(RelationshipTypeDef(NEWLINE name=SCHEMA_TABLE_RELATIONSHIP_NAME,NEWLINE attributeDefs=[],NEWLINE relationshipCategory="COMPOSITION",NEWLINE endDef1={NEWLINE "type":SCHEMA_ENTITY_NAME,NEWLINE "name":"tables",NEWLINE "isContainer": True,NEWLINE "cardinality":"SET",NEWLINE "isLegacyAttribute":FalseNEWLINE },NEWLINE endDef2={NEWLINE "type":TABLE_ENTITY_NAME,NEWLINE "name":"schema",NEWLINE 
"isContainer":False,NEWLINE "cardinality":"SINGLE",NEWLINE "isLegacyAttribute":FalseNEWLINE }NEWLINE ))NEWLINE relationshipDefs.append(RelationshipTypeDef(NEWLINE name=TABLE_COLUMN_RELATIONSHIP_NAME,NEWLINE attributeDefs=[],NEWLINE relationshipCategory="COMPOSITION",NEWLINE endDef1={NEWLINE "type":TABLE_ENTITY_NAME,NEWLINE "name":"columns",NEWLINE "isContainer": True,NEWLINE "cardinality":"SET",NEWLINE "isLegacyAttribute":FalseNEWLINE },NEWLINE endDef2={NEWLINE "type":COLUMN_ENTITY_NAME,NEWLINE "name":"table",NEWLINE "isContainer":False,NEWLINE "cardinality":"SINGLE",NEWLINE "isLegacyAttribute":FalseNEWLINE }NEWLINE ))NEWLINE return client.upload_typedefs(entityDefs = entityDefs,relationshipDefs=relationshipDefs,force_update=True)NEWLINENEWLINEdef uploadEntity(client,entity):NEWLINE client.upload_entities(batch=[entity])NEWLINE #print(entity)NEWLINENEWLINEdef uploadRelationship(client,relationShipEntity):NEWLINE client.upload_relationship(relationShipEntity)NEWLINE #print(relationship)NEWLINENEWLINEdef createEntities(client,snowflakeMetaData):NEWLINE gt = GuidTracker()NEWLINE for db in snowflakeMetaData:NEWLINE dbGuid = gt.get_guid()NEWLINE dbe = AtlasEntity(NEWLINE name=db["name"], typeName=DB_ENTITY_NAME, qualified_name=db["qualifiedName"],guid=dbGuidNEWLINE )NEWLINE uploadEntity(client,dbe)NEWLINE # -----------------------------------NEWLINE # Create schema entitiesNEWLINE for schema in db["schemas"]:NEWLINE schemaGuid = gt.get_guid()NEWLINE sce = AtlasEntity(NEWLINE name=schema["name"],typeName=SCHEMA_ENTITY_NAME,qualified_name=schema["qualifiedName"],guid=schemaGuidNEWLINE )NEWLINE uploadEntity(client,sce)NEWLINE relationship = {NEWLINE "typeName": DB_SCHEMA_RELATIONSHIP_NAME,NEWLINE "attributes": {},NEWLINE "guid": -100,NEWLINE "provenanceType": 0,NEWLINE "end1": {NEWLINE "guid": dbGuid,NEWLINE "typeName": DB_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": db["qualifiedName"]}NEWLINE },NEWLINE "end2": {NEWLINE "guid": schemaGuid,NEWLINE "typeName": SCHEMA_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": schema["qualifiedName"]}NEWLINE }NEWLINE }NEWLINE uploadRelationship(client, relationship)NEWLINE # ----------------------------------------------------NEWLINE # Create table entitiesNEWLINE for table in schema["tables"]:NEWLINE tableGuid = gt.get_guid()NEWLINE te = AtlasEntity(NEWLINE name=table["name"],typeName=TABLE_ENTITY_NAME,qualified_name=table["qualifiedName"],guid=tableGuidNEWLINE )NEWLINE uploadEntity(client,te)NEWLINE relationship = {NEWLINE "typeName": SCHEMA_TABLE_RELATIONSHIP_NAME,NEWLINE "attributes": {},NEWLINE "guid": -100,NEWLINE "provenanceType": 0,NEWLINE "end1": {NEWLINE "guid": schemaGuid,NEWLINE "typeName": SCHEMA_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": schema["qualifiedName"]}NEWLINE },NEWLINE "end2": {NEWLINE "guid": tableGuid,NEWLINE "typeName": TABLE_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": table["qualifiedName"]}NEWLINE }NEWLINE }NEWLINE uploadRelationship(client,relationship)NEWLINENEWLINE # Create column entitiesNEWLINE for column in table["columns"]:NEWLINE columnGuid = gt.get_guid()NEWLINE ce = AtlasEntity(name=column["name"],typeName=COLUMN_ENTITY_NAME,NEWLINE qualified_name=column["qualifiedName"],guid=columnGuid,NEWLINE attributes={NEWLINE "type": column["type"]NEWLINE }NEWLINE )NEWLINE uploadEntity(client,ce)NEWLINE relationship = {NEWLINE "typeName": TABLE_COLUMN_RELATIONSHIP_NAME,NEWLINE "attributes": {},NEWLINE "guid": -100,NEWLINE "provenanceType": 0,NEWLINE "end1": {NEWLINE "guid": 
tableGuid,NEWLINE "typeName": TABLE_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": table["qualifiedName"]}NEWLINE },NEWLINE "end2": {NEWLINE "guid": columnGuid,NEWLINE "typeName": COLUMN_ENTITY_NAME,NEWLINE "uniqueAttributes": {"qualifiedName": column["qualifiedName"]}NEWLINE }NEWLINE }NEWLINE uploadRelationship(client, relationship)NEWLINE print(column["name"])NEWLINENEWLINEdef cleanup(client):NEWLINE search = client.search_entities("\"Snowflake Database\"")NEWLINE for entity in search:NEWLINE client.delete_entity(guid=[entity["id"]])NEWLINE #print(json.dumps(entity, indent=2))NEWLINENEWLINE client.delete_type(name=DB_SCHEMA_RELATIONSHIP_NAME)NEWLINE client.delete_type(name=SCHEMA_TABLE_RELATIONSHIP_NAME)NEWLINE client.delete_type(name=TABLE_COLUMN_RELATIONSHIP_NAME)NEWLINE client.delete_type(name=DB_ENTITY_NAME)NEWLINE client.delete_type(name=SCHEMA_ENTITY_NAME)NEWLINE client.delete_type(name=TABLE_ENTITY_NAME)NEWLINE client.delete_type(name=COLUMN_ENTITY_NAME)NEWLINENEWLINEdef getSnowflakeMetadata(snowflakeConnectionString, database, snowflakeOdbcConnection):NEWLINE query = f"""SELECTNEWLINE TABLE_CATALOG,NEWLINE TABLE_SCHEMA,NEWLINE TABLE_NAME,NEWLINE COLUMN_NAME,NEWLINE DATA_TYPENEWLINE FROM "{database}".INFORMATION_SCHEMA.COLUMNSNEWLINE """NEWLINE conn = pyodbc.connect(snowflakeConnectionString)NEWLINE df = pd.read_sql_query(query, conn)NEWLINE jsonSchema = []NEWLINE df = df.sort_values(by=[TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME], ascending=True)NEWLINE dbs = df.groupby([TABLE_CATALOG]).groupsNEWLINE for db in dbs:NEWLINE currentdb = { "name": db,"qualifiedName" : snowflakeOdbcConnection,"schemas" :[]}NEWLINE schemas = df[df[TABLE_CATALOG] == db].groupby([TABLE_SCHEMA]).groupsNEWLINE for schema in schemas:NEWLINE currentSchema = {"name": schema,NEWLINE "qualifiedName" : snowflakeOdbcConnection + "/" + schema,NEWLINE "tables" :[]NEWLINE }NEWLINE tables = df[(df[TABLE_CATALOG] == db) & (df[TABLE_SCHEMA] == schema)].groupby([TABLE_NAME]).groupsNEWLINE for table in tables:NEWLINE currentTable= {"name": table,NEWLINE "qualifiedName" : snowflakeOdbcConnection + "/" + schema + "/" + table,NEWLINE "columns" :[]NEWLINE }NEWLINE columns = df[(df[TABLE_CATALOG] == db) & (df[TABLE_SCHEMA] == schema) &NEWLINE (df[TABLE_NAME] == table)].groupby([COLUMN_NAME, DATA_TYPE]).groupsNEWLINE for column, datatype in columns:NEWLINE currentColumn= {"name": column,NEWLINE "qualifiedName" : snowflakeOdbcConnection + "/" + schema + "/" + table + "/" + column,NEWLINE "type" : datatypeNEWLINE }NEWLINE currentTable["columns"].append(currentColumn)NEWLINENEWLINE currentSchema["tables"].append(currentTable)NEWLINENEWLINE currentdb["schemas"].append(currentSchema)NEWLINENEWLINE jsonSchema.append(currentdb)NEWLINENEWLINE return jsonSchemaNEWLINENEWLINEdef getSnowflakeDatabases(snowflakeConnectionString):NEWLINE query = """SELECTNEWLINE DATABASE_NAMENEWLINE FROM INFORMATION_SCHEMA.DATABASESNEWLINE WHERE IS_TRANSIENT = 'NO'NEWLINE """NEWLINE conn = pyodbc.connect(snowflakeConnectionString)NEWLINE df = pd.read_sql_query(query, conn)NEWLINENEWLINE return dfNEWLINENEWLINEif __name__ == "__main__":NEWLINE parser = argparse.ArgumentParser()NEWLINE parser.add_argument("--config", default="config.ini")NEWLINE args, _ = parser.parse_known_args()NEWLINENEWLINE config = configparser.RawConfigParser()NEWLINE config.read(args.config)NEWLINENEWLINE oauth = ServicePrincipalAuthentication(NEWLINE tenant_id=config["PurviewClient"]["TENANT_ID"],NEWLINE client_id=config["PurviewClient"]["CLIENT_ID"],NEWLINE 
client_secret=config["PurviewClient"]["CLIENT_SECRET"]NEWLINE )NEWLINE client = PurviewClient(NEWLINE account_name=config["PurviewClient"]["PURVIEW_ACCOUNT_NAME"],NEWLINE authentication=oauthNEWLINE )NEWLINENEWLINE snowflakeConnectionString = config["Snowflake"]["SNOWFLAKE_CONNECTION_STRING"].strip("'")NEWLINE snowflakeOdbcConnection = config["Snowflake"]["SNOWFLAKE_ODBC_CONNNECTION"].strip("'")NEWLINENEWLINE createEntityDefinitions(client)NEWLINENEWLINE df = getSnowflakeDatabases(snowflakeConnectionString)NEWLINE df.applymap(lambda db:NEWLINE createEntities(client, getSnowflakeMetadata(snowflakeConnectionString, db, snowflakeOdbcConnection + "/" + db))NEWLINE )NEWLINENEWLINE # snowflakeMetadata = getSnowflakeMetadata(snowflakeConnectionString,snowflakeOdbcConnection)NEWLINE # with open('snowflake_metadata.json', 'w') as f:NEWLINE # json.dump(snowflakeMetadata, f)NEWLINENEWLINE # createEntities(client, snowflakeMetadata)NEWLINENEWLINE # print(snowflakeMetadata)NEWLINENEWLINE # cleanup(client)NEWLINE |
import getpass
import os.path as path
import os
import numpy as np
from scipy.stats import describe

HOST, PORT = "141.37.176.188", 22
BASE_URL = "/michael/DoD/baumer_test_prod_var/512x512_samplescale1.0_patchscale1.0/samples"
BASE_DIR = "__sftpcache__"


class SFTP():
    def __init__(self, host=HOST, port=PORT, user=None, password=None, base_dir=BASE_DIR):
        import paramiko
        if not path.exists(base_dir):
            os.makedirs(base_dir)

        # Open a transport and authenticate; prompt for anything not supplied
        self.transport = paramiko.Transport((host, port))
        if user is None:
            user = input("Username:")
        if password is None:
            password = getpass.getpass("Password for " + user + ":")  # prompts for password
        self.transport.connect(username=user, password=password)
        self.sftp = paramiko.SFTPClient.from_transport(self.transport)

    def cd(self, path):
        self.sftp.chdir(path)
        return self

    def get(self, src, dest=BASE_DIR):
        if src[-1] == "*":
            # directory mode: download every file in the current remote directory
            # (dir(self) delegates to __dir__, i.e. the remote listdir())
            local_paths = []
            for f in dir(self):
                local_paths.append(os.path.join(dest, f))
                self.sftp.get(f, local_paths[-1])
            return local_paths
        else:
            self.sftp.get(src, dest)
            return [dest]

    def put(self, src, dest):
        self.sftp.put(src, dest)

    def __len__(self):
        return len(self.sftp.listdir())

    def __dir__(self):
        return self.sftp.listdir()

    def close(self):
        self.sftp.close()
        self.transport.close()
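A minimal usage sketch for the helper above: credentials are prompted for when omitted, the remote directory comes from `BASE_URL`, downloads land in `BASE_DIR`, and directory mode is triggered by a trailing `*`.

```python
sftp = SFTP(user="michael")                 # password is prompted via getpass
local_files = sftp.cd(BASE_URL).get("*")    # download everything in the remote dir
print(len(sftp), "remote files;", len(local_files), "downloaded to", BASE_DIR)
sftp.close()
```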
import serial
import datetime
import csv

ser = serial.Serial('/dev/cu.usbmodem1461', 9600)
try:
    while True:
        # Read one line from the Arduino. The original code called readline()
        # twice per iteration and discarded the first result, silently dropping
        # every other sample.
        a = ser.readline().decode("utf-8")
        rup = datetime.datetime.now().strftime('%m-%d %H:%M:%S')
        a = rup + ',' + a  # prepend a timestamp to the comma-separated reading
        print(a.split(','))

        # Append the raw line; the Arduino output already ends with a newline.
        with open('normalRoad.csv', 'a') as myFile:
            myFile.write(a)
except KeyboardInterrupt:
    # The original ser.close() sat after an infinite loop and was unreachable.
    ser.close()
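The same logger written with `csv.writer` and context managers, as a sketch only: it assumes Python 3, a pyserial version whose `Serial` supports the context-manager protocol, and that the Arduino sends one comma-separated record per line. The port name and output file are the ones used above.

```python
import csv
import datetime
import serial

with serial.Serial('/dev/cu.usbmodem1461', 9600) as ser, \
        open('normalRoad.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    while True:
        # Split the incoming line into fields and prepend a timestamp column.
        fields = ser.readline().decode('utf-8').strip().split(',')
        row = [datetime.datetime.now().strftime('%m-%d %H:%M:%S')] + fields
        print(row)
        writer.writerow(row)
```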
from __future__ import print_function

import pytest

from urbansim_templates.shared import CoreTemplateSettings


def test_property_persistence():
    """
    Confirm CoreTemplateSettings properties persist through to_dict() and from_dict().

    """
    obj = CoreTemplateSettings()
    obj.name = 'name'
    obj.tags = ['tag1', 'tag2']
    obj.notes = 'notes'
    obj.autorun = True
    obj.template = 'CoolNewTemplate'
    obj.template_version = '0.1.dev0'

    d = obj.to_dict()
    print(d)

    obj2 = CoreTemplateSettings.from_dict(d)
    assert(obj2.to_dict() == d)
"""NEWLINEData structures for sparse float data. Life is made simpler by dealing only withNEWLINEfloat64 dataNEWLINE"""NEWLINENEWLINE# pylint: disable=E1101,E1103,W0231,E0202NEWLINENEWLINEfrom numpy import nanNEWLINEimport numpy as npNEWLINENEWLINEfrom pandas.core.common import _pickle_array, _unpickle_array, _try_sortNEWLINEfrom pandas.core.index import Index, MultiIndex, _ensure_indexNEWLINEfrom pandas.core.series import SeriesNEWLINEfrom pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,NEWLINE _default_index)NEWLINEfrom pandas.util.decorators import cache_readonlyNEWLINEimport pandas.core.common as comNEWLINEimport pandas.core.datetools as datetoolsNEWLINENEWLINEfrom pandas.sparse.series import SparseSeriesNEWLINEfrom pandas.util.decorators import AppenderNEWLINEimport pandas.lib as libNEWLINENEWLINENEWLINEclass _SparseMockBlockManager(object):NEWLINENEWLINE def __init__(self, sp_frame):NEWLINE self.sp_frame = sp_frameNEWLINENEWLINE def get(self, item):NEWLINE return self.sp_frame[item].valuesNEWLINENEWLINE def iget(self, i):NEWLINE return self.get(self.sp_frame.columns[i])NEWLINENEWLINE @propertyNEWLINE def shape(self):NEWLINE x, y = self.sp_frame.shapeNEWLINE return y, xNEWLINENEWLINE @propertyNEWLINE def axes(self):NEWLINE return [self.sp_frame.columns, self.sp_frame.index]NEWLINENEWLINEclass SparseDataFrame(DataFrame):NEWLINE """NEWLINE DataFrame containing sparse floating point data in the form of SparseSeriesNEWLINE objectsNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE data : same types as can be passed to DataFrameNEWLINE index : array-like, optionalNEWLINE column : array-like, optionalNEWLINE default_kind : {'block', 'integer'}, default 'block'NEWLINE Default sparse kind for converting Series to SparseSeries. Will notNEWLINE override SparseSeries passed into constructorNEWLINE default_fill_value : floatNEWLINE Default fill_value for converting Series to SparseSeries. 
Will notNEWLINE override SparseSeries passed inNEWLINE """NEWLINE _verbose_info = FalseNEWLINE _columns = NoneNEWLINE _series = NoneNEWLINE _is_mixed_type = FalseNEWLINE ndim = 2NEWLINENEWLINE def __init__(self, data=None, index=None, columns=None,NEWLINE default_kind='block', default_fill_value=None):NEWLINE if default_fill_value is None:NEWLINE default_fill_value = np.nanNEWLINENEWLINE self.default_kind = default_kindNEWLINE self.default_fill_value = default_fill_valueNEWLINENEWLINE if isinstance(data, dict):NEWLINE sdict, columns, index = self._init_dict(data, index, columns)NEWLINE elif isinstance(data, (np.ndarray, list)):NEWLINE sdict, columns, index = self._init_matrix(data, index, columns)NEWLINE elif isinstance(data, DataFrame):NEWLINE sdict, columns, index = self._init_dict(data, data.index,NEWLINE data.columns)NEWLINE elif data is None:NEWLINE sdict = {}NEWLINENEWLINE if index is None:NEWLINE index = Index([])NEWLINENEWLINE if columns is None:NEWLINE columns = Index([])NEWLINE else:NEWLINE for c in columns:NEWLINE sdict[c] = Series(np.nan, index=index)NEWLINENEWLINE self._series = sdictNEWLINE self.columns = columnsNEWLINE self.index = indexNEWLINENEWLINE def _from_axes(self, data, axes):NEWLINE columns, index = axesNEWLINE return self._constructor(data, index=index, columns=columns)NEWLINENEWLINE @cache_readonlyNEWLINE def _data(self):NEWLINE return _SparseMockBlockManager(self)NEWLINENEWLINE def _consolidate_inplace(self):NEWLINE # do nothing when DataFrame calls this methodNEWLINE passNEWLINENEWLINE def convert_objects(self):NEWLINE # XXXNEWLINE return selfNEWLINENEWLINE @propertyNEWLINE def _constructor(self):NEWLINE def wrapper(data, index=None, columns=None):NEWLINE return SparseDataFrame(data, index=index, columns=columns,NEWLINE default_fill_value=self.default_fill_value,NEWLINE default_kind=self.default_kind)NEWLINE return wrapperNEWLINENEWLINE def _init_dict(self, data, index, columns, dtype=None):NEWLINE # pre-filter out columns if we passed itNEWLINE if columns is not None:NEWLINE columns = _ensure_index(columns)NEWLINE data = dict((k, v) for k, v in data.iteritems() if k in columns)NEWLINE else:NEWLINE columns = Index(_try_sort(data.keys()))NEWLINENEWLINE if index is None:NEWLINE index = extract_index(data)NEWLINENEWLINE sp_maker = lambda x: SparseSeries(x, index=index,NEWLINE kind=self.default_kind,NEWLINE fill_value=self.default_fill_value,NEWLINE copy=True)NEWLINENEWLINE sdict = {}NEWLINE for k, v in data.iteritems():NEWLINE if isinstance(v, Series):NEWLINE # Force alignment, no copy necessaryNEWLINE if not v.index.equals(index):NEWLINE v = v.reindex(index)NEWLINENEWLINE if not isinstance(v, SparseSeries):NEWLINE v = sp_maker(v)NEWLINE else:NEWLINE if isinstance(v, dict):NEWLINE v = [v.get(i, nan) for i in index]NEWLINENEWLINE v = sp_maker(v)NEWLINE sdict[k] = vNEWLINENEWLINE # TODO: figure out how to handle this case, all nan's?NEWLINE # add in any other columns we want to have (completeness)NEWLINE nan_vec = np.empty(len(index))NEWLINE nan_vec.fill(nan)NEWLINE for c in columns:NEWLINE if c not in sdict:NEWLINE sdict[c] = sp_maker(nan_vec)NEWLINENEWLINE return sdict, columns, indexNEWLINENEWLINE def _init_matrix(self, data, index, columns, dtype=None):NEWLINE data = _prep_ndarray(data, copy=False)NEWLINE N, K = data.shapeNEWLINE if index is None:NEWLINE index = _default_index(N)NEWLINE if columns is None:NEWLINE columns = _default_index(K)NEWLINENEWLINE if len(columns) != K:NEWLINE raise Exception('Column length mismatch: %d vs. 
%d' %NEWLINE (len(columns), K))NEWLINE if len(index) != N:NEWLINE raise Exception('Index length mismatch: %d vs. %d' %NEWLINE (len(index), N))NEWLINENEWLINE data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])NEWLINE return self._init_dict(data, index, columns, dtype)NEWLINENEWLINE def __array_wrap__(self, result):NEWLINE return SparseDataFrame(result, index=self.index, columns=self.columns,NEWLINE default_kind=self.default_kind,NEWLINE default_fill_value=self.default_fill_value)NEWLINENEWLINE def __getstate__(self):NEWLINE series = dict((k, (v.sp_index, v.sp_values))NEWLINE for k, v in self.iteritems())NEWLINE columns = self.columnsNEWLINE index = self.indexNEWLINENEWLINE return (series, columns, index, self.default_fill_value,NEWLINE self.default_kind)NEWLINENEWLINE def __setstate__(self, state):NEWLINE series, cols, idx, fv, kind = stateNEWLINENEWLINE if not isinstance(cols, Index): # pragma: no coverNEWLINE columns = _unpickle_array(cols)NEWLINE else:NEWLINE columns = colsNEWLINENEWLINE if not isinstance(idx, Index): # pragma: no coverNEWLINE index = _unpickle_array(idx)NEWLINE else:NEWLINE index = idxNEWLINENEWLINE series_dict = {}NEWLINE for col, (sp_index, sp_values) in series.iteritems():NEWLINE series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,NEWLINE fill_value=fv)NEWLINENEWLINE self._series = series_dictNEWLINE self.index = indexNEWLINE self.columns = columnsNEWLINE self.default_fill_value = fvNEWLINE self.default_kind = kindNEWLINENEWLINE def to_dense(self):NEWLINE """NEWLINE Convert to dense DataFrameNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE df : DataFrameNEWLINE """NEWLINE data = dict((k, v.to_dense()) for k, v in self.iteritems())NEWLINE return DataFrame(data, index=self.index)NEWLINENEWLINE def astype(self, dtype):NEWLINE raise NotImplementedErrorNEWLINENEWLINE def copy(self, deep=True):NEWLINE """NEWLINE Make a copy of this SparseDataFrameNEWLINE """NEWLINE series = dict((k, v.copy()) for k, v in self.iteritems())NEWLINE return SparseDataFrame(series, index=self.index, columns=self.columns,NEWLINE default_fill_value=self.default_fill_value,NEWLINE default_kind=self.default_kind)NEWLINENEWLINE @propertyNEWLINE def density(self):NEWLINE """NEWLINE Ratio of non-sparse points to total (dense) data pointsNEWLINE represented in the frameNEWLINE """NEWLINE tot_nonsparse = sum([ser.sp_index.npointsNEWLINE for _, ser in self.iteritems()])NEWLINE tot = len(self.index) * len(self.columns)NEWLINE return tot_nonsparse / float(tot)NEWLINENEWLINE #----------------------------------------------------------------------NEWLINE # Support different internal rep'n of SparseDataFrameNEWLINENEWLINE def _set_item(self, key, value):NEWLINE sp_maker = lambda x: SparseSeries(x, index=self.index,NEWLINE fill_value=self.default_fill_value,NEWLINE kind=self.default_kind)NEWLINE if hasattr(value, '__iter__'):NEWLINE if isinstance(value, Series):NEWLINE clean_series = value.reindex(self.index)NEWLINE if not isinstance(value, SparseSeries):NEWLINE clean_series = sp_maker(clean_series)NEWLINE else:NEWLINE clean_series = sp_maker(value)NEWLINENEWLINE self._series[key] = clean_seriesNEWLINE # ScalarNEWLINE else:NEWLINE self._series[key] = sp_maker(value)NEWLINENEWLINE if key not in self.columns:NEWLINE self._insert_column(key)NEWLINENEWLINE def _insert_column(self, key):NEWLINE self.columns = self.columns.insert(len(self.columns), key)NEWLINENEWLINE def __delitem__(self, key):NEWLINE """NEWLINE Delete column from DataFrameNEWLINE """NEWLINE loc = 
self.columns.get_loc(key)NEWLINE del self._series[key]NEWLINE self._delete_column_index(loc)NEWLINENEWLINE def _delete_column_index(self, loc):NEWLINE if loc == len(self.columns) - 1:NEWLINE new_columns = self.columns[:loc]NEWLINE else:NEWLINE new_columns = Index(np.concatenate((self.columns[:loc],NEWLINE self.columns[loc+1:])))NEWLINE self.columns = new_columnsNEWLINENEWLINE _index = NoneNEWLINE def _set_index(self, index):NEWLINE self._index = _ensure_index(index)NEWLINE for v in self._series.values():NEWLINE v.index = self._indexNEWLINENEWLINE def _get_index(self):NEWLINE return self._indexNEWLINENEWLINE def _get_columns(self):NEWLINE return self._columnsNEWLINENEWLINE def _set_columns(self, cols):NEWLINE if len(cols) != len(self._series):NEWLINE raise Exception('Columns length %d did not match data %d!' %NEWLINE (len(cols), len(self._series)))NEWLINE self._columns = _ensure_index(cols)NEWLINENEWLINE index = property(fget=_get_index, fset=_set_index)NEWLINE columns = property(fget=_get_columns, fset=_set_columns)NEWLINENEWLINE def __getitem__(self, key):NEWLINE """NEWLINE Retrieve column or slice from DataFrameNEWLINE """NEWLINE try:NEWLINE # unsure about how kludgy this isNEWLINE s = self._series[key]NEWLINE s.name = keyNEWLINE return sNEWLINE except (TypeError, KeyError):NEWLINE if isinstance(key, slice):NEWLINE date_rng = self.index[key]NEWLINE return self.reindex(date_rng)NEWLINENEWLINE elif isinstance(key, (np.ndarray, list)):NEWLINE if isinstance(key, list):NEWLINE key = lib.list_to_object_array(key)NEWLINENEWLINE # also raises Exception if object array with NA valuesNEWLINE if com._is_bool_indexer(key):NEWLINE key = np.asarray(key, dtype=bool)NEWLINE return self._getitem_array(key)NEWLINE else: # pragma: no coverNEWLINE raiseNEWLINENEWLINE @Appender(DataFrame.get_value.__doc__, indents=0)NEWLINE def get_value(self, index, col):NEWLINE s = self._series[col]NEWLINE return s.get_value(index)NEWLINENEWLINE def set_value(self, index, col, value):NEWLINE """NEWLINE Put single value at passed column and indexNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE index : row labelNEWLINE col : column labelNEWLINE value : scalar valueNEWLINENEWLINE NotesNEWLINE -----NEWLINE This method *always* returns a new object. 
It is currently notNEWLINE particularly efficient (and potentially very expensive) but is providedNEWLINE for API compatibility with DataFrameNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE frame : DataFrameNEWLINE """NEWLINE dense = self.to_dense().set_value(index, col, value)NEWLINE return dense.to_sparse(kind=self.default_kind,NEWLINE fill_value=self.default_fill_value)NEWLINENEWLINE def _slice(self, slobj, axis=0):NEWLINE if axis == 0:NEWLINE new_index = self.index[slobj]NEWLINE new_columns = self.columnsNEWLINE else:NEWLINE new_index = self.indexNEWLINE new_columns = self.columns[slobj]NEWLINENEWLINE return self.reindex(index=new_index, columns=new_columns)NEWLINENEWLINE def as_matrix(self, columns=None):NEWLINE """NEWLINE Convert the frame to its Numpy-array matrix representationNEWLINENEWLINE Columns are presented in sorted order unless a specific listNEWLINE of columns is provided.NEWLINE """NEWLINE if columns is None:NEWLINE columns = self.columnsNEWLINENEWLINE if len(columns) == 0:NEWLINE return np.zeros((len(self.index), 0), dtype=float)NEWLINENEWLINE return np.array([self[col].values for col in columns]).TNEWLINENEWLINE values = property(as_matrix)NEWLINENEWLINE def xs(self, key, axis=0, copy=False):NEWLINE """NEWLINE Returns a row (cross-section) from the SparseDataFrame as a SeriesNEWLINE object.NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE key : some index contained in the indexNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE xs : SeriesNEWLINE """NEWLINE if axis == 1:NEWLINE data = self[key]NEWLINE return dataNEWLINENEWLINE i = self.index.get_loc(key)NEWLINE series = self._seriesNEWLINE values = [series[k][i] for k in self.columns]NEWLINE return Series(values, index=self.columns)NEWLINENEWLINE #----------------------------------------------------------------------NEWLINE # Arithmetic-related methodsNEWLINENEWLINE def _combine_frame(self, other, func, fill_value=None, level=None):NEWLINE this, other = self.align(other, join='outer', level=level,NEWLINE copy=False)NEWLINE new_index, new_columns = this.index, this.columnsNEWLINENEWLINE if level is not None:NEWLINE raise NotImplementedErrorNEWLINENEWLINE if self.empty and other.empty:NEWLINE return SparseDataFrame(index=new_index)NEWLINENEWLINE new_data = {}NEWLINE if fill_value is not None:NEWLINE # TODO: be a bit more intelligent hereNEWLINE for col in new_columns:NEWLINE if col in this and col in other:NEWLINE dleft = this[col].to_dense()NEWLINE dright = other[col].to_dense()NEWLINE result = dleft._binop(dright, func, fill_value=fill_value)NEWLINE result = result.to_sparse(fill_value=this[col].fill_value)NEWLINE new_data[col] = resultNEWLINE else:NEWLINE for col in new_columns:NEWLINE if col in this and col in other:NEWLINE new_data[col] = func(this[col], other[col])NEWLINENEWLINE return self._constructor(data=new_data, index=new_index,NEWLINE columns=new_columns)NEWLINENEWLINE def _combine_match_index(self, other, func, fill_value=None):NEWLINE new_data = {}NEWLINENEWLINE if fill_value is not None:NEWLINE raise NotImplementedErrorNEWLINENEWLINE new_index = self.index.union(other.index)NEWLINE this = selfNEWLINE if self.index is not new_index:NEWLINE this = self.reindex(new_index)NEWLINENEWLINE if other.index is not new_index:NEWLINE other = other.reindex(new_index)NEWLINENEWLINE for col, series in this.iteritems():NEWLINE new_data[col] = func(series.values, other.values)NEWLINENEWLINE return self._constructor(new_data, index=new_index,NEWLINE columns=self.columns)NEWLINENEWLINE def _combine_match_columns(self, other, func, 
fill_value):NEWLINE # patched version of DataFrame._combine_match_columns to account forNEWLINE # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,NEWLINE # where 3.0 is numpy.float64 and series is a SparseSeries. StillNEWLINE # possible for this to happen, which is bothersomeNEWLINENEWLINE if fill_value is not None:NEWLINE raise NotImplementedErrorNEWLINENEWLINE new_data = {}NEWLINENEWLINE union = intersection = self.columnsNEWLINENEWLINE if not union.equals(other.index):NEWLINE union = other.index.union(self.columns)NEWLINE intersection = other.index.intersection(self.columns)NEWLINENEWLINE for col in intersection:NEWLINE new_data[col] = func(self[col], float(other[col]))NEWLINENEWLINE return self._constructor(new_data, index=self.index,NEWLINE columns=union)NEWLINENEWLINE def _combine_const(self, other, func):NEWLINE new_data = {}NEWLINE for col, series in self.iteritems():NEWLINE new_data[col] = func(series, other)NEWLINENEWLINE return self._constructor(data=new_data, index=self.index,NEWLINE columns=self.columns)NEWLINENEWLINE def _reindex_index(self, index, method, copy, level, fill_value=np.nan,NEWLINE limit=None):NEWLINE if level is not None:NEWLINE raise Exception('Reindex by level not supported for sparse')NEWLINENEWLINE if self.index.equals(index):NEWLINE if copy:NEWLINE return self.copy()NEWLINE else:NEWLINE return selfNEWLINENEWLINE if len(self.index) == 0:NEWLINE return SparseDataFrame(index=index, columns=self.columns)NEWLINENEWLINE indexer = self.index.get_indexer(index, method, limit=limit)NEWLINE indexer = com._ensure_platform_int(indexer)NEWLINE mask = indexer == -1NEWLINE need_mask = mask.any()NEWLINENEWLINE new_series = {}NEWLINE for col, series in self.iteritems():NEWLINE values = series.valuesNEWLINE new = values.take(indexer)NEWLINENEWLINE if need_mask:NEWLINE np.putmask(new, mask, fill_value)NEWLINENEWLINE new_series[col] = newNEWLINENEWLINE return SparseDataFrame(new_series, index=index, columns=self.columns,NEWLINE default_fill_value=self.default_fill_value)NEWLINENEWLINE def _reindex_columns(self, columns, copy, level, fill_value, limit=None):NEWLINE if level is not None:NEWLINE raise Exception('Reindex by level not supported for sparse')NEWLINENEWLINE if com.notnull(fill_value):NEWLINE raise NotImplementedErrorNEWLINENEWLINE if limit:NEWLINE raise NotImplementedErrorNEWLINENEWLINE # TODO: fill value handlingNEWLINE sdict = dict((k, v) for k, v in self.iteritems() if k in columns)NEWLINE return SparseDataFrame(sdict, index=self.index, columns=columns,NEWLINE default_fill_value=self.default_fill_value)NEWLINENEWLINE def _reindex_with_indexers(self, index, row_indexer, columns, col_indexer,NEWLINE copy, fill_value):NEWLINE if columns is None:NEWLINE columns = self.columnsNEWLINENEWLINE new_arrays = {}NEWLINE for col in columns:NEWLINE if col not in self:NEWLINE continueNEWLINE if row_indexer is not None:NEWLINE new_arrays[col] = com.take_1d(self[col].values, row_indexer,NEWLINE fill_value=fill_value)NEWLINE else:NEWLINE new_arrays[col] = self[col]NEWLINENEWLINE return self._constructor(new_arrays, index=index, columns=columns)NEWLINENEWLINE def _rename_index_inplace(self, mapper):NEWLINE self.index = [mapper(x) for x in self.index]NEWLINENEWLINE def _rename_columns_inplace(self, mapper):NEWLINE new_series = {}NEWLINE new_columns = []NEWLINENEWLINE for col in self.columns:NEWLINE new_col = mapper(col)NEWLINE if new_col in new_series: # pragma: no coverNEWLINE raise Exception('Non-unique mapping!')NEWLINE new_series[new_col] = self[col]NEWLINE 
new_columns.append(new_col)NEWLINENEWLINE self.columns = new_columnsNEWLINE self._series = new_seriesNEWLINENEWLINE def take(self, indices, axis=0):NEWLINE """NEWLINE Analogous to ndarray.take, return SparseDataFrame corresponding toNEWLINE requested indices along an axisNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE indices : list / array of intsNEWLINE axis : {0, 1}NEWLINENEWLINE ReturnsNEWLINE -------NEWLINE taken : SparseDataFrameNEWLINE """NEWLINE indices = com._ensure_platform_int(indices)NEWLINE new_values = self.values.take(indices, axis=axis)NEWLINE if axis == 0:NEWLINE new_columns = self.columnsNEWLINE new_index = self.index.take(indices)NEWLINE else:NEWLINE new_columns = self.columns.take(indices)NEWLINE new_index = self.indexNEWLINE return self._constructor(new_values, index=new_index,NEWLINE columns=new_columns)NEWLINENEWLINE def add_prefix(self, prefix):NEWLINE f = (('%s' % prefix) + '%s').__mod__NEWLINE return self.rename(columns=f)NEWLINENEWLINE def add_suffix(self, suffix):NEWLINE f = ('%s' + ('%s' % suffix)).__mod__NEWLINE return self.rename(columns=f)NEWLINENEWLINE def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',NEWLINE sort=False):NEWLINE if on is not None:NEWLINE raise NotImplementedErrorNEWLINE else:NEWLINE return self._join_index(other, how, lsuffix, rsuffix)NEWLINENEWLINE def _join_index(self, other, how, lsuffix, rsuffix):NEWLINE if isinstance(other, Series):NEWLINE assert(other.name is not None)NEWLINE other = SparseDataFrame({other.name : other},NEWLINE default_fill_value=self.default_fill_value)NEWLINENEWLINE join_index = self.index.join(other.index, how=how)NEWLINENEWLINE this = self.reindex(join_index)NEWLINE other = other.reindex(join_index)NEWLINENEWLINE this, other = this._maybe_rename_join(other, lsuffix, rsuffix)NEWLINENEWLINE result_series = this._seriesNEWLINE other_series = other._seriesNEWLINE result_series.update(other_series)NEWLINENEWLINE return self._constructor(result_series, index=join_index)NEWLINENEWLINE def _maybe_rename_join(self, other, lsuffix, rsuffix):NEWLINE intersection = self.columns.intersection(other.columns)NEWLINENEWLINE if len(intersection) > 0:NEWLINE if not lsuffix and not rsuffix:NEWLINE raise Exception('columns overlap: %s' % intersection)NEWLINENEWLINE def lrenamer(x):NEWLINE if x in intersection:NEWLINE return '%s%s' % (x, lsuffix)NEWLINE return xNEWLINENEWLINE def rrenamer(x):NEWLINE if x in intersection:NEWLINE return '%s%s' % (x, rsuffix)NEWLINE return xNEWLINENEWLINE this = self.rename(columns=lrenamer)NEWLINE other = other.rename(columns=rrenamer)NEWLINE else:NEWLINE this = selfNEWLINENEWLINE return this, otherNEWLINENEWLINE def transpose(self):NEWLINE """NEWLINE Returns a DataFrame with the rows/columns switched.NEWLINE """NEWLINE return SparseDataFrame(self.values.T, index=self.columns,NEWLINE columns=self.index,NEWLINE default_fill_value=self.default_fill_value,NEWLINE default_kind=self.default_kind)NEWLINE T = property(transpose)NEWLINENEWLINE @Appender(DataFrame.count.__doc__)NEWLINE def count(self, axis=0, **kwds):NEWLINE return self.apply(lambda x: x.count(), axis=axis)NEWLINENEWLINE def cumsum(self, axis=0):NEWLINE """NEWLINE Return SparseDataFrame of cumulative sums over requested axis.NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE axis : {0, 1}NEWLINE 0 for row-wise, 1 for column-wiseNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE y : SparseDataFrameNEWLINE """NEWLINE return self.apply(lambda x: x.cumsum(), axis=axis)NEWLINENEWLINE def shift(self, periods, freq=None, 
**kwds):NEWLINE """NEWLINE Analogous to DataFrame.shiftNEWLINE """NEWLINE from pandas.core.series import _resolve_offsetNEWLINENEWLINE offset = _resolve_offset(freq, kwds)NEWLINENEWLINE new_series = {}NEWLINE if offset is None:NEWLINE new_index = self.indexNEWLINE for col, s in self.iteritems():NEWLINE new_series[col] = s.shift(periods)NEWLINE else:NEWLINE new_index = self.index.shift(periods, offset)NEWLINE for col, s in self.iteritems():NEWLINE new_series[col] = SparseSeries(s.sp_values, index=new_index,NEWLINE sparse_index=s.sp_index,NEWLINE fill_value=s.fill_value)NEWLINENEWLINE return SparseDataFrame(new_series, index=new_index,NEWLINE columns=self.columns,NEWLINE default_fill_value=self.default_fill_value,NEWLINE default_kind=self.default_kind)NEWLINENEWLINE def apply(self, func, axis=0, broadcast=False):NEWLINE """NEWLINE Analogous to DataFrame.apply, for SparseDataFrameNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE func : functionNEWLINE Function to apply to each columnNEWLINE axis : {0, 1}NEWLINE broadcast : bool, default FalseNEWLINE For aggregation functions, return object of same size with valuesNEWLINE propagatedNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE applied : Series or SparseDataFrameNEWLINE """NEWLINE if not len(self.columns):NEWLINE return selfNEWLINENEWLINE if isinstance(func, np.ufunc):NEWLINE new_series = {}NEWLINE for k, v in self.iteritems():NEWLINE applied = func(v)NEWLINE applied.fill_value = func(applied.fill_value)NEWLINE new_series[k] = appliedNEWLINE return SparseDataFrame(new_series, index=self.index,NEWLINE columns=self.columns,NEWLINE default_fill_value=self.default_fill_value,NEWLINE default_kind=self.default_kind)NEWLINE else:NEWLINE if not broadcast:NEWLINE return self._apply_standard(func, axis)NEWLINE else:NEWLINE return self._apply_broadcast(func, axis)NEWLINENEWLINE def applymap(self, func):NEWLINE """NEWLINE Apply a function to a DataFrame that is intended to operateNEWLINE elementwise, i.e. 
like doing map(func, series) for each series in theNEWLINE DataFrameNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE func : functionNEWLINE Python function, returns a single value from a single valueNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE applied : DataFrameNEWLINE """NEWLINE return self.apply(lambda x: map(func, x))NEWLINENEWLINE @Appender(DataFrame.fillna.__doc__)NEWLINE def fillna(self, value=None, method='pad', inplace=False, limit=None):NEWLINE new_series = {}NEWLINE for k, v in self.iterkv():NEWLINE new_series[k] = v.fillna(value=value, method=method, limit=limit)NEWLINENEWLINE if inplace:NEWLINE self._series = new_seriesNEWLINE return selfNEWLINE else:NEWLINE return self._constructor(new_series, index=self.index,NEWLINE columns=self.columns)NEWLINENEWLINEdef stack_sparse_frame(frame):NEWLINE """NEWLINE Only makes sense when fill_value is NaNNEWLINE """NEWLINE lengths = [s.sp_index.npoints for _, s in frame.iteritems()]NEWLINE nobs = sum(lengths)NEWLINENEWLINE # this is pretty fastNEWLINE minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)NEWLINENEWLINE inds_to_concat = []NEWLINE vals_to_concat = []NEWLINE for _, series in frame.iteritems():NEWLINE if not np.isnan(series.fill_value):NEWLINE raise Exception('This routine assumes NaN fill value')NEWLINENEWLINE int_index = series.sp_index.to_int_index()NEWLINE inds_to_concat.append(int_index.indices)NEWLINE vals_to_concat.append(series.sp_values)NEWLINENEWLINE major_labels = np.concatenate(inds_to_concat)NEWLINE stacked_values = np.concatenate(vals_to_concat)NEWLINE index = MultiIndex(levels=[frame.index, frame.columns],NEWLINE labels=[major_labels, minor_labels])NEWLINENEWLINE lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,NEWLINE columns=['foo'])NEWLINE return lp.sortlevel(level=0)NEWLINENEWLINENEWLINEdef homogenize(series_dict):NEWLINE """NEWLINE Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndexNEWLINE corresponding to the locations where they all have dataNEWLINENEWLINE ParametersNEWLINE ----------NEWLINE series_dict : dict or DataFrameNEWLINENEWLINE NotesNEWLINE -----NEWLINE Using the dumbest algorithm I could think of. Should put some more thoughtNEWLINE into thisNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE homogenized : dict of SparseSeriesNEWLINE """NEWLINE index = NoneNEWLINENEWLINE need_reindex = FalseNEWLINENEWLINE for _, series in series_dict.iteritems():NEWLINE if not np.isnan(series.fill_value):NEWLINE raise Exception('this method is only valid with NaN fill values')NEWLINENEWLINE if index is None:NEWLINE index = series.sp_indexNEWLINE elif not series.sp_index.equals(index):NEWLINE need_reindex = TrueNEWLINE index = index.intersect(series.sp_index)NEWLINENEWLINE if need_reindex:NEWLINE output = {}NEWLINE for name, series in series_dict.iteritems():NEWLINE if not series.sp_index.equals(index):NEWLINE series = series.sparse_reindex(index)NEWLINENEWLINE output[name] = seriesNEWLINE else:NEWLINE output = series_dictNEWLINENEWLINE return outputNEWLINE |
from typing import Any, Dict, GeneratorNEWLINENEWLINEimport pytestNEWLINEfrom pydantic import BaseModelNEWLINENEWLINEfrom xpresso import App, Dependant, FromFormData, Path, SecurityNEWLINEfrom xpresso.security import OAuth2, OAuth2PasswordRequestFormStrictNEWLINEfrom xpresso.testclient import TestClientNEWLINEfrom xpresso.typing import AnnotatedNEWLINENEWLINEreusable_oauth2 = OAuth2(NEWLINE flows={NEWLINE "password": {NEWLINE "tokenUrl": "token",NEWLINE "scopes": {"read:users": "Read the users", "write:users": "Create users"},NEWLINE }NEWLINE }NEWLINE)NEWLINENEWLINENEWLINEclass User(BaseModel):NEWLINE username: strNEWLINENEWLINENEWLINE# Here we use string annotations to test themNEWLINEdef get_current_user(oauth_header: "Annotated[str, Security(reusable_oauth2)]"):NEWLINE user = User(username=oauth_header)NEWLINE return userNEWLINENEWLINENEWLINE# Here we use string annotations to test themNEWLINEdef login(form_data: "FromFormData[OAuth2PasswordRequestFormStrict]"):NEWLINE return form_dataNEWLINENEWLINENEWLINE# Here we use string annotations to test themNEWLINEdef read_current_user(current_user: "Annotated[User, Dependant(get_current_user)]"):NEWLINE return current_userNEWLINENEWLINENEWLINEapp = App(NEWLINE [NEWLINE Path("/users/me", get=read_current_user),NEWLINE Path("/login", post=login),NEWLINE ]NEWLINE)NEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef client() -> Generator[TestClient, None, None]:NEWLINE with TestClient(app) as client:NEWLINE yield clientNEWLINENEWLINENEWLINEopenapi_schema: Dict[str, Any] = {NEWLINE "openapi": "3.0.3",NEWLINE "info": {"title": "API", "version": "0.1.0"},NEWLINE "paths": {NEWLINE "/users/me": {NEWLINE "get": {NEWLINE "responses": {"200": {"description": "Successful Response"}},NEWLINE "security": [{"OAuth2": []}],NEWLINE }NEWLINE },NEWLINE "/login": {NEWLINE "post": {NEWLINE "responses": {NEWLINE "200": {"description": "Successful Response"},NEWLINE "422": {NEWLINE "description": "Validation Error",NEWLINE "content": {NEWLINE "application/json": {NEWLINE "schema": {NEWLINE "$ref": "#/components/schemas/HTTPValidationError"NEWLINE }NEWLINE }NEWLINE },NEWLINE },NEWLINE },NEWLINE "requestBody": {NEWLINE "content": {NEWLINE "application/x-www-form-urlencoded": {NEWLINE "schema": {NEWLINE "required": [NEWLINE "grant_type",NEWLINE "username",NEWLINE "password",NEWLINE "scopes",NEWLINE ],NEWLINE "type": "object",NEWLINE "properties": {NEWLINE "grant_type": {NEWLINE "title": "Grant Type",NEWLINE "enum": ["password"],NEWLINE "type": "string",NEWLINE },NEWLINE "username": {"title": "Username", "type": "string"},NEWLINE "password": {"title": "Password", "type": "string"},NEWLINE "scopes": {NEWLINE "title": "Scopes",NEWLINE "type": "array",NEWLINE "items": {"type": "string"},NEWLINE },NEWLINE "client_id": {NEWLINE "title": "Client Id",NEWLINE "type": "string",NEWLINE "nullable": True,NEWLINE },NEWLINE "client_secret": {NEWLINE "title": "Client Secret",NEWLINE "type": "string",NEWLINE "nullable": True,NEWLINE },NEWLINE },NEWLINE },NEWLINE "encoding": {NEWLINE "grant_type": {"style": "form", "explode": True},NEWLINE "username": {"style": "form", "explode": True},NEWLINE "password": {"style": "form", "explode": True},NEWLINE "scopes": {"style": "spaceDelimited", "explode": False},NEWLINE "client_id": {"style": "form", "explode": True},NEWLINE "client_secret": {"style": "form", "explode": True},NEWLINE },NEWLINE }NEWLINE },NEWLINE "required": True,NEWLINE },NEWLINE }NEWLINE },NEWLINE },NEWLINE "components": {NEWLINE "schemas": {NEWLINE "ValidationError": {NEWLINE "title": 
"ValidationError",NEWLINE "required": ["loc", "msg", "type"],NEWLINE "type": "object",NEWLINE "properties": {NEWLINE "loc": {NEWLINE "title": "Location",NEWLINE "type": "array",NEWLINE "items": {"oneOf": [{"type": "string"}, {"type": "integer"}]},NEWLINE },NEWLINE "msg": {"title": "Message", "type": "string"},NEWLINE "type": {"title": "Error Type", "type": "string"},NEWLINE },NEWLINE },NEWLINE "HTTPValidationError": {NEWLINE "title": "HTTPValidationError",NEWLINE "type": "object",NEWLINE "properties": {NEWLINE "detail": {NEWLINE "title": "Detail",NEWLINE "type": "array",NEWLINE "items": {"$ref": "#/components/schemas/ValidationError"},NEWLINE }NEWLINE },NEWLINE },NEWLINE },NEWLINE "securitySchemes": {NEWLINE "OAuth2": {NEWLINE "type": "oauth2",NEWLINE "flows": {NEWLINE "password": {NEWLINE "scopes": {NEWLINE "read:users": "Read the users",NEWLINE "write:users": "Create users",NEWLINE },NEWLINE "tokenUrl": "token",NEWLINE }NEWLINE },NEWLINE }NEWLINE },NEWLINE },NEWLINE}NEWLINENEWLINENEWLINEdef test_openapi_schema(client: TestClient):NEWLINE response = client.get("/openapi.json")NEWLINE assert response.status_code == 200, response.textNEWLINE assert response.json() == openapi_schemaNEWLINENEWLINENEWLINEdef test_security_oauth2(client: TestClient):NEWLINE response = client.get("/users/me", headers={"Authorization": "Bearer footokenbar"})NEWLINE assert response.status_code == 200, response.textNEWLINE assert response.json() == {"username": "Bearer footokenbar"}NEWLINENEWLINENEWLINEdef test_security_oauth2_password_other_header(client: TestClient):NEWLINE response = client.get("/users/me", headers={"Authorization": "Other footokenbar"})NEWLINE assert response.status_code == 200, response.textNEWLINE assert response.json() == {"username": "Other footokenbar"}NEWLINENEWLINENEWLINEdef test_security_oauth2_password_bearer_no_header(client: TestClient):NEWLINE response = client.get("/users/me")NEWLINE assert response.status_code == 401, response.textNEWLINE assert response.json() == {"detail": "Not authenticated"}NEWLINENEWLINENEWLINErequired_params = {NEWLINE "detail": [NEWLINE {NEWLINE "loc": ["body", "grant_type"],NEWLINE "msg": "field required",NEWLINE "type": "value_error.missing",NEWLINE },NEWLINE {NEWLINE "loc": ["body", "username"],NEWLINE "msg": "field required",NEWLINE "type": "value_error.missing",NEWLINE },NEWLINE {NEWLINE "loc": ["body", "password"],NEWLINE "msg": "field required",NEWLINE "type": "value_error.missing",NEWLINE },NEWLINE ]NEWLINE}NEWLINENEWLINEgrant_type_required = {NEWLINE "detail": [NEWLINE {NEWLINE "loc": ["body", "grant_type"],NEWLINE "msg": "field required",NEWLINE "type": "value_error.missing",NEWLINE }NEWLINE ]NEWLINE}NEWLINENEWLINEgrant_type_incorrect = {NEWLINE "detail": [NEWLINE {NEWLINE "loc": ["body", "grant_type"],NEWLINE "msg": "unexpected value; permitted: 'password'",NEWLINE "type": "value_error.const",NEWLINE "ctx": {"given": "incorrect", "permitted": ["password"]},NEWLINE }NEWLINE ]NEWLINE}NEWLINENEWLINENEWLINE@pytest.mark.parametrize(NEWLINE "data,expected_status,expected_response",NEWLINE [NEWLINE (None, 422, required_params),NEWLINE ({"username": "johndoe", "password": "secret"}, 422, grant_type_required),NEWLINE (NEWLINE {"username": "johndoe", "password": "secret", "grant_type": "incorrect"},NEWLINE 422,NEWLINE grant_type_incorrect,NEWLINE ),NEWLINE (NEWLINE {"username": "johndoe", "password": "secret", "grant_type": "password"},NEWLINE 200,NEWLINE {NEWLINE "grant_type": "password",NEWLINE "username": "johndoe",NEWLINE "password": "secret",NEWLINE 
"scopes": [],NEWLINE "client_id": None,NEWLINE "client_secret": None,NEWLINE },NEWLINE ),NEWLINE ],NEWLINE)NEWLINEdef test_strict_login(data, expected_status, expected_response, client: TestClient):NEWLINE response = client.post(NEWLINE "/login",NEWLINE data=data,NEWLINE headers={"Content-Type": "application/x-www-form-urlencoded"},NEWLINE )NEWLINE assert response.status_code == expected_statusNEWLINE assert response.json() == expected_responseNEWLINE |
#!/usr/bin/env pythonNEWLINE# setup.pyNEWLINENEWLINEfrom setuptools import setup, find_packagesNEWLINENEWLINEsetup(NEWLINE name='indexserial',NEWLINE version='1.1.0',NEWLINE description='Serialize objects and allow random reading.',NEWLINE author='JamzumSum',NEWLINE author_email='zzzzss990315@gmail.com',NEWLINE install_requires=[NEWLINE 'torch',NEWLINE ],NEWLINE packages=find_packages(where="src"),NEWLINE package_dir={"": "src"},NEWLINE)NEWLINE |
import osNEWLINEimport clickNEWLINENEWLINEdef register(app):NEWLINE @app.cli.group()NEWLINE def translate():NEWLINE """translation and localization"""NEWLINE passNEWLINENEWLINE @translate.command()NEWLINE def update():NEWLINE """Update all languages."""NEWLINE if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):NEWLINE raise RuntimeError('extract command failed')NEWLINE if os.system('pybabel update -i messages.pot -d app/translations'):NEWLINE raise RuntimeError('update command failed')NEWLINE os.remove('messages.pot')NEWLINENEWLINENEWLINE @translate.command()NEWLINE def compile():NEWLINE """Compile all languages."""NEWLINE if os.system('pybabel compile -d app/translations'):NEWLINE raise RuntimeError('compile command failed')NEWLINENEWLINENEWLINENEWLINE @translate.command()NEWLINE @click.argument('lang')NEWLINE def init(lang):NEWLINE """Initialize a new language."""NEWLINE if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):NEWLINE raise RuntimeError('extract command failed')NEWLINE if os.system(NEWLINE 'pybabel init -i messages.pot -d app/translations -l ' + lang):NEWLINE raise RuntimeError('init command failed')NEWLINE os.remove('messages.pot')NEWLINE |
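A minimal sketch of how the translate command group above is typically wired in, assuming register(app) is called from the application factory; the module layout and entry-point file name below are hypothetical:

    # microblog.py -- hypothetical entry point; adjust to the real project layout
    from app import create_app, cli  # assumed package exposing create_app() and this cli module

    app = create_app()
    cli.register(app)

    # With FLASK_APP pointing at this file, the group becomes available as:
    #   flask translate init es    # create catalogs for a new language
    #   flask translate update     # re-extract strings and merge into existing catalogs
    #   flask translate compile    # compile .po catalogs into .mo files

Each subcommand shells out to pybabel, so Babel and a babel.cfg extraction config must already be present in the working directory.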
import subprocessNEWLINEfrom pkgmt import newNEWLINENEWLINEimport pytestNEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef uninstall():NEWLINE yieldNEWLINE subprocess.check_call(['pip', 'uninstall', 'somepkg', '-y'])NEWLINENEWLINENEWLINEdef test_package(tmp_empty, uninstall):NEWLINE new.package('somepkg')NEWLINENEWLINE subprocess.check_call(['pip', 'install', 'somepkg/'])NEWLINE |
# encoding: utf-8NEWLINEimport jsonNEWLINENEWLINEimport pytestNEWLINEimport requests_mockNEWLINENEWLINEimport configfetcherNEWLINEimport phabricatorNEWLINEimport wikibugsNEWLINEfrom tests.common import rootNEWLINEfrom tests.wikibugs_network.common import parse_request, conduit_connect, unexpectedNEWLINENEWLINENEWLINEclass WikibugsFixture:NEWLINE def __init__(self):NEWLINE self.events = []NEWLINE self.wikibugs = wikibugs.Wikibugs2(NEWLINE configfetcher.ConfigFetcher(str(root / "config.json.example"))NEWLINE )NEWLINE self.wikibugs.process_event = lambda event: self.events.append(event)NEWLINENEWLINE def poll(self):NEWLINE self.wikibugs.poll()NEWLINENEWLINENEWLINE@pytest.fixture()NEWLINEdef bugs():NEWLINE return WikibugsFixture()NEWLINENEWLINENEWLINEdef feed_query_initial(request, context):NEWLINE content = parse_request(request)NEWLINE assert int(content['limit']) == 1NEWLINE assert 'before' not in contentNEWLINE return json.loads(r"""{"result":{"PHID-STRY-cdxv7sji5d7wnjmiuqgv":{"class":"PhabricatorApplicationTransactionFeedStory","epoch":1577802875,"authorPHID":"PHID-USER-pzp7mdlx7otgdlggnyhh","chronologicalKey":"6776611750075855743","data":{"objectPHID":"PHID-TASK-rnay3rzefpqhoaqm3guo","transactionPHIDs":{"PHID-XACT-TASK-5esu7y3d7evlsi2":"PHID-XACT-TASK-5esu7y3d7evlsi2"}}}},"error_code":null,"error_info":null}""") # noqaNEWLINENEWLINENEWLINEdef feed_query_second(request, context):NEWLINE content = parse_request(request)NEWLINE assert content['before'] == 6776611750075855743NEWLINE assert content['view'] == 'data'NEWLINE return json.loads(r"""{"result":[],"error_code":null,"error_info":null}""")NEWLINENEWLINENEWLINEdef feed_query_third(request, context):NEWLINE content = parse_request(request)NEWLINE assert content['before'] == 6776611750075855743NEWLINE assert content['view'] == 'data'NEWLINE return json.loads(r"""{"result":{"PHID-STRY-etrbfg7qqflcsoexaxqr":{"class":"PhabricatorApplicationTransactionFeedStory","epoch":1577804347,"authorPHID":"PHID-USER-idceizaw6elwiwm5xshb","chronologicalKey":"6776618070283272953","data":{"objectPHID":"PHID-TASK-he2h6hqmwrdrav3cxqew","transactionPHIDs":{"PHID-XACT-TASK-k6asmqpfv2t37tp":"PHID-XACT-TASK-k6asmqpfv2t37tp"}}},"PHID-STRY-x6pr64eeimmcjl3jbsay":{"class":"PhabricatorApplicationTransactionFeedStory","epoch":1577804344,"authorPHID":"PHID-USER-idceizaw6elwiwm5xshb","chronologicalKey":"6776618060350723377","data":{"objectPHID":"PHID-TASK-he2h6hqmwrdrav3cxqew","transactionPHIDs":{"PHID-XACT-TASK-ix5urhvrpvn22e2":"PHID-XACT-TASK-ix5urhvrpvn22e2"}}},"PHID-STRY-cpcsc3r3444i3vaw66bo":{"class":"PhabricatorApplicationTransactionFeedStory","epoch":1577804267,"authorPHID":"PHID-USER-muirnivxp5hzppn2a3z7","chronologicalKey":"6776617727166200626","data":{"objectPHID":"PHID-TASK-dgq26etiz4wecd24gkmb","transactionPHIDs":{"PHID-XACT-TASK-zd6b2kmmj5pnfwm":"PHID-XACT-TASK-zd6b2kmmj5pnfwm"}}}},"error_code":null,"error_info":null}""") # noqaNEWLINENEWLINENEWLINEdef feed_query_error_response(request, context):NEWLINE return json.loads(r"""{"result":null,"error_code":"ERR-CONDUIT-CORE","error_info":"Cursor \"6771969043218032437\" does not identify a valid object in query \"PhabricatorFeedQuery\"."}""") # noqaNEWLINENEWLINENEWLINEdef test_polling(bugs):NEWLINE with requests_mock.mock() as m:NEWLINE m.post('/api/conduit.connect', [{'json': conduit_connect}, {'json': unexpected}])NEWLINE m.post('/api/feed.query', [{'json': feed_query_initial}, {'json': feed_query_second}, {'json': unexpected}])NEWLINE bugs.poll()NEWLINE assert bugs.events == []NEWLINENEWLINE 
m.post('/api/feed.query', [{'json': feed_query_third}, {'json': unexpected}])NEWLINE bugs.poll()NEWLINENEWLINE assert len(bugs.events) == 3NEWLINENEWLINE # TODO: add more extensive testsNEWLINENEWLINENEWLINEdef test_error_response(bugs):NEWLINE with requests_mock.mock() as m:NEWLINE m.post('/api/conduit.connect', [{'json': conduit_connect}, {'json': unexpected}])NEWLINE m.post('/api/feed.query', [{'json': feed_query_initial}, {'json': feed_query_second}])NEWLINE bugs.poll()NEWLINENEWLINE m.post('/api/feed.query', [{'json': feed_query_error_response}, {'json': unexpected}])NEWLINE with pytest.raises(phabricator.PhabricatorException):NEWLINE bugs.poll()NEWLINE |
# (C) Copyright Artificial Brain 2021.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINENEWLINENEWLINEfrom quantumcat.gates.custom_gates.braket.u_gate import UGateNEWLINEfrom quantumcat.gates.custom_gates.braket.u1_gate import U1GateNEWLINEfrom quantumcat.gates.custom_gates.braket.u2_gate import U2GateNEWLINEfrom quantumcat.gates.custom_gates.braket.u3_gate import U3GateNEWLINEfrom quantumcat.gates.custom_gates.braket.cu_gate import CUGateNEWLINEfrom quantumcat.gates.custom_gates.braket.ch_gate import CHGateNEWLINEfrom quantumcat.gates.custom_gates.braket.crx_gate import CRXGateNEWLINEfrom quantumcat.gates.custom_gates.braket.r_gate import RGateNEWLINEfrom quantumcat.gates.custom_gates.braket.cry_gate import CRYGateNEWLINEfrom quantumcat.gates.custom_gates.braket.crz_gate import CRZGateNEWLINEfrom quantumcat.gates.custom_gates.braket.csx_gate import CSXGateNEWLINEfrom quantumcat.gates.custom_gates.braket.cu1_gate import CU1GateNEWLINEfrom quantumcat.gates.custom_gates.braket.dcx_gate import DCXGateNEWLINEfrom quantumcat.gates.custom_gates.braket.rc3x_gate import RC3XGateNEWLINEfrom quantumcat.gates.custom_gates.braket.rccx_gate import RCCXGateNEWLINEfrom quantumcat.gates.custom_gates.braket.rzx_gate import RZXGateNEWLINEfrom quantumcat.gates.custom_gates.braket.cu3_gate import CU3GateNEWLINE |
import _plotly_utils.basevalidatorsNEWLINENEWLINENEWLINEclass LenValidator(_plotly_utils.basevalidators.NumberValidator):NEWLINE def __init__(NEWLINE self, plotly_name="len", parent_name="scatterpolargl.marker.colorbar", **kwargsNEWLINE ):NEWLINE super(LenValidator, self).__init__(NEWLINE plotly_name=plotly_name,NEWLINE parent_name=parent_name,NEWLINE edit_type=kwargs.pop("edit_type", "calc"),NEWLINE min=kwargs.pop("min", 0),NEWLINE **kwargsNEWLINE )NEWLINE |
#!/usr/bin/env python3NEWLINENEWLINE# do lxc list --format=json swift-runwayNEWLINE# ...and delete themNEWLINENEWLINENEWLINE# while it would be cool if this worked, it doesn't and the docs are badNEWLINE# https://linuxcontainers.org/lxc/documentation/#pythonNEWLINE# import lxcNEWLINE# for defined in (True, False):NEWLINE# for active in (True, False):NEWLINE# x = lxc.list_containers(active=active, defined=defined)NEWLINE# print(x, '=> lxc.list_containers(active=%s, defined=%s)' % (active, defined))NEWLINENEWLINENEWLINEimport argparseNEWLINEimport globNEWLINEimport jsonNEWLINEimport osNEWLINEimport reNEWLINEimport shlexNEWLINEimport shutilNEWLINEimport subprocessNEWLINEimport sysNEWLINENEWLINENEWLINEdef parse_profiles_list(cli_output):NEWLINE profiles = []NEWLINE lines = cli_output.split('\n')NEWLINE for line in lines:NEWLINE result = re.match('(^\|\s{1}|^)([\w-]+)', line)NEWLINE if result is not None:NEWLINE profiles.append(result.group(2))NEWLINE return profilesNEWLINENEWLINENEWLINEif os.geteuid() != 0:NEWLINE print('must be run as root')NEWLINE sys.exit(1)NEWLINENEWLINEDEFAULT_PREFIX = 'swift-runway-'NEWLINEparser = argparse.ArgumentParser()NEWLINEparser.add_argument('-a', '--all', action='store_true', default=False,NEWLINE help="Delete everything")NEWLINENEWLINEparser.add_argument('-p', '--prefix', default=None,NEWLINE help="Prefix to look for when deleting. Default: "NEWLINE "'{}'".format(DEFAULT_PREFIX))NEWLINENEWLINEargs = parser.parse_args()NEWLINENEWLINEdelete_everything = args.allNEWLINEprefix = args.prefixNEWLINEif prefix is None:NEWLINE prefix_was_provided = FalseNEWLINE prefix = DEFAULT_PREFIXNEWLINEelse:NEWLINE prefix_was_provided = TrueNEWLINENEWLINEVOLUME_GROUP = 'swift-runway-vg01'NEWLINENEWLINElist_command = 'lxc list --format=json'NEWLINEp = subprocess.run(shlex.split(list_command), stdout=subprocess.PIPE)NEWLINENEWLINEcontainers = json.loads(p.stdout.decode())NEWLINEto_delete = [x['name'] for x in containers if x['name'].startswith(prefix)]NEWLINENEWLINEif to_delete:NEWLINE delete_command = 'lxc delete --force %s' % ' '.join(to_delete)NEWLINE p = subprocess.run(shlex.split(delete_command))NEWLINE print('%d containers deleted' % len(to_delete))NEWLINEelse:NEWLINE print('No containers to delete')NEWLINENEWLINE# delete associated lvm volumesNEWLINEtry:NEWLINENEWLINE if prefix_was_provided:NEWLINE lvlist = glob.glob('/dev/%s/%s*' % (VOLUME_GROUP, prefix))NEWLINE else:NEWLINE # We'll delete all the lvm volumes if a prefix was not providedNEWLINE lvlist = glob.glob('/dev/%s/*' % VOLUME_GROUP)NEWLINEexcept FileNotFoundError:NEWLINE print('No volumes to delete')NEWLINEelse:NEWLINE num_deleted = 0NEWLINE for logical_volume in lvlist:NEWLINE delete_command = 'lvremove --yes %s' % logical_volumeNEWLINE try:NEWLINE p = subprocess.run(NEWLINE shlex.split(delete_command),NEWLINE stdout=subprocess.PIPE,NEWLINE stderr=subprocess.PIPE,NEWLINE universal_newlines=True,NEWLINE check=True)NEWLINE except subprocess.CalledProcessError as err:NEWLINE print('Error deleting %s:\n%s' % (logical_volume,NEWLINE err.stderr.rstrip()),NEWLINE file=sys.stderr)NEWLINE else:NEWLINE num_deleted += 1NEWLINE else:NEWLINE print('%d volumes deleted' % num_deleted)NEWLINENEWLINE# delete associated lxc profilesNEWLINEprofile_list_command = 'lxc profile list'NEWLINEp = subprocess.run(shlex.split(profile_list_command), stdout=subprocess.PIPE)NEWLINEto_delete = []NEWLINEfor line in p.stdout.decode().split('\n'):NEWLINE parts = line.split('|')NEWLINE try:NEWLINE profile_name = parts[1].strip()NEWLINE if 
profile_name.startswith(prefix):NEWLINE to_delete.append(profile_name)NEWLINE except IndexError:NEWLINE passNEWLINEif to_delete:NEWLINE for profile in to_delete:NEWLINE delete_command = 'lxc profile delete %s' % profileNEWLINE p = subprocess.run(shlex.split(delete_command))NEWLINE print('%d profiles deleted' % len(to_delete))NEWLINEelse:NEWLINE print('No profiles to delete')NEWLINENEWLINE# delete container working spacesNEWLINEfor dirname in os.listdir('guest_workspaces'):NEWLINE if dirname == 'README':NEWLINE continueNEWLINE dirname = 'guest_workspaces/' + dirnameNEWLINE shutil.rmtree(dirname)NEWLINENEWLINE# delete snapshotted container imagesNEWLINEimages_to_delete = []NEWLINEimage_list_command = 'lxc image list description="Created by swift runway"'NEWLINEp = subprocess.run(shlex.split(image_list_command), stdout=subprocess.PIPE)NEWLINEfor line in p.stdout.decode().split('\n'):NEWLINE if "Created by swift runway" in line:NEWLINE parts = line.split('|')NEWLINE fingerprint = parts[2].strip()NEWLINE alias = parts[1].strip()NEWLINE # If we're not deleting everything, we ONLY delete images whose aliasNEWLINE # starts with the given prefix.NEWLINE if delete_everything or (alias != "" and alias.startswith(prefix)):NEWLINE images_to_delete.append(fingerprint)NEWLINEif images_to_delete:NEWLINE print('Deleting %d images' % len(images_to_delete))NEWLINE image_delete_command = 'lxc image delete %s' % ' '.join(images_to_delete)NEWLINE p = subprocess.run(shlex.split(image_delete_command))NEWLINEelse:NEWLINE print('No images to delete')NEWLINE |
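Representative invocations of the cleanup script above, assuming it is saved as an executable file (the name teardown.py is a placeholder); the script itself insists on being run as root:

    # sudo ./teardown.py                     # remove containers, volumes and profiles with the default 'swift-runway-' prefix
    # sudo ./teardown.py --prefix test-run-  # only touch items whose names start with 'test-run-'
    # sudo ./teardown.py --all               # additionally delete every image tagged 'Created by swift runway'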
import loggingNEWLINENEWLINEimport numpyNEWLINEfrom numpy.linalg import pinvNEWLINENEWLINEfrom colormath import color_constantsNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINE# noinspection PyPep8NamingNEWLINEdef _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation):NEWLINE """NEWLINE Calculate the correct transformation matrix based on origin and targetNEWLINE illuminants. The observer angle must be the same between illuminants.NEWLINENEWLINE See colormath.color_constants.ADAPTATION_MATRICES for a list of possibleNEWLINE adaptations.NEWLINENEWLINE Detailed conversion documentation is available at:NEWLINE http://brucelindbloom.com/Eqn_ChromAdapt.htmlNEWLINE """NEWLINE # Get the appropriate transformation matrix, [MsubA].NEWLINE m_sharp = color_constants.ADAPTATION_MATRICES[adaptation]NEWLINENEWLINE # In case the white-points are still input as stringsNEWLINE # Get white-points for illuminantNEWLINE if type(wp_src) == str:NEWLINE orig_illum = wp_src.lower()NEWLINE wp_src = color_constants.ILLUMINANTS[observer][orig_illum]NEWLINE elif hasattr(wp_src, '__iter__'):NEWLINE wp_src = wp_srcNEWLINENEWLINE if type(wp_dst) == str:NEWLINE targ_illum = wp_dst.lower()NEWLINE wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]NEWLINE elif hasattr(wp_dst, '__iter__'):NEWLINE wp_dst = wp_dstNEWLINENEWLINE # Sharpened cone responses ~ rho gamma beta ~ sharpened r g bNEWLINE rgb_src = numpy.dot(m_sharp, wp_src)NEWLINE rgb_dst = numpy.dot(m_sharp, wp_dst)NEWLINENEWLINE # Ratio of whitepoint sharpened responsesNEWLINE m_rat = numpy.diag(rgb_dst / rgb_src)NEWLINENEWLINE # Final transformation matrixNEWLINE m_xfm = numpy.dot(numpy.dot(pinv(m_sharp), m_rat), m_sharp)NEWLINENEWLINE return m_xfmNEWLINENEWLINENEWLINE# noinspection PyPep8NamingNEWLINEdef apply_chromatic_adaptation(val_x, val_y, val_z, orig_illum, targ_illum,NEWLINE observer='2', adaptation='bradford'):NEWLINE """NEWLINE Applies a chromatic adaptation matrix to convert XYZ values betweenNEWLINE illuminants. It is important to recognize that color transformation resultsNEWLINE in color errors, determined by how far the original illuminant is from theNEWLINE target illuminant. 
For example, D65 to A could result in very high maximumNEWLINE deviance.NEWLINENEWLINE An informative article with estimate average Delta E values for eachNEWLINE illuminant conversion may be found at:NEWLINENEWLINE http://brucelindbloom.com/ChromAdaptEval.htmlNEWLINE """NEWLINENEWLINE # It's silly to have to do this, but some people may want to call thisNEWLINE # function directly, so we'll protect them from messing up upper/lower case.NEWLINE adaptation = adaptation.lower()NEWLINENEWLINE # Get white-points for illuminantNEWLINE if type(orig_illum) == str:NEWLINE orig_illum = orig_illum.lower()NEWLINE wp_src = color_constants.ILLUMINANTS[observer][orig_illum]NEWLINE elif hasattr(orig_illum, '__iter__'):NEWLINE wp_src = orig_illumNEWLINENEWLINE if type(targ_illum) == str:NEWLINE targ_illum = targ_illum.lower()NEWLINE wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]NEWLINE elif hasattr(targ_illum, '__iter__'):NEWLINE wp_dst = targ_illumNEWLINENEWLINE logger.debug(" \* Applying adaptation matrix: %s", adaptation)NEWLINE # Retrieve the appropriate transformation matrix from the constants.NEWLINE transform_matrix = _get_adaptation_matrix(wp_src, wp_dst,NEWLINE observer, adaptation)NEWLINENEWLINE # Stuff the XYZ values into a NumPy matrix for conversion.NEWLINE XYZ_matrix = numpy.array((val_x, val_y, val_z))NEWLINE # Perform the adaptation via matrix multiplication.NEWLINE result_matrix = numpy.dot(transform_matrix, XYZ_matrix)NEWLINENEWLINE # Return individual X, Y, and Z coordinates.NEWLINE return result_matrix[0], result_matrix[1], result_matrix[2]NEWLINENEWLINENEWLINE# noinspection PyPep8NamingNEWLINEdef apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'):NEWLINE """NEWLINE Convenience function to apply an adaptation directly to a Color object.NEWLINE """NEWLINENEWLINE xyz_x = color.xyz_xNEWLINE xyz_y = color.xyz_yNEWLINE xyz_z = color.xyz_zNEWLINE orig_illum = color.illuminantNEWLINE targ_illum = targ_illum.lower()NEWLINE observer = color.observerNEWLINE adaptation = adaptation.lower()NEWLINENEWLINE # Return individual X, Y, and Z coordinates.NEWLINE color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(NEWLINE xyz_x, xyz_y, xyz_z, orig_illum, targ_illum,NEWLINE observer=observer, adaptation=adaptation)NEWLINE color.set_illuminant(targ_illum)NEWLINENEWLINE return colorNEWLINE |
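A usage sketch for apply_chromatic_adaptation above, adapting an arbitrary XYZ triple from D65 to D50 with the default Bradford transform; the import path and the numeric values are assumptions for illustration:

    from colormath.chromatic_adaptation import apply_chromatic_adaptation  # assumed module path

    # Illustrative XYZ values only; 'd65' and 'd50' are expected keys of
    # color_constants.ILLUMINANTS for the 2-degree observer.
    x, y, z = apply_chromatic_adaptation(
        0.25, 0.40, 0.10,
        orig_illum='d65', targ_illum='d50',
        observer='2', adaptation='bradford')
    print(x, y, z)  # the same colour expressed as XYZ under D50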
# Copyright (C) 2020-2021 Intel CorporationNEWLINE# SPDX-License-Identifier: Apache-2.0NEWLINENEWLINE"""You may copy this file as the starting point of your own model."""NEWLINENEWLINEimport numpy as npNEWLINEfrom logging import getLoggerNEWLINEfrom torchvision.datasets import ImageFolderNEWLINEfrom torchvision.transforms import ToTensorNEWLINEfrom torch.utils.data import random_splitNEWLINEfrom urllib.request import urlretrieveNEWLINEfrom hashlib import sha384NEWLINEfrom os import path, makedirsNEWLINEfrom zipfile import ZipFileNEWLINEfrom tqdm import tqdmNEWLINEimport torchNEWLINEfrom collections.abc import IterableNEWLINENEWLINElogger = getLogger(__name__)NEWLINENEWLINENEWLINEclass HistologyDataset(ImageFolder):NEWLINE """Colorectal Histology Dataset."""NEWLINENEWLINE URL = "https://zenodo.org/record/53169/files/Kather_" \NEWLINE "texture_2016_image_tiles_5000.zip?download=1"NEWLINE FILENAME = "Kather_texture_2016_image_tiles_5000.zip"NEWLINE FOLDER_NAME = "Kather_texture_2016_image_tiles_5000"NEWLINE ZIP_SHA384 = '7d86abe1d04e68b77c055820c2a4c582a1d25d2983e38ab724e'\NEWLINE 'ac75affce8b7cb2cbf5ba68848dcfd9d84005d87d6790'NEWLINE DEFAULT_PATH = path.join(path.expanduser('~'), '.openfl', 'data')NEWLINENEWLINE def __init__(self, root: str = DEFAULT_PATH, **kwargs) -> None:NEWLINE """Initialize."""NEWLINE makedirs(root, exist_ok=True)NEWLINE filepath = path.join(root, HistologyDataset.FILENAME)NEWLINE if not path.exists(filepath):NEWLINE self.pbar = tqdm(total=None)NEWLINE urlretrieve(HistologyDataset.URL, filepath, self.report_hook) # nosecNEWLINE assert sha384(open(filepath, 'rb').read( # nosecNEWLINE path.getsize(filepath))).hexdigest() == HistologyDataset.ZIP_SHA384NEWLINE with ZipFile(filepath, 'r') as f:NEWLINE f.extractall(root)NEWLINENEWLINE super(HistologyDataset, self).__init__(NEWLINE path.join(root, HistologyDataset.FOLDER_NAME), **kwargs)NEWLINENEWLINE def report_hook(self, count, block_size, total_size):NEWLINE """Update progressbar."""NEWLINE if self.pbar.total is None and total_size:NEWLINE self.pbar.total = total_sizeNEWLINE progress_bytes = count * block_sizeNEWLINE self.pbar.update(progress_bytes - self.pbar.n)NEWLINENEWLINE def __getitem__(self, index):NEWLINE """Allow getting items by slice index."""NEWLINE if isinstance(index, Iterable):NEWLINE return [super(HistologyDataset, self).__getitem__(i) for i in index]NEWLINE else:NEWLINE return super(HistologyDataset, self).__getitem__(index)NEWLINENEWLINENEWLINEdef one_hot(labels, classes):NEWLINE """NEWLINE One Hot encode a vector.NEWLINENEWLINE Args:NEWLINE labels (list): List of labels to onehot encodeNEWLINE classes (int): Total number of categorical classesNEWLINENEWLINE Returns:NEWLINE np.array: Matrix of one-hot encoded labelsNEWLINE """NEWLINE return np.eye(classes)[labels]NEWLINENEWLINENEWLINEdef _load_raw_datashards(shard_num, collaborator_count, train_split_ratio=0.8):NEWLINE """NEWLINE Load the raw data by shard.NEWLINENEWLINE Returns tuples of the dataset shard divided into training and validation.NEWLINENEWLINE Args:NEWLINE shard_num (int): The shard number to useNEWLINE collaborator_count (int): The number of collaborators in the federationNEWLINENEWLINE Returns:NEWLINE 2 tuples: (image, label) of the training, validation datasetNEWLINE """NEWLINE dataset = HistologyDataset(transform=ToTensor())NEWLINE n_train = int(train_split_ratio * len(dataset))NEWLINE n_valid = len(dataset) - n_trainNEWLINE ds_train, ds_val = random_split(NEWLINE dataset, lengths=[n_train, n_valid], 
generator=torch.manual_seed(0))NEWLINENEWLINE # create the shardsNEWLINE X_train, y_train = list(zip(*ds_train[shard_num::collaborator_count]))NEWLINE X_train, y_train = np.stack(X_train), np.array(y_train)NEWLINENEWLINE X_valid, y_valid = list(zip(*ds_val[shard_num::collaborator_count]))NEWLINE X_valid, y_valid = np.stack(X_valid), np.array(y_valid)NEWLINENEWLINE return (X_train, y_train), (X_valid, y_valid)NEWLINENEWLINENEWLINEdef load_histology_shard(shard_num, collaborator_count,NEWLINE categorical=False, channels_last=False, **kwargs):NEWLINE """NEWLINE Load the Histology dataset.NEWLINENEWLINE Args:NEWLINE shard_num (int): The shard to use from the datasetNEWLINE collaborator_count (int): The number of collaborators in the federationNEWLINE categorical (bool): True = convert the labels to one-hot encodedNEWLINE vectors (Default = True)NEWLINE channels_last (bool): True = The input images have the channelsNEWLINE last (Default = True)NEWLINE **kwargs: Additional parameters to pass to the functionNEWLINENEWLINE Returns:NEWLINE list: The input shapeNEWLINE int: The number of classesNEWLINE numpy.ndarray: The training dataNEWLINE numpy.ndarray: The training labelsNEWLINE numpy.ndarray: The validation dataNEWLINE numpy.ndarray: The validation labelsNEWLINE """NEWLINE img_rows, img_cols = 150, 150NEWLINE num_classes = 8NEWLINENEWLINE (X_train, y_train), (X_valid, y_valid) = _load_raw_datashards(NEWLINE shard_num, collaborator_count)NEWLINENEWLINE if channels_last:NEWLINE X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)NEWLINE X_valid = X_valid.reshape(X_valid.shape[0], img_rows, img_cols, 3)NEWLINE input_shape = (img_rows, img_cols, 3)NEWLINE else:NEWLINE X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)NEWLINE X_valid = X_valid.reshape(X_valid.shape[0], 3, img_rows, img_cols)NEWLINE input_shape = (3, img_rows, img_cols)NEWLINENEWLINE logger.info(f'Histology > X_train Shape : {X_train.shape}')NEWLINE logger.info(f'Histology > y_train Shape : {y_train.shape}')NEWLINE logger.info(f'Histology > Train Samples : {X_train.shape[0]}')NEWLINE logger.info(f'Histology > Valid Samples : {X_valid.shape[0]}')NEWLINENEWLINE if categorical:NEWLINE # convert class vectors to binary class matricesNEWLINE y_train = one_hot(y_train, num_classes)NEWLINE y_valid = one_hot(y_valid, num_classes)NEWLINENEWLINE return input_shape, num_classes, X_train, y_train, X_valid, y_validNEWLINE |
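A quick usage sketch for load_histology_shard above, run from the same module; the shard arguments are illustrative, and the expected shapes follow from the 150x150 RGB tiles and 8 classes declared in the function:

    # First of two collaborators, one-hot labels, channels-first layout.
    input_shape, n_classes, X_train, y_train, X_valid, y_valid = load_histology_shard(
        shard_num=0, collaborator_count=2, categorical=True, channels_last=False)

    print(input_shape)    # (3, 150, 150)
    print(n_classes)      # 8
    print(y_train.shape)  # (n_train_samples, 8) after one-hot encoding

Note that constructing the dataset downloads and unpacks the Kather histology archive into ~/.openfl/data on first use.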
# Read N and print the square of every even number from 1 to N.NEWLINEN = int(input())NEWLINEfor i in range(1, N + 1):NEWLINE    if i % 2 == 0:NEWLINE        print('%d^2 = %d' % (i, i ** 2)) |
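A worked example of the snippet above: with 7 on stdin it prints the squares of the even numbers up to 7.

    # stdin:  7
    # stdout:
    #   2^2 = 4
    #   4^2 = 16
    #   6^2 = 36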
import loggingNEWLINEfrom pathlib import PathNEWLINEfrom typing import DictNEWLINENEWLINEimport colorlogNEWLINEfrom concurrent_log_handler import ConcurrentRotatingFileHandlerNEWLINEfrom logging.handlers import SysLogHandlerNEWLINENEWLINEfrom mogua.util.path import mkdir, path_from_rootNEWLINENEWLINENEWLINEdef initialize_logging(service_name: str, logging_config: Dict, root_path: Path):NEWLINE log_path = path_from_root(root_path, logging_config.get("log_filename", "log/debug.log"))NEWLINE log_date_format = "%Y-%m-%dT%H:%M:%S"NEWLINENEWLINE mkdir(str(log_path.parent))NEWLINE file_name_length = 33 - len(service_name)NEWLINE if logging_config["log_stdout"]:NEWLINE handler = colorlog.StreamHandler()NEWLINE handler.setFormatter(NEWLINE colorlog.ColoredFormatter(NEWLINE f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "NEWLINE f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",NEWLINE datefmt=log_date_format,NEWLINE reset=True,NEWLINE )NEWLINE )NEWLINENEWLINE logger = colorlog.getLogger()NEWLINE logger.addHandler(handler)NEWLINE else:NEWLINE logger = logging.getLogger()NEWLINE maxrotation = logging_config.get("log_maxfilesrotation", 7)NEWLINE handler = ConcurrentRotatingFileHandler(log_path, "a", maxBytes=20 * 1024 * 1024, backupCount=maxrotation)NEWLINE handler.setFormatter(NEWLINE logging.Formatter(NEWLINE fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",NEWLINE datefmt=log_date_format,NEWLINE )NEWLINE )NEWLINE logger.addHandler(handler)NEWLINENEWLINE if logging_config.get("log_syslog", False):NEWLINE log_syslog_host = logging_config.get("log_syslog_host", "localhost")NEWLINE log_syslog_port = logging_config.get("log_syslog_port", 514)NEWLINE log_syslog_handler = SysLogHandler(address=(log_syslog_host, log_syslog_port))NEWLINE log_syslog_handler.setFormatter(logging.Formatter(fmt=f"{service_name} %(message)s", datefmt=log_date_format))NEWLINE logger = logging.getLogger()NEWLINE logger.addHandler(log_syslog_handler)NEWLINENEWLINE if "log_level" in logging_config:NEWLINE if logging_config["log_level"] == "CRITICAL":NEWLINE logger.setLevel(logging.CRITICAL)NEWLINE elif logging_config["log_level"] == "ERROR":NEWLINE logger.setLevel(logging.ERROR)NEWLINE elif logging_config["log_level"] == "WARNING":NEWLINE logger.setLevel(logging.WARNING)NEWLINE elif logging_config["log_level"] == "INFO":NEWLINE logger.setLevel(logging.INFO)NEWLINE elif logging_config["log_level"] == "DEBUG":NEWLINE logger.setLevel(logging.DEBUG)NEWLINE logging.getLogger("aiosqlite").setLevel(logging.INFO) # Too much logging on debug levelNEWLINE logging.getLogger("websockets").setLevel(logging.INFO) # Too much logging on debug levelNEWLINE else:NEWLINE logger.setLevel(logging.INFO)NEWLINE else:NEWLINE logger.setLevel(logging.INFO)NEWLINE |
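A minimal call into initialize_logging above, using only configuration keys the function itself reads; the service name and root path are placeholders, and the call is assumed to run where the function is importable:

    from pathlib import Path

    logging_config = {
        "log_filename": "log/debug.log",  # resolved relative to root_path
        "log_stdout": True,               # take the colourised stream-handler branch
        "log_level": "INFO",
        "log_syslog": False,
    }
    initialize_logging("wallet", logging_config, Path("/tmp/mogua-root"))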
#!/usr/bin/env python3NEWLINENEWLINE"""Unit tests for Mininet Topologies in mininet_test_topo"""NEWLINENEWLINEfrom unittest import TestCase, mainNEWLINENEWLINEfrom clib.config_generator import FaucetFakeOFTopoGeneratorNEWLINENEWLINENEWLINEclass FaucetTopoTest(TestCase):NEWLINE """Tests for Faucet test suite mininet Topo class generator"""NEWLINENEWLINE serial = 0NEWLINENEWLINE START_PORT = 5NEWLINE PORT_ORDER = [0, 1, 2, 3]NEWLINENEWLINE class FakeExtendedHost:NEWLINE """Fake class for a mininet extended host"""NEWLINENEWLINE def get_serialno(self, *_args, **_kwargs):NEWLINE """"Return mock serial number"""NEWLINE self.serial += 1NEWLINE return self.serialNEWLINENEWLINE def test_port_order(self):NEWLINE """Test port order extension & port order option"""NEWLINE port_order = [3, 2, 1, 0]NEWLINE extended = FaucetFakeOFTopoGenerator.extend_port_order(port_order, max_length=8)NEWLINE self.assertEqual(extended, [3, 2, 1, 0, 7, 6, 5, 4])NEWLINE port_order = [1, 2, 3, 4, 0]NEWLINE extended = FaucetFakeOFTopoGenerator.extend_port_order(port_order, max_length=10)NEWLINE self.assertEqual(extended, [1, 2, 3, 4, 0, 6, 7, 8, 9, 5])NEWLINE host_links = {0: [0], 1: [1]}NEWLINE host_vlans = {0: 0, 1: 0}NEWLINE switch_links = [(0, 1)]NEWLINE link_vlans = {(0, 1): [0]}NEWLINE port_order = [3, 2, 1, 0]NEWLINE expected_ports = [self.START_PORT + port for port in port_order]NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=self.START_PORT, port_order=port_order,NEWLINE get_serialno=self.get_serialno)NEWLINE s1_name = topo.switches_by_id[0]NEWLINE s1_ports = list(topo.ports[s1_name].keys())NEWLINE self.assertEqual(s1_ports, expected_ports[:2])NEWLINE s2_name = topo.switches_by_id[1]NEWLINE s2_ports = list(topo.ports[s2_name].keys())NEWLINE self.assertEqual(s2_ports, expected_ports[:2])NEWLINENEWLINE def test_start_port(self):NEWLINE """Test the topology start port parameter option"""NEWLINE start_port = 55NEWLINE host_links = {0: [0], 1: [1]}NEWLINE host_vlans = {0: 0, 1: 0}NEWLINE switch_links = [(0, 1)]NEWLINE link_vlans = {(0, 1): [0]}NEWLINE port_order = [3, 2, 1, 0]NEWLINE expected_ports = [start_port + port for port in port_order]NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=start_port, port_order=port_order,NEWLINE get_serialno=self.get_serialno)NEWLINE s1_name, s2_name = topo.switches_by_id.values()NEWLINE h1_name, h2_name = topo.hosts_by_id.values()NEWLINE self.assertEqual(topo.ports[s1_name][expected_ports[0]][0], s2_name)NEWLINE self.assertEqual(topo.ports[s2_name][expected_ports[0]][0], s1_name)NEWLINE self.assertEqual(topo.ports[s1_name][expected_ports[1]][0], h1_name)NEWLINE self.assertEqual(topo.ports[s2_name][expected_ports[1]][0], h2_name)NEWLINENEWLINE def test_hw_build(self):NEWLINE """Test the topology is built with hardware requirements"""NEWLINE host_links = {0: [0], 1: [1]}NEWLINE host_vlans = {0: 0, 1: 0}NEWLINE switch_links = [(0, 1)]NEWLINE link_vlans = {(0, 1): [0]}NEWLINE hw_dpid = 0x123NEWLINE hw_ports = {1: 'p1', 2: 'p2', 3: 'p3', 4: 'p4', 5: 'p5', 6: 'p6'}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE hw_dpid=hw_dpid, hw_ports=hw_ports,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE self.assertEqual(topo.dpids_by_id[0], 
hw_dpid)NEWLINE self.assertEqual(list(topo.ports[topo.switches_by_id[0]].keys()), [1, 2])NEWLINENEWLINE def test_no_links(self):NEWLINE """Test single switch topology"""NEWLINE host_links = {0: [0]}NEWLINE host_vlans = {0: 0}NEWLINE switch_links = {}NEWLINE link_vlans = {}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE self.assertEqual(len(topo.hosts()), 1)NEWLINE self.assertEqual(len(topo.switches()), 1)NEWLINE self.assertEqual(len(topo.links()), 1)NEWLINE host_name = topo.hosts_by_id[0]NEWLINE switch_name = topo.switches_by_id[0]NEWLINE self.assertEqual((switch_name, host_name), topo.links()[0])NEWLINENEWLINE def test_build(self):NEWLINE """Test the topology is built correctly"""NEWLINE host_links = {0: [0], 1: [1]}NEWLINE host_vlans = {0: 0, 1: [0, 1]}NEWLINE switch_links = [(0, 1), (0, 1), (0, 1)]NEWLINE link_vlans = {(0, 1): [0, 1]}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE self.assertEqual(len(topo.dpids_by_id), 2)NEWLINE self.assertEqual(len(topo.hosts_by_id), 2)NEWLINE self.assertEqual(len(topo.switches_by_id), 2)NEWLINE _, host_port_maps, link_port_maps = topo.create_port_maps()NEWLINE self.assertEqual(len(link_port_maps[(0, 1)]), 3)NEWLINE self.assertEqual(len(host_port_maps[0]), 1)NEWLINE self.assertEqual(len(host_port_maps[1]), 1)NEWLINE host0, host1 = topo.hosts_by_id.values()NEWLINE dp0, dp1 = topo.switches_by_id.values()NEWLINE links = topo.links()NEWLINE self.assertIn((dp0, host0), links)NEWLINE self.assertIn((dp1, host1), links)NEWLINE self.assertIn((dp0, dp1), links)NEWLINE self.assertEqual(links.count((dp0, dp1)), 3)NEWLINENEWLINE def test_host_options(self):NEWLINE """Test the topology correctly provides mininet host options"""NEWLINE host_options = {NEWLINE 0: {'inNamespace': True, 'ip': '127.0.0.1'},NEWLINE 1: {'cls': self.FakeExtendedHost}}NEWLINE host_links = {0: [0], 1: [0]}NEWLINE host_vlans = {0: 0, 1: None}NEWLINE switch_links = []NEWLINE link_vlans = {}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE host_options=host_options,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE for host_id, opts in host_options.items():NEWLINE info = topo.nodeInfo(topo.hosts_by_id[host_id])NEWLINE for key, value in opts.items():NEWLINE self.assertIn(key, info)NEWLINE self.assertEqual(value, info[key])NEWLINENEWLINE def test_link_port_map(self):NEWLINE """Test correctly generated link port map"""NEWLINE host_links = {0: [0], 1: [1]}NEWLINE host_vlans = {0: 0, 1: 0}NEWLINE switch_links = [(0, 1), (0, 1), (1, 2)]NEWLINE link_vlans = {edge: None for edge in switch_links}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE link_port_maps = topo._create_link_port_map()NEWLINE self.assertEqual(NEWLINE link_port_maps,NEWLINE {(0, 1): [5, 6], (1, 0): [5, 6], (1, 2): [7], (2, 1): [5]})NEWLINENEWLINE def test_host_port_map(self):NEWLINE """Test correctly generated 
host port map"""NEWLINE host_links = {0: [0, 2], 1: [1]}NEWLINE host_vlans = {0: 0, 1: 0}NEWLINE switch_links = [(0, 1), (0, 1), (1, 2)]NEWLINE link_vlans = {edge: None for edge in switch_links}NEWLINE topo = FaucetFakeOFTopoGenerator(NEWLINE '', '', '',NEWLINE 2, False,NEWLINE host_links, host_vlans, switch_links, link_vlans,NEWLINE start_port=self.START_PORT, port_order=self.PORT_ORDER,NEWLINE get_serialno=self.get_serialno)NEWLINE host_port_maps = topo._create_host_port_map()NEWLINE self.assertEqual(NEWLINE host_port_maps,NEWLINE {0: {0: [7], 2: [6]}, 1: {1: [8]}})NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE main()NEWLINE |
import timeNEWLINEimport mrcfileNEWLINEimport argparseNEWLINEimport numpy as npNEWLINEimport multiprocessingNEWLINEfrom scipy import ndimage as ndiNEWLINEfrom scipy.stats import wasserstein_distanceNEWLINEfrom skimage import transform, measureNEWLINENEWLINESHIFT = ['Euclidean', 'L1', 'cosine'] # Metrics requiring real space translationNEWLINENEWLINEdef main():NEWLINE """calculates similarity between line projections from 2D class averages"""NEWLINE NEWLINE parser = argparse.ArgumentParser(description='compare similarity of 2D class averages based on common lines')NEWLINE NEWLINE parser.add_argument('-i', '--input', action='store', dest='mrc_input', required=True,NEWLINE help='path to mrcs file of 2D class averages')NEWLINE NEWLINE parser.add_argument('-o', '--outpath', action='store', dest='outpath', required=True,NEWLINE help='path for output files')NEWLINE NEWLINE parser.add_argument('-m', '--metric', action='store', dest='metric', required=False, NEWLINE default='Euclidean', choices=['Euclidean', 'L1', 'cosine', 'EMD', 'correlate'],NEWLINE help='choose scoring method, default Euclidean')NEWLINE NEWLINE parser.add_argument('-s', '--scale_factor', action='store', dest='scale_factor', required=False, type=float, default=1,NEWLINE help='scale factor for downsampling. (e.g. -s 2 converts 200pix box --> 100pix box)')NEWLINE NEWLINE parser.add_argument('-c', '--num_workers', action='store', dest='num_workers', required=False, type=int, default=1,NEWLINE help='number of CPUs to use, default 1')NEWLINE NEWLINE parser.add_argument('-d', '--domain', action='store', dest='domain', required=False, NEWLINE default='Fourier', choices=['Fourier', 'Real'], help='Fourier or Real space, default Fourier')NEWLINE NEWLINE parser.add_argument('-t', '--translate', action='store', dest='translate', required=False, NEWLINE default='full', choices=['full', 'valid'],NEWLINE help='indicate size of score vector, numpy convention, default full')NEWLINE NEWLINE parser.add_argument('-a', '--angular_sampling', action='store', dest='angular_sampling', required=False, NEWLINE type=int, default=5, help='angle sampling for 1D projections in degrees, default 5')NEWLINENEWLINE args = parser.parse_args()NEWLINENEWLINE if args.domain == 'Fourier':NEWLINE rotation_degrees = np.arange(0, 180, args.angular_sampling)NEWLINE else:NEWLINE rotation_degrees = np.arange(0, 360, args.angular_sampling)NEWLINE NEWLINE shape, projection_2D = get_projection_2D(mrcs=args.mrc_input, factor=args.scale_factor)NEWLINE NEWLINE num_class_avg = len(projection_2D)NEWLINE num_1D = num_class_avg*len(rotation_degrees)NEWLINE NEWLINE print("number of 2D class averages: {}".format(num_class_avg))NEWLINE print("number of 1D projection vectors: {}".format(num_1D))NEWLINE print("total number of pairwise scores: {}".format(int(num_1D*(num_1D-1)/2)))NEWLINENEWLINE if args.metric == 'Euclidean':NEWLINE pairwise_score = pairwise_l2NEWLINE elif args.metric == 'L1':NEWLINE pairwise_score = pairwise_l1NEWLINE elif args.metric == 'cosine':NEWLINE pairwise_score = pairwise_cosineNEWLINE elif args.metric == 'EMD':NEWLINE pairwise_score = pairwise_wassersteinNEWLINE elif args.metric == 'correlate':NEWLINE pairwise_score = pairwise_correlateNEWLINE NEWLINE if args.metric in SHIFT:NEWLINE wrapper_function = wrapper_slide_functionNEWLINE else:NEWLINE wrapper_function = wrapper_single_functionNEWLINE NEWLINE final_scores = {}NEWLINE NEWLINE with multiprocessing.Pool(args.num_workers) as pool:NEWLINE for i in range(num_class_avg-1):NEWLINE line_projections_1 = vectorize(i, 
projection_2D[i], rotation_degrees, shape, args.domain)NEWLINE for j in range(i+1, num_class_avg):NEWLINE line_projections_2 = vectorize(j, projection_2D[j], rotation_degrees, shape, args.domain)NEWLINE NEWLINE projection_pairs = []NEWLINE for line_1 in line_projections_1.values():NEWLINE for line_2 in line_projections_2.values():NEWLINE projection_pairs.append((line_1, line_2))NEWLINE NEWLINE pair_scores = pool.starmap(NEWLINE wrapper_function, NEWLINE [(pair, pairwise_score, args.translate, args.domain) for pair in projection_pairs]NEWLINE )NEWLINENEWLINE optimum = min(pair_scores, key = lambda x: x[4])NEWLINENEWLINE avg_1, deg_1, avg_2, deg_2, score = [value for value in optimum]NEWLINENEWLINE final_scores[(avg_1, avg_2)] = (deg_1, deg_2, score)NEWLINE final_scores[(avg_2, avg_1)] = (deg_2, deg_1, score)NEWLINE NEWLINE write_scores(final_scores, outpath=args.outpath)NEWLINENEWLINE NEWLINEclass Projection:NEWLINE """for 1D projection vectors"""NEWLINE NEWLINE def __init__(self, NEWLINE class_avg,NEWLINE angle,NEWLINE vector): NEWLINENEWLINE self.class_avg = class_avgNEWLINE self.angle = angleNEWLINE self.vector = vectorNEWLINE NEWLINE def size(self):NEWLINE return len(self.vector)NEWLINENEWLINE NEWLINEdef get_projection_2D(mrcs, factor):NEWLINE """read, scale and extract class averages"""NEWLINE NEWLINE projection_2D = {}NEWLINENEWLINE with mrcfile.open(mrcs) as mrc:NEWLINE for i, data in enumerate(mrc.data):NEWLINE projection_2D[i] = dataNEWLINE mrc.close()NEWLINENEWLINE shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0] NEWLINE NEWLINE for k, avg in projection_2D.items():NEWLINE if factor == 1:NEWLINE projection_2D[k] = extract_class_avg(avg.copy())NEWLINE else:NEWLINE scaled_img = transform.rescale(NEWLINE avg, NEWLINE scale=(1/factor), NEWLINE anti_aliasing=True, NEWLINE multichannel=False, # Add to supress warningNEWLINE mode='constant' # Add to supress warningNEWLINE ) NEWLINE projection_2D[k] = extract_class_avg(scaled_img)NEWLINE NEWLINE return shape, projection_2DNEWLINENEWLINENEWLINEdef extract_class_avg(avg):NEWLINE """fit in minimal bounding box"""NEWLINE NEWLINE image = avg.copy()NEWLINE image[image < 0] = 0NEWLINENEWLINE struct = np.ones((2, 2), dtype=bool)NEWLINE dilate = ndi.binary_dilation(image, struct)NEWLINENEWLINE labeled = measure.label(dilate, connectivity=2)NEWLINE rprops = measure.regionprops(labeled, image, cache=False)NEWLINENEWLINE if len(rprops) == 1:NEWLINE select_region = 0NEWLINE NEWLINE else:NEWLINE img_y, img_x = image.shapeNEWLINENEWLINE if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central regionNEWLINE select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For indexNEWLINENEWLINE else:NEWLINE distances = [NEWLINE (i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid))) NEWLINE for i, r in enumerate(rprops)NEWLINE ]NEWLINENEWLINE select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region NEWLINENEWLINE y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]NEWLINENEWLINE return image[y_min:y_max, x_min:x_max]NEWLINENEWLINENEWLINEdef vectorize(key, image, rotation_degrees, shape, domain):NEWLINE """NEWLINE takes image and creates 1D projectionsNEWLINE similar to Radon transformNEWLINE """NEWLINE projection_1D = {}NEWLINE projection_1D_FT = {}NEWLINE NEWLINE for degree in rotation_degrees:NEWLINE proj_1D = transform.rotate(image, degree, resize=True).sum(axis=0).astype('float32')NEWLINE trim_1D = np.trim_zeros(proj_1D, trim='fb')NEWLINE NEWLINE pad_1D = 
np.pad(proj_1D, (0, shape-len(proj_1D))) # Pad to largest possible shape from 2D NEWLINE F = abs(np.fft.rfft(pad_1D))NEWLINE NEWLINE projection_1D[(key, degree)] = Projection(class_avg=key, angle=degree, vector=trim_1D)NEWLINE projection_1D_FT[(key, degree)] = Projection(class_avg=key, angle=degree, vector=F)NEWLINE NEWLINE if domain == 'Fourier':NEWLINE return projection_1D_FTNEWLINE else:NEWLINE return projection_1DNEWLINE NEWLINE NEWLINEdef pairwise_l2(a, b):NEWLINE return np.linalg.norm(a - b)NEWLINENEWLINENEWLINEdef pairwise_l1(a, b):NEWLINE return np.linalg.norm(a - b, 1)NEWLINENEWLINENEWLINEdef pairwise_cosine(a, b):NEWLINE return 1 - (np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))NEWLINENEWLINENEWLINEdef pairwise_correlate(a, b, translate):NEWLINE s = np.correlate(a, b, mode=translate)NEWLINE return 1 / (1 + np.amax(s)) # Convert to distanceNEWLINENEWLINENEWLINEdef pairwise_wasserstein(a, b, translate):NEWLINE return wasserstein_distance(a, b)NEWLINENEWLINENEWLINEdef slide_score(a, b, pairwise_score, translate, domain):NEWLINE """NEWLINE finds minimum pairwise score for translations of 1D projectionsNEWLINE a, b are instances of the Projection classNEWLINE 'valid' is elements without zero paddingNEWLINE 'full' is scores at all translationsNEWLINE """NEWLINE scores = []NEWLINE NEWLINE if domain == 'Fourier':NEWLINE scores.append(pairwise_score(a.vector[1:], b.vector[1:])) #Drop 0th seems to helpNEWLINE NEWLINE else:NEWLINE if a.size() > b.size(): NEWLINE l, s = a.vector, b.vectorNEWLINE else:NEWLINE l, s = b.vector, a.vectorNEWLINENEWLINE l_size, s_size = len(l), len(s)NEWLINENEWLINE if translate == 'valid':NEWLINE diff_of_len = abs(l_size - s_size)NEWLINENEWLINE if diff_of_len == 0:NEWLINE scores.append(pairwise_score(l, s)) NEWLINE else:NEWLINE pad_s = np.pad(s, pad_width=(diff_of_len, diff_of_len))NEWLINE for i in range(0, diff_of_len+1):NEWLINE shift_s = pad_s[i:i+l_size]NEWLINE scores.append(pairwise_score(l, shift_s))NEWLINENEWLINE elif translate == 'full':NEWLINE pad_l = np.pad(l, pad_width=(s_size-1, s_size-1))NEWLINE pad_s = np.pad(s, pad_width=(l_size+s_size-2, l_size+s_size-2))NEWLINENEWLINE for i in range(0, l_size+s_size-1):NEWLINE shift_s = pad_s[i:i+len(pad_l)]NEWLINE scores.append(pairwise_score(pad_l, shift_s))NEWLINE NEWLINE return min(scores)NEWLINENEWLINENEWLINEdef wrapper_slide_function(pair, pairwise, translate, domain):NEWLINE """NEWLINE pair is tuple from Projection class to be scoredNEWLINE pairwise is function to score vectores (e.g. 
Euclidean)NEWLINE    """NEWLINE    score = slide_score(pair[0], pair[1], pairwise, translate, domain)NEWLINE    return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]NEWLINENEWLINENEWLINEdef wrapper_single_function(pair, pairwise, translate, domain):NEWLINE    """same as above but for correlate and EMD"""NEWLINE    score = pairwise(pair[0].vector[1:], pair[1].vector[1:], translate) # Skip 0th componentNEWLINE    return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]NEWLINENEWLINENEWLINEdef write_scores(final_scores, outpath):NEWLINE    """NEWLINE    tab separated file of final scoresNEWLINE    load the scores into the slicem guiNEWLINE    """NEWLINE    stamp = time.strftime('%Y%m%d_%H%M%S')NEWLINENEWLINE    header = ['projection_1', 'degree_1', 'projection_2', 'degree_2', 'score']NEWLINENEWLINE    with open(outpath+'/slicem_scores_{0}.txt'.format(stamp), 'w') as f:NEWLINE        for h in header:NEWLINE            f.write(h+'\t')NEWLINE        f.write('\n')NEWLINE        for p, v in final_scores.items():NEWLINE            f.write(str(p[0])+'\t'+str(v[0])+'\t'+str(p[1])+'\t'+str(v[1])+'\t'+str(v[2])+'\n')NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE    starttime = time.time()NEWLINE    main()NEWLINE    print('Runtime: {} minutes'.format((time.time() - starttime)/60)) |
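# Hedged usage sketch for the functions above: score two toy 1D projections in
# real space with the 'valid' translation search. The vectors are made-up
# illustration data (not taken from any .mrcs file), and the call assumes the
# Projection class, slide_score and pairwise_l2 defined in this script.
import numpy as np

toy_a = Projection(class_avg=0, angle=0, vector=np.array([0., 1., 3., 1., 0.], dtype='float32'))
toy_b = Projection(class_avg=1, angle=90, vector=np.array([1., 3., 1.], dtype='float32'))
print(slide_score(toy_a, toy_b, pairwise_l2, translate='valid', domain='Real'))  # -> 0.0 at the best alignment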
#!/usr/bin/env python3NEWLINENEWLINEimport argparseNEWLINEimport pandas as pdNEWLINEimport numpy as npNEWLINEimport sysNEWLINEimport matplotlibNEWLINEfrom matplotlib import useNEWLINEuse('Agg')NEWLINEimport matplotlib.pyplot as pltNEWLINENEWLINEEOL=chr(10)NEWLINENEWLINENEWLINEdef parseArguments():NEWLINE if len(sys.argv)<=1:NEWLINE sys.argv="mafplot.py $input $output".split()NEWLINE parser=argparse.ArgumentParser()NEWLINE parser.add_argument("--phenos", type=str, metavar='phenotypes', required=True)NEWLINE parser.add_argument("--skip-zero", dest="skip_zero", action="store_true", default=False)NEWLINE parser.add_argument('input', type=str, metavar='input'),NEWLINE parser.add_argument('output', type=str, metavar='output'),NEWLINE args = parser.parse_args()NEWLINE return argsNEWLINENEWLINENEWLINENEWLINENEWLINENEWLINEtransforms = [[1,np.log1p],[np.sqrt,np.cbrt]]NEWLINEtransform_names = [["no transform","log transform"],["square root transform","cube root transform"]]NEWLINENEWLINENEWLINEdef numfrm(x):NEWLINE xstr=str(x)NEWLINE if "." not in xstr: return xstrNEWLINE if x<0.1:NEWLINE xstr="%6.4E"%xNEWLINE else:NEWLINE xstr = str(x)NEWLINE xstr = xstr[:xstr.index(".")+3]NEWLINE return xstrNEWLINENEWLINENEWLINEdef summary2LaTeX(summary,output,suf,pheno):NEWLINE phelab = pheno.replace("_",":")NEWLINE lat = EOL+EOL+\NEWLINE r"\begin{table}[hb]"+EOL+\NEWLINE r"\begin{center}"+EOL+r"\begin{tabular}{l r D{.}{.}{3} D{.}{.}{3} D{.}{.}{4} D{.}{.}{4}} \\"+EOL + \NEWLINE r"Data & Count & \multicolumn{1}{c}{Min} & \multicolumn{1}{c}{Max} & \multicolumn{1}{c}{Ave} & \multicolumn{1}{c}{StdDev} \\\hline" +EOLNEWLINE for s in summary:NEWLINE lat = lat+" & ".join([s[0]]+list(map(numfrm,[s[1].count(),s[1].min(),s[1].max(),s[1].mean(),s[1].std()])))+ r"\\"+EOLNEWLINE lat = lat + r"\hline\end{tabular}"+EOL+r"\end{center}"+EOL+(r"""NEWLINE *-caption{Overview of phenotype *-protect*-url{%s} distribution}NEWLINE *-label{tab:overview:%s}NEWLINE *-end{table}NEWLINE """)%(pheno,phelab)NEWLINE lat = r"""NEWLINENEWLINE A summary of the data for \url{%s} can be found in the Table~*-ref{tab:overview:%s}, transformed using NEWLINE different transforms. A histogram is found in Figure \ref{fig:%s}.NEWLINENEWLINE """ + lat + r"""NEWLINENEWLINE NEWLINE \ourfig{fig:%s}{Histogram of *-protect*-url{%s} values under different transforms}{%s.%s}NEWLINENEWLINENEWLINE """NEWLINE return lat%(pheno,phelab,output,output,pheno,output,suf)NEWLINENEWLINENEWLINEdef errorMessage10(phe):NEWLINE print("""NEWLINENEWLINE A problem has been detected in file <%s> column <%s>.NEWLINENEWLINE There is some invalid data. 
I regret I can't tell you which row.NEWLINENEWLINENEWLINE    Please check -- the data should be numeric only.NEWLINENEWLINENEWLINE    If there is missing data, please use NANEWLINENEWLINENEWLINE    """%(args.input,phe))NEWLINENEWLINENEWLINEdef showPheno(pname,frm):NEWLINE    if args.skip_zero:NEWLINE        data = frm[frm[pname]>0][pname]NEWLINE    else:NEWLINE        data = frm[pname]NEWLINE    fig,axs = plt.subplots(2,2)NEWLINE    matplotlib.rcParams['xtick.labelsize']=13NEWLINE    matplotlib.rcParams['ytick.labelsize']=13NEWLINE    summary=[]NEWLINE    for r in range(2):NEWLINE        for c in range(2):NEWLINE            axs[r][c].set_xlabel(transform_names[r][c],fontsize=12)NEWLINE            axs[r][c].set_ylabel("Frequency",fontsize=12)NEWLINE            fn = transforms[r][c]NEWLINE            try:NEWLINE                pdata = fn(data) if fn != 1 else dataNEWLINE                pdata = pdata[pdata.notnull()]NEWLINE                summary.append((transform_names[r][c],pdata))NEWLINE            except Exception:NEWLINE                errorMessage10(pname)NEWLINE                sys.exit(10)NEWLINE            axs[r][c].hist(pdata,bins=100)NEWLINE    plt.tight_layout()NEWLINE    output = ("%s-%s"%(args.output,pname)).replace("_","-")NEWLINE    plt.savefig("%s.pdf"%output)NEWLINE    return summary2LaTeX(summary,output,"pdf",pname)NEWLINENEWLINEargs=parseArguments()NEWLINEfrm = pd.read_csv(args.input,delim_whitespace=True)NEWLINEphenos = args.phenos.split(",")NEWLINEoutput_latex= ""NEWLINEfor phen in phenos:NEWLINE    dets = phen.split("/")NEWLINE    pname = dets[0]NEWLINE    if frm[pname].dtype not in (np.float64, np.int64):NEWLINE        errorMessage10(pname)NEWLINE        sys.exit(11)NEWLINE    output_latex = output_latex + showPheno(pname,frm)NEWLINENEWLINEg = open("%s.tex"%args.output,"w")NEWLINEg.write(output_latex.replace("*-",chr(92)).replace("##",chr(36)))NEWLINEg.close()NEWLINE |
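# Hedged usage sketch: this plotting script is normally invoked from the command
# line (or templated by a workflow engine), e.g.
#   python mafplot.py --phenos bmi,height phenotypes.txt pheno-report
# where phenotypes.txt is a whitespace-delimited table with numeric columns named
# 'bmi' and 'height' (file and column names here are placeholders). For each
# phenotype it writes pheno-report-<name>.pdf with the 2x2 histogram grid and
# appends a LaTeX summary table to pheno-report.tex.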
"""Auto-generated file, do not edit by hand. 81 metadata"""NEWLINEfrom ..phonemetadata import NumberFormatNEWLINENEWLINEPHONE_ALT_FORMAT_81 = [NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{4})', format=u'\\1-\\2-\\3', leading_digits_pattern=['(?:12|57|99)0']), NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format=u'\\1-\\2-\\3-\\4', leading_digits_pattern=['(?:12|57|99)0']), NumberFormat(pattern='(\\d{3})(\\d{4})(\\d{2})', format=u'\\1-\\2-\\3', leading_digits_pattern=['(?:12|57|99)0'])]NEWLINE |
from sympy import Point, Line, SegmentNEWLINEfrom skimage import feature, measureNEWLINEimport numpy as npNEWLINEfrom scipy import ndimage as ndNEWLINENEWLINEdef thr_calculator(filtered_img,min_distance,stringency):NEWLINENEWLINE """NEWLINE Function used to calculate the threshold to use for the dotsNEWLINE counting in a 2D image. NEWLINENEWLINE Parameters:NEWLINE -----------NEWLINENEWLINE filtered_img: np.array float64NEWLINE preprocessed image used to count the dots.NEWLINE min_distance: intNEWLINE minimum distance that two maxima need to have in order to be defined as NEWLINE separete peaks.NEWLINE stringency: intNEWLINE integer used to select the stringency of the generatedNEWLINE threshold. By adding stringency to the thr_idx we can select a Thr with higherNEWLINE value from the thr_array.NEWLINENEWLINE Returns:NEWLINE -----------NEWLINENEWLINE counting_dict : dict NEWLINE dictionary containing all the counting infos:NEWLINE selected_thr: float64NEWLINE Thr used for counting after application of the stringency.NEWLINE calculated_thr: float64 NEWLINE Calculated ThrNEWLINE selected_peaks: int64 NEWLINE 2D coords of the peaks defined using the selected_thr.NEWLINE thr_array: float64 NEWLINE Thr array of 100 points distributed between (Img.min(),Img.max()).NEWLINE peaks_coords: float64 NEWLINE list of all the 3D coords calculated using the Thr array.NEWLINE total_peaks: list of int NEWLINE List of the peaks counts.NEWLINE thr_idx: int64 NEWLINE index of the calculated threshold.NEWLINE stringency: int64 NEWLINE stringency used for the identification of the selected_peaksNEWLINE """NEWLINE NEWLINE # List with the total peaks calculated for each thresholdNEWLINE total_peaks = []NEWLINE NEWLINE # List of ndarrays with the coords of the peaks calculated for each thresholdNEWLINE peaks_coords = []NEWLINENEWLINE # Define the Thr array to be testedNEWLINE thr_array = np.linspace(filtered_img.min(),filtered_img.max(),num=100)NEWLINENEWLINENEWLINE # Calculate the number of peaks for each threshold. 
In this calculationNEWLINE # the size of the objects is not consideredNEWLINE for thr in thr_array:NEWLINE # The border is excluded from the countingNEWLINE peaks = feature.peak_local_max(filtered_img,min_distance=min_distance,\NEWLINE threshold_abs=thr,exclude_border=False, indices=True,\NEWLINE num_peaks=np.inf, footprint=None,labels=None) NEWLINE # Stop the counting when the number of peaks detected falls below 3NEWLINE if len(peaks)<=3:NEWLINE stop_thr = thr # Move in the upper loop so you will stop at the previous thrNEWLINE breakNEWLINE else:NEWLINE peaks_coords.append(peaks) NEWLINE total_peaks.append(len(peaks))NEWLINENEWLINENEWLINE # Consider the case of no detectected peaks or if there is only one ThrNEWLINE # that create peaks (list total_peaks have only one element and )NEWLINE # if np.array(total_peaks).sum()>0 or len(total_peaks)>1:NEWLINE if len(total_peaks)>1:NEWLINENEWLINE # Trim the threshold array in order to match the stopping pointNEWLINE # used the [0][0] to get the first number and then take it out from listNEWLINE thr_array = thr_array[:np.where(thr_array==stop_thr)[0][0]]NEWLINENEWLINENEWLINE # Calculate the gradient of the number of peaks distributionNEWLINE grad = np.gradient(total_peaks)NEWLINE NEWLINE # Restructure the data in order to avoid to consider the min_peak in theNEWLINE # calculationsNEWLINENEWLINE # Coord of the gradient min_peakNEWLINE grad_min_peak_coord = np.argmin(grad)NEWLINE NEWLINE # Trim the data to remove the peak.NEWLINE trimmed_thr_array = thr_array[grad_min_peak_coord:]NEWLINE trimmed_grad = grad[grad_min_peak_coord:]NEWLINENEWLINE if trimmed_thr_array.shape>(1,):NEWLINENEWLINE # Trim the coords array in order to maintain the same length of the NEWLINE # tr and pkNEWLINE trimmed_peaks_coords = peaks_coords[grad_min_peak_coord:]NEWLINE trimmed_total_peaks = total_peaks[grad_min_peak_coord:]NEWLINENEWLINE # To determine the threshold we will determine the Thr with the biggestNEWLINE # distance to the segment that join the end points of the calculatedNEWLINE # gradientNEWLINENEWLINE # Distances listNEWLINE distances = []NEWLINENEWLINE # Calculate the coords of the end points of the gradientNEWLINE p1 = Point(trimmed_thr_array[0],trimmed_grad[0])NEWLINE p2 = Point(trimmed_thr_array[-1],trimmed_grad[-1])NEWLINE NEWLINE # Create a line that join the pointsNEWLINE s = Line(p1,p2)NEWLINE allpoints = np.arange(0,len(trimmed_thr_array))NEWLINE NEWLINE # Calculate the distance between all points and the lineNEWLINE for p in allpoints:NEWLINE dst = s.distance(Point(trimmed_thr_array[p],trimmed_grad[p]))NEWLINE distances.append(dst.evalf())NEWLINENEWLINE # Remove the end points from the listsNEWLINE trimmed_thr_array = trimmed_thr_array[1:-1]NEWLINE trimmed_grad = trimmed_grad[1:-1]NEWLINE trimmed_peaks_coords = trimmed_peaks_coords[1:-1]NEWLINE trimmed_total_peaks = trimmed_total_peaks[1:-1]NEWLINE trimmed_distances = distances[1:-1]NEWLINE NEWLINE # Determine the coords of the selected ThrNEWLINE # Converted trimmed_distances to array because it crashedNEWLINE # on Sanger.NEWLINE if trimmed_distances: # Most efficient way will be to consider the length of Thr listNEWLINE thr_idx=np.argmax(np.array(trimmed_distances))NEWLINE calculated_thr = trimmed_thr_array[thr_idx]NEWLINE # The selected threshold usually causes oversampling of the number of dotsNEWLINE # I added a stringency parameter (int n) to use to select the Thr+n NEWLINE # for the counting. 
It selects a stringency only if the trimmed_thr_arrayNEWLINE # is long enoughNEWLINE if thr_idx+stringency<len(trimmed_thr_array):NEWLINE selected_thr = trimmed_thr_array[thr_idx+stringency]NEWLINE selected_peaks = trimmed_peaks_coords[thr_idx+stringency]NEWLINE thr_idx = thr_idx+stringencyNEWLINE else:NEWLINE selected_thr = trimmed_thr_array[thr_idx]NEWLINE selected_peaks = trimmed_peaks_coords[thr_idx]NEWLINENEWLINE NEWLINE # Calculate the selected peaks after removal of the big and small objectsNEWLINE NEWLINE # Threshold the image using the selected thresholdNEWLINE if selected_thr>0:NEWLINE img_mask = filtered_img>selected_thrNEWLINE NEWLINE labels = nd.label(img_mask)[0]NEWLINE NEWLINE properties = measure.regionprops(labels)NEWLINE NEWLINE for ob in properties:NEWLINE if ob.area<6 or ob.area>200:NEWLINE img_mask[ob.coords[:,0],ob.coords[:,1]]=0NEWLINE NEWLINE labels = nd.label(img_mask)[0]NEWLINE selected_peaks = feature.peak_local_max(filtered_img, min_distance=min_distance, threshold_abs=selected_thr, exclude_border=False, indices=True, num_peaks=np.inf, footprint=None, labels=labels)NEWLINE NEWLINE if selected_peaks.size:NEWLINE # Intensity counting of the max peaksNEWLINE selected_peaks_int = filtered_img[selected_peaks[:,0],selected_peaks[:,1]]NEWLINE NEWLINE NEWLINE else:NEWLINE selected_thr = 0NEWLINE calculated_thr = 0NEWLINE selected_peaks = 0NEWLINE peaks_coords = 0NEWLINE total_peaks = 0NEWLINE thr_idx = 0NEWLINE selected_peaks_int = 0NEWLINE trimmed_thr_array = 0NEWLINE trimmed_peaks_coords = 0NEWLINE NEWLINE else:NEWLINE selected_thr = 0NEWLINE calculated_thr = 0NEWLINE selected_peaks = 0NEWLINE peaks_coords = 0NEWLINE total_peaks = 0NEWLINE thr_idx = 0NEWLINE selected_peaks_int = 0NEWLINE trimmed_thr_array = 0NEWLINE trimmed_peaks_coords = 0NEWLINE else:NEWLINE selected_thr = 0NEWLINE calculated_thr = 0NEWLINE selected_peaks = 0NEWLINE peaks_coords = 0NEWLINE total_peaks = 0NEWLINE thr_idx = 0NEWLINE selected_peaks_int = 0NEWLINE trimmed_thr_array = 0NEWLINE trimmed_peaks_coords = 0NEWLINENEWLINENEWLINE else:NEWLINE selected_thr = 0NEWLINE calculated_thr = 0NEWLINE selected_peaks = 0NEWLINE peaks_coords = 0NEWLINE total_peaks = 0NEWLINE thr_idx = 0NEWLINE selected_peaks_int = 0NEWLINE trimmed_thr_array = 0NEWLINE trimmed_peaks_coords = 0NEWLINENEWLINE counting_dict={}NEWLINENEWLINE counting_dict['selected_thr'] = selected_thrNEWLINE counting_dict['calculated_thr'] = calculated_thrNEWLINE counting_dict['selected_peaks'] = selected_peaksNEWLINE counting_dict['thr_array'] = thr_arrayNEWLINE counting_dict['trimmed_thr_array'] = trimmed_thr_arrayNEWLINE counting_dict['peaks_coords'] = peaks_coordsNEWLINE counting_dict['trimmed_peaks_coords'] = trimmed_peaks_coordsNEWLINE counting_dict['total_peaks'] = total_peaks NEWLINE counting_dict['thr_idx'] = thr_idxNEWLINE counting_dict['stringency'] = stringencyNEWLINE counting_dict['selected_peaks_int'] = selected_peaks_intNEWLINE NEWLINE return counting_dict |
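# Hedged usage sketch for thr_calculator: build a small synthetic "filtered" image
# with a handful of blurred spots and count them. Spot positions, sigma and the
# min_distance/stringency values are made up for illustration, and the call assumes
# a skimage version that still accepts the indices argument used above.
import numpy as np
from scipy import ndimage as nd

rng = np.random.default_rng(0)
toy_img = np.zeros((128, 128))
spot_y = rng.integers(10, 118, size=25)
spot_x = rng.integers(10, 118, size=25)
toy_img[spot_y, spot_x] = 1.0
toy_filtered = nd.gaussian_filter(toy_img, sigma=1.5) + rng.normal(0.0, 1e-4, toy_img.shape)

counting_dict = thr_calculator(toy_filtered, min_distance=3, stringency=2)
print(counting_dict['selected_thr'], np.shape(counting_dict['selected_peaks']))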
# -*- coding: utf-8 -*-NEWLINEfrom torch.utils.tensorboard import SummaryWriterNEWLINENEWLINENEWLINEclass TensorboardHandler(object):NEWLINE    """Singleton wrapper around a torch.utils.tensorboard SummaryWriter."""NEWLINE    __TENSORBOARD_HANDLER = NoneNEWLINENEWLINE    def __new__(cls, *args: str, **kwargs: str) -> object:NEWLINE        if cls.__TENSORBOARD_HANDLER is None:NEWLINE            cls.__TENSORBOARD_HANDLER = object.__new__(cls)NEWLINE        return cls.__TENSORBOARD_HANDLERNEWLINENEWLINE    def __init__(self, args: object) -> None:NEWLINE        super().__init__()NEWLINE        self.__arg = argsNEWLINE        self.__writer = SummaryWriter(log_dir=args.log)NEWLINENEWLINE    def _write_file(self, epoch: int, mode_id: int,NEWLINE                    data_title: str, data_list: list,NEWLINE                    data_state: str) -> None:NEWLINE        for i, data_item in enumerate(data_list):NEWLINE            self.__writer.add_scalar(data_title % (mode_id, i, data_state),NEWLINE                                     data_item, epoch)NEWLINENEWLINE    def write_data(self, epoch: int, model_loss_list: list,NEWLINE                   model_acc_list: list, data_state: str) -> None:NEWLINE        data_loss_title = 'model:%d/l%d/%s'NEWLINE        data_acc_title = 'model:%d/acc%d/%s'NEWLINE        assert len(model_loss_list) == len(model_acc_list)NEWLINENEWLINE        for i, loss_list_item in enumerate(model_loss_list):NEWLINE            self._write_file(epoch, i, data_loss_title, loss_list_item, data_state)NEWLINE            self._write_file(epoch, i, data_acc_title, model_acc_list[i], data_state)NEWLINE |
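# Hedged usage sketch: TensorboardHandler caches the first instance (singleton) and
# only reads the `log` attribute of the object passed to it, so SimpleNamespace can
# stand in for the real argument-parser result; the directory name is a placeholder.
from types import SimpleNamespace

handler = TensorboardHandler(SimpleNamespace(log='runs/example'))
handler.write_data(epoch=0,
                   model_loss_list=[[0.91, 0.87]],  # one model with two loss heads
                   model_acc_list=[[0.42, 0.45]],   # matching accuracy heads
                   data_state='train')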
import socketNEWLINEimport selectNEWLINENEWLINEHEADER_LENGTH = 10NEWLINENEWLINEIP = "127.0.0.1"NEWLINEPORT = 1234NEWLINENEWLINENEWLINEserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)NEWLINENEWLINEserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)NEWLINEserver_socket.bind((IP, PORT))NEWLINENEWLINE# Make the server listen for incoming connectionsNEWLINEserver_socket.listen()NEWLINENEWLINEsockets_list = [server_socket]NEWLINENEWLINE# Connected clients - maps each socket to its header and username dataNEWLINEclients = {}NEWLINENEWLINEprint(f'Listening for connections on {IP}:{PORT}...')NEWLINENEWLINEdef receive_message(client_socket):NEWLINE    try:NEWLINE        # Receive the header containing the message lengthNEWLINE        message_header = client_socket.recv(HEADER_LENGTH)NEWLINENEWLINE        # If no data was received, the client closed the connectionNEWLINE        if not len(message_header):NEWLINE            return FalseNEWLINENEWLINE        # Convert the header to an integerNEWLINE        message_length = int(message_header.decode('utf-8').strip())NEWLINENEWLINE        # Return the message header and the message dataNEWLINE        return {'header': message_header, 'data': client_socket.recv(message_length)}NEWLINENEWLINE    except Exception:NEWLINE        # In case of an error (e.g. the client aborted the connection)NEWLINE        return FalseNEWLINENEWLINEwhile True:NEWLINENEWLINE    read_sockets, _, exception_sockets = select.select(sockets_list, [], sockets_list)NEWLINENEWLINE    # Iterate over notified socketsNEWLINE    for notified_socket in read_sockets:NEWLINENEWLINE        # If the notified socket is the server socket - new connection, accept itNEWLINE        if notified_socket == server_socket:NEWLINENEWLINE            client_socket, client_address = server_socket.accept()NEWLINENEWLINE            user = receive_message(client_socket)NEWLINENEWLINE            if user is False:NEWLINE                continueNEWLINENEWLINE            sockets_list.append(client_socket)NEWLINENEWLINE            clients[client_socket] = userNEWLINENEWLINE            print('Accepted new connection from {}:{}, username: {}'.format(*client_address, user['data'].decode('utf-8')))NEWLINENEWLINE        # If an existing socket is sending a messageNEWLINE        else:NEWLINENEWLINE            # Receive the messageNEWLINE            message = receive_message(notified_socket)NEWLINENEWLINE            # If False, the client disconnectedNEWLINE            if message is False:NEWLINE                print('Closed connection from: {}'.format(clients[notified_socket]['data'].decode('utf-8')))NEWLINENEWLINE                # Remove it from the socket listNEWLINE                sockets_list.remove(notified_socket)NEWLINENEWLINE                # Remove it from the user listNEWLINE                del clients[notified_socket]NEWLINENEWLINE                continueNEWLINENEWLINE            # Look up the user who sent the messageNEWLINE            user = clients[notified_socket]NEWLINENEWLINE            print(f'Received message from {user["data"].decode("utf-8")}: {message["data"].decode("utf-8")}')NEWLINENEWLINE            # Broadcast to every other connected clientNEWLINE            for client_socket in clients:NEWLINENEWLINE                if client_socket != notified_socket:NEWLINENEWLINE                    # Forward the sender's name and the message itselfNEWLINE                    client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])NEWLINENEWLINE    # Handle sockets that raised an exceptionNEWLINE    for notified_socket in exception_sockets:NEWLINENEWLINE        # Remove them from the socket listNEWLINE        sockets_list.remove(notified_socket)NEWLINENEWLINE        # Remove them from the user listNEWLINE        del clients[notified_socket] |
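# Hedged sketch of a matching client for the server above: every payload (first the
# username, then each chat message) is preceded by a 10-byte, left-justified ASCII
# length header. The username and message text are placeholders.
import socket

HEADER_LENGTH = 10  # must match the server
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(("127.0.0.1", 1234))

username = "alice".encode('utf-8')
client_socket.send(f"{len(username):<{HEADER_LENGTH}}".encode('utf-8') + username)

message = "hello".encode('utf-8')
client_socket.send(f"{len(message):<{HEADER_LENGTH}}".encode('utf-8') + message)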
import torchNEWLINEimport torch.nn as nnNEWLINEimport torch.nn.functional as FNEWLINEimport randomNEWLINENEWLINEfrom torch.distributions import NormalNEWLINENEWLINENEWLINEclass Retina:NEWLINE """A visual retina.NEWLINENEWLINE Extracts a foveated glimpse `phi` around location `l`NEWLINE from an image `x`.NEWLINENEWLINE Concretely, encodes the region around `l` at aNEWLINE high-resolution but uses a progressively lowerNEWLINE resolution for pixels further from `l`, resultingNEWLINE in a compressed representation of the originalNEWLINE image `x`.NEWLINENEWLINE Args:NEWLINE x: a 4D Tensor of shape (B, H, W, C). The minibatchNEWLINE of images.NEWLINE l: a 2D Tensor of shape (B, 2). Contains normalizedNEWLINE coordinates in the range [-1, 1].NEWLINE g: size of the first square patch.NEWLINE k: number of patches to extract in the glimpse.NEWLINE s: scaling factor that controls the size ofNEWLINE successive patches.NEWLINENEWLINE Returns:NEWLINE phi: a 5D tensor of shape (B, k, g, g, C). TheNEWLINE foveated glimpse of the image.NEWLINE """NEWLINENEWLINE def __init__(self, g, k, s):NEWLINE self.g = gNEWLINE self.k = kNEWLINE self.s = sNEWLINENEWLINE def foveate(self, x, l):NEWLINE """Extract `k` square patches of size `g`, centeredNEWLINE at location `l`. The initial patch is a square ofNEWLINE size `g`, and each subsequent patch is a squareNEWLINE whose side is `s` times the size of the previousNEWLINE patch.NEWLINENEWLINE The `k` patches are finally resized to (g, g) andNEWLINE concatenated into a tensor of shape (B, k, g, g, C).NEWLINE """NEWLINE phi = []NEWLINE size = self.gNEWLINENEWLINE # extract k patches of increasing sizeNEWLINE for i in range(self.k):NEWLINE phi.append(self.extract_patch(x, l, size))NEWLINE size = int(self.s * size)NEWLINENEWLINE # resize the patches to squares of size gNEWLINE for i in range(1, len(phi)):NEWLINE k = phi[i].shape[-1] // self.gNEWLINE phi[i] = F.avg_pool2d(phi[i], k)NEWLINENEWLINE # concatenate into a single tensor and flattenNEWLINE phi = torch.cat(phi, 1)NEWLINE phi = phi.view(phi.shape[0], -1)NEWLINENEWLINE return phiNEWLINENEWLINE def extract_patch(self, x, l, size):NEWLINE """Extract a single patch for each image in `x`.NEWLINENEWLINE Args:NEWLINE x: a 4D Tensor of shape (B, H, W, C). 
The minibatchNEWLINE of images.NEWLINE l: a 2D Tensor of shape (B, 2).NEWLINE size: a scalar defining the size of the extracted patch.NEWLINENEWLINE Returns:NEWLINE patch: a 4D Tensor of shape (B, size, size, C)NEWLINE """NEWLINE B, C, H, W = x.shapeNEWLINENEWLINE start = self.denormalize(H, l)NEWLINE end = start + sizeNEWLINENEWLINE # pad with zerosNEWLINE x = F.pad(x, (size, size, size, size))NEWLINENEWLINE # loop through mini-batch and extract patchesNEWLINE patch = []NEWLINE for i in range(B):NEWLINE patch.append(x[i, :, start[i, 1] : end[i, 1], start[i, 0] : end[i, 0]])NEWLINE return torch.stack(patch)NEWLINENEWLINE def denormalize(self, T, coords):NEWLINE """Convert coordinates in the range [-1, 1] toNEWLINE coordinates in the range [0, T] where `T` isNEWLINE the size of the image.NEWLINE """NEWLINE return (0.5 * ((coords + 1.0) * T)).long()NEWLINENEWLINE def exceeds(self, from_x, to_x, from_y, to_y, T):NEWLINE """Check whether the extracted patch will exceedNEWLINE the boundaries of the image of size `T`.NEWLINE """NEWLINE if (from_x < 0) or (from_y < 0) or (to_x > T) or (to_y > T):NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINENEWLINEclass HeadNet(nn.Module):NEWLINE def __init__(self, ):NEWLINE super(HeadNet, self).__init__()NEWLINE mult = 256NEWLINE self.fc1 = nn.Linear(mult, 128)NEWLINE self.fc11 = nn.Linear(2, 128)NEWLINE self.fc2 = nn.Linear(128, 1)NEWLINE self.fc1.apply(weights_init)NEWLINE self.fc2.apply(weights_init)NEWLINENEWLINE def forward(self, h_t, a_t):NEWLINE x = F.relu(self.fc1(h_t)+self.fc11(a_t))NEWLINE x = F.relu(self.fc2(x))NEWLINE return xNEWLINENEWLINEclass EnsembleNet(nn.Module):NEWLINE def __init__(self, n_ensemble, hidden_size):NEWLINE super(EnsembleNet, self).__init__()NEWLINE self.n_emseble = n_ensembleNEWLINE self.corenet_list = nn.ModuleList([LocationNetwork(input_size=hidden_size, output_size=2, std=0.1) for k in range(n_ensemble)])NEWLINENEWLINE self.net_list = nn.ModuleList([HeadNet() for k in range(n_ensemble)])NEWLINENEWLINE def _core(self, x, epoch, t, is_test):NEWLINE return [net(x, epoch, t, is_test) for net in self.corenet_list]NEWLINENEWLINE def forward(self, h_t, k, epoch, t, is_test, refer_action=None):NEWLINE if k is not None:NEWLINE if refer_action is not None:NEWLINE return refer_action.detach(), self.net_list[k](h_t.detach(), refer_action.detach())NEWLINE else:NEWLINE return self.corenet_list[k](h_t, epoch, t, is_test), self.net_list[k](h_t.detach(), self.corenet_list[k](h_t, epoch, t, is_test))NEWLINE else:NEWLINE if refer_action is not None:NEWLINE net_heads = []NEWLINE for k in range(self.n_emseble):NEWLINE net_heads.append(self.net_list[k](h_t.detach(), refer_action.detach()))NEWLINE refer_action = [refer_action.detach() for k in range(self.n_emseble)]NEWLINE return refer_action, net_headsNEWLINE else:NEWLINE core_cache = self._core(h_t, epoch, t, is_test)NEWLINE net_heads = []NEWLINE for k, core in enumerate(core_cache):NEWLINE net_heads.append(self.net_list[k](h_t.detach(), core_cache[k]))NEWLINE return core_cache, net_headsNEWLINENEWLINENEWLINEclass GlimpseNetwork(nn.Module):NEWLINE """The glimpse network.NEWLINENEWLINE Combines the "what" and the "where" into a glimpseNEWLINE feature vector `g_t`.NEWLINENEWLINE - "what": glimpse extracted from the retina.NEWLINE - "where": location tuple where glimpse was extracted.NEWLINENEWLINE Concretely, feeds the output of the retina `phi` toNEWLINE a fc layer and the glimpse location vector `l_t_prev`NEWLINE to a fc layer. 
Finally, these outputs are fed eachNEWLINE through a fc layer and their sum is rectified.NEWLINENEWLINE In other words:NEWLINENEWLINE `g_t = relu( fc( fc(l) ) + fc( fc(phi) ) )`NEWLINENEWLINE Args:NEWLINE h_g: hidden layer size of the fc layer for `phi`.NEWLINE h_l: hidden layer size of the fc layer for `l`.NEWLINE g: size of the square patches in the glimpses extractedNEWLINE by the retina.NEWLINE k: number of patches to extract per glimpse.NEWLINE s: scaling factor that controls the size of successive patches.NEWLINE c: number of channels in each image.NEWLINE x: a 4D Tensor of shape (B, H, W, C). The minibatchNEWLINE of images.NEWLINE l_t_prev: a 2D tensor of shape (B, 2). Contains the glimpseNEWLINE coordinates [x, y] for the previous timestep `t-1`.NEWLINENEWLINE Returns:NEWLINE g_t: a 2D tensor of shape (B, hidden_size).NEWLINE The glimpse representation returned byNEWLINE the glimpse network for the currentNEWLINE timestep `t`.NEWLINE """NEWLINENEWLINE def __init__(self, h_g, h_l, g, k, s, c):NEWLINE super().__init__()NEWLINENEWLINE self.retina = Retina(g, k, s)NEWLINENEWLINE # glimpse layerNEWLINE D_in = k * g * g * cNEWLINE self.fc1 = nn.Linear(D_in, h_g)NEWLINENEWLINE # location layerNEWLINE D_in = 2NEWLINE self.fc2 = nn.Linear(D_in, h_l)NEWLINENEWLINE self.fc3 = nn.Linear(h_g, h_g + h_l)NEWLINE self.fc4 = nn.Linear(h_l, h_g + h_l)NEWLINENEWLINE def forward(self, x, l_t_prev):NEWLINE # generate glimpse phi from image xNEWLINE phi = self.retina.foveate(x, l_t_prev)NEWLINENEWLINE # flatten location vectorNEWLINE l_t_prev = l_t_prev.view(l_t_prev.size(0), -1)NEWLINENEWLINE # feed phi and l to respective fc layersNEWLINE phi_out = F.relu(self.fc1(phi))NEWLINE l_out = F.relu(self.fc2(l_t_prev))NEWLINENEWLINE what = self.fc3(phi_out)NEWLINE where = self.fc4(l_out)NEWLINENEWLINE # feed to fc layerNEWLINE g_t = F.relu(what + where)NEWLINE # print('g_t',g_t)NEWLINE # print("g_t_norm",torch.norm(g_t))NEWLINENEWLINE return g_tNEWLINENEWLINENEWLINEclass CoreNetwork(nn.Module):NEWLINE """The core network.NEWLINENEWLINE An RNN that maintains an internal state by integratingNEWLINE information extracted from the history of past observations.NEWLINE It encodes the agent's knowledge of the environment throughNEWLINE a state vector `h_t` that gets updated at every time step `t`.NEWLINENEWLINE Concretely, it takes the glimpse representation `g_t` as input,NEWLINE and combines it with its internal state `h_t_prev` at the previousNEWLINE time step, to produce the new internal state `h_t` at the currentNEWLINE time step.NEWLINENEWLINE In other words:NEWLINENEWLINE `h_t = relu( fc(h_t_prev) + fc(g_t) )`NEWLINENEWLINE Args:NEWLINE input_size: input size of the rnn.NEWLINE hidden_size: hidden size of the rnn.NEWLINE g_t: a 2D tensor of shape (B, hidden_size). The glimpseNEWLINE representation returned by the glimpse network for theNEWLINE current timestep `t`.NEWLINE h_t_prev: a 2D tensor of shape (B, hidden_size). TheNEWLINE hidden state vector for the previous timestep `t-1`.NEWLINENEWLINE Returns:NEWLINE h_t: a 2D tensor of shape (B, hidden_size). 
The hiddenNEWLINE state vector for the current timestep `t`.NEWLINE """NEWLINENEWLINE def __init__(self, input_size, hidden_size):NEWLINE super().__init__()NEWLINENEWLINE self.input_size = input_sizeNEWLINE self.hidden_size = hidden_sizeNEWLINENEWLINE self.i2h = nn.Linear(input_size, hidden_size)NEWLINE self.h2h = nn.Linear(hidden_size, hidden_size)NEWLINENEWLINE def forward(self, g_t, h_t_prev):NEWLINE h1 = self.i2h(g_t)NEWLINE # np.set_printoptions(threshold=np.inf)NEWLINE # print('h1', h1.detach().cpu().numpy())NEWLINE h2 = self.h2h(h_t_prev)NEWLINE h_t = F.relu(h1 + h2)NEWLINE # print('h_t',h_t)NEWLINE # print('h_t_norm',torch.norm(h_t))NEWLINE return h_tNEWLINENEWLINENEWLINEclass ActionNetwork(nn.Module):NEWLINE """The action network.NEWLINENEWLINE Uses the internal state `h_t` of the core network toNEWLINE produce the final output classification.NEWLINENEWLINE Concretely, feeds the hidden state `h_t` through a fcNEWLINE layer followed by a softmax to create a vector ofNEWLINE output probabilities over the possible classes.NEWLINENEWLINE Hence, the environment action `a_t` is drawn from aNEWLINE distribution conditioned on an affine transformationNEWLINE of the hidden state vector `h_t`, or in other words,NEWLINE the action network is simply a linear softmax classifier.NEWLINENEWLINE Args:NEWLINE input_size: input size of the fc layer.NEWLINE output_size: output size of the fc layer.NEWLINE h_t: the hidden state vector of the core networkNEWLINE for the current time step `t`.NEWLINENEWLINE Returns:NEWLINE a_t: output probability vector over the classes.NEWLINE """NEWLINENEWLINE def __init__(self, input_size, output_size, log_std=0):NEWLINE super().__init__()NEWLINENEWLINE self.fc = nn.Linear(input_size, output_size)NEWLINE # self.action_log_std = nn.Parameter(torch.ones(output_size) * log_std)NEWLINENEWLINE def forward(self, h_t):NEWLINE a_t = self.fc(h_t)NEWLINE # m = torch.distributions.Normal(a_t, torch.exp(0.5 * self.action_log_std))NEWLINE # sketch_anchor_embedding = m.sample()NEWLINE # log_prob = m.log_prob(sketch_anchor_embedding).sum()NEWLINENEWLINE return a_tNEWLINENEWLINEclass BootNetwork(nn.Module):NEWLINENEWLINE def __init__(self, input_size, output_size):NEWLINE super().__init__()NEWLINENEWLINE self.fc = nn.Linear(input_size, output_size)NEWLINE # self.action_log_std = nn.Parameter(torch.ones(output_size) * log_std)NEWLINENEWLINE def forward(self, h_t):NEWLINE h_t = self.fc(h_t)NEWLINE # m = torch.distributions.Normal(a_t, torch.exp(0.5 * self.action_log_std))NEWLINE # sketch_anchor_embedding = m.sample()NEWLINE # log_prob = m.log_prob(sketch_anchor_embedding).sum()NEWLINENEWLINE return h_tNEWLINENEWLINEdef weights_init(m):NEWLINE """custom weights initialization"""NEWLINE classtype = m.__class__NEWLINE if classtype == nn.Linear or classtype == nn.Conv2d:NEWLINE print("default init")NEWLINE #m.weight.data.normal_(0.0, 0.02)NEWLINE #m.bias.data.fill_(0)NEWLINE elif classtype == nn.BatchNorm2d:NEWLINE m.weight.data.normal_(1.0, 0.02)NEWLINE m.bias.data.fill_(0)NEWLINE else:NEWLINE print('%s is not initialized.' %classtype)NEWLINENEWLINEclass LocationNetwork(nn.Module):NEWLINE """The location network.NEWLINENEWLINE Uses the internal state `h_t` of the core network toNEWLINE produce the location coordinates `l_t` for the nextNEWLINE time step.NEWLINENEWLINE Concretely, feeds the hidden state `h_t` through a fcNEWLINE layer followed by a tanh to clamp the output beweenNEWLINE [-1, 1]. 
This produces a 2D vector of means used toNEWLINE parametrize a two-component Gaussian with a fixedNEWLINE variance from which the location coordinates `l_t`NEWLINE for the next time step are sampled.NEWLINENEWLINE Hence, the location `l_t` is chosen stochasticallyNEWLINE from a distribution conditioned on an affineNEWLINE transformation of the hidden state vector `h_t`.NEWLINENEWLINE Args:NEWLINE input_size: input size of the fc layer.NEWLINE output_size: output size of the fc layer.NEWLINE std: standard deviation of the normal distribution.NEWLINE h_t: the hidden state vector of the core network forNEWLINE the current time step `t`.NEWLINENEWLINE Returns:NEWLINE mu: a 2D vector of shape (B, 2).NEWLINE l_t: a 2D vector of shape (B, 2).NEWLINE """NEWLINENEWLINE def __init__(self, input_size, output_size, std):NEWLINE super().__init__()NEWLINENEWLINE self.std = stdNEWLINE # self.std = nn.Parameter(torch.ones(2) * std)NEWLINENEWLINE hid_size = input_size // 2NEWLINE self.fc = nn.Linear(input_size, hid_size)NEWLINE self.fc.apply(weights_init)NEWLINE self.fc_lt = nn.Linear(hid_size, output_size)NEWLINE self.fc_lt.apply(weights_init)NEWLINENEWLINE def forward(self, h_t, epoch, t, is_test):NEWLINE # compute meanNEWLINE feat = F.relu(self.fc(h_t.detach()))NEWLINE l_t = torch.tanh(self.fc_lt(feat))NEWLINENEWLINENEWLINE # log_pi = Normal(mu, self.std).log_prob(l_t)NEWLINE # we assume both dimensions are independentNEWLINE # 1. pdf of the joint is the product of the pdfsNEWLINE # 2. log of the product is the sum of the logsNEWLINE # log_pi = torch.sum(log_pi, dim=1)NEWLINENEWLINE # bound between [-1, 1]NEWLINENEWLINE # entropy = Normal(mu, self.std).entropy()NEWLINENEWLINE return l_tNEWLINENEWLINENEWLINEclass BaselineNetwork(nn.Module):NEWLINE """The baseline network.NEWLINENEWLINE This network regresses the baseline in theNEWLINE reward function to reduce the variance ofNEWLINE the gradient update.NEWLINENEWLINE Args:NEWLINE input_size: input size of the fc layer.NEWLINE output_size: output size of the fc layer.NEWLINE h_t: the hidden state vector of the core networkNEWLINE for the current time step `t`.NEWLINENEWLINE Returns:NEWLINE b_t: a 2D vector of shape (B, 1). The baselineNEWLINE for the current time step `t`.NEWLINE """NEWLINENEWLINE def __init__(self, input_size, output_size):NEWLINE super().__init__()NEWLINENEWLINE self.fc = nn.Linear(input_size, output_size)NEWLINENEWLINE def forward(self, h_t):NEWLINE b_t = self.fc(h_t.detach())NEWLINE return b_tNEWLINE |
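# Hedged sketch (toy shapes, not taken from the original training script): one
# glimpse -> core step on random data to show how the modules above compose.
# All sizes below are assumptions chosen only for illustration.
import torch

B, C, H, W = 4, 1, 28, 28
glimpse_net = GlimpseNetwork(h_g=128, h_l=128, g=8, k=3, s=2, c=C)
core_net = CoreNetwork(input_size=256, hidden_size=256)

x = torch.rand(B, C, H, W)
l_t = torch.empty(B, 2).uniform_(-1, 1)
h_t = torch.zeros(B, 256)

g_t = glimpse_net(x, l_t)   # (B, h_g + h_l) = (4, 256)
h_t = core_net(g_t, h_t)    # (4, 256)
print(g_t.shape, h_t.shape)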
import osNEWLINENEWLINEimport torchNEWLINEimport torch.nn as nnNEWLINEimport torch.nn.functional as FNEWLINEimport torchvisionNEWLINEimport torchvision.datasets as datasetsNEWLINEimport torchvision.transforms as transformsNEWLINEfrom pl_bolts.datamodules import CIFAR10DataModuleNEWLINEfrom pl_bolts.callbacks import PrintTableMetricsCallbackNEWLINEfrom pl_bolts.transforms.dataset_normalizations import cifar10_normalizationNEWLINEfrom pytorch_lightning import LightningModule, seed_everything, TrainerNEWLINEfrom pytorch_lightning.callbacks import LearningRateMonitorNEWLINEfrom pytorch_lightning.loggers import TensorBoardLogger, MLFlowLoggerNEWLINEfrom pytorch_lightning.callbacks import CallbackNEWLINEfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpointNEWLINEfrom torch.optim.lr_scheduler import OneCycleLRNEWLINEfrom torch.optim.swa_utils import AveragedModel, update_bnNEWLINEfrom torchmetrics.functional import accuracyNEWLINEimport sysNEWLINEsys.path.append("../../")NEWLINEfrom autotorch.models.network import init_networkNEWLINEfrom autotorch.autoptl.custom_trainer import CustomTrainerNEWLINENEWLINEseed_everything(7)NEWLINENEWLINEPATH_DATASETS = os.environ.get(NEWLINE '/media/robin/DATA/datatsets/image_data/cifar10')NEWLINEAVAIL_GPUS = min(1, torch.cuda.device_count())NEWLINEBATCH_SIZE = 16 if AVAIL_GPUS else 32NEWLINENUM_WORKERS = int(os.cpu_count() / 2)NEWLINENEWLINEtrain_transforms = torchvision.transforms.Compose([NEWLINE torchvision.transforms.RandomResizedCrop(32),NEWLINE torchvision.transforms.RandomHorizontalFlip(),NEWLINE torchvision.transforms.ToTensor(),NEWLINE cifar10_normalization(),NEWLINE])NEWLINENEWLINEtest_transforms = torchvision.transforms.Compose([NEWLINE torchvision.transforms.RandomResizedCrop(32),NEWLINE torchvision.transforms.ToTensor(),NEWLINE cifar10_normalization(),NEWLINE])NEWLINENEWLINEcifar10_dm = CIFAR10DataModule(NEWLINE data_dir=PATH_DATASETS,NEWLINE batch_size=BATCH_SIZE,NEWLINE num_workers=NUM_WORKERS,NEWLINE train_transforms=train_transforms,NEWLINE test_transforms=test_transforms,NEWLINE val_transforms=test_transforms,NEWLINE)NEWLINENEWLINE# Data loading codeNEWLINEroot_dir = '/media/robin/DATA/datatsets/image_data/shopee-iet/images'NEWLINEtraindir = os.path.join(root_dir, 'train')NEWLINEvaldir = os.path.join(root_dir, 'val')NEWLINEnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],NEWLINE std=[0.229, 0.224, 0.225])NEWLINENEWLINEtrain_dataset = datasets.ImageFolder(NEWLINE traindir,NEWLINE transforms.Compose([NEWLINE transforms.RandomResizedCrop(224),NEWLINE transforms.RandomHorizontalFlip(),NEWLINE transforms.ToTensor(),NEWLINE normalize,NEWLINE ]))NEWLINENEWLINEtrain_sampler = NoneNEWLINEtrain_loader = torch.utils.data.DataLoader(train_dataset,NEWLINE batch_size=32,NEWLINE shuffle=(train_sampler is None),NEWLINE num_workers=0,NEWLINE pin_memory=True,NEWLINE sampler=train_sampler)NEWLINENEWLINEval_dataset = datasets.ImageFolder(NEWLINE valdir,NEWLINE transforms.Compose([NEWLINE transforms.Resize(256),NEWLINE transforms.CenterCrop(224),NEWLINE transforms.ToTensor(),NEWLINE normalize,NEWLINE ]))NEWLINENEWLINEval_loader = torch.utils.data.DataLoader(val_dataset,NEWLINE batch_size=32,NEWLINE shuffle=False,NEWLINE num_workers=0,NEWLINE pin_memory=True)NEWLINENEWLINENEWLINEdef create_model(model_name):NEWLINE model = torchvision.models.resnet18(pretrained=False, num_classes=4)NEWLINE # model = init_network(model_name, num_class=10, pretrained=True)NEWLINE # model.conv1 = nn.Conv2d(3,NEWLINE # 64,NEWLINE # kernel_size=(3, 3),NEWLINE # 
stride=(1, 1),NEWLINE # padding=(1, 1),NEWLINE # bias=False)NEWLINE # model.maxpool = nn.Identity()NEWLINE return modelNEWLINENEWLINENEWLINEclass LitResnet(LightningModule):NEWLINE def __init__(self, lr=0.05):NEWLINE super().__init__()NEWLINENEWLINE self.save_hyperparameters()NEWLINE self.model = create_model('resnet18')NEWLINENEWLINE def forward(self, x):NEWLINE out = self.model(x)NEWLINE return F.log_softmax(out, dim=1)NEWLINENEWLINE def training_step(self, batch, batch_idx):NEWLINE x, y = batchNEWLINE y_hat = self.model(x)NEWLINE loss = F.cross_entropy(y_hat, y)NEWLINENEWLINE # logs metrics for each training_step,NEWLINE # and the average across the epoch, to the progress bar and loggerNEWLINE self.log('train_loss',NEWLINE loss,NEWLINE on_step=True,NEWLINE on_epoch=True,NEWLINE prog_bar=True,NEWLINE logger=True,NEWLINE sync_dist=True)NEWLINE return lossNEWLINENEWLINE def evaluate(self, batch, stage=None):NEWLINE x, y = batchNEWLINE logits = self(x)NEWLINE loss = F.nll_loss(logits, y)NEWLINE preds = torch.argmax(logits, dim=1)NEWLINE acc = accuracy(preds, y)NEWLINENEWLINE if stage:NEWLINE self.log(f'{stage}_loss', loss, prog_bar=True)NEWLINE self.log(f'{stage}_acc', acc, prog_bar=True)NEWLINENEWLINE def validation_step(self, batch, batch_idx):NEWLINE self.evaluate(batch, 'val')NEWLINENEWLINE # def test_step(self, batch, batch_idx):NEWLINE # self.evaluate(batch, 'test')NEWLINENEWLINE def test_step(self, batch, batch_idx):NEWLINE x, y = batchNEWLINE # implement your ownNEWLINE logits = self(x)NEWLINE loss = F.nll_loss(logits, y)NEWLINE preds = torch.argmax(logits, dim=1)NEWLINE acc = accuracy(preds, y)NEWLINE # log the outputs!NEWLINE self.log_dict({'test_loss': loss, 'test_acc': acc})NEWLINENEWLINE def configure_optimizers(self):NEWLINE optimizer = torch.optim.SGD(NEWLINE self.parameters(),NEWLINE lr=self.hparams.lr,NEWLINE momentum=0.9,NEWLINE weight_decay=5e-4,NEWLINE )NEWLINE steps_per_epoch = 45000 // BATCH_SIZENEWLINE scheduler_dict = {NEWLINE 'scheduler':NEWLINE OneCycleLR(NEWLINE optimizer,NEWLINE 0.1,NEWLINE epochs=self.trainer.max_epochs,NEWLINE steps_per_epoch=steps_per_epoch,NEWLINE ),NEWLINE 'interval':NEWLINE 'step',NEWLINE }NEWLINENEWLINE return {NEWLINE 'optimizer': optimizer,NEWLINE 'lr_scheduler': scheduler_dict,NEWLINE }NEWLINENEWLINE def configure_callbacks(self):NEWLINE checkpoint = ModelCheckpoint(monitor="val_loss")NEWLINE return [checkpoint]NEWLINENEWLINENEWLINEclass PrintCallback(Callback):NEWLINE def on_train_start(self, trainer, pl_module):NEWLINE print("Training is started!")NEWLINENEWLINE def on_train_end(self, trainer, pl_module):NEWLINE print("Training is done.")NEWLINENEWLINENEWLINEearly_stop_callback = EarlyStopping(monitor='val_acc',NEWLINE min_delta=0.00,NEWLINE patience=3,NEWLINE verbose=False,NEWLINE mode='max')NEWLINENEWLINEmodel = LitResnet(lr=0.05)NEWLINEmodel.datamodule = cifar10_dmNEWLINEtrainer = CustomTrainer(NEWLINE progress_bar_refresh_rate=50,NEWLINE log_every_n_steps=1,NEWLINE log_gpu_memory='all',NEWLINE max_epochs=10,NEWLINE gpus=AVAIL_GPUS,NEWLINE sync_batchnorm=True,NEWLINE limit_train_batches=1.0,NEWLINE checkpoint_callback=True,NEWLINE check_val_every_n_epoch=1,NEWLINE precision=16,NEWLINE profiler="simple",NEWLINE val_check_interval=1.0,NEWLINE weights_summary='top',NEWLINE auto_scale_batch_size=True,NEWLINE benchmark=True,NEWLINE weights_save_path='lightning_logs/',NEWLINE default_root_dir=os.getcwd(),NEWLINE max_time={NEWLINE "days": 1,NEWLINE "hours": 5NEWLINE },NEWLINE logger=[NEWLINE 
TensorBoardLogger(save_dir='lightning_logs/',NEWLINE version="0",NEWLINE name='resnet'),NEWLINE MLFlowLogger(save_dir='mlflow_logs/')NEWLINE ],NEWLINE callbacks=[NEWLINE LearningRateMonitor(logging_interval='step'),NEWLINE PrintTableMetricsCallback(),NEWLINE early_stop_callback,NEWLINE ],NEWLINE)NEWLINENEWLINE# trainer.fit(model, cifar10_dm)NEWLINE# trainer.test(model, datamodule=cifar10_dm)NEWLINENEWLINEtrainer.fit(model, train_dataloader=train_loader, val_dataloaders=val_loader)NEWLINEtrainer.test(model, val_loader)NEWLINENEWLINE |
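# Hedged alternative (left commented out, like the unused calls above): the same
# LitResnet can be smoke-tested with a stock Lightning Trainer; fast_dev_run runs a
# single train/val batch without a full fit.
# debug_trainer = Trainer(fast_dev_run=True, gpus=AVAIL_GPUS)
# debug_trainer.fit(LitResnet(lr=0.05), datamodule=cifar10_dm)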
root = {NEWLINENEWLINE    "general" : {NEWLINENEWLINE        "display_viewer" : False,NEWLINE        # The visible GPUs will be restricted to the numbers listed here. The pytorch (cuda:0) numeration will start at 0.NEWLINE        # This is a trick to get everything onto the wanted GPUs, because just setting cuda:4 in the function calls willNEWLINE        # not work for mmdetection. There would still be things on gpu cuda:0.NEWLINE        "cuda_visible_devices" : "3",NEWLINE        "save_track_results" : TrueNEWLINENEWLINE    },NEWLINENEWLINE    "data" : {NEWLINE        # To speed up development, a specific interval of all frames can be selected here.NEWLINE        "selection_interval" : [0,10000],NEWLINENEWLINE        "source" : {NEWLINE            # "base_folder" : "/u40/zhanr110/MTA_ext_short/test",NEWLINE            "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",NEWLINE            "cam_ids" : [0,1,2,3,4,5]NEWLINE        }NEWLINENEWLINE    },NEWLINENEWLINE    "tracker" : {NEWLINE        "type" : "DeepSort",NEWLINE        "nn_budget" : 100NEWLINE    }NEWLINE}NEWLINE |
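# Hedged sketch of how a tracking run would typically consume this config; the keys
# are taken from the dict above, but the surrounding code is illustrative only.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = root["general"]["cuda_visible_devices"]
start_frame, end_frame = root["data"]["selection_interval"]
for cam_id in root["data"]["source"]["cam_ids"]:
    print(root["tracker"]["type"], root["data"]["source"]["base_folder"],
          "cam", cam_id, "frames", start_frame, "-", end_frame)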
import requestsNEWLINENEWLINE#sresponse = requests.get() |
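# Hedged example of the call the commented line above appears to sketch; the URL and
# timeout are placeholders, not taken from the original file.
# response = requests.get("https://example.com/api", timeout=10)
# response.raise_for_status()
# print(response.json())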
"""NEWLINEUnit tests for the budgeteer main app models.NEWLINE"""NEWLINEimport datetimeNEWLINEimport randomNEWLINEimport stringNEWLINEimport calendarNEWLINEfrom decimal import DecimalNEWLINENEWLINEfrom unittest_data_provider import data_providerNEWLINENEWLINEfrom django.core.exceptions import ValidationErrorNEWLINEfrom django.db.models.deletion import ProtectedErrorNEWLINEfrom django.db.utils import IntegrityErrorNEWLINEfrom django.test import TestCaseNEWLINENEWLINEimport budgeteer.models as modelsNEWLINENEWLINE#pylint: disable=missing-function-docstringNEWLINE#pylint: disable=missing-class-docstringNEWLINENEWLINEclass CategoryTests(TestCase):NEWLINENEWLINE def test_name_save(self):NEWLINE category = models.Category()NEWLINE category.name = ''.join(random.choices(string.ascii_letters + string.digits, k=20))NEWLINE category.full_clean()NEWLINE category.save()NEWLINENEWLINE category_from_db = models.Category.objects.get(pk=category.pk)NEWLINENEWLINE self.assertEqual(category.name, category_from_db.name)NEWLINENEWLINE def test_name_max_length_not_ok(self):NEWLINE category = models.Category()NEWLINE category.name = ''.join(random.choices(string.ascii_letters + string.digits, k=201))NEWLINE with self.assertRaises(ValidationError):NEWLINE category.full_clean()NEWLINENEWLINE def test_str(self):NEWLINE expected_name = _get_random_name()NEWLINE category = models.Category(name=expected_name)NEWLINE self.assertEqual(expected_name, category.__str__())NEWLINENEWLINEclass SheetTests(TestCase):NEWLINENEWLINE def test_month_save(self):NEWLINE expected_month = random.randint(1, 12)NEWLINENEWLINE sheet = models.Sheet()NEWLINE sheet.month = expected_monthNEWLINE sheet.year = 1NEWLINE sheet.full_clean()NEWLINE sheet.save()NEWLINENEWLINE sheet_from_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertEqual(expected_month, sheet_from_db.month)NEWLINENEWLINE def test_month_allowed_values(self):NEWLINE for month in range(1, 12):NEWLINE sheet = models.Sheet()NEWLINE sheet.month = monthNEWLINE sheet.year = 1NEWLINENEWLINE try:NEWLINE sheet.full_clean()NEWLINE except ValidationError:NEWLINE self.fail(f"Month {month} failed to validate")NEWLINENEWLINE def test_month_min_value(self):NEWLINE sheet = models.Sheet()NEWLINE sheet.year = 1NEWLINE sheet.month = 0NEWLINE with self.assertRaises(ValidationError):NEWLINE sheet.full_clean()NEWLINENEWLINE def test_month_max_value(self):NEWLINE sheet = models.Sheet()NEWLINE sheet.year = 1NEWLINE sheet.month = 13NEWLINE with self.assertRaises(ValidationError):NEWLINE sheet.full_clean()NEWLINENEWLINE def test_year_save(self):NEWLINE expected_year = random.randint(1980, 2100)NEWLINENEWLINE sheet = models.Sheet()NEWLINE sheet.month = 1NEWLINE sheet.year = expected_yearNEWLINE sheet.full_clean()NEWLINE sheet.save()NEWLINENEWLINE sheet_from_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertEqual(expected_year, sheet_from_db.year)NEWLINENEWLINE def test_year_no_negative_values(self):NEWLINE sheet = models.Sheet()NEWLINE sheet.month = 1NEWLINE sheet.year = -1NEWLINENEWLINE with self.assertRaises(IntegrityError):NEWLINE sheet.save()NEWLINENEWLINE def test_combination_unique(self):NEWLINE sheet_1 = models.Sheet(month=1, year=1)NEWLINE sheet_1.full_clean()NEWLINE sheet_1.save()NEWLINENEWLINE sheet_2 = models.Sheet(month=1, year=1)NEWLINE with self.assertRaises(ValidationError):NEWLINE sheet_2.full_clean()NEWLINENEWLINE def test_carryover_save(self):NEWLINE expected_value = (Decimal(random.uniform(-999999999.99, 999999999.99))NEWLINE 
.quantize(Decimal('.01')))NEWLINENEWLINE sheet = models.Sheet(month=6, year=2929)NEWLINE sheet.carryover = expected_valueNEWLINE sheet.full_clean()NEWLINE sheet.save()NEWLINENEWLINE sheet_in_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertEqual(expected_value, sheet_in_db.carryover)NEWLINENEWLINE def test_carryover_max_digits(self):NEWLINE expected_value = Decimal('12345678901.23')NEWLINENEWLINE sheet = models.Sheet(month=6, year=2020)NEWLINE sheet.carryover = expected_valueNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE sheet.full_clean()NEWLINENEWLINE def test_carryover_decimal_places(self):NEWLINE expected_value = Decimal('123456789.123')NEWLINENEWLINE sheet = models.Sheet(month=6, year=2020)NEWLINE sheet.carryover = expected_valueNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE sheet.full_clean()NEWLINENEWLINE def test_get_transactions(self):NEWLINE sheet = models.Sheet(month=2, year=2020)NEWLINE sheet.save()NEWLINENEWLINE transaction_to_expect_1 = _create_transaction(2, 2020)NEWLINE transaction_to_expect_2 = _create_transaction(2, 2020)NEWLINE transaction_to_expect_3 = _create_transaction(2, 2020)NEWLINENEWLINE _create_transaction(2, 2019)NEWLINE _create_transaction(2, 2021)NEWLINE _create_transaction(1, 2020)NEWLINE _create_transaction(3, 2020)NEWLINE _create_transaction(3, 2021)NEWLINE _create_transaction(1, 2019)NEWLINE _create_transaction(3, 2019)NEWLINE _create_transaction(1, 2021)NEWLINENEWLINE expected_transactions = [transaction_to_expect_1,NEWLINE transaction_to_expect_2,NEWLINE transaction_to_expect_3]NEWLINE actual_transactions = list(sheet.transactions)NEWLINENEWLINE self.assertCountEqual(expected_transactions, actual_transactions)NEWLINENEWLINE def test_available(self):NEWLINE sheet = models.Sheet(month=2, year=2020)NEWLINE sheet.save()NEWLINENEWLINE transactions = [_create_transaction(2, 2020) for _ in range(10)]NEWLINE inflow = sum(trans.value.quantize(Decimal('.01'))NEWLINE for trans in filter(lambda t: t.value > 0, transactions))NEWLINENEWLINE entries = [_create_sheet_entry(sheet)]NEWLINE budget = sum(e.value.quantize(Decimal('.01')) for e in entries)NEWLINENEWLINE expected_available = inflow - budgetNEWLINENEWLINE self.assertAlmostEqual(expected_available, sheet.available, 2)NEWLINENEWLINE def test_available_with_carryover(self):NEWLINE sheet = models.Sheet(month=12, year=2020)NEWLINE sheet.save()NEWLINENEWLINE transactions = [_create_transaction(12, 2020) for _ in range(10)]NEWLINE inflow = sum(trans.value.quantize(Decimal('.01'))NEWLINE for trans in filter(lambda t: t.value > 0, transactions))NEWLINENEWLINE entries = [_create_sheet_entry(sheet)]NEWLINE budget = sum(e.value.quantize(Decimal('.01')) for e in entries)NEWLINENEWLINE previous_sheets = [_create_sheet(month=month, year=2020) for month in range(1, 12)]NEWLINENEWLINE expected_available = inflow - budget + previous_sheets[-1].availableNEWLINENEWLINE self.assertAlmostEqual(expected_available, sheet.available, 2)NEWLINENEWLINE def test_available_with_locked_carryover(self):NEWLINE sheet = models.Sheet(month=12, year=2020)NEWLINE sheet.save()NEWLINENEWLINE transactions = [_create_transaction(12, 2020) for _ in range(10)]NEWLINE inflow = sum(trans.value.quantize(Decimal('.01'))NEWLINE for trans in filter(lambda t: t.value > 0, transactions))NEWLINENEWLINE entries = [_create_sheet_entry(sheet)]NEWLINE budget = sum(e.value.quantize(Decimal('.01')) for e in entries)NEWLINENEWLINE previous_sheet = models.Sheet(month=11, year=2020)NEWLINE previous_sheet.carryover = 
Decimal(random.uniform(-999.99, 999.99))NEWLINE previous_sheet.save()NEWLINENEWLINE for month in range(1, 11):NEWLINE _create_sheet(month=month, year=2020)NEWLINENEWLINE expected_available = inflow - budget + previous_sheet.carryoverNEWLINENEWLINE self.assertAlmostEqual(expected_available, sheet.available, 2)NEWLINENEWLINE def test_get_previous_exists_same_year(self):NEWLINE sheet = models.Sheet(month=2, year=2020)NEWLINE sheet.save()NEWLINENEWLINE previous_sheet = models.Sheet(month=1, year=2020)NEWLINE previous_sheet.save()NEWLINENEWLINE sheet_in_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertEqual(previous_sheet, sheet_in_db.previous)NEWLINENEWLINE def test_get_previous_exists_other_year(self):NEWLINE sheet = models.Sheet(month=1, year=2020)NEWLINE sheet.save()NEWLINENEWLINE previous_sheet = models.Sheet(month=12, year=2019)NEWLINE previous_sheet.save()NEWLINENEWLINE sheet_in_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertEqual(previous_sheet, sheet_in_db.previous)NEWLINENEWLINE def test_get_previous_not_exists(self):NEWLINE sheet = models.Sheet(month=1, year=2020)NEWLINE sheet.save()NEWLINENEWLINE sheet_in_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINENEWLINE self.assertIsNone(sheet_in_db.previous)NEWLINENEWLINE def test_initialize_entries_on_creation(self):NEWLINE expected_categories = [_create_category() for _ in range(10)]NEWLINENEWLINE sheet = models.Sheet(month=2, year=2020)NEWLINE sheet.save()NEWLINENEWLINE sheet_in_db = models.Sheet.objects.get(pk=sheet.pk)NEWLINE self.assertListEqual(expected_categories,NEWLINE list(map(lambda e: e.category, sheet_in_db.sheetentry_set.all())))NEWLINE for entry in sheet_in_db.sheetentry_set.all():NEWLINE self.assertEqual(Decimal(0), entry.value)NEWLINENEWLINE @data_provider(lambda: (NEWLINE (12, 1, "12/1"),NEWLINE (6, 2020, "06/2020"),NEWLINE (1, 12345, "01/12345"),NEWLINE (10, 1988, "10/1988")NEWLINE ))NEWLINE def test_str(self, month, year, expected_name):NEWLINE sheet = models.Sheet(month=month, year=year)NEWLINE self.assertEqual(expected_name, sheet.__str__())NEWLINENEWLINEclass SheetEntryTest(TestCase):NEWLINENEWLINE def setUp(self):NEWLINE self.sheet = models.Sheet(month=1, year=1)NEWLINE self.sheet.save()NEWLINENEWLINE self.category = models.Category(name=_get_random_name())NEWLINE self.category.save()NEWLINENEWLINE def test_entry_save(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=0)NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertEqual(entry, entry_in_db)NEWLINENEWLINE def test_foreign_key_sheet(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=0)NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertEqual(self.sheet, entry_in_db.sheet)NEWLINENEWLINE def test_foreign_key_category(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=0)NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertEqual(self.category, entry_in_db.category)NEWLINENEWLINE def test_sheet_cascade(self):NEWLINE sheet = models.Sheet(month=2, year=1)NEWLINE sheet.save()NEWLINENEWLINE entry = models.SheetEntry(sheet=sheet, category=self.category, value=0)NEWLINE entry.save()NEWLINENEWLINE sheet.delete()NEWLINENEWLINE actual_count = models.SheetEntry.objects.filter(pk=entry.pk).count()NEWLINENEWLINE self.assertEqual(0, 
actual_count)NEWLINENEWLINE def test_category_cascade(self):NEWLINE category = models.Category(name="Test")NEWLINE category.save()NEWLINENEWLINE entry = models.SheetEntry(sheet=self.sheet, category=category, value=0)NEWLINE entry.save()NEWLINENEWLINE category.delete()NEWLINENEWLINE actual_count = models.SheetEntry.objects.filter(pk=entry.pk).count()NEWLINENEWLINE self.assertEqual(0, actual_count)NEWLINENEWLINE def test_value_save(self):NEWLINE expected_value = (Decimal(random.uniform(-999999999.99, 999999999.99))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=expected_value)NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertEqual(expected_value, entry_in_db.value)NEWLINENEWLINE def test_value_max_digits(self):NEWLINE expected_value = Decimal('12345678901.23')NEWLINENEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=expected_value)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE entry.full_clean()NEWLINENEWLINE def test_value_decimal_places(self):NEWLINE expected_value = Decimal('123456789.123')NEWLINENEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=expected_value)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE entry.full_clean()NEWLINENEWLINE def test_locked(self):NEWLINE expected_lock = bool(random.getrandbits(1))NEWLINENEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=Decimal(0))NEWLINE entry.locked = expected_lockNEWLINE entry.full_clean()NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertEqual(expected_lock, entry_in_db.locked)NEWLINENEWLINE def test_locked_default_false(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=Decimal(0))NEWLINE entry.full_clean()NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE self.assertFalse(entry_in_db.locked)NEWLINENEWLINE def test_locked_no_change_to_sheet(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=Decimal(0))NEWLINE entry.locked = TrueNEWLINE entry.full_clean()NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE new_sheet = models.Sheet(month=1, year=self.sheet.year + 1)NEWLINE new_sheet.save()NEWLINENEWLINE entry_in_db.sheet = new_sheetNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE entry_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_category(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=Decimal(0))NEWLINE entry.locked = TrueNEWLINE entry.full_clean()NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE new_category = models.Category(name=_get_random_name())NEWLINE new_category.save()NEWLINENEWLINE entry_in_db.category = new_categoryNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE entry_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_value(self):NEWLINE entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=Decimal(0))NEWLINE entry.locked = TrueNEWLINE entry.full_clean()NEWLINE entry.save()NEWLINENEWLINE entry_in_db = models.SheetEntry.objects.get(pk=entry.pk)NEWLINENEWLINE entry_in_db.value = Decimal(1)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE 
entry_in_db.full_clean()NEWLINENEWLINE def test_created_for_open_sheets_when_category_created(self):NEWLINE open_sheets = [_create_sheet(month, 2020) for month in range(1, 13)]NEWLINE closed_sheets = [_create_sheet(month, 2021) for month in range(1, 13)]NEWLINENEWLINE for sheet in closed_sheets:NEWLINE sheet.carryover = Decimal(random.uniform(-999.99, 999.99))NEWLINE sheet.save()NEWLINENEWLINE new_categories = [_create_category() for _ in range(10)]NEWLINENEWLINE for category in new_categories:NEWLINE for sheet in open_sheets:NEWLINE self.assertEqual(1, models.SheetEntry.objects.filter(category=category,NEWLINE sheet=sheet).count())NEWLINE for sheet in closed_sheets:NEWLINE self.assertEqual(0, models.SheetEntry.objects.filter(category=category,NEWLINE sheet=sheet).count())NEWLINENEWLINE def test_str(self):NEWLINE value = Decimal(random.uniform(-999.99, 999.99))NEWLINE expected_name = f"[{str(self.sheet)}] {str(self.category)}: {str(value)}"NEWLINE sheet_entry = models.SheetEntry(sheet=self.sheet, category=self.category, value=value)NEWLINE self.assertEqual(expected_name, sheet_entry.__str__())NEWLINENEWLINEclass AccountTest(TestCase):NEWLINENEWLINE def test_name_save(self):NEWLINE expected_name = ''.join(random.choices(string.ascii_letters + string.digits, k=200))NEWLINENEWLINE account = models.Account()NEWLINE account.name = expected_nameNEWLINE account.balance = Decimal(0)NEWLINE account.full_clean()NEWLINE account.save()NEWLINENEWLINE account_in_db = models.Account.objects.get(pk=account.pk)NEWLINENEWLINE self.assertEqual(expected_name, account_in_db.name)NEWLINENEWLINE def test_name_max_value(self):NEWLINE expected_name = ''.join(random.choices(string.ascii_letters + string.digits, k=201))NEWLINENEWLINE account = models.Account()NEWLINE account.name = expected_nameNEWLINE account.balanace = 0NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE account.full_clean()NEWLINENEWLINE def test_balance(self):NEWLINE expected_balance = (Decimal(random.uniform(-999999999.99, 999999999.99))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE account = models.Account()NEWLINE account.balance = expected_balanceNEWLINE account.save()NEWLINENEWLINE account_in_db = models.Account.objects.get(pk=account.pk)NEWLINENEWLINE self.assertEqual(expected_balance, account_in_db.balance)NEWLINENEWLINE def test_balance_max_digits(self):NEWLINE balance = Decimal('12345678901.23')NEWLINENEWLINE account = models.Account(name=_get_random_name(), balance=balance)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE account.full_clean()NEWLINENEWLINE def test_balance_decimal_places(self):NEWLINE balance = Decimal('123456789.123')NEWLINENEWLINE account = models.Account(name=_get_random_name(), balance=balance)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE account.full_clean()NEWLINENEWLINE def test_total_no_transactions(self):NEWLINE expected_total = (Decimal(random.uniform(-999999999.99, 999999999.99))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE account = models.Account()NEWLINE account.balance = expected_totalNEWLINE account.save()NEWLINENEWLINE account_in_db = models.Account.objects.get(pk=account.pk)NEWLINENEWLINE self.assertEqual(expected_total, account_in_db.total)NEWLINENEWLINE def test_total_only_unlocked_transactions(self):NEWLINE starting_balance = (Decimal(random.uniform(-9999.99, 9999.99))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE account = models.Account()NEWLINE account.balance = starting_balanceNEWLINE account.save()NEWLINENEWLINE tomorrow = datetime.date.today() + 
datetime.timedelta(days=1)NEWLINE transactions = ([_create_transaction(tomorrow.month, tomorrow.year, account)NEWLINE for _ in range(1)])NEWLINENEWLINE expected_total = ((starting_balance + sum(Decimal(t.value) for t in transactions))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE account_in_db = models.Account.objects.get(pk=account.pk)NEWLINENEWLINE self.assertEqual(expected_total, account_in_db.total)NEWLINENEWLINE def test_total_ignore_other_accounts(self):NEWLINE #pylint: disable=unused-variableNEWLINE starting_balance = Decimal(random.uniform(-9999.99, 9999.99)).quantize(Decimal('.01'))NEWLINENEWLINE account = models.Account()NEWLINE account.balance = starting_balanceNEWLINE account.save()NEWLINENEWLINE tomorrow = datetime.date.today() + datetime.timedelta(days=1)NEWLINE transactions = ([_create_transaction(tomorrow.month, tomorrow.year, account)NEWLINE for _ in range(10)])NEWLINE for _ in range(10):NEWLINE _create_transaction(tomorrow.month, tomorrow.year, account, locked=True)NEWLINENEWLINE expected_total = ((starting_balance + sum(Decimal(t.value) for t in transactions))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE account_in_db = models.Account.objects.get(pk=account.pk)NEWLINENEWLINE self.assertAlmostEqual(expected_total, account_in_db.total, 2)NEWLINENEWLINE def test_str(self):NEWLINE expected_name = _get_random_name()NEWLINE account = models.Account(name=expected_name)NEWLINE self.assertEqual(expected_name, account.__str__())NEWLINENEWLINEclass TransactionTest(TestCase):NEWLINENEWLINE def setUp(self):NEWLINE self.category = models.Category(name="Test")NEWLINE self.category.save()NEWLINE self.account = models.Account(name="Test", balance=Decimal(0))NEWLINE self.account.save()NEWLINENEWLINE def test_partner_save(self):NEWLINE expected_name = ''.join(random.choices(string.ascii_letters + string.digits, k=200))NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = expected_nameNEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertEqual(expected_name, transaction_in_db.partner)NEWLINENEWLINE def test_partner_max_length(self):NEWLINE expected_name = ''.join(random.choices(string.ascii_letters + string.digits, k=201))NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = expected_nameNEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction.full_clean()NEWLINENEWLINE def test_date_save(self):NEWLINE expected_date = datetime.date(random.randrange(1980, 2100),NEWLINE random.randrange(1, 12),NEWLINE random.randrange(1, 28))NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = expected_dateNEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertEqual(expected_date,NEWLINE transaction_in_db.date)NEWLINENEWLINE def test_value_save(self):NEWLINE expected_value = (Decimal(random.uniform(-999999999.99, 
999999999.99))NEWLINE .quantize(Decimal('.01')))NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = expected_valueNEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertEqual(expected_value, transaction_in_db.value)NEWLINENEWLINE def test_value_max_digits(self):NEWLINE expected_value = Decimal('12345678901.23')NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = expected_valueNEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction.full_clean()NEWLINENEWLINE def test_value_decimal_places(self):NEWLINE expected_value = Decimal('123456789.123')NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = expected_valueNEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction.full_clean()NEWLINENEWLINE def test_category(self):NEWLINE expected_category = models.Category(name="Expected category")NEWLINE expected_category.save()NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = expected_categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertEqual(expected_category, transaction_in_db.category)NEWLINENEWLINE def test_category_must_be_set(self):NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = NoneNEWLINE transaction.account = self.accountNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction.full_clean()NEWLINENEWLINE def test_category_prevent_deletion(self):NEWLINE category = models.Category(name="Expected category")NEWLINE category.save()NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE with self.assertRaises(ProtectedError):NEWLINE category.delete()NEWLINENEWLINE def test_account(self):NEWLINE expected_account = models.Account(name="Expected account", balance=Decimal(0))NEWLINE expected_account.save()NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = expected_accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertEqual(expected_account, transaction_in_db.account)NEWLINENEWLINE def 
test_account_must_be_net(self):NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = NoneNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction.full_clean()NEWLINENEWLINE def test_account_prevent_deletion(self):NEWLINE account = models.Account(name="Expected account", balance=Decimal(0))NEWLINE account.save()NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE with self.assertRaises(ProtectedError):NEWLINE account.delete()NEWLINENEWLINE def test_locked(self):NEWLINE expected_lock = bool(random.getrandbits(1))NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.locked = expected_lockNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.full_clean()NEWLINE self.assertEqual(expected_lock, transaction_in_db.locked)NEWLINENEWLINE def test_locked_default_false(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE self.assertFalse(transaction_in_db.locked)NEWLINENEWLINE def test_locked_no_change_to_partner(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.locked = TrueNEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.partner = "b"NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_date(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.locked = TrueNEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.date = datetime.date.today() + datetime.timedelta(days=1)NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_value(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.locked = TrueNEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE 
transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.value = 1NEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_category(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.locked = TrueNEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE category = models.Category(name=_get_random_name())NEWLINE category.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.category = categoryNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction_in_db.full_clean()NEWLINENEWLINE def test_locked_no_change_to_account(self):NEWLINE transaction = models.Transaction()NEWLINE transaction.locked = TrueNEWLINE transaction.partner = "a"NEWLINE transaction.date = datetime.date.today()NEWLINE transaction.value = 0NEWLINE transaction.category = self.categoryNEWLINE transaction.account = self.accountNEWLINE transaction.full_clean()NEWLINE transaction.save()NEWLINENEWLINE account = models.Account(name=_get_random_name(), balance=0)NEWLINE account.save()NEWLINENEWLINE transaction_in_db = models.Transaction.objects.get(pk=transaction.pk)NEWLINENEWLINE transaction_in_db.account = accountNEWLINENEWLINE with self.assertRaises(ValidationError):NEWLINE transaction_in_db.full_clean()NEWLINENEWLINE @data_provider(lambda: (NEWLINE (datetime.date(1000, 12, 15), "1000-12-15"),NEWLINE (datetime.date(2020, 6, 1), "2020-06-01"),NEWLINE (datetime.date(1234, 1, 31), "1234-01-31"),NEWLINE (datetime.date(1988, 10, 27), "1988-10-27")NEWLINE ))NEWLINE def test_str(self, date, expected_date):NEWLINE partner = _get_random_name()NEWLINE transaction = models.Transaction(partner=partner, date=date, value=Decimal(0),NEWLINE category=self.category, account=self.account)NEWLINE expected_name = (NEWLINE f"[{expected_date}] {str(transaction.account)} -> "NEWLINE f"{str(transaction.partner)} ({str(transaction.category)})"NEWLINE )NEWLINENEWLINE self.assertEqual(expected_name, transaction.__str__())NEWLINENEWLINEdef _create_transaction(month, year, account=None, locked=False) -> models.Transaction:NEWLINE category = models.Category(name=_get_random_name())NEWLINE category.save()NEWLINE if account is None:NEWLINE account = models.Account(name=_get_random_name(), balance=Decimal(0))NEWLINE account.save()NEWLINENEWLINE transaction = models.Transaction()NEWLINE transaction.category = categoryNEWLINE transaction.account = accountNEWLINE transaction.value = Decimal(random.uniform(-999.99, 999.99)).quantize(Decimal(".01"))NEWLINE transaction.partner = "Test partner"NEWLINE transaction.locked = lockedNEWLINE transaction.date = _random_day_in_month(month, year)NEWLINENEWLINE transaction.save()NEWLINE return transactionNEWLINENEWLINEdef _create_sheet_entry(sheet) -> models.SheetEntry:NEWLINE category = models.Category(name=_get_random_name())NEWLINE category.save()NEWLINENEWLINE entry = models.SheetEntry()NEWLINE entry.sheet = sheetNEWLINE entry.value = Decimal(random.uniform(-999.99, 999.99)).quantize(Decimal(".01"))NEWLINE entry.category = categoryNEWLINE entry.save()NEWLINENEWLINE return 
entryNEWLINENEWLINEdef _random_day_in_month(month, year):NEWLINE    dates = calendar.Calendar().itermonthdates(year, month)NEWLINE    return random.choice([date for date in dates if date.month == month])NEWLINENEWLINEdef _create_sheet(month, year) -> models.Sheet:NEWLINE    sheet = models.Sheet(month=month, year=year)NEWLINE    sheet.save()NEWLINENEWLINE    for _ in range(10):NEWLINE        _create_transaction(month, year)NEWLINE        _create_sheet_entry(sheet)NEWLINENEWLINE    return sheetNEWLINENEWLINEdef _create_category() -> models.Category:NEWLINE    category = models.Category(name=_get_random_name())NEWLINE    category.save()NEWLINE    return categoryNEWLINENEWLINEdef _get_random_name() -> str:NEWLINE    return "".join(random.choice(string.ascii_letters) for _ in range(10))NEWLINE |
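A short, hedged illustration (hypothetical, not part of the test module above) of why _random_day_in_month filters on date.month: calendar.Calendar().itermonthdates() always yields complete weeks, so days from the neighbouring months spill in and must be dropped.

# Hypothetical standalone check, not part of the test suite above.
import calendar
import random

cal_dates = list(calendar.Calendar().itermonthdates(2020, 2))
print(cal_dates[0], cal_dates[-1])  # 2020-01-27 2020-03-01: adjacent months spill in
print(random.choice([d for d in cal_dates if d.month == 2]))  # always inside February 2020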
# import numpyNEWLINEimport numpy as npNEWLINENEWLINE# importing qiskitNEWLINEimport qiskitNEWLINEfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegisterNEWLINEfrom qiskit.compiler import schedule, transpileNEWLINENEWLINEfrom qiskit.test.mock.backends.almaden import FakeAlmadenNEWLINEbackend = FakeAlmaden()NEWLINENEWLINEfrom qiskit.pulse.instructions.play import PlayNEWLINENEWLINE# importing audio utilsNEWLINEfrom scipy.io.wavfile import writeNEWLINENEWLINE# CONSTANTSNEWLINEsampling_rate = 44100 * 3NEWLINENEWLINEdef test_qiskit(): return qiskit.__qiskit_version__NEWLINENEWLINEdef char_to_qc(char_str):NEWLINE    char_bin = '0'+' '.join(format(ord(x), 'b') for x in char_str)NEWLINENEWLINE    char = QuantumRegister(8, name='char')NEWLINE    output = QuantumRegister(1, name='output')NEWLINE    meas = ClassicalRegister(8, name='meas')NEWLINE    char_qc = QuantumCircuit(char, output, meas)NEWLINENEWLINE    char_qc.h(char[:])NEWLINE    char_qc.h(output)NEWLINE    char_qc.z(output)NEWLINE    char_qc.barrier()NEWLINENEWLINE    for i, bit in enumerate(char_bin):NEWLINE        if int(bit): char_qc.cx(char[i], output[0])NEWLINE    char_qc.barrier()NEWLINENEWLINE    char_qc.h(char[:])NEWLINE    char_qc.barrier()NEWLINENEWLINE    return char_qc.reverse_bits()NEWLINENEWLINEdef pulse_schedule_to_complex_waveform(pulse_schedule):NEWLINE    instructions = pulse_schedule.instructionsNEWLINE    waveform = [instruction[1].pulse.samples for instruction in instructions if type(instruction[1]) == Play]NEWLINE    waveform = np.concatenate(waveform).ravel()NEWLINE    return waveformNEWLINENEWLINEdef complex_waveform_to_amplitude_waveform(waveform): return np.asarray([np.absolute(z) for z in waveform])NEWLINENEWLINEdef get_audio_waveform(string):NEWLINE    words = string.split(" ")NEWLINE    audio_waveform = np.array([])NEWLINE    for word in words:NEWLINE        word_waveforms = [ pulse_schedule_to_complex_waveform(schedule(transpile(char_to_qc(char), backend), backend)) for char in word ]NEWLINE        waveform_size = max([waveform.size for waveform in word_waveforms])NEWLINE        word_waveform = np.zeros(waveform_size)NEWLINE        for waveform in word_waveforms: NEWLINE            word_waveform = word_waveform + np.pad(waveform, (0, waveform_size - waveform.size), mode='constant')NEWLINE        # append the amplitude envelope of the summed word waveformNEWLINE        audio_waveform = np.concatenate((audio_waveform, complex_waveform_to_amplitude_waveform(word_waveform)))NEWLINE    return audio_waveformNEWLINENEWLINEdef generate_wav(string):NEWLINE    data = get_audio_waveform(string)NEWLINE    scaled = np.int16(data/np.max(np.abs(data)) * 32767)NEWLINE    write('/tmp/output.wav', sampling_rate, scaled) |
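A minimal, hedged usage sketch for the script above. It assumes a Qiskit Terra release where qiskit.test.mock still ships FakeAlmaden with pulse defaults, and that the functions above are in scope; the output path and sampling_rate come from the module itself.

# Hypothetical driver: sonify a short string through its pulse schedules.
if __name__ == '__main__':
    print(test_qiskit())  # report the installed Qiskit component versions
    generate_wav('hi')    # writes /tmp/output.wav at the module's sampling_rate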
# coding=utf-8NEWLINE"""NEWLINEThis code was generated byNEWLINE\ / _ _ _| _ _NEWLINE | (_)\/(_)(_|\/| |(/_ v1.0.0NEWLINE / /NEWLINE"""NEWLINENEWLINEfrom twilio.base import deserializeNEWLINEfrom twilio.base import valuesNEWLINEfrom twilio.base.instance_context import InstanceContextNEWLINEfrom twilio.base.instance_resource import InstanceResourceNEWLINEfrom twilio.base.list_resource import ListResourceNEWLINEfrom twilio.base.page import PageNEWLINENEWLINENEWLINEclass KeyList(ListResource):NEWLINE """ """NEWLINENEWLINE def __init__(self, version, account_sid):NEWLINE """NEWLINE Initialize the KeyListNEWLINENEWLINE :param Version version: Version that contains the resourceNEWLINE :param account_sid: A 34 character string that uniquely identifies this resource.NEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyListNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyListNEWLINE """NEWLINE super(KeyList, self).__init__(version)NEWLINENEWLINE # Path SolutionNEWLINE self._solution = {'account_sid': account_sid, }NEWLINE self._uri = '/Accounts/{account_sid}/Keys.json'.format(**self._solution)NEWLINENEWLINE def stream(self, limit=None, page_size=None):NEWLINE """NEWLINE Streams KeyInstance records from the API as a generator stream.NEWLINE This operation lazily loads records as efficiently as possible until the limitNEWLINE is reached.NEWLINE The results are returned as a generator, so this operation is memory efficient.NEWLINENEWLINE :param int limit: Upper limit for the number of records to return. stream()NEWLINE guarantees to never return more than limit. Default is no limitNEWLINE :param int page_size: Number of records to fetch per request, when not set will useNEWLINE the default value of 50 records. If no page_size is definedNEWLINE but a limit is defined, stream() will attempt to read theNEWLINE limit with the most efficient page size, i.e. min(limit, 1000)NEWLINENEWLINE :returns: Generator that will yield up to limit resultsNEWLINE :rtype: list[twilio.rest.api.v2010.account.key.KeyInstance]NEWLINE """NEWLINE limits = self._version.read_limits(limit, page_size)NEWLINENEWLINE page = self.page(page_size=limits['page_size'], )NEWLINENEWLINE return self._version.stream(page, limits['limit'], limits['page_limit'])NEWLINENEWLINE def list(self, limit=None, page_size=None):NEWLINE """NEWLINE Lists KeyInstance records from the API as a list.NEWLINE Unlike stream(), this operation is eager and will load `limit` records intoNEWLINE memory before returning.NEWLINENEWLINE :param int limit: Upper limit for the number of records to return. list() guaranteesNEWLINE never to return more than limit. Default is no limitNEWLINE :param int page_size: Number of records to fetch per request, when not set will useNEWLINE the default value of 50 records. If no page_size is definedNEWLINE but a limit is defined, list() will attempt to read the limitNEWLINE with the most efficient page size, i.e. 
min(limit, 1000)NEWLINENEWLINE :returns: Generator that will yield up to limit resultsNEWLINE :rtype: list[twilio.rest.api.v2010.account.key.KeyInstance]NEWLINE """NEWLINE return list(self.stream(limit=limit, page_size=page_size, ))NEWLINENEWLINE def page(self, page_token=values.unset, page_number=values.unset,NEWLINE page_size=values.unset):NEWLINE """NEWLINE Retrieve a single page of KeyInstance records from the API.NEWLINE Request is executed immediatelyNEWLINENEWLINE :param str page_token: PageToken provided by the APINEWLINE :param int page_number: Page Number, this value is simply for client stateNEWLINE :param int page_size: Number of records to return, defaults to 50NEWLINENEWLINE :returns: Page of KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyPageNEWLINE """NEWLINE params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })NEWLINENEWLINE response = self._version.page(NEWLINE 'GET',NEWLINE self._uri,NEWLINE params=params,NEWLINE )NEWLINENEWLINE return KeyPage(self._version, response, self._solution)NEWLINENEWLINE def get_page(self, target_url):NEWLINE """NEWLINE Retrieve a specific page of KeyInstance records from the API.NEWLINE Request is executed immediatelyNEWLINENEWLINE :param str target_url: API-generated URL for the requested results pageNEWLINENEWLINE :returns: Page of KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyPageNEWLINE """NEWLINE response = self._version.domain.twilio.request(NEWLINE 'GET',NEWLINE target_url,NEWLINE )NEWLINENEWLINE return KeyPage(self._version, response, self._solution)NEWLINENEWLINE def get(self, sid):NEWLINE """NEWLINE Constructs a KeyContextNEWLINENEWLINE :param sid: The unique string that identifies the resourceNEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyContextNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyContextNEWLINE """NEWLINE return KeyContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )NEWLINENEWLINE def __call__(self, sid):NEWLINE """NEWLINE Constructs a KeyContextNEWLINENEWLINE :param sid: The unique string that identifies the resourceNEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyContextNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyContextNEWLINE """NEWLINE return KeyContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )NEWLINENEWLINE def __repr__(self):NEWLINE """NEWLINE Provide a friendly representationNEWLINENEWLINE :returns: Machine friendly representationNEWLINE :rtype: strNEWLINE """NEWLINE return '<Twilio.Api.V2010.KeyList>'NEWLINENEWLINENEWLINEclass KeyPage(Page):NEWLINE """ """NEWLINENEWLINE def __init__(self, version, response, solution):NEWLINE """NEWLINE Initialize the KeyPageNEWLINENEWLINE :param Version version: Version that contains the resourceNEWLINE :param Response response: Response from the APINEWLINE :param account_sid: A 34 character string that uniquely identifies this resource.NEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyPageNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyPageNEWLINE """NEWLINE super(KeyPage, self).__init__(version, response)NEWLINENEWLINE # Path SolutionNEWLINE self._solution = solutionNEWLINENEWLINE def get_instance(self, payload):NEWLINE """NEWLINE Build an instance of KeyInstanceNEWLINENEWLINE :param dict payload: Payload response from the APINEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE return 
KeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )NEWLINENEWLINE def __repr__(self):NEWLINE """NEWLINE Provide a friendly representationNEWLINENEWLINE :returns: Machine friendly representationNEWLINE :rtype: strNEWLINE """NEWLINE return '<Twilio.Api.V2010.KeyPage>'NEWLINENEWLINENEWLINEclass KeyContext(InstanceContext):NEWLINE """ """NEWLINENEWLINE def __init__(self, version, account_sid, sid):NEWLINE """NEWLINE Initialize the KeyContextNEWLINENEWLINE :param Version version: Version that contains the resourceNEWLINE :param account_sid: The SID of the Account that created the resource to fetchNEWLINE :param sid: The unique string that identifies the resourceNEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyContextNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyContextNEWLINE """NEWLINE super(KeyContext, self).__init__(version)NEWLINENEWLINE # Path SolutionNEWLINE self._solution = {'account_sid': account_sid, 'sid': sid, }NEWLINE self._uri = '/Accounts/{account_sid}/Keys/{sid}.json'.format(**self._solution)NEWLINENEWLINE def fetch(self):NEWLINE """NEWLINE Fetch a KeyInstanceNEWLINENEWLINE :returns: Fetched KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE params = values.of({})NEWLINENEWLINE payload = self._version.fetch(NEWLINE 'GET',NEWLINE self._uri,NEWLINE params=params,NEWLINE )NEWLINENEWLINE return KeyInstance(NEWLINE self._version,NEWLINE payload,NEWLINE account_sid=self._solution['account_sid'],NEWLINE sid=self._solution['sid'],NEWLINE )NEWLINENEWLINE def update(self, friendly_name=values.unset):NEWLINE """NEWLINE Update the KeyInstanceNEWLINENEWLINE :param unicode friendly_name: A string to describe the resourceNEWLINENEWLINE :returns: Updated KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE data = values.of({'FriendlyName': friendly_name, })NEWLINENEWLINE payload = self._version.update(NEWLINE 'POST',NEWLINE self._uri,NEWLINE data=data,NEWLINE )NEWLINENEWLINE return KeyInstance(NEWLINE self._version,NEWLINE payload,NEWLINE account_sid=self._solution['account_sid'],NEWLINE sid=self._solution['sid'],NEWLINE )NEWLINENEWLINE def delete(self):NEWLINE """NEWLINE Deletes the KeyInstanceNEWLINENEWLINE :returns: True if delete succeeds, False otherwiseNEWLINE :rtype: boolNEWLINE """NEWLINE return self._version.delete('delete', self._uri)NEWLINENEWLINE def __repr__(self):NEWLINE """NEWLINE Provide a friendly representationNEWLINENEWLINE :returns: Machine friendly representationNEWLINE :rtype: strNEWLINE """NEWLINE context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())NEWLINE return '<Twilio.Api.V2010.KeyContext {}>'.format(context)NEWLINENEWLINENEWLINEclass KeyInstance(InstanceResource):NEWLINE """ """NEWLINENEWLINE def __init__(self, version, payload, account_sid, sid=None):NEWLINE """NEWLINE Initialize the KeyInstanceNEWLINENEWLINE :returns: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE super(KeyInstance, self).__init__(version)NEWLINENEWLINE # Marshaled PropertiesNEWLINE self._properties = {NEWLINE 'sid': payload['sid'],NEWLINE 'friendly_name': payload['friendly_name'],NEWLINE 'date_created': deserialize.rfc2822_datetime(payload['date_created']),NEWLINE 'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),NEWLINE }NEWLINENEWLINE # ContextNEWLINE self._context = NoneNEWLINE self._solution = {'account_sid': account_sid, 'sid': sid or 
self._properties['sid'], }NEWLINENEWLINE @propertyNEWLINE def _proxy(self):NEWLINE """NEWLINE Generate an instance context for the instance, the context is capable ofNEWLINE performing various actions. All instance actions are proxied to the contextNEWLINENEWLINE :returns: KeyContext for this KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyContextNEWLINE """NEWLINE if self._context is None:NEWLINE self._context = KeyContext(NEWLINE self._version,NEWLINE account_sid=self._solution['account_sid'],NEWLINE sid=self._solution['sid'],NEWLINE )NEWLINE return self._contextNEWLINENEWLINE @propertyNEWLINE def sid(self):NEWLINE """NEWLINE :returns: The unique string that identifies the resourceNEWLINE :rtype: unicodeNEWLINE """NEWLINE return self._properties['sid']NEWLINENEWLINE @propertyNEWLINE def friendly_name(self):NEWLINE """NEWLINE :returns: The string that you assigned to describe the resourceNEWLINE :rtype: unicodeNEWLINE """NEWLINE return self._properties['friendly_name']NEWLINENEWLINE @propertyNEWLINE def date_created(self):NEWLINE """NEWLINE :returns: The RFC 2822 date and time in GMT that the resource was createdNEWLINE :rtype: datetimeNEWLINE """NEWLINE return self._properties['date_created']NEWLINENEWLINE @propertyNEWLINE def date_updated(self):NEWLINE """NEWLINE :returns: The RFC 2822 date and time in GMT that the resource was last updatedNEWLINE :rtype: datetimeNEWLINE """NEWLINE return self._properties['date_updated']NEWLINENEWLINE def fetch(self):NEWLINE """NEWLINE Fetch a KeyInstanceNEWLINENEWLINE :returns: Fetched KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE return self._proxy.fetch()NEWLINENEWLINE def update(self, friendly_name=values.unset):NEWLINE """NEWLINE Update the KeyInstanceNEWLINENEWLINE :param unicode friendly_name: A string to describe the resourceNEWLINENEWLINE :returns: Updated KeyInstanceNEWLINE :rtype: twilio.rest.api.v2010.account.key.KeyInstanceNEWLINE """NEWLINE return self._proxy.update(friendly_name=friendly_name, )NEWLINENEWLINE def delete(self):NEWLINE """NEWLINE Deletes the KeyInstanceNEWLINENEWLINE :returns: True if delete succeeds, False otherwiseNEWLINE :rtype: boolNEWLINE """NEWLINE return self._proxy.delete()NEWLINENEWLINE def __repr__(self):NEWLINE """NEWLINE Provide a friendly representationNEWLINENEWLINE :returns: Machine friendly representationNEWLINE :rtype: strNEWLINE """NEWLINE context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())NEWLINE return '<Twilio.Api.V2010.KeyInstance {}>'.format(context)NEWLINE |
#!/usr/bin/env pythonNEWLINE# -*- coding: utf-8 -*-NEWLINE#NEWLINE# SigPro documentation build configuration file, created byNEWLINE# sphinx-quickstart on Fri Jun 9 13:47:02 2017.NEWLINE#NEWLINE# This file is execfile()d with the current directory set to itsNEWLINE# containing dir.NEWLINE#NEWLINE# Note that not all possible configuration values are present in thisNEWLINE# autogenerated file.NEWLINE#NEWLINE# All configuration values have a default; values that are commented outNEWLINE# serve to show the default.NEWLINENEWLINE# If extensions (or modules to document with autodoc) are in anotherNEWLINE# directory, add these directories to sys.path here. If the directory isNEWLINE# relative to the documentation root, use os.path.abspath to make itNEWLINE# absolute, like shown here.NEWLINENEWLINEimport sphinx_rtd_theme # For read the docs themeNEWLINENEWLINEimport sigproNEWLINENEWLINE# -- General configuration ---------------------------------------------NEWLINENEWLINE# If your documentation needs a minimal Sphinx version, state it here.NEWLINE#NEWLINE# needs_sphinx = '1.0'NEWLINENEWLINE# Add any Sphinx extension module names here, as strings. They can beNEWLINE# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.NEWLINEextensions = [NEWLINE    'm2r',NEWLINE    'sphinx.ext.autodoc',NEWLINE    'sphinx.ext.githubpages',NEWLINE    'sphinx.ext.viewcode',NEWLINE    'sphinx.ext.napoleon',NEWLINE    'autodocsumm',NEWLINE]NEWLINENEWLINEautodoc_default_options = {NEWLINE    'autosummary': True,NEWLINE}NEWLINENEWLINE# Add any paths that contain templates here, relative to this directory.NEWLINEtemplates_path = ['_templates']NEWLINENEWLINE# The suffix(es) of source filenames.NEWLINE# You can specify multiple suffix as a list of string:NEWLINEsource_suffix = ['.rst', '.md']NEWLINENEWLINE# The master toctree document.NEWLINEmaster_doc = 'index'NEWLINENEWLINE# General information about the project.NEWLINEproject = 'SigPro'NEWLINEslug = 'sigpro'NEWLINEtitle = project + ' Documentation'NEWLINEcopyright = '2020, MIT Data To AI Lab'NEWLINEauthor = 'MIT Data To AI Lab'NEWLINEdescription = 'Signal Processing Tools for Machine Learning'NEWLINEuser = 'sintel-dev'NEWLINENEWLINE# The version info for the project you're documenting, acts as replacementNEWLINE# for |version| and |release|, also used in various other places throughoutNEWLINE# the built documents.NEWLINE#NEWLINE# The short X.Y version.NEWLINEversion = sigpro.__version__NEWLINE# The full version, including alpha/beta/rc tags.NEWLINErelease = sigpro.__version__NEWLINENEWLINE# The language for content autogenerated by Sphinx. Refer to documentationNEWLINE# for a list of supported languages.NEWLINE#NEWLINE# This is also used if you do content translation via gettext catalogs.NEWLINE# Usually you set "language" from the command line for these cases.NEWLINElanguage = NoneNEWLINENEWLINE# List of patterns, relative to source directory, that match files andNEWLINE# directories to ignore when looking for source files.NEWLINE# This patterns also effect to html_static_path and html_extra_pathNEWLINEexclude_patterns = ['.py', '_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']NEWLINENEWLINE# The name of the Pygments (syntax highlighting) style to use.NEWLINEpygments_style = 'sphinx'NEWLINENEWLINE# If true, `todo` and `todoList` produce output, else they produce nothing.NEWLINEtodo_include_todos = FalseNEWLINENEWLINE# -- Options for HTML output -------------------------------------------NEWLINENEWLINE# The theme to use for HTML and HTML Help pages. 
See the documentation forNEWLINE# a list of builtin themes.NEWLINE#NEWLINEhtml_theme = 'sphinx_rtd_theme'NEWLINEhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]NEWLINENEWLINE# Readthedocs additionsNEWLINEhtml_context = {NEWLINE 'display_github': True,NEWLINE 'github_user': user,NEWLINE 'github_repo': project,NEWLINE 'github_version': 'master',NEWLINE 'conf_py_path': '/docs/',NEWLINE}NEWLINENEWLINE# Theme options are theme-specific and customize the look and feel of aNEWLINE# theme further. For a list of options available for each theme, see theNEWLINE# documentation.NEWLINEhtml_theme_options = {NEWLINE 'collapse_navigation': False,NEWLINE 'display_version': False,NEWLINE}NEWLINENEWLINE# Add any paths that contain custom static files (such as style sheets) here,NEWLINE# relative to this directory. They are copied after the builtin static files,NEWLINE# so a file named "default.css" will overwrite the builtin "default.css".NEWLINE# html_static_path = ['_static']NEWLINENEWLINE# The name of an image file (relative to this directory) to use as a favicon ofNEWLINE# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32NEWLINE# pixels large.NEWLINEhtml_favicon = 'images/dai-logo-white.ico'NEWLINENEWLINE# If given, this must be the name of an image file (path relative to theNEWLINE# configuration directory) that is the logo of the docs. It is placed atNEWLINE# the top of the sidebar; its width should therefore not exceed 200 pixels.NEWLINEhtml_logo = 'images/dai-logo-white-200.png'NEWLINENEWLINE# -- Options for HTMLHelp output ---------------------------------------NEWLINENEWLINE# Output file base name for HTML help builder.NEWLINEhtmlhelp_basename = slug + 'doc'NEWLINENEWLINENEWLINE# -- Options for LaTeX output ------------------------------------------NEWLINENEWLINElatex_elements = {NEWLINE # The paper size ('letterpaper' or 'a4paper').NEWLINE #NEWLINE # 'papersize': 'letterpaper',NEWLINENEWLINE # The font size ('10pt', '11pt' or '12pt').NEWLINE #NEWLINE # 'pointsize': '10pt',NEWLINENEWLINE # Additional stuff for the LaTeX preamble.NEWLINE #NEWLINE # 'preamble': '',NEWLINENEWLINE # Latex figure (float) alignmentNEWLINE #NEWLINE # 'figure_align': 'htbp',NEWLINE}NEWLINENEWLINE# Grouping the document tree into LaTeX files. List of tuplesNEWLINE# (source start file, target name, title, author, documentclassNEWLINE# [howto, manual, or own class]).NEWLINElatex_documents = [(NEWLINE master_doc,NEWLINE slug + '.tex',NEWLINE title,NEWLINE author,NEWLINE 'manual'NEWLINE)]NEWLINENEWLINENEWLINE# -- Options for manual page output ------------------------------------NEWLINENEWLINE# One entry per manual page. List of tuplesNEWLINE# (source start file, name, description, authors, manual section).NEWLINEman_pages = [(NEWLINE master_doc,NEWLINE slug,NEWLINE title,NEWLINE [author],NEWLINE 1NEWLINE)]NEWLINENEWLINENEWLINE# -- Options for Texinfo output ----------------------------------------NEWLINENEWLINE# Grouping the document tree into Texinfo files. List of tuplesNEWLINE# (source start file, target name, title, author,NEWLINE# dir menu entry, description, category)NEWLINEtexinfo_documents = [(NEWLINE master_doc,NEWLINE slug,NEWLINE title,NEWLINE author,NEWLINE slug,NEWLINE description,NEWLINE 'Miscellaneous'NEWLINE)]NEWLINE |
# -*- coding: utf-8 -*-NEWLINE# Generated by Django 1.9.7 on 2016-07-18 04:20NEWLINEfrom __future__ import unicode_literalsNEWLINENEWLINEfrom django.db import migrations, modelsNEWLINENEWLINENEWLINEclass Migration(migrations.Migration):NEWLINENEWLINE dependencies = [NEWLINE ('wl_main', '0004_wildlifelicencetype_application_schema'),NEWLINE ]NEWLINENEWLINE operations = [NEWLINE migrations.AddField(NEWLINE model_name='wildlifelicence',NEWLINE name='locations',NEWLINE field=models.TextField(blank=True),NEWLINE ),NEWLINE ]NEWLINE |
import timeNEWLINEimport mrcfileNEWLINEimport argparseNEWLINEimport numpy as npNEWLINEimport multiprocessingNEWLINEfrom scipy import ndimage as ndiNEWLINEfrom scipy.stats import wasserstein_distanceNEWLINEfrom skimage import transform, measureNEWLINENEWLINESHIFT = ['Euclidean', 'L1', 'cosine'] # Metrics requiring real space translationNEWLINENEWLINEdef main():NEWLINE """calculates similarity between line projections from 2D class averages"""NEWLINE NEWLINE parser = argparse.ArgumentParser(description='compare similarity of 2D class averages based on common lines')NEWLINE NEWLINE parser.add_argument('-i', '--input', action='store', dest='mrc_input', required=True,NEWLINE help='path to mrcs file of 2D class averages')NEWLINE NEWLINE parser.add_argument('-o', '--outpath', action='store', dest='outpath', required=True,NEWLINE help='path for output files')NEWLINE NEWLINE parser.add_argument('-m', '--metric', action='store', dest='metric', required=False, NEWLINE default='Euclidean', choices=['Euclidean', 'L1', 'cosine', 'EMD', 'correlate'],NEWLINE help='choose scoring method, default Euclidean')NEWLINE NEWLINE parser.add_argument('-s', '--scale_factor', action='store', dest='scale_factor', required=False, type=float, default=1,NEWLINE help='scale factor for downsampling. (e.g. -s 2 converts 200pix box --> 100pix box)')NEWLINE NEWLINE parser.add_argument('-c', '--num_workers', action='store', dest='num_workers', required=False, type=int, default=1,NEWLINE help='number of CPUs to use, default 1')NEWLINE NEWLINE parser.add_argument('-d', '--domain', action='store', dest='domain', required=False, NEWLINE default='Fourier', choices=['Fourier', 'Real'], help='Fourier or Real space, default Fourier')NEWLINE NEWLINE parser.add_argument('-t', '--translate', action='store', dest='translate', required=False, NEWLINE default='full', choices=['full', 'valid'],NEWLINE help='indicate size of score vector, numpy convention, default full')NEWLINE NEWLINE parser.add_argument('-a', '--angular_sampling', action='store', dest='angular_sampling', required=False, NEWLINE type=int, default=5, help='angle sampling for 1D projections in degrees, default 5')NEWLINENEWLINE args = parser.parse_args()NEWLINENEWLINE if args.domain == 'Fourier':NEWLINE rotation_degrees = np.arange(0, 180, args.angular_sampling)NEWLINE else:NEWLINE rotation_degrees = np.arange(0, 360, args.angular_sampling)NEWLINE NEWLINE shape, projection_2D = get_projection_2D(mrcs=args.mrc_input, factor=args.scale_factor)NEWLINE NEWLINE num_class_avg = len(projection_2D)NEWLINE num_1D = num_class_avg*len(rotation_degrees)NEWLINE NEWLINE print("number of 2D class averages: {}".format(num_class_avg))NEWLINE print("number of 1D projection vectors: {}".format(num_1D))NEWLINE print("total number of pairwise scores: {}".format(int(num_1D*(num_1D-1)/2)))NEWLINENEWLINE if args.metric == 'Euclidean':NEWLINE pairwise_score = pairwise_l2NEWLINE elif args.metric == 'L1':NEWLINE pairwise_score = pairwise_l1NEWLINE elif args.metric == 'cosine':NEWLINE pairwise_score = pairwise_cosineNEWLINE elif args.metric == 'EMD':NEWLINE pairwise_score = pairwise_wassersteinNEWLINE elif args.metric == 'correlate':NEWLINE pairwise_score = pairwise_correlateNEWLINE NEWLINE if args.metric in SHIFT:NEWLINE wrapper_function = wrapper_slide_functionNEWLINE else:NEWLINE wrapper_function = wrapper_single_functionNEWLINE NEWLINE final_scores = {}NEWLINE NEWLINE with multiprocessing.Pool(args.num_workers) as pool:NEWLINE for i in range(num_class_avg-1):NEWLINE line_projections_1 = vectorize(i, 
projection_2D[i], rotation_degrees, shape, args.domain)NEWLINE for j in range(i+1, num_class_avg):NEWLINE line_projections_2 = vectorize(j, projection_2D[j], rotation_degrees, shape, args.domain)NEWLINE NEWLINE projection_pairs = []NEWLINE for line_1 in line_projections_1.values():NEWLINE for line_2 in line_projections_2.values():NEWLINE projection_pairs.append((line_1, line_2))NEWLINE NEWLINE pair_scores = pool.starmap(NEWLINE wrapper_function, NEWLINE [(pair, pairwise_score, args.translate, args.domain) for pair in projection_pairs]NEWLINE )NEWLINENEWLINE optimum = min(pair_scores, key = lambda x: x[4])NEWLINENEWLINE avg_1, deg_1, avg_2, deg_2, score = [value for value in optimum]NEWLINENEWLINE final_scores[(avg_1, avg_2)] = (deg_1, deg_2, score)NEWLINE final_scores[(avg_2, avg_1)] = (deg_2, deg_1, score)NEWLINE NEWLINE write_scores(final_scores, outpath=args.outpath)NEWLINENEWLINE NEWLINEclass Projection:NEWLINE """for 1D projection vectors"""NEWLINE NEWLINE def __init__(self, NEWLINE class_avg,NEWLINE angle,NEWLINE vector): NEWLINENEWLINE self.class_avg = class_avgNEWLINE self.angle = angleNEWLINE self.vector = vectorNEWLINE NEWLINE def size(self):NEWLINE return len(self.vector)NEWLINENEWLINE NEWLINEdef get_projection_2D(mrcs, factor):NEWLINE """read, scale and extract class averages"""NEWLINE NEWLINE projection_2D = {}NEWLINENEWLINE with mrcfile.open(mrcs) as mrc:NEWLINE for i, data in enumerate(mrc.data):NEWLINE projection_2D[i] = dataNEWLINE mrc.close()NEWLINENEWLINE shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0] NEWLINE NEWLINE for k, avg in projection_2D.items():NEWLINE if factor == 1:NEWLINE projection_2D[k] = extract_class_avg(avg.copy())NEWLINE else:NEWLINE scaled_img = transform.rescale(NEWLINE avg, NEWLINE scale=(1/factor), NEWLINE anti_aliasing=True, NEWLINE multichannel=False, # Add to supress warningNEWLINE mode='constant' # Add to supress warningNEWLINE ) NEWLINE projection_2D[k] = extract_class_avg(scaled_img)NEWLINE NEWLINE return shape, projection_2DNEWLINENEWLINENEWLINEdef extract_class_avg(avg):NEWLINE """fit in minimal bounding box"""NEWLINE NEWLINE image = avg.copy()NEWLINE image[image < 0] = 0NEWLINENEWLINE struct = np.ones((2, 2), dtype=bool)NEWLINE dilate = ndi.binary_dilation(image, struct)NEWLINENEWLINE labeled = measure.label(dilate, connectivity=2)NEWLINE rprops = measure.regionprops(labeled, image, cache=False)NEWLINENEWLINE if len(rprops) == 1:NEWLINE select_region = 0NEWLINE NEWLINE else:NEWLINE img_y, img_x = image.shapeNEWLINENEWLINE if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central regionNEWLINE select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For indexNEWLINENEWLINE else:NEWLINE distances = [NEWLINE (i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid))) NEWLINE for i, r in enumerate(rprops)NEWLINE ]NEWLINENEWLINE select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region NEWLINENEWLINE y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]NEWLINENEWLINE return image[y_min:y_max, x_min:x_max]NEWLINENEWLINENEWLINEdef vectorize(key, image, rotation_degrees, shape, domain):NEWLINE """NEWLINE takes image and creates 1D projectionsNEWLINE similar to Radon transformNEWLINE """NEWLINE projection_1D = {}NEWLINE projection_1D_FT = {}NEWLINE NEWLINE for degree in rotation_degrees:NEWLINE proj_1D = transform.rotate(image, degree, resize=True).sum(axis=0).astype('float32')NEWLINE trim_1D = np.trim_zeros(proj_1D, trim='fb')NEWLINE NEWLINE pad_1D = 
np.pad(proj_1D, (0, shape-len(proj_1D))) # Pad to largest possible shape from 2D NEWLINE F = abs(np.fft.rfft(pad_1D))NEWLINE NEWLINE projection_1D[(key, degree)] = Projection(class_avg=key, angle=degree, vector=trim_1D)NEWLINE projection_1D_FT[(key, degree)] = Projection(class_avg=key, angle=degree, vector=F)NEWLINE NEWLINE if domain == 'Fourier':NEWLINE return projection_1D_FTNEWLINE else:NEWLINE return projection_1DNEWLINE NEWLINE NEWLINEdef pairwise_l2(a, b):NEWLINE return np.linalg.norm(a - b)NEWLINENEWLINENEWLINEdef pairwise_l1(a, b):NEWLINE return np.linalg.norm(a - b, 1)NEWLINENEWLINENEWLINEdef pairwise_cosine(a, b):NEWLINE return 1 - (np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))NEWLINENEWLINENEWLINEdef pairwise_correlate(a, b, translate):NEWLINE s = np.correlate(a, b, mode=translate)NEWLINE return 1 / (1 + np.amax(s)) # Convert to distanceNEWLINENEWLINENEWLINEdef pairwise_wasserstein(a, b, translate):NEWLINE return wasserstein_distance(a, b)NEWLINENEWLINENEWLINEdef slide_score(a, b, pairwise_score, translate, domain):NEWLINE """NEWLINE finds minimum pairwise score for translations of 1D projectionsNEWLINE a, b are instances of the Projection classNEWLINE 'valid' is elements without zero paddingNEWLINE 'full' is scores at all translationsNEWLINE """NEWLINE scores = []NEWLINE NEWLINE if domain == 'Fourier':NEWLINE scores.append(pairwise_score(a.vector[1:], b.vector[1:])) #Drop 0th seems to helpNEWLINE NEWLINE else:NEWLINE if a.size() > b.size(): NEWLINE l, s = a.vector, b.vectorNEWLINE else:NEWLINE l, s = b.vector, a.vectorNEWLINENEWLINE l_size, s_size = len(l), len(s)NEWLINENEWLINE if translate == 'valid':NEWLINE diff_of_len = abs(l_size - s_size)NEWLINENEWLINE if diff_of_len == 0:NEWLINE scores.append(pairwise_score(l, s)) NEWLINE else:NEWLINE pad_s = np.pad(s, pad_width=(diff_of_len, diff_of_len))NEWLINE for i in range(0, diff_of_len+1):NEWLINE shift_s = pad_s[i:i+l_size]NEWLINE scores.append(pairwise_score(l, shift_s))NEWLINENEWLINE elif translate == 'full':NEWLINE pad_l = np.pad(l, pad_width=(s_size-1, s_size-1))NEWLINE pad_s = np.pad(s, pad_width=(l_size+s_size-2, l_size+s_size-2))NEWLINENEWLINE for i in range(0, l_size+s_size-1):NEWLINE shift_s = pad_s[i:i+len(pad_l)]NEWLINE scores.append(pairwise_score(pad_l, shift_s))NEWLINE NEWLINE return min(scores)NEWLINENEWLINENEWLINEdef wrapper_slide_function(pair, pairwise, translate, domain):NEWLINE """NEWLINE pair is tuple from Projection class to be scoredNEWLINE pairwise is function to score vectores (e.g. 
Euclidean)NEWLINE """NEWLINE score = slide_score(pair[0], pair[1], pairwise, translate, domain)NEWLINE return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]NEWLINENEWLINENEWLINEdef wrapper_single_function(pair, pairwise, translate, domain):NEWLINE """same as above but for correlate and EMD"""NEWLINE score = pairwise(pair[0].vector[1:], pair[1].vector[1:], translate) # Skip 0th component NEWLINE return [pair[0].class_avg, pair[0].angle, pair[1].class_avg, pair[1].angle, score]NEWLINENEWLINE NEWLINEdef write_scores(final_scores, outpath):NEWLINE """NEWLINE tab separted file of final scoresNEWLINE load scores into the slicem guiNEWLINE """NEWLINE stamp = time.strftime('%Y%m%d_%H%M%S')NEWLINE NEWLINE header = ['projection_1', 'degree_1', 'projection_2', 'degree_2', 'score']NEWLINE NEWLINE with open(outpath+'/slicem_scores_{0}.txt'.format(stamp), 'w') as f:NEWLINE for h in header:NEWLINE f.write(h+'\t')NEWLINE f.write('\n')NEWLINE for p, v in final_scores.items():NEWLINE f.write(str(p[0])+'\t'+str(v[0])+'\t'+str(p[1])+'\t'+str(v[1])+'\t'+str(v[2])+'\n') NEWLINENEWLINE NEWLINEif __name__ == "__main__":NEWLINE starttime = time.time()NEWLINE main()NEWLINE print('Runtime: {} minutes'.format((time.time() - starttime)/60)) |
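A small, hedged illustration of the real-space sliding comparison implemented above. The two Projection vectors are made-up toy data rather than output of the script, and the call uses only names defined in this file.

# Toy data only: compare two short 1D projections at every relative shift and
# keep the smallest Euclidean distance (translate='full' in the Real domain).
import numpy as np

a = Projection(class_avg=0, angle=0, vector=np.array([1., 2., 3., 2., 1.]))
b = Projection(class_avg=1, angle=90, vector=np.array([2., 3., 2.]))

best = slide_score(a, b, pairwise_l2, translate='full', domain='Real')
print(best)  # minimum L2 distance over all translations of the shorter vector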
from typing import List, Dict, SetNEWLINEfrom itertools import chainNEWLINEimport reNEWLINEfrom collections import defaultdict, CounterNEWLINENEWLINENEWLINEclass BytePairEncoding(object):NEWLINE    """ Byte Pair Encoding classNEWLINE    We aren't going to use this class for encoding, because it is too slow.NEWLINE    We will use the SentencePiece library Google has made.NEWLINE    Thus, this class is just for special token index reference.NEWLINE    """NEWLINE    PAD_token = '<pad>'NEWLINE    PAD_token_idx = 0NEWLINE    UNK_token = '<unk>'NEWLINE    UNK_token_idx = 1NEWLINE    CLS_token = '<cls>'NEWLINE    CLS_token_idx = 2NEWLINE    SEP_token = '<sep>'NEWLINE    SEP_token_idx = 3NEWLINE    MSK_token = '<msk>'NEWLINE    MSK_token_idx = 4NEWLINENEWLINE    WORD_END = '_'NEWLINENEWLINE    def __init__(self, corpus: List[List[str]], max_vocab_size: int) -> None:NEWLINE        self.idx2word = build_bpe(corpus, max_vocab_size)NEWLINENEWLINE    def encode(self, sentence: List[str]) -> List[int]:NEWLINE        return encode(sentence, self.idx2word)NEWLINENEWLINE    def decoder(self, tokens: List[int]) -> List[str]:NEWLINE        return decode(tokens, self.idx2word)NEWLINENEWLINENEWLINEdef build_bpe(NEWLINE        corpus: List[str],NEWLINE        max_vocab_size: intNEWLINE) -> List[str]:NEWLINE    """ BPE Vocabulary BuilderNEWLINE    Implement vocabulary builder for byte pair encoding.NEWLINE    Please sort your idx2word by subword length in descending manner.NEWLINENEWLINE    Hint: Counter in the collections library would be helpfulNEWLINENEWLINE    Note: If you convert the sentence list to a word frequency dictionary,NEWLINE    building speed is enhanced significantly because duplicated words areNEWLINE    preprocessed togetherNEWLINENEWLINE    Arguments:NEWLINE    corpus -- List of words to build vocabNEWLINE    max_vocab_size -- The maximum size of vocabNEWLINENEWLINE    Return:NEWLINE    idx2word -- Subword listNEWLINE    """NEWLINE    # Special tokensNEWLINE    PAD = BytePairEncoding.PAD_token # Index of <PAD> must be 0NEWLINE    UNK = BytePairEncoding.UNK_token # Index of <UNK> must be 1NEWLINE    CLS = BytePairEncoding.CLS_token # Index of <CLS> must be 2NEWLINE    SEP = BytePairEncoding.SEP_token # Index of <SEP> must be 3NEWLINE    MSK = BytePairEncoding.MSK_token # Index of <MSK> must be 4NEWLINE    SPECIAL = [PAD, UNK, CLS, SEP, MSK]NEWLINENEWLINE    WORD_END = BytePairEncoding.WORD_END # Use this token as the end of a wordNEWLINE    # YOUR CODE HERENEWLINE    # 1. Initialize the symbol vocabulary from the character vocabulary and represent each word as a sequence of charactersNEWLINE    vocab = {" ".join(list(word) + [WORD_END]): ct for word, ct in Counter(corpus).items()}NEWLINE    chars = list(set([char for word in corpus for char in word]))NEWLINE    num_merges = max_vocab_size - len(SPECIAL) - 1 - len(chars)NEWLINE    # 2. Repeat the two steps below until the number of merge operations is reachedNEWLINE    for _ in range(num_merges):NEWLINE        # 2-a. Count symbol pairs. If there is no pair left to merge, stop the loop.NEWLINE        pairs = defaultdict(int)NEWLINE        for word, freq in vocab.items():NEWLINE            symbols = word.split()NEWLINE            for i in range(len(symbols)-1):NEWLINE                pairs[symbols[i],symbols[i+1]] += freqNEWLINE        if not pairs:NEWLINE            breakNEWLINE        # 2-b. Merge the most frequent pair and replace it with a new symbolNEWLINE        best = max(pairs, key=pairs.get)NEWLINE        new_vocab = {}NEWLINE        bigram = re.escape(' '.join(best))NEWLINE        p = re.compile(r'(?<!\S)' + bigram + r'(?!\S)')NEWLINE        for word in vocab:NEWLINE            w_out = p.sub(''.join(best), word)NEWLINE            new_vocab[w_out] = vocab[word]NEWLINE        vocab = new_vocabNEWLINE        chars.append(''.join(best))NEWLINE    idx2word = SPECIAL + sorted(chars, key=len, reverse=True) + [WORD_END]NEWLINE    return idx2wordNEWLINE |
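A quick, hedged sanity check of build_bpe on a made-up toy corpus. The exact merged subwords depend on how frequency ties are broken, so only the overall layout of the result is shown.

# Toy corpus, for illustration only.
corpus = ['low', 'low', 'lower', 'newest', 'newest', 'widest']
idx2word = build_bpe(corpus, max_vocab_size=30)
print(idx2word[:5])         # ['<pad>', '<unk>', '<cls>', '<sep>', '<msk>']
print(len(idx2word) <= 30)  # True: merging stops before exceeding max_vocab_size
print(idx2word[5:])         # learned subwords, longest first, ending with '_'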
from threading import EventNEWLINEimport osNEWLINEos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'NEWLINEimport pygameNEWLINENEWLINEexit = Event()NEWLINENEWLINENEWLINEdef main():NEWLINE print('chime.py - Play an alert every X seconds')NEWLINE try:NEWLINE interval = int(input('How many seconds between chimes? '))NEWLINE print('Press Ctrl+C to exit.')NEWLINE except ValueError as e:NEWLINE print(f'Could not interpret input: {e}')NEWLINE exit.set()NEWLINENEWLINE pygame.mixer.init()NEWLINE pygame.mixer.music.load('Maj5_ascending.mp3')NEWLINENEWLINE while not exit.is_set():NEWLINE pygame.mixer.music.play()NEWLINE exit.wait(interval)NEWLINE print('\nExiting.')NEWLINENEWLINENEWLINEdef quit(_signo, _frame):NEWLINE exit.set()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE import signalNEWLINENEWLINE for sig in ('TERM', 'HUP', 'INT'):NEWLINE signal.signal(getattr(signal, 'SIG' + sig), quit)NEWLINENEWLINE main()NEWLINE |
#NEWLINE# © Copyright 2020 Hewlett Packard Enterprise Development LPNEWLINE#NEWLINE# This file was auto-generated by the Python SDK generator; DO NOT EDIT.NEWLINE#NEWLINENEWLINEfrom ..resource import Resource, CollectionNEWLINEfrom ..exceptions import NimOSAPIOperationUnsupportedNEWLINENEWLINEclass Controller(Resource):NEWLINE """NEWLINE Controller is a redundant collection of hardware capable of running the array software.NEWLINENEWLINE Parameters:NEWLINE - id : Identifier of the controller.NEWLINE - name : Name of the controller.NEWLINE - array_name : Name of the array containing this controller.NEWLINE - array_id : Rest ID of the array containing this controller.NEWLINE - partial_response_ok : Indicate that it is ok to provide partially available response.NEWLINE - serial : Serial number for this controller.NEWLINE - hostname : Host name for the controller.NEWLINE - support_address : IP address used for support.NEWLINE - support_netmask : IP netmask used for support.NEWLINE - support_nic : Network card used for support.NEWLINE - power_status : Overall power supply status for the controller.NEWLINE - fan_status : Overall fan status for the controller.NEWLINE - temperature_status : Overall temperature status for the controller.NEWLINE - power_supplies : Status for each power supply in the controller.NEWLINE - fans : Status for each fan in the controller.NEWLINE - temperature_sensors : Status for temperature sensor in the controller.NEWLINE - partition_status : Status of the system's raid partitions.NEWLINE - ctrlr_side : Identifies which controller this is on its array.NEWLINE - state : Indicates whether this controller is active or not.NEWLINE - nvme_cards_enabled : Indicates if the NVMe accelerator card is enabled.NEWLINE - nvme_cards : List of NVMe accelerator cards.NEWLINE - asup_time : Time of the last autosupport by the controller.NEWLINE """NEWLINENEWLINE def create(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("create operation not supported")NEWLINENEWLINE def delete(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("delete operation not supported")NEWLINENEWLINE def update(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("update operation not supported")NEWLINENEWLINEclass ControllerList(Collection):NEWLINE resource = ControllerNEWLINE resource_type = "controllers"NEWLINENEWLINE def create(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("create operation not supported")NEWLINENEWLINE def delete(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("delete operation not supported")NEWLINENEWLINE def update(self, **kwargs):NEWLINE raise NimOSAPIOperationUnsupported("update operation not supported")NEWLINE |
"""NEWLINE Created by howie.hu at 2022-01-21.NEWLINE Description: 执行分发动作NEWLINE - 执行命令: PIPENV_DOTENV_LOCATION=./pro.env pipenv run python src/sender/action.pyNEWLINE Changelog: all notable changes to this file will be documentedNEWLINE"""NEWLINEimport timeNEWLINENEWLINEfrom src.config import ConfigNEWLINEfrom src.databases import MongodbManagerNEWLINEfrom src.sender.send_factory import send_factoryNEWLINEfrom src.utils.log import LOGGERNEWLINENEWLINENEWLINEdef send_doc(sender_conf: dict):NEWLINE """NEWLINE 对文章进行分发NEWLINE Args:NEWLINE sender_conf (dict): 分发配置NEWLINE """NEWLINE sender_list = sender_conf["sender_list"]NEWLINE query_days = sender_conf.get("query_days", 2)NEWLINE delta_time = sender_conf.get("delta_time", 3)NEWLINE skip_ads = sender_conf.get("skip_ads", False)NEWLINE if sender_list:NEWLINE # 是否启用分发器NEWLINE mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)NEWLINE coll = mongo_base.get_collection(coll_name="liuli_articles")NEWLINE cur_ts = int(time.time())NEWLINE filter_dict = {NEWLINE # 时间范围,除第一次外后面其实可以去掉NEWLINE "doc_ts": {"$gte": cur_ts - (query_days * 24 * 60 * 60), "$lte": cur_ts},NEWLINE }NEWLINE if skip_ads:NEWLINE filter_dict.update(NEWLINE {NEWLINE # 至少打上一个模型标签NEWLINE "cos_model": {"$exists": True},NEWLINE # 判定结果为非广告NEWLINE "cos_model.result": 1,NEWLINE }NEWLINE )NEWLINE # 查找所有可分发文章NEWLINE for each_data in coll.find(filter_dict):NEWLINE # 分别分发给各个目标NEWLINE for send_type in sender_list:NEWLINE # 暂时固定,测试NEWLINE init_config = sender_conf.get(f"{send_type}_init_config", {})NEWLINE cos_model_resp = each_data.get("cos_model", {})NEWLINE doc_cus_des = ""NEWLINE if cos_model_resp:NEWLINE # 经过模型判断NEWLINE if cos_model_resp["result"] == 1:NEWLINE # 广告标记NEWLINE doc_cus_des = f"👿广告[概率:{cos_model_resp['probability']}]"NEWLINE else:NEWLINE doc_cus_des = "🤓非广告"NEWLINENEWLINE each_data["doc_cus_des"] = doc_cus_desNEWLINE # 每次分发休眠一定时间NEWLINE time.sleep(delta_time)NEWLINE send_factory(NEWLINE send_type=send_type, init_config=init_config, send_data=each_dataNEWLINE )NEWLINE else:NEWLINE LOGGER.warn("未配置分发器!")NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE send_config = {NEWLINE "sender_list": ["wecom"],NEWLINE "query_days": 7,NEWLINE "skip_ads": False,NEWLINE "delta_time": 3,NEWLINE }NEWLINE send_doc(send_config)NEWLINE |
# --------------------------------------------------------------------------------------------NEWLINE# Copyright (c) Microsoft Corporation. All rights reserved.NEWLINE# Licensed under the MIT License. See License.txt in the project root for license information.NEWLINE# --------------------------------------------------------------------------------------------NEWLINENEWLINEimport osNEWLINEimport unittest # pylint: disable=unused-importNEWLINENEWLINEfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)NEWLINENEWLINENEWLINETEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))NEWLINENEWLINENEWLINEclass ContainerappComposePreviewTransportOverridesScenarioTest(ScenarioTest):NEWLINE @ResourceGroupPreparer(name_prefix='cli_test_containerapp_preview', location='eastus')NEWLINE def test_containerapp_compose_create_with_transport_arg(self, resource_group):NEWLINE compose_text = """NEWLINEservices:NEWLINE foo:NEWLINE image: mcr.microsoft.com/azuredocs/aks-helloworld:v1NEWLINE ports: 8080:80NEWLINE"""NEWLINE compose_file_name = f"{self._testMethodName}_compose.yml"NEWLINE docker_compose_file = open(compose_file_name, "w", encoding='utf-8')NEWLINE _ = docker_compose_file.write(compose_text)NEWLINE docker_compose_file.close()NEWLINENEWLINE self.kwargs.update({NEWLINE 'environment': self.create_random_name(prefix='containerapp-compose', length=24),NEWLINE 'workspace': self.create_random_name(prefix='containerapp-compose', length=24),NEWLINE 'compose': compose_file_name,NEWLINE 'transport': "foo=http2 bar=auto",NEWLINE 'second_transport': "baz=http",NEWLINE })NEWLINENEWLINE command_string = 'containerapp compose create'NEWLINE command_string += ' --compose-file-path {compose}'NEWLINE command_string += ' --resource-group {rg}'NEWLINE command_string += ' --environment {environment}'NEWLINE command_string += ' --logs-workspace {workspace}'NEWLINE command_string += ' --transport {transport}'NEWLINE command_string += ' --transport {second_transport}'NEWLINE self.cmd(command_string, checks=[NEWLINE self.check('[?name==`foo`].properties.configuration.ingress.transport', ["Http2"]),NEWLINE ])NEWLINENEWLINE if os.path.exists(compose_file_name):NEWLINE os.remove(compose_file_name)NEWLINE |
"""Tests for TypeVar."""NEWLINENEWLINEfrom pytype import file_utilsNEWLINEfrom pytype.tests import test_baseNEWLINENEWLINENEWLINEclass TypeVarTest(test_base.TargetPython3BasicTest):NEWLINE """Tests for TypeVar."""NEWLINENEWLINE def test_id(self):NEWLINE ty = self.Infer("""NEWLINE import typingNEWLINE T = typing.TypeVar("T")NEWLINE def f(x: T) -> T:NEWLINE return __any_object__NEWLINE v = f(42)NEWLINE w = f("")NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import AnyNEWLINE typing = ... # type: moduleNEWLINE T = TypeVar("T")NEWLINE def f(x: T) -> T: ...NEWLINE v = ... # type: intNEWLINE w = ... # type: strNEWLINE """)NEWLINE self.assertTrue(ty.Lookup("f").signatures[0].template)NEWLINENEWLINE def test_extract_item(self):NEWLINE ty = self.Infer("""NEWLINE from typing import List, TypeVarNEWLINE S = TypeVar("S") # unusedNEWLINE T = TypeVar("T")NEWLINE def f(x: List[T]) -> T:NEWLINE return __any_object__NEWLINE v = f(["hello world"])NEWLINE w = f([True])NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE S = TypeVar("S")NEWLINE T = TypeVar("T")NEWLINE def f(x: typing.List[T]) -> T: ...NEWLINE v = ... # type: strNEWLINE w = ... # type: boolNEWLINE """)NEWLINE self.assertTrue(ty.Lookup("f").signatures[0].template)NEWLINENEWLINE def test_wrap_item(self):NEWLINE ty = self.Infer("""NEWLINE from typing import List, TypeVarNEWLINE T = TypeVar("T")NEWLINE def f(x: T) -> List[T]:NEWLINE return __any_object__NEWLINE v = f(True)NEWLINE w = f(3.14)NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE T = TypeVar("T")NEWLINE def f(x: T) -> typing.List[T]: ...NEWLINE v = ... # type: typing.List[bool]NEWLINE w = ... # type: typing.List[float]NEWLINE """)NEWLINENEWLINE def test_import_typevar_name_change(self):NEWLINE with file_utils.Tempdir() as d:NEWLINE d.create_file("a.pyi", """NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T")NEWLINE X = TypeVar("X")NEWLINE """)NEWLINE _, errors = self.InferWithErrors("""NEWLINE # This is illegal: A TypeVar("T") needs to be stored under the name "T".NEWLINE from a import T as T2 # invalid-typevar[e1]NEWLINE from a import XNEWLINE Y = X # invalid-typevar[e2]NEWLINE def f(x: T2) -> T2: ...NEWLINE """, pythonpath=[d.path])NEWLINE self.assertErrorRegexes(errors, {"e1": r"T.*T2", "e2": r"X.*Y"})NEWLINENEWLINE def test_multiple_substitution(self):NEWLINE ty = self.Infer("""NEWLINE from typing import Dict, Tuple, TypeVarNEWLINE K = TypeVar("K")NEWLINE V = TypeVar("V")NEWLINE def f(x: Dict[K, V]) -> Tuple[V, K]:NEWLINE return __any_object__NEWLINE v = f({})NEWLINE w = f({"test": 42})NEWLINE """, deep=False)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import Any, Dict, Tuple, TypeVarNEWLINE K = TypeVar("K")NEWLINE V = TypeVar("V")NEWLINE def f(x: Dict[K, V]) -> Tuple[V, K]: ...NEWLINE v = ... # type: Tuple[Any, Any]NEWLINE w = ... # type: Tuple[int, str]NEWLINE """)NEWLINENEWLINE def test_union(self):NEWLINE ty = self.Infer("""NEWLINE from typing import TypeVar, UnionNEWLINE S = TypeVar("S")NEWLINE T = TypeVar("T")NEWLINE def f(x: S, y: T) -> Union[S, T]:NEWLINE return __any_object__NEWLINE v = f("", 42)NEWLINE w = f(3.14, False)NEWLINE """, deep=False)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import TypeVar, UnionNEWLINE S = TypeVar("S")NEWLINE T = TypeVar("T")NEWLINE def f(x: S, y: T) -> Union[S, T]: ...NEWLINE v = ... # type: Union[str, int]NEWLINE w = ... 
# type: Union[float, bool]NEWLINE """)NEWLINENEWLINE def test_bad_substitution(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import List, TypeVarNEWLINE S = TypeVar("S")NEWLINE T = TypeVar("T")NEWLINE def f1(x: S) -> List[S]:NEWLINE return {x} # bad-return-type[e1]NEWLINE def f2(x: S) -> S:NEWLINE return 42 # no error because never calledNEWLINE def f3(x: S) -> S:NEWLINE return 42 # bad-return-type[e2] # bad-return-type[e3]NEWLINE def f4(x: S, y: T, z: T) -> List[S]:NEWLINE return [y] # bad-return-type[e4]NEWLINE f3("")NEWLINE f3(16) # okNEWLINE f3(False)NEWLINE f4(True, 3.14, 0)NEWLINE f4("hello", "world", "domination") # okNEWLINE """)NEWLINE self.assertErrorRegexes(errors, {NEWLINE "e1": r"List\[S\].*set", "e2": r"str.*int", "e3": r"bool.*int",NEWLINE "e4": r"List\[bool\].*List\[Union\[float, int\]\]"})NEWLINENEWLINE def test_use_constraints(self):NEWLINE ty, errors = self.InferWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE def f(x: T) -> T:NEWLINE return __any_object__NEWLINE v = f("") # wrong-arg-types[e]NEWLINE w = f(True) # okNEWLINE u = f(__any_object__) # okNEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import Any, TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE def f(x: T) -> T: ...NEWLINE v = ... # type: AnyNEWLINE w = ... # type: boolNEWLINE u = ... # type: int or floatNEWLINE """)NEWLINE self.assertErrorRegexes(errors, {"e": r"Union\[float, int\].*str"})NEWLINENEWLINE def test_type_parameter_type(self):NEWLINE ty = self.Infer("""NEWLINE from typing import Type, TypeVarNEWLINE T = TypeVar("T")NEWLINE def f(x: Type[T]) -> T:NEWLINE return __any_object__NEWLINE v = f(int)NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import Type, TypeVarNEWLINE T = TypeVar("T")NEWLINE def f(x: Type[T]) -> T: ...NEWLINE v = ... 
# type: intNEWLINE """)NEWLINENEWLINE def test_type_parameter_type_error(self):NEWLINE errors = self.CheckWithErrors("""NEWLINE from typing import Sequence, Type, TypeVarNEWLINE T = TypeVar('T')NEWLINE def f(x: int):NEWLINE passNEWLINE def g(x: Type[Sequence[T]]) -> T:NEWLINE print(f(x)) # wrong-arg-types[e]NEWLINE return x()[0]NEWLINE """)NEWLINE self.assertErrorRegexes(NEWLINE errors, {"e": r"Expected.*int.*Actual.*Type\[Sequence\]"})NEWLINENEWLINE def test_print_nested_type_parameter(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import List, TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE def f(x: List[T]): ...NEWLINE f([""]) # wrong-arg-types[e]NEWLINE """)NEWLINE self.assertErrorRegexes(errors, {NEWLINE "e": r"List\[Union\[float, int\]\].*List\[str\]"})NEWLINENEWLINE def test_constraint_subtyping(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE def f(x: T, y: T): ...NEWLINE f(True, False) # okNEWLINE f(True, 42) # wrong-arg-types[e]NEWLINE """)NEWLINE self.assertErrorRegexes(errors, {"e": r"Expected.*y: bool.*Actual.*y: int"})NEWLINENEWLINE def test_filter_value(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T", str, float)NEWLINE def f(x: T, y: T): ...NEWLINE x = ''NEWLINE x = 42.0NEWLINE f(x, '') # wrong-arg-types[e]NEWLINE f(x, 42.0) # okNEWLINE """)NEWLINE self.assertErrorRegexes(NEWLINE errors, {"e": r"Expected.*y: float.*Actual.*y: str"})NEWLINENEWLINE def test_filter_class(self):NEWLINE self.Check("""NEWLINE from typing import TypeVarNEWLINE class A(object): passNEWLINE class B(object): passNEWLINE T = TypeVar("T", A, B)NEWLINE def f(x: T, y: T): ...NEWLINE x = A()NEWLINE x.__class__ = BNEWLINE # Setting __class__ makes the type ambiguous to pytype.NEWLINE f(x, A())NEWLINE f(x, B())NEWLINE """)NEWLINENEWLINE def test_split(self):NEWLINE ty = self.Infer("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T", int, type(None))NEWLINE def f(x: T) -> T:NEWLINE return __any_object__NEWLINE if __random__:NEWLINE x = NoneNEWLINE else:NEWLINE x = 3NEWLINE v = id(x) if x else 42NEWLINE """, deep=False)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE import typesNEWLINE from typing import Optional, TypeVarNEWLINE v = ... # type: intNEWLINE x = ... # type: Optional[int]NEWLINE T = TypeVar("T", int, None)NEWLINE def f(x: T) -> T: ...NEWLINE """)NEWLINENEWLINE def test_enforce_non_constrained_typevar(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T")NEWLINE def f(x: T, y: T): ...NEWLINE f(42, True) # okNEWLINE f(42, "") # wrong-arg-types[e1]NEWLINE f(42, 16j) # okNEWLINE f(object(), 42) # okNEWLINE f(42, object()) # okNEWLINE f(42.0, "") # wrong-arg-types[e2]NEWLINE """)NEWLINE self.assertErrorRegexes(errors, {NEWLINE "e1": r"Expected.*y: int.*Actual.*y: str",NEWLINE "e2": r"Expected.*y: float.*Actual.*y: str"})NEWLINENEWLINE def test_useless_typevar(self):NEWLINE self.InferWithErrors("""NEWLINE from typing import Tuple, TypeVarNEWLINE T = TypeVar("T")NEWLINE S = TypeVar("S", int, float)NEWLINE def f1(x: T): ... # invalid-annotationNEWLINE def f2() -> T: ... # invalid-annotationNEWLINE def f3(x: Tuple[T]): ... # invalid-annotationNEWLINE def f4(x: Tuple[T, T]): ... # okNEWLINE def f5(x: S): ... # okNEWLINE def f6(x: "U"): ... # invalid-annotationNEWLINE def f7(x: T, y: "T"): ... # okNEWLINE def f8(x: "U") -> "U": ... 
# okNEWLINE U = TypeVar("U")NEWLINE """)NEWLINENEWLINE def test_use_bound(self):NEWLINE ty, errors = self.InferWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar("T", bound=float)NEWLINE def f(x: T) -> T:NEWLINE return xNEWLINE v1 = f(__any_object__) # okNEWLINE v2 = f(True) # okNEWLINE v3 = f(42) # okNEWLINE v4 = f(3.14) # okNEWLINE v5 = f("") # wrong-arg-types[e]NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import Any, TypeVarNEWLINE T = TypeVar("T", bound=float)NEWLINE def f(x: T) -> TNEWLINE v1 = ... # type: floatNEWLINE v2 = ... # type: boolNEWLINE v3 = ... # type: intNEWLINE v4 = ... # type: floatNEWLINE v5 = ... # type: AnyNEWLINE """)NEWLINE self.assertErrorRegexes(errors, {"e": r"x: float.*x: str"})NEWLINENEWLINE def test_bad_return(self):NEWLINE self.assertNoCrash(self.Check, """NEWLINE from typing import AnyStr, DictNEWLINENEWLINE class Foo(object):NEWLINE def f(self) -> AnyStr: return __any_object__NEWLINE def g(self) -> Dict[AnyStr, Dict[AnyStr, AnyStr]]:NEWLINE return {'foo': {'bar': self.f()}}NEWLINE """)NEWLINENEWLINE def test_optional_typevar(self):NEWLINE _, errors = self.InferWithErrors("""NEWLINE from typing import Optional, TypeVarNEWLINE T = TypeVar("T", bound=str)NEWLINE def f() -> Optional[T]:NEWLINE return 42 if __random__ else None # bad-return-type[e]NEWLINE """, deep=True)NEWLINE self.assertErrorRegexes(errors, {"e": r"Optional\[T\].*int"})NEWLINENEWLINE def test_unicode_literals(self):NEWLINE ty = self.Infer("""NEWLINE from __future__ import unicode_literalsNEWLINE import typingNEWLINE T = typing.TypeVar("T")NEWLINE def f(x: T) -> T:NEWLINE return __any_object__NEWLINE v = f(42)NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE import __future__NEWLINE from typing import AnyNEWLINE typing = ... # type: moduleNEWLINE unicode_literals = ... # type: __future__._FeatureNEWLINE T = TypeVar("T")NEWLINE def f(x: T) -> T: ...NEWLINE v = ... 
# type: intNEWLINE """)NEWLINENEWLINE def test_any_as_bound(self):NEWLINE self.Check("""NEWLINE from typing import Any, TypeVarNEWLINE T = TypeVar("T", bound=Any)NEWLINE def f(x: T) -> T:NEWLINE return xNEWLINE f(42)NEWLINE """)NEWLINENEWLINE def test_any_as_constraint(self):NEWLINE self.Check("""NEWLINE from typing import Any, TypeVarNEWLINE T = TypeVar("T", str, Any)NEWLINE def f(x: T) -> T:NEWLINE return xNEWLINE f(42)NEWLINE """)NEWLINENEWLINE def test_name_reuse(self):NEWLINE self.Check("""NEWLINE from typing import Generic, TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE class Foo(Generic[T]):NEWLINE def __init__(self, x: T):NEWLINE self.x = xNEWLINE def f(foo: Foo[T]) -> T:NEWLINE return foo.xNEWLINE """)NEWLINENEWLINE def test_property_type_param(self):NEWLINE # We should allow property signatures of the form f(self) -> X[T] withoutNEWLINE # needing to annotate 'self' if the class is generic and we use its typeNEWLINE # parameter in the property's signature.NEWLINE ty = self.Infer("""NEWLINE from typing import TypeVar, GenericNEWLINE T = TypeVar('T')NEWLINE class A(Generic[T]):NEWLINE def __init__(self, foo: T):NEWLINE self._foo = fooNEWLINE @propertyNEWLINE def foo(self) -> T:NEWLINE return self._fooNEWLINE @foo.setterNEWLINE def foo(self, foo: T) -> None:NEWLINE self._foo = fooNEWLINE """)NEWLINE # types inferred as Any due to b/123835298NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import TypeVar, Generic, AnyNEWLINE T = TypeVar('T')NEWLINE class A(Generic[T]):NEWLINE _foo: AnyNEWLINE foo: AnyNEWLINE def __init__(self, foo: T) -> NoneNEWLINE """)NEWLINENEWLINE def test_return_typevar(self):NEWLINE errors = self.CheckWithErrors("""NEWLINE from typing import TypeVarNEWLINE T = TypeVar('T')NEWLINE def f(x: T) -> T:NEWLINE return T # bad-return-type[e]NEWLINE """)NEWLINE self.assertErrorRegexes(errors, {"e": "Expected.*T.*Actual.*TypeVar"})NEWLINENEWLINE def test_typevar_in_union_alias(self):NEWLINE ty = self.Infer("""NEWLINE from typing import Dict, List, TypeVar, UnionNEWLINE T = TypeVar("T")NEWLINE U = TypeVar("U")NEWLINE Foo = Union[T, List[T], Dict[T, List[U]], complex]NEWLINE def f(x: Foo[int, str]): ...NEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import Dict, List, TypeVar, Union, AnyNEWLINE T = TypeVar("T")NEWLINE U = TypeVar("U")NEWLINE Foo: AnyNEWLINE def f(x: Union[Dict[int, List[str]], List[int], complex, int]) -> None: ...NEWLINE """)NEWLINENEWLINE def test_typevar_in_union_alias_error(self):NEWLINE err = self.CheckWithErrors("""NEWLINE from typing import Dict, List, TypeVar, UnionNEWLINE T = TypeVar("T")NEWLINE U = TypeVar("U")NEWLINE Foo = Union[T, List[T], Dict[T, List[U]], complex]NEWLINE def f(x: Foo[int]): ... # invalid-annotation[e]NEWLINE """)NEWLINE self.assertErrorRegexes(err, {"e": "Union.*2.*instantiated.*1"})NEWLINENEWLINE def test_use_unsupported_typevar(self):NEWLINE # Test that we don't crash when using this pattern (b/162274390)NEWLINE self.CheckWithErrors("""NEWLINE from typing import List, TypeVar, UnionNEWLINE T = TypeVar("T")NEWLINE Tree = Union[T, List['Tree']] # not-supported-yetNEWLINE def f(x: Tree[int]): ... 
# no error since Tree is set to AnyNEWLINE """)NEWLINENEWLINENEWLINEclass TypeVarTestPy3(test_base.TargetPython3FeatureTest):NEWLINE """Tests for TypeVar in Python 3."""NEWLINENEWLINE def test_use_constraints_from_pyi(self):NEWLINE with file_utils.Tempdir() as d:NEWLINE d.create_file("foo.pyi", """NEWLINE from typing import AnyStr, TypeVarNEWLINE T = TypeVar("T", int, float)NEWLINE def f(x: T) -> T: ...NEWLINE def g(x: AnyStr) -> AnyStr: ...NEWLINE """)NEWLINE _, errors = self.InferWithErrors("""NEWLINE import fooNEWLINE foo.f("") # wrong-arg-types[e1]NEWLINE foo.g(0) # wrong-arg-types[e2]NEWLINE """, pythonpath=[d.path])NEWLINE self.assertErrorRegexes(errors, {NEWLINE "e1": r"Union\[float, int\].*str",NEWLINE "e2": r"Union\[bytes, str\].*int"})NEWLINENEWLINE def test_subprocess(self):NEWLINE ty = self.Infer("""NEWLINE import subprocessNEWLINE from typing import ListNEWLINE def run(args: List[str]):NEWLINE result = subprocess.run(NEWLINE args,NEWLINE stdout=subprocess.PIPE,NEWLINE stderr=subprocess.PIPE,NEWLINE universal_newlines=True)NEWLINE if result.returncode:NEWLINE raise subprocess.CalledProcessError(NEWLINE result.returncode, args, result.stdout)NEWLINE return result.stdoutNEWLINE """)NEWLINE self.assertTypesMatchPytd(ty, """NEWLINE from typing import ListNEWLINE subprocess: moduleNEWLINE def run(args: List[str]) -> strNEWLINE """)NEWLINENEWLINE def test_abstract_classmethod(self):NEWLINE self.Check("""NEWLINE from abc import ABC, abstractmethodNEWLINE from typing import Type, TypeVarNEWLINENEWLINE T = TypeVar('T', bound='Foo')NEWLINENEWLINE class Foo(ABC):NEWLINE @classmethodNEWLINE @abstractmethodNEWLINE def f(cls: Type[T]) -> T:NEWLINE return cls()NEWLINE """)NEWLINENEWLINENEWLINEtest_base.main(globals(), __name__ == "__main__")NEWLINE |
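The suite above exercises constrained TypeVars (TypeVar("T", int, float)), bounded TypeVars (bound=...), and generic classes. As a plain-Python illustration of the two flavors being tested, separate from the pytype harness (names here are illustrative):

from typing import List, TypeVar

TNum = TypeVar('TNum', int, float)   # constrained: must resolve to exactly int or float
TText = TypeVar('TText', bound=str)  # bounded: any subtype of str is accepted

def double(x: TNum) -> TNum:
    return x + x

def first(items: List[TText]) -> TText:
    return items[0]

double(2)          # ok, TNum -> int
double(1.5)        # ok, TNum -> float
first(['a', 'b'])  # ok, TText -> str
# double('hi')     # a checker such as pytype rejects this: str is not int or float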
import osNEWLINENEWLINEfrom celery import CeleryNEWLINENEWLINE# set the default Django settings module for the 'celery' program.NEWLINEos.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")NEWLINENEWLINEapp = Celery("finance_api")NEWLINENEWLINE# Using a string here means the worker doesn't have to serializeNEWLINE# the configuration object to child processes.NEWLINE# - namespace='CELERY' means all celery-related configuration keysNEWLINE# should have a `CELERY_` prefix.NEWLINEapp.config_from_object("django.conf:settings", namespace="CELERY")NEWLINENEWLINE# Load task modules from all registered Django app configs.NEWLINEapp.autodiscover_tasks()NEWLINE |
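app.autodiscover_tasks() scans each installed Django app for a tasks module, so task definitions live next to the app code rather than in this file. A minimal sketch of such a module and how it is invoked (the app and task names are illustrative, not part of this project):

# <some_django_app>/tasks.py
from celery import shared_task

@shared_task
def add(x, y):
    # Executed by a worker process, e.g. started with: celery -A <project> worker
    return x + y

Calling add.delay(2, 3) from Django code then enqueues the task through the broker configured by the CELERY_-prefixed settings loaded above.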
from random import randintNEWLINEfrom tkinter import *NEWLINEfrom tkinter import ttkNEWLINENEWLINENEWLINEclass Node:NEWLINE def __init__(self, x, y, aValue):NEWLINE self.x = xNEWLINE self.y = yNEWLINE self.leftNode = 0NEWLINE self.bottomNode = 0NEWLINE self.rightNode = 0NEWLINE self.topNode = 0NEWLINE self.aValue = aValueNEWLINENEWLINENEWLINEclass AObject:NEWLINE def __init__(self, finder, start, pokemon, tablero):NEWLINE self.openQ = []NEWLINE self.closeQ = []NEWLINE self.rightWay = []NEWLINE self.steps = []NEWLINENEWLINE def insertStep(node):NEWLINE if not self.rightWay:NEWLINE print('first step')NEWLINE self.rightWay.append(node)NEWLINE # print(self.rightWay, node, self.rightWay[0].rightNode)NEWLINENEWLINE else:NEWLINE print('entered')NEWLINE for i in self.rightWay:NEWLINE print('right', node.x, i.rightNode.x, node.y, i.rightNode.y)NEWLINE print('left', node.x, i.leftNode.x, node.y, i.leftNode.y)NEWLINE print('top', node.x, i.topNode.x, node.y, i.topNode.y)NEWLINE print('bottom', node.x, i.bottomNode.x,NEWLINE node.y, i.bottomNode.y)NEWLINE if i.rightNode != 0:NEWLINE if (node.x == i.rightNode.x and node.y == i.rightNode.y):NEWLINE self.rightWay = self.rightWay[0: self.rightWay.index(NEWLINE i) + 1]NEWLINE breakNEWLINE if i.leftNode != 0:NEWLINE if (node.x == i.leftNode.x and node.y == i.leftNode.y):NEWLINE self.rightWay = self.rightWay[0: self.rightWay.index(NEWLINE i) + 1]NEWLINE breakNEWLINE if i.topNode != 0:NEWLINE if (node.x == i.topNode.x and node.y == i.topNode.y):NEWLINE self.rightWay = self.rightWay[0: self.rightWay.index(NEWLINE i) + 1]NEWLINE breakNEWLINE if i.bottomNode != 0:NEWLINE if (node.x == i.bottomNode.x and node.y == i.bottomNode.y):NEWLINE self.rightWay = self.rightWay[0: self.rightWay.index(NEWLINE i) + 1]NEWLINE breakNEWLINENEWLINE def insertClose(node):NEWLINE if self.openQ:NEWLINE for i in self.openQ:NEWLINE if node.x == i.x and node.y == i.y:NEWLINE self.openQ.remove(i)NEWLINE breakNEWLINE if self.closeQ:NEWLINE for i in self.closeQ:NEWLINE if node.aValue <= i.aValue:NEWLINE self.closeQ.insert(self.closeQ.index(i), node)NEWLINE breakNEWLINE if node.aValue > self.closeQ[-1].aValue:NEWLINE self.closeQ.append(node)NEWLINE else:NEWLINE self.closeQ.append(node)NEWLINENEWLINE def insertOpen(node):NEWLINE # print('Adding node')NEWLINE if self.closeQ:NEWLINE for i in self.closeQ:NEWLINE if node.x == i.x and node.y == i.y:NEWLINE returnNEWLINE if self.openQ:NEWLINE for i in self.openQ:NEWLINE # print('looking for a spot for the node')NEWLINE if node.aValue <= i.aValue:NEWLINE self.openQ.insert(self.openQ.index(i), node)NEWLINE # print('node added')NEWLINE breakNEWLINE if node.aValue > self.openQ[-1].aValue:NEWLINE self.openQ.append(node)NEWLINE # print('node added')NEWLINE else:NEWLINE self.openQ.append(node)NEWLINE # print('first node added')NEWLINENEWLINE def findWay(goal):NEWLINE self.rightWay = []NEWLINENEWLINE def wayWithoutObstacle(finder):NEWLINE obstacles = {}NEWLINE if finder.x > 0:NEWLINE if (tablero[finder.y][finder.x - 1].name != 'Rock') and (tablero[finder.y][finder.x - 1].name != 'Van'):NEWLINE obstacles['left'] = (True)NEWLINE else:NEWLINE obstacles['left'] = (False)NEWLINE else:NEWLINE obstacles['left'] = (False)NEWLINE if finder.x < 9:NEWLINE if (tablero[finder.y][finder.x + 1].name != 'Rock') and (tablero[finder.y][finder.x + 1].name != 'Van'):NEWLINE obstacles['right'] = (True)NEWLINE else:NEWLINE obstacles['right'] = (False)NEWLINE else:NEWLINE obstacles['right'] = (False)NEWLINE if finder.y > 0:NEWLINE if (tablero[finder.y - 
1][finder.x].name != 'Rock') and (tablero[finder.y - 1][finder.x].name != 'Van'):NEWLINE obstacles['up'] = (True)NEWLINE else:NEWLINE obstacles['up'] = (False)NEWLINE else:NEWLINE obstacles['up'] = (False)NEWLINE if finder.y < 9:NEWLINE if (tablero[finder.y + 1][finder.x].name != 'Rock') and (tablero[finder.y + 1][finder.x].name != 'Van'):NEWLINE obstacles['down'] = (True)NEWLINE else:NEWLINE obstacles['down'] = (False)NEWLINE else:NEWLINE obstacles['down'] = (False)NEWLINE return obstaclesNEWLINENEWLINE def manhatan(startX, startY, goal):NEWLINE return abs(startX - goal.x) + abs(startY - goal.y)NEWLINE g_n_ = manhatan(finder.x, finder.y, start)NEWLINE h_n_ = manhatan(finder.x, finder.y, goal)NEWLINE currentTrainer = Trainer(finder.y, finder.x)NEWLINE while True:NEWLINE a = input() # debug pause: press Enter to advance one search stepNEWLINE print('Pokemon', goal.x, goal.y)NEWLINE if self.openQ:NEWLINE currentTrainer = Trainer(self.openQ[0].y, self.openQ[0].x)NEWLINE g_n_ = manhatan(currentTrainer.x, currentTrainer.y, start)NEWLINE h_n_ = manhatan(currentTrainer.x, currentTrainer.y, goal)NEWLINENEWLINE print('Pokeball', currentTrainer.x, currentTrainer.y)NEWLINE currentNode = Node(NEWLINE currentTrainer.x, currentTrainer.y, g_n_ + h_n_)NEWLINE obstacles = wayWithoutObstacle(currentTrainer)NEWLINE print(obstacles)NEWLINE insertClose(currentNode)NEWLINE # for k in self.closeQ:NEWLINE # print('Closed queue', '[', k.x, k.y, k.aValue, ']')NEWLINENEWLINE if obstacles['left']:NEWLINE # print('left')NEWLINE g_n_ = manhatan(currentTrainer.x - 1,NEWLINE currentTrainer.y, start)NEWLINE h_n_ = manhatan(currentTrainer.x - 1,NEWLINE currentTrainer.y, goal)NEWLINE insertOpen(Node(currentTrainer.x - 1,NEWLINE currentTrainer.y, g_n_ + h_n_))NEWLINE currentNode.leftNode = Node(NEWLINE currentTrainer.x - 1, currentTrainer.y, g_n_ + h_n_)NEWLINE if obstacles['right']:NEWLINE # print('right')NEWLINE g_n_ = manhatan(currentTrainer.x + 1,NEWLINE currentTrainer.y, start)NEWLINE h_n_ = manhatan(currentTrainer.x + 1,NEWLINE currentTrainer.y, goal)NEWLINE insertOpen(Node(currentTrainer.x + 1,NEWLINE currentTrainer.y, g_n_ + h_n_))NEWLINE currentNode.rightNode = Node(NEWLINE currentTrainer.x + 1, currentTrainer.y, g_n_ + h_n_)NEWLINE if obstacles['up']:NEWLINE # print('up')NEWLINE g_n_ = manhatan(currentTrainer.x,NEWLINE currentTrainer.y - 1, start)NEWLINE h_n_ = manhatan(currentTrainer.x,NEWLINE currentTrainer.y - 1, goal)NEWLINE insertOpen(NEWLINE Node(currentTrainer.x, currentTrainer.y - 1, g_n_ + h_n_))NEWLINE currentNode.topNode = Node(NEWLINE currentTrainer.x, currentTrainer.y - 1, g_n_ + h_n_)NEWLINE if obstacles['down']:NEWLINE # print('down')NEWLINE g_n_ = manhatan(currentTrainer.x,NEWLINE currentTrainer.y + 1, start)NEWLINE h_n_ = manhatan(currentTrainer.x,NEWLINE currentTrainer.y + 1, goal)NEWLINE insertOpen(NEWLINE Node(currentTrainer.x, currentTrainer.y + 1, g_n_ + h_n_))NEWLINE currentNode.bottomNode = Node(NEWLINE currentTrainer.x, currentTrainer.y + 1, g_n_ + h_n_)NEWLINENEWLINE insertStep(currentNode)NEWLINENEWLINE # for k in self.openQ:NEWLINE # print('Open queue', '[', k.x, k.y, k.aValue, ']')NEWLINENEWLINE if currentTrainer.x == goal.x and currentTrainer.y == goal.y:NEWLINE for k in self.rightWay:NEWLINE print('Step', '[', k.x, k.y, ']')NEWLINE return self.rightWayNEWLINENEWLINE self.steps.append(findWay(pokemon[0]))NEWLINENEWLINENEWLINEclass Pokemon:NEWLINE def __init__(self, i, j, pokemonId, container):NEWLINE self.name = 'Pokemon'NEWLINE self.pokemonId = pokemonIdNEWLINE self.image = PhotoImage(file='images/' + str(pokemonId) + '.png')NEWLINE 
self.y = iNEWLINE self.x = jNEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Grass:NEWLINE def __init__(self, i, j, container):NEWLINE self.name = 'Grass'NEWLINE self.image = PhotoImage(file='images/grass.png')NEWLINE self.y = iNEWLINE self.x = jNEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Rock:NEWLINE def __init__(self, i, j, container):NEWLINE self.name = 'Rock'NEWLINE self.image = PhotoImage(file='images/rock.png')NEWLINE self.y = iNEWLINE self.x = jNEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Bean:NEWLINE def __init__(self, i, j, container):NEWLINE self.name = 'Bean'NEWLINE self.image = PhotoImage(file='images/jelly-beans.png')NEWLINE self.y = iNEWLINE self.x = jNEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Trainer:NEWLINE def __init__(self, i, j, container=False, pokeball=False):NEWLINE self.name = 'Trainer'NEWLINE self.y = iNEWLINE self.x = jNEWLINE self.back = FalseNEWLINE if container:NEWLINE self.image = PhotoImage(file='images/' + pokeball + '.png')NEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Van:NEWLINE def __init__(self, i, j, container):NEWLINE self.name = 'Van'NEWLINE self.image = PhotoImage(file='images/van.png')NEWLINE self.y = iNEWLINE self.x = jNEWLINE self.label = Label(NEWLINE container,NEWLINE height='64',NEWLINE width='64',NEWLINE borderwidth='2',NEWLINE image=self.imageNEWLINE )NEWLINENEWLINENEWLINEclass Tablero:NEWLINE def __init__(self, size):NEWLINE self.window = Tk()NEWLINE self.window.title('Pokemon Finder')NEWLINE self.size = sizeNEWLINE self.tablero = []NEWLINE self.pokemonArray = []NEWLINE self.trainer = Trainer(randint(0, self.size), randint(NEWLINE 0, self.size), self.window, 'pokeball2')NEWLINENEWLINE for i in range(10):NEWLINE self.tablero.append([])NEWLINE for j in range(10):NEWLINE if ((j == self.trainer.x) & (i == self.trainer.y - 1)):NEWLINE self.van = Van(i, j, self.window)NEWLINE self.tablero[i].append(self.van)NEWLINE elif randint(0, 6) == 1:NEWLINE pokemon = Pokemon(i, j, randint(1, 19), self.window)NEWLINE self.pokemonArray.append(pokemon)NEWLINE self.tablero[i].append(pokemon)NEWLINE elif randint(0, 6) == 1:NEWLINE rock = Rock(i, j, self.window)NEWLINE self.tablero[i].append(rock)NEWLINE else:NEWLINE grass = Grass(i, j, self.window)NEWLINE self.tablero[i].append(grass)NEWLINENEWLINE for i in range(10):NEWLINE for j in range(10):NEWLINE self.tablero[i][j].label.grid(NEWLINE column=self.tablero[i][j].x, row=self.tablero[i][j].y)NEWLINENEWLINE self.window.after(500, self.findPokemon)NEWLINE self.window.mainloop()NEWLINENEWLINE def findPokemon(self):NEWLINENEWLINE def Move(trainer):NEWLINE def rightMove(leaveBean=False):NEWLINE if leaveBean:NEWLINE # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)NEWLINE self.tablero[trainer.y][trainer.x + 1] = Trainer(NEWLINE trainer.y, trainer.x + 1, self.window, 'pokeball1')NEWLINE else:NEWLINE self.tablero[trainer.y][trainer.x + 1] = Trainer(NEWLINE trainer.y, trainer.x + 1, 
self.window, 'pokeball2')NEWLINENEWLINE self.tablero[trainer.y][trainer.x] = Grass(NEWLINE trainer.y, trainer.x, self.window)NEWLINE self.tablero[trainer.y][trainer.x].label.grid(NEWLINE column=trainer.x, row=trainer.y)NEWLINE self.tablero[trainer.y][trainer.x +NEWLINE 1].label.grid(column=trainer.x + 1, row=trainer.y)NEWLINE trainer.x += 1NEWLINENEWLINE def leftMove(leaveBean=False):NEWLINE if leaveBean:NEWLINE # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)NEWLINE self.tablero[trainer.y][trainer.x - 1] = Trainer(NEWLINE trainer.y, trainer.x - 1, self.window, 'pokeball1')NEWLINE else:NEWLINE self.tablero[trainer.y][trainer.x - 1] = Trainer(NEWLINE trainer.y, trainer.x - 1, self.window, 'pokeball2')NEWLINENEWLINE self.tablero[trainer.y][trainer.x] = Grass(NEWLINE trainer.y, trainer.x, self.window)NEWLINE self.tablero[trainer.y][trainer.x].label.grid(NEWLINE column=trainer.x, row=trainer.y)NEWLINE self.tablero[trainer.y][trainer.x -NEWLINE 1].label.grid(column=trainer.x - 1, row=trainer.y)NEWLINE trainer.x -= 1NEWLINENEWLINE def downMove(leaveBean=False):NEWLINE if leaveBean:NEWLINE # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)NEWLINE self.tablero[trainer.y + 1][trainer.x] = Trainer(NEWLINE trainer.y + 1, trainer.x, self.window, 'pokeball1')NEWLINE else:NEWLINE self.tablero[trainer.y + 1][trainer.x] = Trainer(NEWLINE trainer.y + 1, trainer.x, self.window, 'pokeball2')NEWLINENEWLINE self.tablero[trainer.y][trainer.x] = Grass(NEWLINE trainer.y, trainer.x, self.window)NEWLINE self.tablero[trainer.y][trainer.x].label.grid(NEWLINE column=trainer.x, row=trainer.y)NEWLINE self.tablero[trainer.y +NEWLINE 1][trainer.x].label.grid(column=trainer.x, row=trainer.y + 1)NEWLINE trainer.y += 1NEWLINENEWLINE def upMove(leaveBean=False):NEWLINE if leaveBean:NEWLINE # self.tablero[trainer.y][trainer.x] = Bean(trainer.y, trainer.y, self.window)NEWLINE self.tablero[trainer.y - 1][trainer.x] = Trainer(NEWLINE trainer.y - 1, trainer.x, self.window, 'pokeball1')NEWLINE else:NEWLINE self.tablero[trainer.y - 1][trainer.x] = Trainer(NEWLINE trainer.y - 1, trainer.x, self.window, 'pokeball2')NEWLINENEWLINE self.tablero[trainer.y][trainer.x] = Grass(NEWLINE trainer.y, trainer.x, self.window)NEWLINE self.tablero[trainer.y][trainer.x].label.grid(NEWLINE column=trainer.x, row=trainer.y)NEWLINE self.tablero[trainer.y -NEWLINE 1][trainer.x].label.grid(column=trainer.x, row=trainer.y - 1)NEWLINE trainer.y -= 1NEWLINENEWLINE def isPokemonClose():NEWLINE if trainer.x < self.size - 1 and self.tablero[trainer.y][trainer.x+1].name == 'Pokemon':NEWLINE return 'right'NEWLINE elif trainer.x > 0 and self.tablero[trainer.y][trainer.x-1].name == 'Pokemon':NEWLINE return 'left'NEWLINE elif trainer.y < self.size - 1 and self.tablero[trainer.y + 1][trainer.x].name == 'Pokemon':NEWLINE return 'down'NEWLINE elif trainer.y > 0 and self.tablero[trainer.y - 1][trainer.x].name == 'Pokemon':NEWLINE return 'up'NEWLINENEWLINE def wayWithoutObstacle():NEWLINE obstacles = {}NEWLINE if trainer.x > 0:NEWLINE if (self.tablero[trainer.y][trainer.x - 1].name != 'Rock') and (self.tablero[trainer.y][trainer.x - 1].name != 'Van'):NEWLINE obstacles['left'] = (True)NEWLINE else:NEWLINE obstacles['left'] = (False)NEWLINE else:NEWLINE obstacles['left'] = (False)NEWLINE if trainer.x < self.size - 1:NEWLINE if (self.tablero[trainer.y][trainer.x + 1].name != 'Rock') and (self.tablero[trainer.y][trainer.x + 1].name != 'Van'):NEWLINE obstacles['right'] = (True)NEWLINE else:NEWLINE obstacles['right'] = 
(False)NEWLINE else:NEWLINE obstacles['right'] = (False)NEWLINE if trainer.y > 0:NEWLINE if (self.tablero[trainer.y - 1][trainer.x].name != 'Rock') and (self.tablero[trainer.y - 1][trainer.x].name != 'Van'):NEWLINE obstacles['up'] = (True)NEWLINE else:NEWLINE obstacles['up'] = (False)NEWLINE else:NEWLINE obstacles['up'] = (False)NEWLINE if trainer.y < self.size - 1:NEWLINE if (self.tablero[trainer.y + 1][trainer.x].name != 'Rock') and (self.tablero[trainer.y + 1][trainer.x].name != 'Van'):NEWLINE obstacles['down'] = (True)NEWLINE else:NEWLINE obstacles['down'] = (False)NEWLINE else:NEWLINE obstacles['down'] = (False)NEWLINE return obstaclesNEWLINENEWLINE def chooseWay(obstacles):NEWLINE choose = randint(0, 3)NEWLINE if choose == 0 and obstacles['left']:NEWLINE return 'left'NEWLINE elif choose == 1 and obstacles['right']:NEWLINE return 'right'NEWLINE elif choose == 2 and obstacles['up']:NEWLINE return 'up'NEWLINE elif choose == 3 and obstacles['down']:NEWLINE return 'down'NEWLINE else:NEWLINE return chooseWay(obstacles)NEWLINENEWLINE def backToVan():NEWLINENEWLINE def chooseBackWay():NEWLINE min = abs(trainer.x + 1 - self.van.x) + \NEWLINE abs(trainer.y - self.van.y)NEWLINE if (abs(trainer.x - 1 - self.van.x) + abs(trainer.y - self.van.y) < min) and wayWithoutObstacle()['left'] and isPokemonClose() != 'left':NEWLINE return 'left'NEWLINE elif (abs(trainer.x - self.van.x) + abs(trainer.y + 1 - self.van.y) < min) and wayWithoutObstacle()['down'] and isPokemonClose() != 'down':NEWLINE return 'down'NEWLINE elif (abs(trainer.x - self.van.x) + abs(trainer.y - 1 - self.van.y) < min) and wayWithoutObstacle()['up'] and isPokemonClose() != 'up':NEWLINE return 'up'NEWLINE elif wayWithoutObstacle()['right'] and isPokemonClose() != 'right':NEWLINE return 'right'NEWLINE else:NEWLINE NoneNEWLINENEWLINE def isVanClose():NEWLINE if self.trainer.x < self.size - 1:NEWLINE if self.tablero[trainer.y][trainer.x+1].name == 'Van':NEWLINE return TrueNEWLINE if self.trainer.x > 0:NEWLINE if self.tablero[trainer.y][trainer.x-1].name == 'Van':NEWLINE return TrueNEWLINE if self.trainer.y < self.size - 1:NEWLINE if self.tablero[trainer.y+1][trainer.x].name == 'Van':NEWLINE return TrueNEWLINE if self.trainer.y > 0:NEWLINE if self.tablero[trainer.y-1][trainer.x].name == 'Van':NEWLINE return TrueNEWLINE else:NEWLINE return FalseNEWLINE pokemonGotcha(True)NEWLINE try:NEWLINE if isVanClose():NEWLINE pokemonGotcha(False)NEWLINE elif chooseBackWay() == 'right':NEWLINE rightMove(True)NEWLINE elif chooseBackWay() == 'left':NEWLINE leftMove(True)NEWLINE elif chooseBackWay() == 'down':NEWLINE downMove(True)NEWLINE elif chooseBackWay() == 'up':NEWLINE upMove(True)NEWLINE except Exception as error:NEWLINE print(error)NEWLINENEWLINE def pokemonGotcha(gotIt):NEWLINE self.trainer.back = gotItNEWLINE self.trainer.image = PhotoImage(file='images/pokeball1.png')NEWLINE self.trainer.label.config(image=self.trainer.image)NEWLINENEWLINE self.a = AObject(self.trainer, self.van,NEWLINE self.pokemonArray, self.tablero)NEWLINE # print(self.a.openQ, self.a.closeQ)NEWLINENEWLINE Move(self.trainer)NEWLINE self.window.after(500, self.findPokemon)NEWLINENEWLINENEWLINEdef main():NEWLINE tierra = Tablero(10)NEWLINENEWLINENEWLINE# x = j | y = iNEWLINEif __name__ == '__main__':NEWLINE main()NEWLINE |
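AObject above implements an A*-style search by hand, with manually maintained open/closed lists and per-node neighbor links. For reference, a compact, self-contained version of the same idea (Manhattan-distance heuristic on a 4-connected grid) might look like the sketch below; it is illustrative only and is not wired into the Tkinter board:

import heapq

def astar(grid, start, goal):
    # grid[y][x] is True when the cell is walkable; start/goal are (x, y) tuples.
    def h(p):
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])  # Manhattan distance
    open_heap = [(h(start), 0, start)]
    came_from = {start: None}
    best_g = {start: 0}
    while open_heap:
        _, g, current = heapq.heappop(open_heap)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        x, y = current
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            nx, ny = nxt
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx]:
                g_new = g + 1
                if g_new < best_g.get(nxt, float('inf')):
                    best_g[nxt] = g_new
                    came_from[nxt] = current
                    heapq.heappush(open_heap, (g_new + h(nxt), g_new, nxt))
    return None  # no path

For example, astar([[True, True], [False, True]], (0, 0), (1, 1)) returns [(0, 0), (1, 0), (1, 1)].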
from Service import ServiceNEWLINEfrom Graph import Graph, NodeNEWLINEfrom Parser import port_dicNEWLINEfrom dcm import*NEWLINENEWLINE# The maximum number of texts or images for each user.NEWLINE# This is to prevent the server from over-loading.NEWLINEMAX_DOC_NUM_PER_USER = 30 # non-negative inetegrNEWLINENEWLINE# Train or load the query classifier.NEWLINE# If you set it to 'load', it is assumed thatNEWLINE# models are already saved in `../models`.NEWLINETRAIN_OR_LOAD = 'train' # either 'train' or 'load'NEWLINENEWLINENEWLINENEWLINE####################### How does a workflow work? Reference firstWorkFlow as a walkthrough example.NEWLINENEWLINE#Contains serviceName and data to pass. Needed for batch (and thereby parallel) processing.NEWLINEclass serviceRequestData(object):NEWLINENEWLINE def __init__(self,nameOfService,argData):NEWLINE self.argumentData = argDataNEWLINE self.serviceName = nameOfServiceNEWLINE NEWLINENEWLINEclass workFlow(object):NEWLINE def __init__(self):NEWLINE self.currentState = 0; # What state on the state graphNEWLINE self.isEnd = False;NEWLINE self.batchedData = []NEWLINE NEWLINE NEWLINE NEWLINE NEWLINENEWLINEclass firstWorkflow(workFlow):NEWLINENEWLINE NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE print "Executing state logic";NEWLINE NEWLINE if(self.currentState==0):NEWLINE print "State 0";NEWLINE self.currentState = 1; # This decides what state to go to nextNEWLINE # batchedData contains a list of service Requests. The function parameter is serviceRequestData(serviceName,dataToPassToService).NEWLINE # Eg. "QA",inputModifierText[0]) means to pass to QA microservice with whatever was in the inputModifierText[0] (The text from the Lucida prompt))NEWLINE self.batchedData = [serviceRequestData("QA",[unicode("How old is Johann")]),serviceRequestData("QA",inputModifierText[0])];NEWLINE return;NEWLINE NEWLINE if(self.currentState==1):NEWLINE print "State 1";NEWLINE # [1] is being passed as the input. This value came from: serviceRequestData("QA",inputModifierText[0])NEWLINE # It is based on the positioning of the previous serviceRequestData batch.NEWLINE # Eg. 
[0] = serviceRequestData("QA",[unicode("How old is Johann")], [1] = serviceRequestData("QA",inputModifierText[0])NEWLINE #That means the second entry from state0 is being passed to it.NEWLINE self.batchedData = [serviceRequestData("QA",inputModifierText[1])] NEWLINE self.isEnd = True # This indicates the workflow is completeNEWLINE return;NEWLINENEWLINENEWLINENEWLINEclass QAWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("QA",inputModifierText[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass IMMWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("IMM",inputModifierImage[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass CAWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("CA",inputModifierText[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass IMCWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("IMC",inputModifierImage[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass FACEWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("FACE",inputModifierImage[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass DIGWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("DIG",inputModifierImage[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass ENSEMBLEWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("ENSEMBLE",inputModifierText[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINENEWLINEclass MSWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("MS",inputModifierText[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEclass WEWF(workFlow):NEWLINE def processCurrentState(self,inputModifierText,inputModifierImage):NEWLINE if(self.currentState==0):NEWLINE self.batchedData = [serviceRequestData("WE",inputModifierText[0])];NEWLINE self.isEnd = True;NEWLINE return;NEWLINENEWLINEWFList = {NEWLINE "IMMWF" : IMMWF(),NEWLINE "firstWorkFlow" : firstWorkflow(),NEWLINE "QAWF" : QAWF(),NEWLINE "CAWF" : CAWF(),NEWLINE "IMCWF" : IMCWF(),NEWLINE "FACEWF" : FACEWF(),NEWLINE "DIGWF" : DIGWF(),NEWLINE "ENSEMBLEWF" : ENSEMBLEWF(),NEWLINE "MSWF" : MSWF(),NEWLINE "WEWF" : WEWF()NEWLINE }NEWLINENEWLINENEWLINENEWLINENEWLINE# Pre-configured services.NEWLINE# The ThriftClient assumes that the following services are running.NEWLINE# Host IP addresses are resolved dynamically:NEWLINE# either set by Kubernetes or localhost.NEWLINENEWLINESERVICES = {NEWLINE 'IMM' : Service('IMM', int(port_dic["imm_port"]), 'image', 'image'),NEWLINE 'QA' : Service('QA', int(port_dic["qa_port"]), 'text', 'text'),NEWLINE 'CA' : Service('CA', int(port_dic["ca_port"]), 'text', None),NEWLINE 'IMC' : Service('IMC', 
int(port_dic["imc_port"]), 'image', None),NEWLINE 'FACE' : Service('FACE', int(port_dic["face_port"]), 'image', None),NEWLINE 'DIG' : Service('DIG', int(port_dic["dig_port"]), 'image', None),NEWLINE 'WE' : Service('WE', int(port_dic["we_port"]), 'text', None),NEWLINE 'MS' : Service('MS', int(port_dic["ms_port"]), 'text', None),NEWLINE }NEWLINENEWLINECLASSIFIER_DESCRIPTIONS = {NEWLINE 'text' : { 'class_QA' : Graph([Node('QAWF')]),NEWLINE 'class_CA' : Graph([Node('CAWF')]),NEWLINE 'class_WE' : Graph([Node('WEWF')]),NEWLINE 'class_MS' : Graph([Node('MSWF')]) },NEWLINE 'image' : { 'class_IMM' : Graph([Node('IMMWF')]),NEWLINE 'class_IMC' : Graph([Node('IMCWF')]),NEWLINE 'class_FACE' : Graph([Node('FACEWF')]),NEWLINE 'class_DIG' : Graph([Node('DIGWF')]) },NEWLINE 'text_image' : { 'class_QA': Graph([Node('QAWF')]),NEWLINE 'class_IMM' : Graph([Node('IMMWF')]),NEWLINE 'class_IMC' : Graph([Node('IMCWF')]),NEWLINE 'class_FACE' : Graph([Node('FACEWF')]),NEWLINE 'class_DIG' : Graph([Node('DIGWF')]), }NEWLINE }NEWLINENEWLINE# TODO: Should I have this in its own Config file?NEWLINE# Structure used to save the state/context across requests in a sessionNEWLINE# example:NEWLINE# SESSION = { <user>:NEWLINE# 'graph': <Graph>,NEWLINE# 'data': <response_data>NEWLINE# }NEWLINESESSION = {}NEWLINE |
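Each workflow above follows the same pattern: processCurrentState() inspects self.currentState, fills self.batchedData with serviceRequestData entries for the next batch, and sets self.isEnd on the final state. As a sketch of how a new multi-stage workflow could be added under these conventions (the class and its registration are hypothetical, and the referenced services must already exist in SERVICES):

class ImageThenQAWF(workFlow):
    # Hypothetical example: classify an image first, then answer a text question.
    def processCurrentState(self, inputModifierText, inputModifierImage):
        if self.currentState == 0:
            self.currentState = 1
            # State 0: send the first image to the IMM service.
            self.batchedData = [serviceRequestData("IMM", inputModifierImage[0])]
            return
        if self.currentState == 1:
            # State 1: pass the accompanying text to QA and finish.
            self.batchedData = [serviceRequestData("QA", inputModifierText[0])]
            self.isEnd = True
            return

# To make it reachable it would also need an entry in WFList, e.g.:
# WFList["ImageThenQAWF"] = ImageThenQAWF()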
import xbmcNEWLINEimport xbmcguiNEWLINEimport xbmcaddonNEWLINEimport jsonNEWLINEimport requestsNEWLINEimport sysNEWLINEimport mathNEWLINENEWLINEfrom .lib.ssdp import SSDP_ProviderNEWLINEfrom .lib.helpers import display_message, log_errorNEWLINENEWLINEclass HandoverUI():NEWLINENEWLINE def __init__(self):NEWLINE self.addon = xbmcaddon.Addon()NEWLINE self.player_time = 0NEWLINE self.player_file = self.get_player_file()NEWLINE self.target = ""NEWLINE self.main()NEWLINENEWLINE def main(self):NEWLINE xbmc.executebuiltin('ActivateWindow(busydialognocancel)')NEWLINE try:NEWLINE if not self.player_file:NEWLINE message = xbmcgui.Dialog()NEWLINE message.ok('Handover', self.addon.getLocalizedString(32026))NEWLINE xbmc.executebuiltin('Dialog.Close(busydialognocancel)')NEWLINE sys.exit(0)NEWLINENEWLINE ssdp = SSDP_Provider(self.addon)NEWLINE ssdp.discover()NEWLINE ssdp.resolve()NEWLINENEWLINE addresses = []NEWLINE menu_items = []NEWLINE for service in ssdp.resolved_devices:NEWLINE addresses.append(service)NEWLINE menu_items.append(ssdp.resolved_devices[service]['friendlyName'])NEWLINE finally:NEWLINE xbmc.executebuiltin('Dialog.Close(busydialognocancel)')NEWLINENEWLINE if len(menu_items):NEWLINE window = xbmcgui.Dialog()NEWLINE menu = window.contextmenu(menu_items)NEWLINE if menu < 0:NEWLINE # User dismissed the menu without choosing a target.NEWLINE returnNEWLINENEWLINE self.target = addresses[menu]NEWLINE self.send()NEWLINE else:NEWLINE message = xbmcgui.Dialog()NEWLINE message.ok('Handover', self.addon.getLocalizedString(32027))NEWLINENEWLINE def get_player_file(self):NEWLINE player = xbmc.Player()NEWLINE if player.isPlaying():NEWLINE self.player_time = int(math.floor(player.getTime()))NEWLINE return player.getPlayingFile()NEWLINE else:NEWLINE return FalseNEWLINENEWLINE def send(self):NEWLINE post_data = {NEWLINE "jsonrpc": "2.0",NEWLINE "method": "Player.Open",NEWLINE "params": {NEWLINE "item": {NEWLINE "file": self.player_fileNEWLINE }NEWLINE },NEWLINE "id": 1NEWLINE }NEWLINENEWLINE if not self.addon.getSettingBool('noresume'):NEWLINE offset = self.addon.getSettingInt('offset')NEWLINE time = max(0, self.player_time - offset)NEWLINE m, s = divmod(time, 60)NEWLINE h, m = divmod(m, 60)NEWLINE post_data['params']['options'] = {NEWLINE "resume": {"hours": h, "minutes": m, "seconds": s, "milliseconds": 0}NEWLINE }NEWLINENEWLINE try:NEWLINE request = requests.post(NEWLINE 'http://%s:8080/jsonrpc' % self.target,NEWLINE data = json.dumps(post_data),NEWLINE timeout = 10NEWLINE )NEWLINE except requests.exceptions.RequestException as e:NEWLINE display_message(self.addon.getLocalizedString(32024), 'ERROR')NEWLINE log_error(str(e), True)NEWLINE # The request never completed, so there is no response to inspect.NEWLINE returnNEWLINENEWLINE if request.status_code != requests.codes.ok:NEWLINE display_message('%s: %s' % (self.addon.getLocalizedString(32025), str(request.status_code)), 'ERROR')NEWLINE |
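send() hands the currently playing file to another Kodi instance through its JSON-RPC API, attaching a resume point derived from the local playback position minus a configurable offset. Outside of Kodi the same request can be reproduced with plain requests; the host, port 8080, resume position, and media URL below are placeholders, and the target Kodi must have remote control via HTTP enabled:

import json
import requests

payload = {
    "jsonrpc": "2.0",
    "method": "Player.Open",
    "params": {
        "item": {"file": "http://example.com/video.mp4"},
        "options": {"resume": {"hours": 0, "minutes": 12, "seconds": 30, "milliseconds": 0}},
    },
    "id": 1,
}
resp = requests.post("http://192.168.1.50:8080/jsonrpc", data=json.dumps(payload), timeout=10)
resp.raise_for_status()
print(resp.json())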
"""Python TAXII 2.0 Client API"""NEWLINENEWLINEfrom __future__ import unicode_literalsNEWLINENEWLINEimport datetimeNEWLINEimport jsonNEWLINEimport timeNEWLINENEWLINEimport pytzNEWLINEimport requestsNEWLINEimport requests.structures # is this public API?NEWLINEimport sixNEWLINEimport six.moves.urllib.parse as urlparseNEWLINENEWLINE__version__ = '0.5.0'NEWLINENEWLINEMEDIA_TYPE_STIX_V20 = "application/vnd.oasis.stix+json; version=2.0"NEWLINEMEDIA_TYPE_TAXII_V20 = "application/vnd.oasis.taxii+json; version=2.0"NEWLINEDEFAULT_USER_AGENT = "taxii2-client/" + __version__NEWLINENEWLINENEWLINEclass TAXIIServiceException(Exception):NEWLINE """Base class for exceptions raised by this library."""NEWLINE passNEWLINENEWLINENEWLINEclass InvalidArgumentsError(TAXIIServiceException):NEWLINE """Invalid arguments were passed to a method."""NEWLINE passNEWLINENEWLINENEWLINEclass AccessError(TAXIIServiceException):NEWLINE """Attempt was made to read/write to a collection when the collectionNEWLINE doesn't allow that operation."""NEWLINE passNEWLINENEWLINENEWLINEclass ValidationError(TAXIIServiceException):NEWLINE """Data validation failed for a property or group of properties"""NEWLINE passNEWLINENEWLINENEWLINEclass InvalidJSONError(TAXIIServiceException):NEWLINE """A server endpoint gave us invalid JSON"""NEWLINE passNEWLINENEWLINENEWLINEdef _format_datetime(dttm):NEWLINE """Convert a datetime object into a valid STIX timestamp string.NEWLINENEWLINE 1. Convert to timezone-awareNEWLINE 2. Convert to UTCNEWLINE 3. Format in ISO formatNEWLINE 4. Ensure correct precisionNEWLINE a. Add subsecond value if non-zero and precision not definedNEWLINE 5. Add "Z"NEWLINENEWLINE """NEWLINENEWLINE if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:NEWLINE # dttm is timezone-naive; assume UTCNEWLINE zoned = pytz.utc.localize(dttm)NEWLINE else:NEWLINE zoned = dttm.astimezone(pytz.utc)NEWLINE ts = zoned.strftime("%Y-%m-%dT%H:%M:%S")NEWLINE ms = zoned.strftime("%f")NEWLINE precision = getattr(dttm, "precision", None)NEWLINE if precision == "second":NEWLINE pass # Already precise to the secondNEWLINE elif precision == "millisecond":NEWLINE ts = ts + "." + ms[:3]NEWLINE elif zoned.microsecond > 0:NEWLINE ts = ts + "." + ms.rstrip("0")NEWLINE return ts + "Z"NEWLINENEWLINENEWLINEdef _ensure_datetime_to_string(maybe_dttm):NEWLINE """If maybe_dttm is a datetime instance, convert to a STIX-compliantNEWLINE string representation. Otherwise return the value unchanged."""NEWLINE if isinstance(maybe_dttm, datetime.datetime):NEWLINE maybe_dttm = _format_datetime(maybe_dttm)NEWLINE return maybe_dttmNEWLINENEWLINENEWLINEdef _filter_kwargs_to_query_params(filter_kwargs):NEWLINE """NEWLINE Convert API keyword args to a mapping of URL query parameters. Except forNEWLINE "added_after", all keywords are mapped to match filters, i.e. to a queryNEWLINE parameter of the form "match[<kwarg>]". "added_after" is left alone, sinceNEWLINE it's a special filter, as defined in the spec.NEWLINENEWLINE Each value can be a single value or iterable of values. "version" andNEWLINE "added_after" get special treatment, since they are timestamp-valued:NEWLINE datetime.datetime instances are supported and automatically converted toNEWLINE STIX-compliant strings. Other than that, all values must be strings. 
NoneNEWLINE values, empty lists, etc are silently ignored.NEWLINENEWLINE Args:NEWLINE filter_kwargs: The filter information, as a mapping.NEWLINENEWLINE Returns:NEWLINE query_params (dict): The query parameter map, mapping strings toNEWLINE strings.NEWLINENEWLINE """NEWLINE query_params = {}NEWLINE for kwarg, arglist in six.iteritems(filter_kwargs):NEWLINE # If user passes an empty list, None, etc, silently skip?NEWLINE if not arglist:NEWLINE continueNEWLINENEWLINE # force iterability, for the sake of code uniformityNEWLINE if not hasattr(arglist, "__iter__") or \NEWLINE isinstance(arglist, six.string_types):NEWLINE arglist = arglist,NEWLINENEWLINE if kwarg == "version":NEWLINE query_params["match[version]"] = ",".join(NEWLINE _ensure_datetime_to_string(val) for val in arglistNEWLINE )NEWLINENEWLINE elif kwarg == "added_after":NEWLINE if len(arglist) > 1:NEWLINE raise InvalidArgumentsError("No more than one value for filter"NEWLINE " 'added_after' may be given")NEWLINENEWLINE query_params["added_after"] = ",".join(NEWLINE _ensure_datetime_to_string(val) for val in arglistNEWLINE )NEWLINENEWLINE else:NEWLINE query_params["match[" + kwarg + "]"] = ",".join(arglist)NEWLINENEWLINE return query_paramsNEWLINENEWLINENEWLINEclass _TAXIIEndpoint(object):NEWLINE """Contains some data and functionality common to all TAXII endpointNEWLINE classes: a URL, connection, and ability to close the connection. It alsoNEWLINE yields support in subclasses for use as context managers, to ensureNEWLINE resources are released.NEWLINENEWLINE """NEWLINE def __init__(self, url, conn=None, user=None, password=None, verify=True,NEWLINE proxies=None):NEWLINE """Create a TAXII endpoint.NEWLINENEWLINE Args:NEWLINE user (str): username for authentication (optional)NEWLINE password (str): password for authentication (optional)NEWLINE verify (bool): validate the entity credentials (default: True)NEWLINE conn (_HTTPConnection): A connection to reuse (optional)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINENEWLINE """NEWLINE if conn and (user or password):NEWLINE raise InvalidArgumentsError("A connection and user/password may"NEWLINE " not both be provided.")NEWLINE elif conn:NEWLINE self._conn = connNEWLINE else:NEWLINE self._conn = _HTTPConnection(user, password, verify, proxies)NEWLINENEWLINE # Add trailing slash to TAXII endpoint if missingNEWLINE # https://github.com/oasis-open/cti-taxii-client/issues/50NEWLINE if url[-1] == "/":NEWLINE self.url = urlNEWLINE else:NEWLINE self.url = url + "/"NEWLINENEWLINE def close(self):NEWLINE self._conn.close()NEWLINENEWLINE def __enter__(self):NEWLINE return selfNEWLINENEWLINE def __exit__(self, exc_type, exc_val, exc_tb):NEWLINE self.close()NEWLINE return FalseNEWLINENEWLINENEWLINEclass Status(_TAXIIEndpoint):NEWLINE """TAXII Status Resource.NEWLINENEWLINE This class represents the ``Get Status`` endpoint (section 4.3) and alsoNEWLINE contains the information about the Status Resource (section 4.3.1)NEWLINENEWLINE """NEWLINE # We don't need to jump through the same lazy-load as with Collection,NEWLINE # since it's *far* less likely people will create these manually ratherNEWLINE # than just getting them returned from Collection.add_objects(), and thereNEWLINE # aren't other endpoints to call on the Status object.NEWLINENEWLINE def __init__(self, url, conn=None, user=None, password=None, verify=True,NEWLINE proxies=None, status_info=None):NEWLINE """Create an API root resource endpoint.NEWLINENEWLINE Args:NEWLINE url (str): URL of a TAXII 
status resource endpointNEWLINE user (str): username for authentication (optional)NEWLINE password (str): password for authentication (optional)NEWLINE conn (_HTTPConnection): reuse connection object, as an alternativeNEWLINE to providing username/passwordNEWLINE status_info (dict): Parsed JSON representing a response from theNEWLINE status endpoint, if already known. If not given, theNEWLINE endpoint will be queried. (optional)NEWLINE verify (bool): validate the entity credentials. (default: True)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINENEWLINE """NEWLINE super(Status, self).__init__(url, conn, user, password, verify, proxies)NEWLINE self.__raw = NoneNEWLINE if status_info:NEWLINE self._populate_fields(**status_info)NEWLINE self.__raw = status_infoNEWLINE else:NEWLINE self.refresh()NEWLINENEWLINE def __nonzero__(self):NEWLINE return self.status == "complete"NEWLINENEWLINE __bool__ = __nonzero__NEWLINENEWLINE @propertyNEWLINE def _raw(self):NEWLINE """Get the "raw" status response (parsed JSON)."""NEWLINE return self.__rawNEWLINENEWLINE @propertyNEWLINE def custom_properties(self):NEWLINE return self._custom_propertiesNEWLINENEWLINE def refresh(self, accept=MEDIA_TYPE_TAXII_V20):NEWLINE """Updates Status information"""NEWLINE response = self.__raw = self._conn.get(self.url,NEWLINE headers={"Accept": accept})NEWLINE self._populate_fields(**response)NEWLINENEWLINE def wait_until_final(self, poll_interval=1, timeout=60):NEWLINE """It will poll the URL to grab the latest status resource in a givenNEWLINE timeout and time interval.NEWLINENEWLINE Args:NEWLINE poll_interval (int): how often to poll the status service.NEWLINE timeout (int): how long to poll the URL until giving up. Use <= 0NEWLINE to wait foreverNEWLINENEWLINE """NEWLINE start_time = time.time()NEWLINE elapsed = 0NEWLINE while (self.status != "complete" andNEWLINE (timeout <= 0 or elapsed < timeout)):NEWLINE time.sleep(poll_interval)NEWLINE self.refresh()NEWLINE elapsed = time.time() - start_timeNEWLINENEWLINE def _populate_fields(self, id=None, status=None, total_count=None,NEWLINE success_count=None, failure_count=None,NEWLINE pending_count=None, request_timestamp=None,NEWLINE successes=None, failures=None, pendings=None,NEWLINE **kwargs):NEWLINE self.id = id # requiredNEWLINE self.status = status # requiredNEWLINE self.request_timestamp = request_timestamp # optionalNEWLINE self.total_count = total_count # requiredNEWLINE self.success_count = success_count # requiredNEWLINE self.failure_count = failure_count # requiredNEWLINE self.pending_count = pending_count # requiredNEWLINE self.successes = successes or [] # optionalNEWLINE self.failures = failures or [] # optionalNEWLINE self.pendings = pendings or [] # optionalNEWLINENEWLINE # Anything not captured by the optional arguments is treated as customNEWLINE self._custom_properties = kwargsNEWLINENEWLINE self._validate_status()NEWLINENEWLINE def _validate_status(self):NEWLINE """Validates Status information. 
Raises errors for requiredNEWLINE properties."""NEWLINE if not self.id:NEWLINE msg = "No 'id' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if not self.status:NEWLINE msg = "No 'status' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self.total_count is None:NEWLINE msg = "No 'total_count' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self.success_count is None:NEWLINE msg = "No 'success_count' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self.failure_count is None:NEWLINE msg = "No 'failure_count' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self.pending_count is None:NEWLINE msg = "No 'pending_count' in Status for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if len(self.successes) != self.success_count:NEWLINE msg = "Found successes={}, but success_count={} in status '{}'"NEWLINE raise ValidationError(msg.format(self.successes,NEWLINE self.success_count,NEWLINE self.id))NEWLINENEWLINE if len(self.pendings) != self.pending_count:NEWLINE msg = "Found pendings={}, but pending_count={} in status '{}'"NEWLINE raise ValidationError(msg.format(self.pendings,NEWLINE self.pending_count,NEWLINE self.id))NEWLINENEWLINE if len(self.failures) != self.failure_count:NEWLINE msg = "Found failures={}, but failure_count={} in status '{}'"NEWLINE raise ValidationError(msg.format(self.failures,NEWLINE self.failure_count,NEWLINE self.id))NEWLINENEWLINE if (self.success_count + self.pending_count + self.failure_count !=NEWLINE self.total_count):NEWLINE msg = ("(success_count={} + pending_count={} + "NEWLINE "failure_count={}) != total_count={} in status '{}'")NEWLINE raise ValidationError(msg.format(self.success_count,NEWLINE self.pending_count,NEWLINE self.failure_count,NEWLINE self.total_count,NEWLINE self.id))NEWLINENEWLINENEWLINEclass Collection(_TAXIIEndpoint):NEWLINE """Information about a TAXII Collection.NEWLINENEWLINE This class represents the ``Get a Collection`` endpoint (section 5.2), andNEWLINE contains the information returned in the ``Collection Resource`` (sectionNEWLINE 5.2.1).NEWLINENEWLINE Methods on this class can be used to invoke the following endpoints:NEWLINE - ``Get Objects`` (section 5.3)NEWLINE - ``Add Objects`` (section 5.4)NEWLINE - ``Get an Object`` (section 5.5)NEWLINE - ``Get Object Manifests`` (section 5.6)NEWLINENEWLINE As obtained from an ApiRoot, an instance of this class shares connection(s)NEWLINE with all other collections obtained from the same ApiRoot, as well as theNEWLINE ApiRoot instance itself. Closing one will close them all. If this isNEWLINE undesirable, you may manually create Collection instances.NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, url, conn=None, user=None, password=None, verify=True,NEWLINE proxies=None, collection_info=None):NEWLINE """NEWLINE Initialize a new Collection. Either user/password or conn may beNEWLINE given, but not both. The latter is intended for internal use, whenNEWLINE sharing connection pools with an ApiRoot, mocking a connection forNEWLINE testing, etc. 
Users should use user/password (if required) which willNEWLINE create a new connection.NEWLINENEWLINE Args:NEWLINE url (str): A TAXII endpoint for a collectionNEWLINE user (str): User name for authentication (optional)NEWLINE password (str): Password for authentication (optional)NEWLINE verify (bool): Either a boolean, in which case it controls whetherNEWLINE we verify the server's TLS certificate, or a string, in whichNEWLINE case it must be a path to a CA bundle to use. Defaults toNEWLINE `True` (optional)NEWLINE conn (_HTTPConnection): A connection to reuse (optional)NEWLINE collection_info: Collection metadata, if known in advance (optional)NEWLINE verify (bool): validate the entity credentials. (default: True)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINENEWLINE """NEWLINENEWLINE super(Collection, self).__init__(url, conn, user, password, verify, proxies)NEWLINENEWLINE self._loaded = FalseNEWLINE self.__raw = NoneNEWLINENEWLINE # Since the API Root "Get Collections" endpoint returns information onNEWLINE # all collections as a list, it's possible that we can create multipleNEWLINE # Collection objects from a single HTTPS request, and not need to callNEWLINE # `refresh` for each one.NEWLINE if collection_info:NEWLINE self._populate_fields(**collection_info)NEWLINE self.__raw = collection_infoNEWLINE self._loaded = TrueNEWLINENEWLINE @propertyNEWLINE def id(self):NEWLINE self._ensure_loaded()NEWLINE return self._idNEWLINENEWLINE @propertyNEWLINE def title(self):NEWLINE self._ensure_loaded()NEWLINE return self._titleNEWLINENEWLINE @propertyNEWLINE def description(self):NEWLINE self._ensure_loaded()NEWLINE return self._descriptionNEWLINENEWLINE @propertyNEWLINE def can_read(self):NEWLINE self._ensure_loaded()NEWLINE return self._can_readNEWLINENEWLINE @propertyNEWLINE def can_write(self):NEWLINE self._ensure_loaded()NEWLINE return self._can_writeNEWLINENEWLINE @propertyNEWLINE def media_types(self):NEWLINE self._ensure_loaded()NEWLINE return self._media_typesNEWLINENEWLINE @propertyNEWLINE def custom_properties(self):NEWLINE self._ensure_loaded()NEWLINE return self._custom_propertiesNEWLINENEWLINE @propertyNEWLINE def objects_url(self):NEWLINE return self.url + "objects/"NEWLINENEWLINE @propertyNEWLINE def _raw(self):NEWLINE """Get the "raw" collection information response (parsed JSON)."""NEWLINE self._ensure_loaded()NEWLINE return self.__rawNEWLINENEWLINE def _populate_fields(self, id=None, title=None, description=None,NEWLINE can_read=None, can_write=None, media_types=None,NEWLINE **kwargs):NEWLINE self._id = id # requiredNEWLINE self._title = title # requiredNEWLINE self._description = description # optionalNEWLINE self._can_read = can_read # requiredNEWLINE self._can_write = can_write # requiredNEWLINE self._media_types = media_types or [] # optionalNEWLINENEWLINE # Anything not captured by the optional arguments is treated as customNEWLINE self._custom_properties = kwargsNEWLINENEWLINE self._validate_collection()NEWLINENEWLINE def _validate_collection(self):NEWLINE """Validates Collection information. 
Raises errors for requiredNEWLINE properties."""NEWLINE if not self._id:NEWLINE msg = "No 'id' in Collection for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if not self._title:NEWLINE msg = "No 'title' in Collection for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self._can_read is None:NEWLINE msg = "No 'can_read' in Collection for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self._can_write is None:NEWLINE msg = "No 'can_write' in Collection for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self._id not in self.url:NEWLINE msg = "The collection '{}' does not match the url for queries '{}'"NEWLINE raise ValidationError(msg.format(self._id, self.url))NEWLINENEWLINE def _ensure_loaded(self):NEWLINE if not self._loaded:NEWLINE self.refresh()NEWLINENEWLINE def _verify_can_read(self):NEWLINE if not self.can_read:NEWLINE msg = "Collection '{}' does not allow reading."NEWLINE raise AccessError(msg.format(self.url))NEWLINENEWLINE def _verify_can_write(self):NEWLINE if not self.can_write:NEWLINE msg = "Collection '{}' does not allow writing."NEWLINE raise AccessError(msg.format(self.url))NEWLINENEWLINE def refresh(self, accept=MEDIA_TYPE_TAXII_V20):NEWLINE """Update Collection information"""NEWLINE response = self.__raw = self._conn.get(self.url,NEWLINE headers={"Accept": accept})NEWLINE self._populate_fields(**response)NEWLINE self._loaded = TrueNEWLINENEWLINE def get_objects(self, accept=MEDIA_TYPE_STIX_V20, **filter_kwargs):NEWLINE """Implement the ``Get Objects`` endpoint (section 5.3)"""NEWLINE self._verify_can_read()NEWLINE query_params = _filter_kwargs_to_query_params(filter_kwargs)NEWLINE return self._conn.get(self.objects_url, headers={"Accept": accept},NEWLINE params=query_params)NEWLINENEWLINE def get_object(self, obj_id, version=None, accept=MEDIA_TYPE_STIX_V20):NEWLINE """Implement the ``Get an Object`` endpoint (section 5.5)"""NEWLINE self._verify_can_read()NEWLINE url = self.objects_url + str(obj_id) + "/"NEWLINE query_params = NoneNEWLINE if version:NEWLINE query_params = _filter_kwargs_to_query_params({"version": version})NEWLINE return self._conn.get(url, headers={"Accept": accept},NEWLINE params=query_params)NEWLINENEWLINE def add_objects(self, bundle, wait_for_completion=True, poll_interval=1,NEWLINE timeout=60, accept=MEDIA_TYPE_TAXII_V20,NEWLINE content_type=MEDIA_TYPE_STIX_V20):NEWLINE """Implement the ``Add Objects`` endpoint (section 5.4)NEWLINENEWLINE Add objects to the collection. This may be performed eitherNEWLINE synchronously or asynchronously. To add asynchronously, setNEWLINE wait_for_completion to False. If False, the latter two args areNEWLINE unused. If the caller wishes to monitor the status of the addition,NEWLINE it may do so in its own way. To add synchronously, setNEWLINE wait_for_completion to True, and optionally set the poll and timeoutNEWLINE intervals. 
After initiating the addition, the caller will block,NEWLINE and the TAXII "status" service will be polled until the timeoutNEWLINE expires, or the operation completes.NEWLINENEWLINE Args:NEWLINE bundle: A STIX bundle with the objects to add (string, dict, binary)NEWLINE wait_for_completion (bool): Whether to wait for the add operationNEWLINE to complete before returningNEWLINE poll_interval (int): If waiting for completion, how often to pollNEWLINE the status service (seconds)NEWLINE timeout (int): If waiting for completion, how long to poll untilNEWLINE giving up (seconds). Use <= 0 to wait foreverNEWLINE accept (str): media type to include in the ``Accept:`` header.NEWLINE content_type (str): media type to include in the ``Content-Type:``NEWLINE header.NEWLINENEWLINE Returns:NEWLINE If ``wait_for_completion`` is False, a Status object correspondingNEWLINE to the initial status data returned from the service, is returned.NEWLINE The status may not yet be complete at this point.NEWLINENEWLINE If ``wait_for_completion`` is True, a Status object correspondingNEWLINE to the completed operation is returned if it didn't time out;NEWLINE otherwise a Status object corresponding to the most recent dataNEWLINE obtained before the timeout, is returned.NEWLINENEWLINE """NEWLINE self._verify_can_write()NEWLINENEWLINE headers = {NEWLINE "Accept": accept,NEWLINE "Content-Type": content_type,NEWLINE }NEWLINENEWLINE if isinstance(bundle, dict):NEWLINE json_text = json.dumps(bundle, ensure_ascii=False)NEWLINE data = json_text.encode("utf-8")NEWLINENEWLINE elif isinstance(bundle, six.text_type):NEWLINE data = bundle.encode("utf-8")NEWLINENEWLINE elif isinstance(bundle, six.binary_type):NEWLINE data = bundleNEWLINENEWLINE else:NEWLINE raise TypeError("Don't know how to handle type '{}'".format(NEWLINE type(bundle).__name__))NEWLINENEWLINE status_json = self._conn.post(self.objects_url, headers=headers,NEWLINE data=data)NEWLINENEWLINE status_url = urlparse.urljoin(NEWLINE self.url,NEWLINE "../../status/{}".format(status_json["id"])NEWLINE )NEWLINENEWLINE status = Status(url=status_url, conn=self._conn,NEWLINE status_info=status_json)NEWLINENEWLINE if not wait_for_completion or status.status == "complete":NEWLINE return statusNEWLINENEWLINE status.wait_until_final(poll_interval, timeout)NEWLINENEWLINE return statusNEWLINENEWLINE def get_manifest(self, accept=MEDIA_TYPE_TAXII_V20, **filter_kwargs):NEWLINE """Implement the ``Get Object Manifests`` endpoint (section 5.6)."""NEWLINE self._verify_can_read()NEWLINE query_params = _filter_kwargs_to_query_params(filter_kwargs)NEWLINE return self._conn.get(self.url + "manifest/",NEWLINE headers={"Accept": accept},NEWLINE params=query_params)NEWLINENEWLINENEWLINEclass ApiRoot(_TAXIIEndpoint):NEWLINE """Information about a TAXII API Root.NEWLINENEWLINE This class corresponds to the ``Get API Root Information`` (section 4.2)NEWLINE and ``Get Collections`` (section 5.1) endpoints, and contains theNEWLINE information found in the corresponding ``API Root Resource``NEWLINE (section 4.2.1) and ``Collections Resource`` (section 5.1.1).NEWLINENEWLINE As obtained from a Server, each ApiRoot instance gets its own connectionNEWLINE pool(s). Collections returned by instances of this class share the sameNEWLINE pools as the instance, so closing one closes all. 
Also, the sameNEWLINE username/password is used to connect to them, as was used for this ApiRoot.NEWLINE If either of these is undesirable, Collection instances may be createdNEWLINE manually.NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, url, conn=None, user=None, password=None, verify=True,NEWLINE proxies=None):NEWLINE """Create an API root resource endpoint.NEWLINENEWLINE Args:NEWLINE url (str): URL of a TAXII API root resource endpointNEWLINE user (str): username for authentication (optional)NEWLINE password (str): password for authentication (optional)NEWLINE conn (_HTTPConnection): reuse connection object, as an alternativeNEWLINE to providing username/passwordNEWLINE verify (bool): validate the entity credentials. (default: True)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINENEWLINE """NEWLINE super(ApiRoot, self).__init__(url, conn, user, password, verify, proxies)NEWLINENEWLINE self._loaded_collections = FalseNEWLINE self._loaded_information = FalseNEWLINE self.__raw = NoneNEWLINENEWLINE @propertyNEWLINE def collections(self):NEWLINE if not self._loaded_collections:NEWLINE self.refresh_collections()NEWLINE return self._collectionsNEWLINENEWLINE @propertyNEWLINE def title(self):NEWLINE self._ensure_loaded_information()NEWLINE return self._titleNEWLINENEWLINE @propertyNEWLINE def description(self):NEWLINE self._ensure_loaded_information()NEWLINE return self._descriptionNEWLINENEWLINE @propertyNEWLINE def versions(self):NEWLINE self._ensure_loaded_information()NEWLINE return self._versionsNEWLINENEWLINE @propertyNEWLINE def max_content_length(self):NEWLINE self._ensure_loaded_information()NEWLINE return self._max_content_lengthNEWLINENEWLINE @propertyNEWLINE def custom_properties(self):NEWLINE self._ensure_loaded_information()NEWLINE return self._custom_propertiesNEWLINENEWLINE @propertyNEWLINE def _raw(self):NEWLINE """Get the "raw" API root information response (parsed JSON)."""NEWLINE self._ensure_loaded_information()NEWLINE return self.__rawNEWLINENEWLINE def _ensure_loaded_information(self):NEWLINE if not self._loaded_information:NEWLINE self.refresh_information()NEWLINENEWLINE def _validate_api_root(self):NEWLINE """Validates API Root information. 
Raises errors for requiredNEWLINE properties."""NEWLINE if not self._title:NEWLINE msg = "No 'title' in API Root for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if not self._versions:NEWLINE msg = "No 'versions' in API Root for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE if self._max_content_length is None:NEWLINE msg = "No 'max_content_length' in API Root for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE def _populate_fields(self, title=None, description=None, versions=None,NEWLINE max_content_length=None, **kwargs):NEWLINE self._title = title # requiredNEWLINE self._description = description # optionalNEWLINE self._versions = versions or [] # requiredNEWLINE self._max_content_length = max_content_length # requiredNEWLINENEWLINE # Anything not captured by the optional arguments is treated as customNEWLINE self._custom_properties = kwargsNEWLINENEWLINE self._validate_api_root()NEWLINENEWLINE def refresh(self, accept=MEDIA_TYPE_TAXII_V20):NEWLINE """Update the API Root's information and list of Collections"""NEWLINE self.refresh_information(accept)NEWLINE self.refresh_collections(accept)NEWLINENEWLINE def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20):NEWLINE """Update the properties of this API Root.NEWLINENEWLINE This invokes the ``Get API Root Information`` endpoint.NEWLINE """NEWLINE response = self.__raw = self._conn.get(self.url,NEWLINE headers={"Accept": accept})NEWLINE self._populate_fields(**response)NEWLINE self._loaded_information = TrueNEWLINENEWLINE def refresh_collections(self, accept=MEDIA_TYPE_TAXII_V20):NEWLINE """Update the list of Collections contained by this API Root.NEWLINENEWLINE This invokes the ``Get Collections`` endpoint.NEWLINE """NEWLINE url = self.url + "collections/"NEWLINE response = self._conn.get(url, headers={"Accept": accept})NEWLINENEWLINE self._collections = []NEWLINE for item in response.get("collections", []): # optionalNEWLINE collection_url = url + item["id"] + "/"NEWLINE collection = Collection(collection_url, conn=self._conn,NEWLINE collection_info=item)NEWLINE self._collections.append(collection)NEWLINENEWLINE self._loaded_collections = TrueNEWLINENEWLINE def get_status(self, status_id, accept=MEDIA_TYPE_TAXII_V20):NEWLINE status_url = self.url + "status/" + status_id + "/"NEWLINE response = self._conn.get(status_url, headers={"Accept": accept})NEWLINE return Status(status_url, conn=self._conn, status_info=response)NEWLINENEWLINENEWLINEclass Server(_TAXIIEndpoint):NEWLINE """Information about a server hosting a Discovery service.NEWLINENEWLINE This class corresponds to the Server Discovery endpoint (section 4.1) andNEWLINE the Discovery Resource returned from that endpoint (section 4.1.1).NEWLINENEWLINE ApiRoot instances obtained from an instance of this class areNEWLINE created with the same username/password as was used in this instance. IfNEWLINE that's incorrect, an ApiRoot instance may be created directly with theNEWLINE desired username and password. 
Also, they use separate connection poolsNEWLINE so that they can be independent: closing one won't close others, andNEWLINE closing this server object won't close any of the ApiRoot objects (whichNEWLINE may refer to different hosts than was used for discovery).NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, url, conn=None, user=None, password=None, verify=True,NEWLINE proxies=None):NEWLINE """Create a server discovery endpoint.NEWLINENEWLINE Args:NEWLINE url (str): URL of a TAXII server discovery endpointNEWLINE user (str): username for authentication (optional)NEWLINE password (str): password for authentication (optional)NEWLINE conn (_HTTPConnection): reuse connection object, as an alternativeNEWLINE to providing username/passwordNEWLINE verify (bool): validate the entity credentials. (default: True)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINENEWLINE """NEWLINE super(Server, self).__init__(url, conn, user, password, verify, proxies)NEWLINENEWLINE self._user = userNEWLINE self._password = passwordNEWLINE self._verify = verifyNEWLINE self._proxies = proxiesNEWLINE self._loaded = FalseNEWLINE self.__raw = NoneNEWLINENEWLINE @propertyNEWLINE def title(self):NEWLINE self._ensure_loaded()NEWLINE return self._titleNEWLINENEWLINE @propertyNEWLINE def description(self):NEWLINE self._ensure_loaded()NEWLINE return self._descriptionNEWLINENEWLINE @propertyNEWLINE def contact(self):NEWLINE self._ensure_loaded()NEWLINE return self._contactNEWLINENEWLINE @propertyNEWLINE def default(self):NEWLINE self._ensure_loaded()NEWLINE return self._defaultNEWLINENEWLINE @propertyNEWLINE def api_roots(self):NEWLINE self._ensure_loaded()NEWLINE return self._api_rootsNEWLINENEWLINE @propertyNEWLINE def custom_properties(self):NEWLINE self._ensure_loaded()NEWLINE return self._custom_propertiesNEWLINENEWLINE @propertyNEWLINE def _raw(self):NEWLINE """Get the "raw" server discovery response (parsed JSON)."""NEWLINE self._ensure_loaded()NEWLINE return self.__rawNEWLINENEWLINE def _ensure_loaded(self):NEWLINE if not self._loaded:NEWLINE self.refresh()NEWLINENEWLINE def _validate_server(self):NEWLINE """Validates server information. Raises errors for required properties.NEWLINE """NEWLINE if not self._title:NEWLINE msg = "No 'title' in Server Discovery for request '{}'"NEWLINE raise ValidationError(msg.format(self.url))NEWLINENEWLINE def _populate_fields(self, title=None, description=None, contact=None,NEWLINE api_roots=None, default=None, **kwargs):NEWLINE self._title = title # requiredNEWLINE self._description = description # optionalNEWLINE self._contact = contact # optionalNEWLINE roots = api_roots or [] # optionalNEWLINE self._api_roots = [ApiRoot(url,NEWLINE user=self._user,NEWLINE password=self._password,NEWLINE verify=self._verify,NEWLINE proxies=self._proxies)NEWLINE for url in roots]NEWLINE # If 'default' is one of the existing API Roots, reuse that objectNEWLINE # rather than creating a duplicate. 
The TAXII 2.0 spec says that theNEWLINE # `default` API Root MUST be an item in `api_roots`.NEWLINE root_dict = dict(zip(roots, self._api_roots))NEWLINE self._default = root_dict.get(default) # optionalNEWLINENEWLINE # Anything not captured by the optional arguments is treated as customNEWLINE self._custom_properties = kwargsNEWLINENEWLINE self._validate_server()NEWLINENEWLINE def refresh(self):NEWLINE """Update the Server information and list of API Roots"""NEWLINE response = self.__raw = self._conn.get(self.url)NEWLINE self._populate_fields(**response)NEWLINE self._loaded = TrueNEWLINENEWLINENEWLINEclass _HTTPConnection(object):NEWLINE """This library uses the ``requests`` library, which presents a convenienceNEWLINE API which hides many network details like actual connection objects. SoNEWLINE this class doesn't represent a traditional ``connection`` either. It's aNEWLINE sort of approximation: sets of connections (or connection pools) and commonNEWLINE metadata for a particular server interaction. You can send requests toNEWLINE any hosts via the same instance; hosts/ports are not checked and newNEWLINE connection pools pop into existence as needed, but all connections areNEWLINE closed when the close() method is called. So this is intended to be usedNEWLINE for an independent self-contained interaction.NEWLINENEWLINE Attributes:NEWLINE session (requests.Session): A requests session object.NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, user=None, password=None, verify=True, proxies=None,NEWLINE user_agent=DEFAULT_USER_AGENT):NEWLINE """Create a connection session.NEWLINENEWLINE Args:NEWLINE user (str): username for authentication (optional)NEWLINE password (str): password for authentication (optional)NEWLINE verify (bool): validate the entity credentials. (default: True)NEWLINE proxies (dict): key/value pair for http/https proxy settings.NEWLINE (optional)NEWLINE user_agent (str): A value to use for the User-Agent header inNEWLINE requests. If not given, use a default value which representsNEWLINE this library.NEWLINE """NEWLINE self.session = requests.Session()NEWLINE self.session.verify = verifyNEWLINE # enforce that we always have a connection-default user agent.NEWLINE self.user_agent = user_agent or DEFAULT_USER_AGENTNEWLINE if user and password:NEWLINE self.session.auth = requests.auth.HTTPBasicAuth(user, password)NEWLINE if proxies:NEWLINE self.session.proxies.update(proxies)NEWLINENEWLINE def valid_content_type(self, content_type, accept):NEWLINE """Check that the server is returning a valid Content-TypeNEWLINENEWLINE Args:NEWLINE content_type (str): ``Content-Type:`` header valueNEWLINE accept (str): media type to include in the ``Accept:`` header.NEWLINENEWLINE """NEWLINE accept_tokens = accept.replace(' ', '').split(';')NEWLINE content_type_tokens = content_type.replace(' ', '').split(';')NEWLINENEWLINE return (NEWLINE all(elem in content_type_tokens for elem in accept_tokens) andNEWLINE (content_type_tokens[0] == 'application/vnd.oasis.taxii+json' orNEWLINE content_type_tokens[0] == 'application/vnd.oasis.stix+json')NEWLINE )NEWLINENEWLINE def get(self, url, headers=None, params=None):NEWLINE """Perform an HTTP GET, using the saved requests.Session and auth info.NEWLINE If "Accept" isn't one of the given headers, a default TAXII mime type isNEWLINE used. 
Regardless, the response type is checked against the acceptNEWLINE header value, and an exception is raised if they don't match.NEWLINENEWLINE Args:NEWLINE url (str): URL to retrieveNEWLINE headers (dict): Any other headers to be added to the request.NEWLINE params: dictionary or bytes to be sent in the query string for theNEWLINE request. (optional)NEWLINENEWLINE """NEWLINENEWLINE merged_headers = self._merge_headers(headers)NEWLINENEWLINE if "Accept" not in merged_headers:NEWLINE merged_headers["Accept"] = MEDIA_TYPE_TAXII_V20NEWLINE accept = merged_headers["Accept"]NEWLINENEWLINE resp = self.session.get(url, headers=merged_headers, params=params)NEWLINENEWLINE resp.raise_for_status()NEWLINENEWLINE content_type = resp.headers["Content-Type"]NEWLINENEWLINE if not self.valid_content_type(content_type=content_type, accept=accept):NEWLINE msg = "Unexpected Response. Got Content-Type: '{}' for Accept: '{}'"NEWLINE raise TAXIIServiceException(msg.format(content_type, accept))NEWLINENEWLINE return _to_json(resp)NEWLINENEWLINE def post(self, url, headers=None, params=None, **kwargs):NEWLINE """Send a JSON POST request with the given request headers, additionalNEWLINE URL query parameters, and the given JSON in the request body. TheNEWLINE extra query parameters are merged with any which already exist in theNEWLINE URL. The 'json' and 'data' parameters may not both be given.NEWLINENEWLINE Args:NEWLINE url (str): URL to retrieveNEWLINE headers (dict): Any other headers to be added to the request.NEWLINE params: dictionary or bytes to be sent in the query string for theNEWLINE request. (optional)NEWLINE json: json to send in the body of the Request. This must be aNEWLINE JSON-serializable object. (optional)NEWLINE data: raw request body data. May be a dictionary, list of tuples,NEWLINE bytes, or file-like object to send in the body of the Request.NEWLINE (optional)NEWLINE """NEWLINENEWLINE if len(kwargs) > 1:NEWLINE raise InvalidArgumentsError("Too many extra args ({} > 1)".format(NEWLINE len(kwargs)))NEWLINENEWLINE if kwargs:NEWLINE kwarg = next(iter(kwargs))NEWLINE if kwarg not in ("json", "data"):NEWLINE raise InvalidArgumentsError("Invalid kwarg: " + kwarg)NEWLINENEWLINE resp = self.session.post(url, headers=headers, params=params, **kwargs)NEWLINE resp.raise_for_status()NEWLINE return _to_json(resp)NEWLINENEWLINE def close(self):NEWLINE """Closes connections. This object is no longer usable."""NEWLINE self.session.close()NEWLINENEWLINE def _merge_headers(self, call_specific_headers):NEWLINE """NEWLINE Merge headers from different sources together. Headers passed to theNEWLINE post/get methods have highest priority, then headers associated withNEWLINE the connection object itself have next priority.NEWLINENEWLINE :param call_specific_headers: A header dict from the get/post call, orNEWLINE None (the default for those methods).NEWLINE :return: A key-case-insensitive MutableMapping object which containsNEWLINE the merged headers. (This doesn't actually return a dict.)NEWLINE """NEWLINENEWLINE # A case-insensitive mapping is necessary here so that there isNEWLINE # predictable behavior. If a plain dict were used, you'd get keys inNEWLINE # the merged dict which differ only in case. The requests libraryNEWLINE # would merge them internally, and it would be unpredictable which keyNEWLINE # is chosen for the final set of headers. Another possible approachNEWLINE # would be to upper/lower-case everything, but this seemed easier. 
OnNEWLINE # the other hand, I don't know if CaseInsensitiveDict is public API...?NEWLINENEWLINE # First establish defaultsNEWLINE merged_headers = requests.structures.CaseInsensitiveDict({NEWLINE "User-Agent": self.user_agentNEWLINE })NEWLINENEWLINE # Then overlay with specifics from post/get methodsNEWLINE if call_specific_headers:NEWLINE merged_headers.update(call_specific_headers)NEWLINENEWLINE # Special "User-Agent" header check, to ensure one is always sent.NEWLINE # The call-specific overlay could have null'd out that header.NEWLINE if not merged_headers.get("User-Agent"):NEWLINE merged_headers["User-Agent"] = self.user_agentNEWLINENEWLINE return merged_headersNEWLINENEWLINENEWLINEdef _to_json(resp):NEWLINE """NEWLINE Factors out some JSON parse code with error handling, to hopefully improveNEWLINE error messages.NEWLINENEWLINE :param resp: A "requests" library responseNEWLINE :return: Parsed JSON.NEWLINE :raises: InvalidJSONError If JSON parsing failed.NEWLINE """NEWLINE try:NEWLINE return resp.json()NEWLINE except ValueError as e:NEWLINE # Maybe better to report the original request URL?NEWLINE six.raise_from(InvalidJSONError(NEWLINE "Invalid JSON was received from " + resp.request.urlNEWLINE ), e)NEWLINE |
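A minimal usage sketch for the TAXII client classes defined above (Server, ApiRoot, Collection). The package name in the import, the server URL, and the credentials are placeholders for illustration, not a real deployment.

from taxii2client import Server  # assumed distribution name for this module

# Hypothetical discovery endpoint and credentials.
server = Server("https://taxii.example.com/taxii/", user="user", password="pass")

api_root = server.api_roots[0]            # discovery response lists the available API roots
collection = api_root.collections[0]      # "Get Collections" is invoked lazily here

# "Get Objects" with a match[type] filter built by _filter_kwargs_to_query_params()
bundle = collection.get_objects(type="indicator")
print(len(bundle.get("objects", [])))

server.close()                            # closes the server's underlying requests.Session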
from setuptools import setup, find_packagesNEWLINENEWLINENEWLINEsetup(NEWLINE name='zeit.find',NEWLINE version='3.0.10.dev0',NEWLINE author='gocept, Zeit Online',NEWLINE author_email='zon-backend@zeit.de',NEWLINE url='http://www.zeit.de/',NEWLINE description="vivi UI for querying elastic search",NEWLINE packages=find_packages('src'),NEWLINE package_dir={'': 'src'},NEWLINE include_package_data=True,NEWLINE zip_safe=False,NEWLINE license='BSD',NEWLINE namespace_packages=['zeit'],NEWLINE install_requires=[NEWLINE 'gocept.httpserverlayer',NEWLINE 'gocept.selenium',NEWLINE 'grokcore.component',NEWLINE 'plone.testing',NEWLINE 'setuptools',NEWLINE 'zc.iso8601',NEWLINE 'zeit.cms >= 3.12.0.dev0',NEWLINE 'zeit.content.image',NEWLINE 'zeit.retresco >= 1.31.0.dev0',NEWLINE ],NEWLINE entry_points={NEWLINE 'fanstatic.libraries': [NEWLINE 'zeit_find=zeit.find.browser.resources:lib',NEWLINE ],NEWLINE 'console_scripts': [NEWLINE 'search-elastic=zeit.find.cli:search_elastic',NEWLINE ],NEWLINE },NEWLINE)NEWLINE |
#!/usr/bin/env pythonNEWLINEfrom setuptools import setupNEWLINENEWLINENEWLINEwith open('README.rst', 'r') as f:NEWLINE long_description = f.read()NEWLINENEWLINEsetup(NEWLINE name='githubstars',NEWLINE version='0.0.6',NEWLINE description='List repository stars and info through the GitHub v4 GraphQL API',NEWLINE long_description=long_description,NEWLINE url='https://github.com/hanksudo/githubstars',NEWLINE author='Hank Wang',NEWLINE author_email='drapho@gmail.com',NEWLINE license='MIT',NEWLINE classifiers=[NEWLINE 'Development Status :: 2 - Pre-Alpha',NEWLINE 'Intended Audience :: Developers',NEWLINE 'Natural Language :: English',NEWLINE 'License :: OSI Approved :: MIT License',NEWLINE 'Programming Language :: Python :: 2',NEWLINE 'Programming Language :: Python :: 2.7',NEWLINE 'Programming Language :: Python :: 3',NEWLINE 'Programming Language :: Python :: 3.4',NEWLINE 'Programming Language :: Python :: 3.5',NEWLINE 'Programming Language :: Python :: 3.6'NEWLINE ],NEWLINE keywords='stars github graphql',NEWLINE py_modules=['githubstars'],NEWLINE install_requires=[],NEWLINE entry_points={NEWLINE 'console_scripts': [NEWLINE 'githubstars=githubstars:main'NEWLINE ]NEWLINE }NEWLINE)NEWLINE |
#!/usr/bin/env pythonNEWLINE# -*- coding: utf-8 -*-NEWLINE#NEWLINE# conda_devenv documentation build configuration file, created byNEWLINE# sphinx-quickstart on Tue Jul 9 22:26:36 2013.NEWLINE#NEWLINE# This file is execfile()d with the current directory set to itsNEWLINE# containing dir.NEWLINE#NEWLINE# Note that not all possible configuration values are present in thisNEWLINE# autogenerated file.NEWLINE#NEWLINE# All configuration values have a default; values that are commented outNEWLINE# serve to show the default.NEWLINENEWLINEimport sysNEWLINEimport osNEWLINEfrom typing import DictNEWLINENEWLINE# If extensions (or modules to document with autodoc) are in anotherNEWLINE# directory, add these directories to sys.path here. If the directory isNEWLINE# relative to the documentation root, use os.path.abspath to make itNEWLINE# absolute, like shown here.NEWLINE# sys.path.insert(0, os.path.abspath('.'))NEWLINENEWLINE# Get the project root dir, which is the parent dir of thisNEWLINEcwd = os.getcwd()NEWLINEproject_root = os.path.dirname(cwd)NEWLINENEWLINE# Insert the project root dir as the first element in the PYTHONPATH.NEWLINE# This lets us ensure that the source package is imported, and that itsNEWLINE# version is used.NEWLINEsys.path.insert(0, project_root)NEWLINENEWLINEimport conda_devenvNEWLINENEWLINE# -- General configuration ---------------------------------------------NEWLINENEWLINE# If your documentation needs a minimal Sphinx version, state it here.NEWLINE# needs_sphinx = '1.0'NEWLINENEWLINE# Add any Sphinx extension module names here, as strings. They can beNEWLINE# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.NEWLINEextensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]NEWLINENEWLINE# Add any paths that contain templates here, relative to this directory.NEWLINEtemplates_path = ["_templates"]NEWLINENEWLINE# The suffix of source filenames.NEWLINEsource_suffix = ".rst"NEWLINENEWLINE# The encoding of source files.NEWLINE# source_encoding = 'utf-8-sig'NEWLINENEWLINE# The master toctree document.NEWLINEmaster_doc = "index"NEWLINENEWLINE# General information about the project.NEWLINEproject = "conda-devenv"NEWLINEcopyright = "2016, ESSS"NEWLINENEWLINE# The version info for the project you're documenting, acts as replacementNEWLINE# for |version| and |release|, also used in various other places throughoutNEWLINE# the built documents.NEWLINE#NEWLINE# The short X.Y version.NEWLINEversion = conda_devenv.__version__NEWLINE# The full version, including alpha/beta/rc tags.NEWLINErelease = conda_devenv.__version__NEWLINENEWLINE# The language for content autogenerated by Sphinx. Refer to documentationNEWLINE# for a list of supported languages.NEWLINE# language = NoneNEWLINENEWLINE# There are two options for replacing |today|: either, you set today toNEWLINE# some non-false value, then it is used:NEWLINE# today = ''NEWLINE# Else, today_fmt is used as the format for a strftime call.NEWLINE# today_fmt = '%B %d, %Y'NEWLINENEWLINE# List of patterns, relative to source directory, that match files andNEWLINE# directories to ignore when looking for source files.NEWLINEexclude_patterns = ["_build"]NEWLINENEWLINE# The reST default role (used for this markup: `text`) to use for allNEWLINE# documents.NEWLINE# default_role = NoneNEWLINENEWLINE# If true, '()' will be appended to :func: etc. cross-reference text.NEWLINE# add_function_parentheses = TrueNEWLINENEWLINE# If true, the current module name will be prepended to all descriptionNEWLINE# unit titles (such as .. 
function::).NEWLINE# add_module_names = TrueNEWLINENEWLINE# If true, sectionauthor and moduleauthor directives will be shown in theNEWLINE# output. They are ignored by default.NEWLINE# show_authors = FalseNEWLINENEWLINE# The name of the Pygments (syntax highlighting) style to use.NEWLINEpygments_style = "sphinx"NEWLINENEWLINE# A list of ignored prefixes for module index sorting.NEWLINE# modindex_common_prefix = []NEWLINENEWLINE# If true, keep warnings as "system message" paragraphs in the builtNEWLINE# documents.NEWLINE# keep_warnings = FalseNEWLINENEWLINENEWLINE# -- Options for HTML output -------------------------------------------NEWLINENEWLINE# The theme to use for HTML and HTML Help pages. See the documentation forNEWLINE# a list of builtin themes.NEWLINEhtml_theme = "default"NEWLINENEWLINE# Theme options are theme-specific and customize the look and feel of aNEWLINE# theme further. For a list of options available for each theme, see theNEWLINE# documentation.NEWLINE# html_theme_options = {}NEWLINENEWLINE# Add any paths that contain custom themes here, relative to this directory.NEWLINE# html_theme_path = []NEWLINENEWLINE# The name for this set of Sphinx documents. If None, it defaults toNEWLINE# "<project> v<release> documentation".NEWLINE# html_title = NoneNEWLINENEWLINE# A shorter title for the navigation bar. Default is the same asNEWLINE# html_title.NEWLINE# html_short_title = NoneNEWLINENEWLINE# The name of an image file (relative to this directory) to place at theNEWLINE# top of the sidebar.NEWLINE# html_logo = NoneNEWLINENEWLINE# The name of an image file (within the static path) to use as faviconNEWLINE# of the docs. This file should be a Windows icon file (.ico) beingNEWLINE# 16x16 or 32x32 pixels large.NEWLINE# html_favicon = NoneNEWLINENEWLINE# Add any paths that contain custom static files (such as style sheets)NEWLINE# here, relative to this directory. They are copied after the builtinNEWLINE# static files, so a file named "default.css" will overwrite the builtinNEWLINE# "default.css".NEWLINE# html_static_path = ['_static']NEWLINENEWLINE# If not '', a 'Last updated on:' timestamp is inserted at every pageNEWLINE# bottom, using the given strftime format.NEWLINE# html_last_updated_fmt = '%b %d, %Y'NEWLINENEWLINE# If true, SmartyPants will be used to convert quotes and dashes toNEWLINE# typographically correct entities.NEWLINE# html_use_smartypants = TrueNEWLINENEWLINE# Custom sidebar templates, maps document names to template names.NEWLINE# html_sidebars = {}NEWLINENEWLINE# Additional templates that should be rendered to pages, maps page namesNEWLINE# to template names.NEWLINE# html_additional_pages = {}NEWLINENEWLINE# If false, no module index is generated.NEWLINE# html_domain_indices = TrueNEWLINENEWLINE# If false, no index is generated.NEWLINE# html_use_index = TrueNEWLINENEWLINE# If true, the index is split into individual pages for each letter.NEWLINE# html_split_index = FalseNEWLINENEWLINE# If true, links to the reST sources are added to the pages.NEWLINE# html_show_sourcelink = TrueNEWLINENEWLINE# If true, "Created using Sphinx" is shown in the HTML footer.NEWLINE# Default is True.NEWLINE# html_show_sphinx = TrueNEWLINENEWLINE# If true, "(C) Copyright ..." is shown in the HTML footer.NEWLINE# Default is True.NEWLINE# html_show_copyright = TrueNEWLINENEWLINE# If true, an OpenSearch description file will be output, and all pagesNEWLINE# will contain a <link> tag referring to it. 
The value of this optionNEWLINE# must be the base URL from which the finished HTML is served.NEWLINE# html_use_opensearch = ''NEWLINENEWLINE# This is the file name suffix for HTML files (e.g. ".xhtml").NEWLINE# html_file_suffix = NoneNEWLINENEWLINE# Output file base name for HTML help builder.NEWLINEhtmlhelp_basename = "conda_devenvdoc"NEWLINENEWLINENEWLINE# -- Options for LaTeX output ------------------------------------------NEWLINENEWLINElatex_elements: Dict[str, str] = {NEWLINE # The paper size ('letterpaper' or 'a4paper').NEWLINE #'papersize': 'letterpaper',NEWLINE # The font size ('10pt', '11pt' or '12pt').NEWLINE #'pointsize': '10pt',NEWLINE # Additional stuff for the LaTeX preamble.NEWLINE #'preamble': '',NEWLINE}NEWLINENEWLINE# Grouping the document tree into LaTeX files. List of tuplesNEWLINE# (source start file, target name, title, author, documentclassNEWLINE# [howto/manual]).NEWLINElatex_documents = [NEWLINE (NEWLINE "index",NEWLINE "conda_devenv.tex",NEWLINE "conda-devenv Documentation",NEWLINE "Edison Gustavo Muenz",NEWLINE "manual",NEWLINE ),NEWLINE]NEWLINENEWLINE# The name of an image file (relative to this directory) to place atNEWLINE# the top of the title page.NEWLINE# latex_logo = NoneNEWLINENEWLINE# For "manual" documents, if this is true, then toplevel headingsNEWLINE# are parts, not chapters.NEWLINE# latex_use_parts = FalseNEWLINENEWLINE# If true, show page references after internal links.NEWLINE# latex_show_pagerefs = FalseNEWLINENEWLINE# If true, show URL addresses after external links.NEWLINE# latex_show_urls = FalseNEWLINENEWLINE# Documents to append as an appendix to all manuals.NEWLINE# latex_appendices = []NEWLINENEWLINE# If false, no module index is generated.NEWLINE# latex_domain_indices = TrueNEWLINENEWLINENEWLINE# -- Options for manual page output ------------------------------------NEWLINENEWLINE# One entry per manual page. List of tuplesNEWLINE# (source start file, name, description, authors, manual section).NEWLINEman_pages = [NEWLINE (NEWLINE "index",NEWLINE "conda_devenv",NEWLINE "conda-devenv Documentation",NEWLINE ["Edison Gustavo Muenz"],NEWLINE 1,NEWLINE )NEWLINE]NEWLINENEWLINE# If true, show URL addresses after external links.NEWLINE# man_show_urls = FalseNEWLINENEWLINENEWLINE# -- Options for Texinfo output ----------------------------------------NEWLINENEWLINE# Grouping the document tree into Texinfo files. List of tuplesNEWLINE# (source start file, target name, title, author,NEWLINE# dir menu entry, description, category)NEWLINEtexinfo_documents = [NEWLINE (NEWLINE "index",NEWLINE "conda_devenv",NEWLINE "conda-devenv Documentation",NEWLINE "Edison Gustavo Muenz",NEWLINE "conda_devenv",NEWLINE "One line description of project.",NEWLINE "Miscellaneous",NEWLINE ),NEWLINE]NEWLINENEWLINE# Documents to append as an appendix to all manuals.NEWLINE# texinfo_appendices = []NEWLINENEWLINE# If false, no module index is generated.NEWLINE# texinfo_domain_indices = TrueNEWLINENEWLINE# How to display URL addresses: 'footnote', 'no', or 'inline'.NEWLINE# texinfo_show_urls = 'footnote'NEWLINENEWLINE# If true, do not generate a @detailmenu in the "Top" node's menu.NEWLINE# texinfo_no_detailmenu = FalseNEWLINENEWLINEsuppress_warnings = [NEWLINE # we include the README file into the docs, which contains nonlocal-uri to images (travis badge, etc)NEWLINE "image.nonlocal_uri",NEWLINE # our yaml files include jinja, which is not valid yaml; leave it like thisNEWLINE # anyway because some syntax highlight still worksNEWLINE "misc.highlighting_failure",NEWLINE]NEWLINE |
"""OpenAQ Air Quality Dashboard with Flask"""NEWLINEfrom flask import Flask, render_template, requestNEWLINEfrom flask_sqlalchemy import SQLAlchemyNEWLINEfrom openaq_py import OpenAQNEWLINENEWLINENEWLINE# Create app and DataBaseNEWLINEAPP = Flask(__name__)NEWLINEAPP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'NEWLINEDB = SQLAlchemy(APP)NEWLINENEWLINENEWLINE# Table for database to store retrieved dataNEWLINEclass Record(DB.Model):NEWLINE id = DB.Column(DB.Integer, primary_key=True)NEWLINE datetime = DB.Column(DB.String(25))NEWLINE value = DB.Column(DB.Float, nullable=False)NEWLINENEWLINE def __repr__(self):NEWLINE return f'<Time {self.datetime} --- Value {self.value}>'NEWLINENEWLINENEWLINEdef get_utc_values(city, parameter):NEWLINE """NEWLINE Function for pulling in data from apiNEWLINE Returns tuple of datetime and value"""NEWLINE api = OpenAQ()NEWLINE status, body = api.measurements(city=city, parameter=parameter)NEWLINE datetimes = []NEWLINE values = []NEWLINE for result in body['results']:NEWLINE datetime = result['date']['utc']NEWLINE value = result['value']NEWLINE datetimes.append(datetime)NEWLINE values.append(value)NEWLINE records = list(zip(datetimes, values))NEWLINE return recordsNEWLINENEWLINENEWLINE# Route to home pageNEWLINE@APP.route('/')NEWLINEdef root():NEWLINE """Base View."""NEWLINE if not DB.engine.dialect.has_table(DB.engine, 'record'):NEWLINE DB.create_all()NEWLINE records = []NEWLINE return render_template('home.html', records=records)NEWLINE else:NEWLINE records = Record.query.filter(Record.value >= 10).all()NEWLINE return render_template('home.html', records=records)NEWLINENEWLINENEWLINE# route to refresh page for cleaning and reestablishing just LA in dbNEWLINE@APP.route('/refresh')NEWLINEdef refresh():NEWLINE """Pull fresh data from Open AQ and replace existing data"""NEWLINE DB.drop_all()NEWLINE DB.create_all()NEWLINE # TODO get data from OpenAQ make Record objects with it and add to dbNEWLINE records = get_utc_values('Los Angeles', 'pm25')NEWLINE for tup in records:NEWLINE record = Record(datetime=tup[0], value=tup[1])NEWLINE DB.session.add(record)NEWLINE DB.session.commit()NEWLINE return 'Data refreshed!' |
#! /usr/bin/env pythonNEWLINE# -*-coding:utf8-*-NEWLINENEWLINEfrom collections import OrderedDictNEWLINENEWLINENEWLINEclass LRUCache(OrderedDict):NEWLINE '''Cannot store mutable objects; set() is not safe for concurrent access.'''NEWLINENEWLINE def __init__(self, capacity):NEWLINE self.capacity = capacityNEWLINE self.cache = OrderedDict()NEWLINENEWLINE def get(self, key):NEWLINE if key in self.cache:NEWLINE # move the entry to the most-recently-used positionNEWLINE value = self.cache.pop(key)NEWLINE self.cache[key] = valueNEWLINE else:NEWLINE value = NoneNEWLINENEWLINE return valueNEWLINENEWLINE def set(self, key, value):NEWLINE if key in self.cache:NEWLINE self.cache.pop(key)NEWLINE elif len(self.cache) == self.capacity:NEWLINE self.cache.popitem(last=False) # evict the first (least recently used) itemNEWLINE self.cache[key] = valueNEWLINE |
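A quick usage sketch of the cache above, showing the eviction order for a capacity of two.

cache = LRUCache(2)
cache.set('a', 1)
cache.set('b', 2)
print(cache.get('a'))   # 1, and 'a' becomes the most recently used entry
cache.set('c', 3)       # at capacity: evicts 'b', the least recently used entry
print(cache.get('b'))   # None
print(cache.get('c'))   # 3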
from flask import Flask, render_templateNEWLINEfrom flask_sqlalchemy import SQLAlchemyNEWLINEimport osNEWLINENEWLINEapp = Flask(__name__)NEWLINENEWLINEdb_path = os.path.join(os.path.dirname(__file__), 'database.db')NEWLINEdb_uri = 'sqlite:///{}'.format(db_path)NEWLINEapp.config['SQLALCHEMY_DATABASE_URI'] = db_uriNEWLINEapp.config['SECRET_KEY'] = 'thisisasecret'NEWLINENEWLINE@app.route('/')NEWLINEdef index():NEWLINE return render_template('index.html')NEWLINENEWLINE@app.route('/about')NEWLINEdef about():NEWLINE return render_template('about.html')NEWLINENEWLINE@app.route('/base')NEWLINEdef base():NEWLINE return render_template('base.html')NEWLINENEWLINE@app.route('/register')NEWLINEdef register():NEWLINE return render_template('register.html')NEWLINENEWLINE@app.route('/reservation')NEWLINEdef reservation():NEWLINE return render_template('reservation.html')NEWLINENEWLINE@app.route('/sign_in')NEWLINEdef sign_in():NEWLINE return render_template('sign_in.html')NEWLINENEWLINEif __name__ == '__main__':NEWLINE app.run(debug=True) |
# BSD 3-Clause LicenseNEWLINE#NEWLINE# Copyright (c) 2019, Elasticsearch BVNEWLINE# All rights reserved.NEWLINE#NEWLINE# Redistribution and use in source and binary forms, with or withoutNEWLINE# modification, are permitted provided that the following conditions are met:NEWLINE#NEWLINE# * Redistributions of source code must retain the above copyright notice, thisNEWLINE# list of conditions and the following disclaimer.NEWLINE#NEWLINE# * Redistributions in binary form must reproduce the above copyright notice,NEWLINE# this list of conditions and the following disclaimer in the documentationNEWLINE# and/or other materials provided with the distribution.NEWLINE#NEWLINE# * Neither the name of the copyright holder nor the names of itsNEWLINE# contributors may be used to endorse or promote products derived fromNEWLINE# this software without specific prior written permission.NEWLINE#NEWLINE# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"NEWLINE# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THENEWLINE# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARENEWLINE# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLENEWLINE# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIALNEWLINE# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS ORNEWLINE# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVERNEWLINE# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,NEWLINE# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USENEWLINE# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.NEWLINENEWLINEimport pytest # isort:skipNEWLINENEWLINEaiohttp = pytest.importorskip("aiohttp") # isort:skipNEWLINEyarl = pytest.importorskip("yarl") # isort:skipNEWLINENEWLINEfrom elasticapm.conf import constantsNEWLINEfrom elasticapm.utils.disttracing import TraceParentNEWLINENEWLINEpytestmark = [pytest.mark.asyncio, pytest.mark.aiohttp]NEWLINENEWLINENEWLINE@pytest.mark.parametrize("use_yarl", [True, False])NEWLINEasync def test_http_get(instrument, event_loop, elasticapm_client, waiting_httpserver, use_yarl):NEWLINE assert event_loop.is_running()NEWLINE elasticapm_client.begin_transaction("test")NEWLINENEWLINE url = waiting_httpserver.urlNEWLINE url = yarl.URL(url) if use_yarl else urlNEWLINENEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINENEWLINE elasticapm_client.end_transaction()NEWLINE transaction = elasticapm_client.events[constants.TRANSACTION][0]NEWLINE spans = elasticapm_client.spans_for_transaction(transaction)NEWLINE assert len(spans) == 1NEWLINE span = spans[0]NEWLINE assert span["name"] == "GET %s:%s" % waiting_httpserver.server_addressNEWLINE assert span["type"] == "external"NEWLINE assert span["subtype"] == "http"NEWLINE assert span["sync"] is FalseNEWLINE assert span["context"]["http"]["url"] == waiting_httpserver.urlNEWLINE assert span["context"]["http"]["status_code"] == 204NEWLINE assert spans[0]["context"]["destination"]["service"] == {NEWLINE "name": "",NEWLINE "resource": "127.0.0.1:%d" % waiting_httpserver.server_address[1],NEWLINE "type": "",NEWLINE }NEWLINE assert spans[0]["outcome"] == "success"NEWLINENEWLINENEWLINE@pytest.mark.parametrize("status_code", [400, 500])NEWLINEasync def test_http_get_error(instrument, event_loop, 
elasticapm_client, waiting_httpserver, status_code):NEWLINE assert event_loop.is_running()NEWLINE elasticapm_client.begin_transaction("test")NEWLINE waiting_httpserver.serve_content("", code=status_code)NEWLINE url = waiting_httpserver.urlNEWLINENEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINENEWLINE elasticapm_client.end_transaction()NEWLINE transaction = elasticapm_client.events[constants.TRANSACTION][0]NEWLINE spans = elasticapm_client.spans_for_transaction(transaction)NEWLINE assert len(spans) == 1NEWLINE span = spans[0]NEWLINE assert span["name"] == "GET %s:%s" % waiting_httpserver.server_addressNEWLINE assert span["type"] == "external"NEWLINE assert span["subtype"] == "http"NEWLINE assert span["sync"] is FalseNEWLINE assert span["context"]["http"]["url"] == waiting_httpserver.urlNEWLINE assert span["context"]["http"]["status_code"] == status_codeNEWLINE assert spans[0]["context"]["destination"]["service"] == {NEWLINE "name": "",NEWLINE "resource": "127.0.0.1:%d" % waiting_httpserver.server_address[1],NEWLINE "type": "",NEWLINE }NEWLINE assert spans[0]["outcome"] == "failure"NEWLINENEWLINENEWLINE@pytest.mark.parametrize(NEWLINE "elasticapm_client",NEWLINE [NEWLINE pytest.param({"use_elastic_traceparent_header": True}, id="use_elastic_traceparent_header-True"),NEWLINE pytest.param({"use_elastic_traceparent_header": False}, id="use_elastic_traceparent_header-False"),NEWLINE ],NEWLINE indirect=True,NEWLINE)NEWLINEasync def test_trace_parent_propagation_sampled(instrument, event_loop, elasticapm_client, waiting_httpserver):NEWLINE waiting_httpserver.serve_content("")NEWLINE url = waiting_httpserver.url + "/hello_world"NEWLINE elasticapm_client.begin_transaction("transaction")NEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINE elasticapm_client.end_transaction("MyView")NEWLINE transactions = elasticapm_client.events[constants.TRANSACTION]NEWLINE spans = elasticapm_client.spans_for_transaction(transactions[0])NEWLINENEWLINE headers = waiting_httpserver.requests[0].headersNEWLINE assert constants.TRACEPARENT_HEADER_NAME in headersNEWLINE trace_parent = TraceParent.from_string(NEWLINE headers[constants.TRACEPARENT_HEADER_NAME], tracestate_string=headers[constants.TRACESTATE_HEADER_NAME]NEWLINE )NEWLINE assert trace_parent.trace_id == transactions[0]["trace_id"]NEWLINE assert trace_parent.span_id == spans[0]["id"]NEWLINE assert trace_parent.trace_options.recordedNEWLINE # Check that sample_rate was correctly placed in the tracestateNEWLINE assert constants.TRACESTATE.SAMPLE_RATE in trace_parent.tracestate_dictNEWLINENEWLINE if elasticapm_client.config.use_elastic_traceparent_header:NEWLINE assert constants.TRACEPARENT_LEGACY_HEADER_NAME in headersNEWLINE assert headers[constants.TRACEPARENT_HEADER_NAME] == headers[constants.TRACEPARENT_LEGACY_HEADER_NAME]NEWLINE else:NEWLINE assert constants.TRACEPARENT_LEGACY_HEADER_NAME not in headersNEWLINENEWLINENEWLINE@pytest.mark.parametrize("sampled", [True, False])NEWLINEasync def test_trace_parent_propagation_sampled_headers_none(NEWLINE instrument, event_loop, elasticapm_client, waiting_httpserver, sampledNEWLINE):NEWLINE """NEWLINE Test that we don't blow up if headers are explicitly set to NoneNEWLINE """NEWLINE waiting_httpserver.serve_content("")NEWLINE url = waiting_httpserver.url 
+ "/hello_world"NEWLINE transaction = elasticapm_client.begin_transaction("transaction")NEWLINE transaction.is_sampled = sampledNEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url, headers=None) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINE elasticapm_client.end_transaction("MyView")NEWLINE transactions = elasticapm_client.events[constants.TRANSACTION]NEWLINE spans = elasticapm_client.spans_for_transaction(transactions[0])NEWLINENEWLINE headers = waiting_httpserver.requests[0].headersNEWLINE assert constants.TRACEPARENT_HEADER_NAME in headersNEWLINE trace_parent = TraceParent.from_string(NEWLINE headers[constants.TRACEPARENT_HEADER_NAME], tracestate_string=headers[constants.TRACESTATE_HEADER_NAME]NEWLINE )NEWLINE assert trace_parent.trace_id == transactions[0]["trace_id"]NEWLINE if sampled:NEWLINE assert trace_parent.span_id == spans[0]["id"]NEWLINE else:NEWLINE assert trace_parent.tracestate_dict[constants.TRACESTATE.SAMPLE_RATE] == "0"NEWLINE assert trace_parent.span_id == transactions[0]["id"]NEWLINENEWLINENEWLINE@pytest.mark.parametrize(NEWLINE "elasticapm_client",NEWLINE [NEWLINE pytest.param({"use_elastic_traceparent_header": True}, id="use_elastic_traceparent_header-True"),NEWLINE pytest.param({"use_elastic_traceparent_header": False}, id="use_elastic_traceparent_header-False"),NEWLINE ],NEWLINE indirect=True,NEWLINE)NEWLINEasync def test_trace_parent_propagation_unsampled(instrument, event_loop, elasticapm_client, waiting_httpserver):NEWLINE waiting_httpserver.serve_content("")NEWLINE url = waiting_httpserver.url + "/hello_world"NEWLINE transaction_object = elasticapm_client.begin_transaction("transaction")NEWLINE transaction_object.is_sampled = FalseNEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINE elasticapm_client.end_transaction("MyView")NEWLINE transactions = elasticapm_client.events[constants.TRANSACTION]NEWLINE spans = elasticapm_client.spans_for_transaction(transactions[0])NEWLINENEWLINE assert not spansNEWLINENEWLINE headers = waiting_httpserver.requests[0].headersNEWLINE assert constants.TRACEPARENT_HEADER_NAME in headersNEWLINE trace_parent = TraceParent.from_string(NEWLINE headers[constants.TRACEPARENT_HEADER_NAME], tracestate_string=headers[constants.TRACESTATE_HEADER_NAME]NEWLINE )NEWLINE assert trace_parent.trace_id == transactions[0]["trace_id"]NEWLINE assert trace_parent.span_id == transaction_object.idNEWLINE assert not trace_parent.trace_options.recordedNEWLINE # Check that sample_rate was correctly placed in the tracestateNEWLINE assert constants.TRACESTATE.SAMPLE_RATE in trace_parent.tracestate_dictNEWLINENEWLINE if elasticapm_client.config.use_elastic_traceparent_header:NEWLINE assert constants.TRACEPARENT_LEGACY_HEADER_NAME in headersNEWLINE assert headers[constants.TRACEPARENT_HEADER_NAME] == headers[constants.TRACEPARENT_LEGACY_HEADER_NAME]NEWLINE else:NEWLINE assert constants.TRACEPARENT_LEGACY_HEADER_NAME not in headersNEWLINENEWLINENEWLINE@pytest.mark.parametrize(NEWLINE "is_sampled", [pytest.param(True, id="is_sampled-True"), pytest.param(False, id="is_sampled-False")]NEWLINE)NEWLINEasync def test_tracestate_propagation(instrument, event_loop, elasticapm_client, waiting_httpserver, is_sampled):NEWLINE traceparent = TraceParent.from_string(NEWLINE "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-03", "foo=bar,baz=bazzinga"NEWLINE 
)NEWLINENEWLINE waiting_httpserver.serve_content("")NEWLINE url = waiting_httpserver.url + "/hello_world"NEWLINE transaction_object = elasticapm_client.begin_transaction("transaction", trace_parent=traceparent)NEWLINE transaction_object.is_sampled = is_sampledNEWLINE async with aiohttp.ClientSession() as session:NEWLINE async with session.get(waiting_httpserver.url) as resp:NEWLINE status = resp.statusNEWLINE text = await resp.text()NEWLINE elasticapm_client.end_transaction("MyView")NEWLINE headers = waiting_httpserver.requests[0].headersNEWLINE assert headers[constants.TRACESTATE_HEADER_NAME] == "foo=bar,baz=bazzinga"NEWLINE |
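Outside the test harness, the aiohttp client instrumentation exercised above is enabled with elasticapm.instrument(). The sketch below is illustrative only; the service name, APM server URL, and target URL are placeholders.

import asyncio

import aiohttp
import elasticapm

elasticapm.instrument()  # patches supported libraries, including the aiohttp client
client = elasticapm.Client(service_name="demo", server_url="http://localhost:8200")  # placeholder APM server

async def main():
    client.begin_transaction("script")
    async with aiohttp.ClientSession() as session:
        async with session.get("https://example.com/") as resp:  # recorded as an external http span
            await resp.text()
    client.end_transaction("fetch-example", "success")

asyncio.run(main())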
NEWLINEimport dns.resolverNEWLINEimport loggingNEWLINEimport timeNEWLINENEWLINENEWLINEclass DnsResolver:NEWLINENEWLINE    def __init__(self, nameserver="100.64.0.10", timeout=120):NEWLINE        self.nameserver = nameserverNEWLINE        self.timeout = timeoutNEWLINENEWLINE        self.resolver = dns.resolver.Resolver(configure=False)NEWLINE        self.resolver.nameservers = [self.nameserver]NEWLINENEWLINE    def resolve(self, domain):NEWLINE        # retry every 5 seconds until self.timeout seconds have elapsedNEWLINE        elapsed = 0NEWLINE        while elapsed < self.timeout:NEWLINE            try:NEWLINE                logging.info(f"resolving {domain}")NEWLINENEWLINE                addresses = []NEWLINE                for response in self.resolver.query(domain):NEWLINE                    if response.rdtype == dns.rdatatype.A:NEWLINE                        addresses.append(response.address)NEWLINENEWLINE                return addressesNEWLINENEWLINE            except Exception as e:NEWLINE                logging.warning(f"failed to resolve {domain}: {e}")NEWLINENEWLINE            elapsed += 5NEWLINE            time.sleep(5)NEWLINENEWLINE        raise Exception(f"domain {domain} is not resolvable by nameserver {self.nameserver}")NEWLINE |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License").NEWLINE# You may not use this file except in compliance with the License.NEWLINE# A copy of the License is located atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# or in the "license" file accompanying this file. This file is distributedNEWLINE# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, eitherNEWLINE# express or implied. See the License for the specific language governingNEWLINE# permissions and limitations under the License.NEWLINENEWLINEimport pytestNEWLINENEWLINEimport mxnet as mxNEWLINENEWLINEfrom gluonts.distribution.gaussian import GaussianNEWLINENEWLINEDISTR_SHAPE = (3, 4)NEWLINENEWLINEDISTR_CASES = [NEWLINE Gaussian(NEWLINE mu=mx.nd.random.normal(shape=DISTR_SHAPE),NEWLINE sigma=mx.nd.random.uniform(shape=DISTR_SHAPE),NEWLINE )NEWLINE]NEWLINENEWLINESLICE_AXIS_CASES = [[(0, 0, None), 3], [(0, 1, 3), 2], [(1, -1, None), 1]]NEWLINENEWLINENEWLINE@pytest.mark.parametrize(NEWLINE "slice_axis_args, expected_axis_length", SLICE_AXIS_CASESNEWLINE)NEWLINE@pytest.mark.parametrize("distr", DISTR_CASES)NEWLINEdef test_distr_slice_axis(distr, slice_axis_args, expected_axis_length):NEWLINE axis, begin, end = slice_axis_argsNEWLINE distr_sliced = distr.slice_axis(axis, begin, end)NEWLINENEWLINE assert distr_sliced.batch_shape[axis] == expected_axis_lengthNEWLINE |
# Copyright (c) Microsoft Corporation.NEWLINE# Licensed under the MIT license.NEWLINENEWLINEimport timeNEWLINEfrom typing import IterableNEWLINENEWLINEfrom ..graph import Model, ModelStatusNEWLINEfrom .interface import AbstractExecutionEngineNEWLINEfrom .listener import DefaultListenerNEWLINENEWLINE_execution_engine = NoneNEWLINE_default_listener = NoneNEWLINENEWLINE__all__ = ['get_execution_engine', 'get_and_register_default_listener',NEWLINE 'list_models', 'submit_models', 'wait_models', 'query_available_resources',NEWLINE 'set_execution_engine', 'is_stopped_exec', 'budget_exhausted']NEWLINENEWLINENEWLINEdef set_execution_engine(engine: AbstractExecutionEngine) -> None:NEWLINE global _execution_engineNEWLINE if _execution_engine is None:NEWLINE _execution_engine = engineNEWLINE else:NEWLINE raise RuntimeError('Execution engine is already set. 'NEWLINE 'You should avoid instantiating RetiariiExperiment twice in one process. 'NEWLINE 'If you are running in a Jupyter notebook, please restart the kernel.')NEWLINENEWLINENEWLINEdef get_execution_engine() -> AbstractExecutionEngine:NEWLINE global _execution_engineNEWLINE assert _execution_engine is not None, 'You need to set execution engine, before using it.'NEWLINE return _execution_engineNEWLINENEWLINENEWLINEdef get_and_register_default_listener(engine: AbstractExecutionEngine) -> DefaultListener:NEWLINE global _default_listenerNEWLINE if _default_listener is None:NEWLINE _default_listener = DefaultListener()NEWLINE engine.register_graph_listener(_default_listener)NEWLINE return _default_listenerNEWLINENEWLINENEWLINEdef submit_models(*models: Model) -> None:NEWLINE engine = get_execution_engine()NEWLINE get_and_register_default_listener(engine)NEWLINE engine.submit_models(*models)NEWLINENEWLINENEWLINEdef list_models(*models: Model) -> Iterable[Model]:NEWLINE engine = get_execution_engine()NEWLINE get_and_register_default_listener(engine)NEWLINE return engine.list_models()NEWLINENEWLINENEWLINEdef wait_models(*models: Model) -> None:NEWLINE get_and_register_default_listener(get_execution_engine())NEWLINE while True:NEWLINE time.sleep(1)NEWLINE left_models = [g for g in models if not g.status in (ModelStatus.Trained, ModelStatus.Failed)]NEWLINE if not left_models:NEWLINE breakNEWLINENEWLINENEWLINEdef query_available_resources() -> int:NEWLINE engine = get_execution_engine()NEWLINE resources = engine.query_available_resource()NEWLINE return resources if isinstance(resources, int) else len(resources)NEWLINENEWLINENEWLINEdef is_stopped_exec(model: Model) -> bool:NEWLINE return model.status in (ModelStatus.Trained, ModelStatus.Failed)NEWLINENEWLINENEWLINEdef budget_exhausted() -> bool:NEWLINE engine = get_execution_engine()NEWLINE return engine.budget_exhausted()NEWLINE |
# create a 3x3 matrix: read 9 values into a list and print them at the endNEWLINElista = list()NEWLINEdado = list()NEWLINEfor m in range(1, 10):NEWLINE    dado.append(int(input(f'Enter value {m}: ')))NEWLINE    lista.append(dado[:])NEWLINE    dado.clear()NEWLINEprint(f'{lista[0]}{lista[1]}{lista[2]}')NEWLINEprint(f'{lista[3]}{lista[4]}{lista[5]}')NEWLINEprint(f'{lista[6]}{lista[7]}{lista[8]}')NEWLINE |
#!/usr/bin/env pythonNEWLINEimport numpy as npNEWLINEimport sysNEWLINEimport scipy.io as io_matNEWLINEimport argparseNEWLINEfrom scipy.interpolate import griddataNEWLINEimport matplotlibNEWLINEmatplotlib.use('Svg')NEWLINEimport matplotlib.pyplot as pltNEWLINEfont = {'weight': 'normal',NEWLINE        'size': 12}NEWLINEmatplotlib.rc('font', **font)NEWLINEname_sol = sys.argv[1]NEWLINEname_fe = name_sol+'.mat'NEWLINEdat_fe = np.squeeze(io_mat.loadmat(name_fe)['dat_seis'])NEWLINEcrd_fe = np.squeeze(io_mat.loadmat(name_fe)['crd_obs'])NEWLINEdt_fe = np.squeeze(io_mat.loadmat(name_fe)['dt_dyn'])NEWLINENEWLINEname_fd = name_sol+'_fd.mat'NEWLINEdat_fd = np.squeeze(io_mat.loadmat(name_fd)['dat_obs'])NEWLINEcrd_fd = np.squeeze(io_mat.loadmat(name_fd)['crd_obs'])NEWLINEdt_fd = np.squeeze(io_mat.loadmat(name_fd)['dt_obs'])NEWLINENEWLINE# Waveform comparisonsNEWLINEeid = 14NEWLINEdat_fe = dat_fe[eid]NEWLINEplt.figure(figsize=(16, 12), dpi=80)NEWLINE# time axes for the FE and FD traces (sample index times time step)NEWLINExplt_fe = np.arange(dat_fe.shape[1])*dt_feNEWLINExplt_fd = np.arange(dat_fd.shape[1])*dt_fdNEWLINEfor i in range(5):NEWLINE    for j in range(3):NEWLINE        plt.subplot(5, 3, i*3+j+1)NEWLINE        plt.plot(xplt_fe, dat_fe[i, :, j])NEWLINE        plt.plot(xplt_fd, dat_fd[i, :, j])NEWLINE        plt.xlim([0, 1])NEWLINE        if i > 0 or j > 0:NEWLINE            plt.gca().axes.get_xaxis().set_visible(False)NEWLINEplt.savefig(name_sol+'_wf.png')NEWLINE |
from dataclasses import dataclassNEWLINENEWLINENEWLINE__all__ = ['CORSSettings']NEWLINENEWLINENEWLINE@dataclassNEWLINEclass CORSSettings:NEWLINE origins: list[str]NEWLINE methods: list[str]NEWLINE headers: list[str]NEWLINE |
import sysNEWLINEimport pype_trayNEWLINENEWLINEsys.exit(pype_tray.PypeTrayApplication().exec_())NEWLINE |
from enum import EnumNEWLINEimport loggingNEWLINEimport randomNEWLINEimport reNEWLINEimport requestsNEWLINEfrom r2d7.core import DroidCoreNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINEclass Talkback(DroidCore):NEWLINE """NEWLINE Chat responses unrelated to other commandsNEWLINE """NEWLINE pattern_fix = re.compile('^!((fix)|(typo))', re.I)NEWLINE pattern_data = re.compile('^!(data)', re.I)NEWLINE pattern_help = re.compile('^!(help)', re.I)NEWLINE pattern_stitchCrew = re.compile('^!(stitch ?crew)', re.I)NEWLINE pattern_stitchCard = re.compile('\[\[(stitch ?crew)\]\]', re.I)NEWLINE pattern_egg = re.compile('^!((egg)|(sooga))', re.I)NEWLINENEWLINE _data_url = 'https://github.com/guidokessels/xwing-data2'NEWLINE _r2d7_url = 'https://github.com/FreakyDug/r2-d7'NEWLINENEWLINE def __init__(self):NEWLINE super().__init__()NEWLINE self.register_handler(Talkback.pattern_fix, self.fixHandler)NEWLINE self.register_handler(Talkback.pattern_data, self.dataHandler)NEWLINE self.register_handler(Talkback.pattern_help, self.helpHandler)NEWLINE self.register_handler(Talkback.pattern_stitchCrew, self.stitchCrewHandler)NEWLINE self.register_handler(Talkback.pattern_stitchCard, self.stitchCardHandler)NEWLINE self.register_handler(Talkback.pattern_egg, self.eggHandler)NEWLINENEWLINE def fixHandler(self, message):NEWLINE dataErrorText = 'For issues with card data, raise an issue or pull request at 'NEWLINE dataErrorText += self.link(self._data_url, self._data_url)NEWLINE squadErrorText = 'For issues with squad lists, raise an issue at 'NEWLINE squadErrorText += self.link(self._r2d7_url, self._r2d7_url)NEWLINE return [[dataErrorText, squadErrorText]]NEWLINENEWLINE def dataHandler(self, message):NEWLINE text = 'X-Wing card data taken from 'NEWLINE text += self.link(self._data_url, self._data_url)NEWLINE return [[text]]NEWLINENEWLINE def helpHandler(self, message):NEWLINE return [[self.helpMessage()]]NEWLINENEWLINE def stitchCrewHandler(self, message):NEWLINE lines = [NEWLINE ['Stitch who?'],NEWLINE ['STITCH CREW!'],NEWLINE [':sewing_needle::crew:'],NEWLINE [NEWLINE self.bold('Stitch Crew'),NEWLINE '4 players, 200pts, 2 ships per player, 2 obstacles per player. First player is random and player order proceeds clockwise.',NEWLINE f'{self.bold("Setup:")} Players place obstacles in player order until 6 obstacles have been placed. Players deploy ships within range 3 of their assigned table corner and range 1 of the table edge.',NEWLINE f'{self.bold("Rules:")} The last surviving player wins the game. Alliances are forbidden, but table talk is encouraged. When a ship engages, if it has one or more valid enemy targets, it must shoot.'NEWLINE ]NEWLINE ]NEWLINE return [random.choice(lines)]NEWLINENEWLINE def stitchCardHandler(self, message):NEWLINE lines = [NEWLINE [NEWLINE f':crew::crew::crew::crew:• {self.bold("Stitch Crew")} [0]',NEWLINE self.italics('Restrictions: Stitch Crew Only'),NEWLINE 'Pew Pew Pew'NEWLINE ],NEWLINE ]NEWLINE return [random.choice(lines)]NEWLINENEWLINE def eggHandler(self, message):NEWLINE lines = [NEWLINE ['Sooga! Sooga! Sooga!'],NEWLINE ['Utinni!'],NEWLINE [':egg:'],NEWLINE ['Maclunkey!'],NEWLINE ]NEWLINE return [random.choice(lines)]NEWLINE |
from config import BLACK, WHITE, EMPTYNEWLINEfrom random import randintNEWLINENEWLINEclass Evaluator(object):NEWLINE def __init__(self, level):NEWLINE super().__init__()NEWLINE self.level = levelNEWLINE if(level == 1):NEWLINE self.WEIGHT_MATRIX = self.WORST_WEIGHT_MATRIXNEWLINE if(level == 2):NEWLINE self.WEIGHT_MATRIX = self.MEDIUM_WEIGHT_MATRIXNEWLINE if(level == 3):NEWLINE self.WEIGHT_MATRIX = self.BEST_WEIGHT_MATRIXNEWLINENEWLINE WORST_WEIGHT_MATRIX = [NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE [ 1, 1, 1, 1, 1, 1, 1, 1],NEWLINE ]NEWLINENEWLINE MEDIUM_WEIGHT_MATRIX = [NEWLINE [ 4,-3, 2, 2, 2, 2,-3, 4],NEWLINE [-3,-4,-1,-1,-1,-1,-4,-3],NEWLINE [ 2,-1, 1, 0, 0, 1,-1, 2],NEWLINE [ 2,-1, 0, 1, 1, 0,-1, 2],NEWLINE [ 2,-1, 0, 1, 1, 0,-1, 2],NEWLINE [ 2,-1, 1, 0, 0, 1,-1, 2],NEWLINE [-3,-4,-1,-1,-1,-1,-4,-3],NEWLINE [ 4,-3, 2, 2, 2, 2,-3, 4],NEWLINE ]NEWLINENEWLINE BEST_WEIGHT_MATRIX = [NEWLINE [64,-8, 8, 8, 8, 8,-8,64],NEWLINE [-8,-8,-2,-2,-2,-2,-8,-8],NEWLINE [ 8,-2, 1, 1, 1, 1,-2, 8],NEWLINE [ 8,-2, 1, 1, 1, 1,-2, 8],NEWLINE [ 8,-2, 1, 1, 1, 1,-2, 8],NEWLINE [ 8,-2, 1, 1, 1, 1,-2, 8],NEWLINE [-8,-8,-2,-2,-2,-2,-8,-8],NEWLINE [64,-8, 8, 8, 8, 8,-8,64],NEWLINE ]NEWLINENEWLINENEWLINE def score(self, board, currentDepth, player, opponent):NEWLINE whites, blacks, empty = board.count_stones()NEWLINENEWLINE # check wipe outNEWLINE if (player == WHITE and whites == 0) or (player == BLACK and blacks == 0):NEWLINE return -1000NEWLINE if (opponent == WHITE and whites == 0) or (opponent == BLACK and blacks == 0):NEWLINE return 1000NEWLINE NEWLINE score = 0NEWLINE # determine weigths according to the number of piecesNEWLINE for i in range (0,8):NEWLINE for j in range (0,8):NEWLINE if(board.board[i][j] == player):NEWLINE score += (self.WEIGHT_MATRIX[i][j])NEWLINE if(board.board[i][j] == opponent):NEWLINE score -= (self.WEIGHT_MATRIX[i][j])NEWLINE return scoreNEWLINE |
#!/usr/bin/env pythonNEWLINE# coding=utf-8NEWLINENEWLINEimport socketNEWLINENEWLINEfrom urllib.parse import urlparseNEWLINEfrom http.server import HTTPServer, BaseHTTPRequestHandlerNEWLINENEWLINENEWLINEclass ProxyHandler(BaseHTTPRequestHandler):NEWLINE    """NEWLINE    Reference links:NEWLINE    https://zhuanlan.zhihu.com/p/28737960NEWLINE    https://docs.python.org/3/library/http.server.htmlNEWLINE    """NEWLINE    def _recv_proxy_data(self, socket_client: socket.socket):NEWLINE        # read the upstream response until the server closes the connectionNEWLINE        data = b''NEWLINE        while True:NEWLINE            recv = socket_client.recv(1024)NEWLINE            if recv:NEWLINE                data += recvNEWLINE            else:NEWLINE                breakNEWLINE        socket_client.close()NEWLINE        return dataNEWLINENEWLINE    def do_GET(self):NEWLINE        uri = urlparse(self.path)NEWLINE        scheme, host, path = uri.scheme, uri.netloc, uri.pathNEWLINE        port = 443 if scheme == 'https' else 80NEWLINENEWLINE        # rebuild the request line and headers to forward to the upstream serverNEWLINE        data = 'GET {} {}\r\n'.format(path or '/', self.protocol_version)NEWLINE        for k, v in self.headers.items():NEWLINE            data += '{}: {}\r\n'.format(k, v)NEWLINE        data += '\r\n'NEWLINENEWLINE        with open('./res.txt', 'a') as fp:NEWLINE            fp.write(data)NEWLINE        socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)NEWLINE        socket_client.connect((host, port))NEWLINE        socket_client.sendall(data.encode('utf-8'))NEWLINE        recv_res_data = self._recv_proxy_data(socket_client)NEWLINE        self.wfile.write(recv_res_data)NEWLINENEWLINENEWLINEdef main():NEWLINE    server = HTTPServer(('', 6789), ProxyHandler)NEWLINE    try:NEWLINE        server.serve_forever()NEWLINE    except KeyboardInterrupt:NEWLINE        server.socket.close()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    main()NEWLINE |
import datetimeNEWLINENEWLINEimport pytestNEWLINENEWLINEfrom prisma import PrismaNEWLINENEWLINENEWLINE# TODO: add tests for every database provider we supportNEWLINENEWLINENEWLINE@pytest.mark.asyncioNEWLINEasync def test_precision_loss(client: Prisma) -> None:NEWLINE """https://github.com/RobertCraigie/prisma-client-py/issues/129"""NEWLINE date = datetime.datetime.utcnow()NEWLINE post = await client.post.create(NEWLINE data={NEWLINE 'title': 'My first post',NEWLINE 'published': False,NEWLINE 'created_at': date,NEWLINE },NEWLINE )NEWLINE found = await client.post.find_first(NEWLINE where={NEWLINE 'created_at': date,NEWLINE },NEWLINE )NEWLINE assert found is not NoneNEWLINENEWLINE found = await client.post.find_first(NEWLINE where={NEWLINE 'created_at': post.created_at,NEWLINE },NEWLINE )NEWLINE assert found is not NoneNEWLINE |
"""NEWLINEA WSGI application which lists available versionsNEWLINEof APIs understood by another WSGI application.NEWLINE"""NEWLINENEWLINEfrom reporting_api.common.apiversion import APIVersionNEWLINEfrom reporting_api.common.application import ApplicationNEWLINENEWLINENEWLINEclass VersionsApp(Application):NEWLINE """A WSGI application which lists available versionsNEWLINE of APIs understood by another WSGI application.NEWLINE """NEWLINENEWLINE def __init__(self):NEWLINE super(VersionsApp, self).__init__(None)NEWLINENEWLINE def operation_api_version_list(self, req, params):NEWLINE """Return a list of available API versions.NEWLINE """NEWLINE return (NEWLINE [NEWLINE version.api_version_detail(req, params)NEWLINE for version in APIVersion.version_classesNEWLINE ],NEWLINE NoneNEWLINE )NEWLINENEWLINE def operation_api_version_details(self, req, params):NEWLINE """Return details of one API version.NEWLINE FIXME: This calls an abstract base class method.NEWLINE """NEWLINE return (APIVersion.api_version_detail(req, params), None)NEWLINENEWLINENEWLINEdef app_factory(global_config, **settings):NEWLINE """A factory function which returns WSGI version-list applications.NEWLINE """NEWLINE return VersionsApp()NEWLINE |
class Solution:NEWLINE    def solve(self, matrix):NEWLINE        if not matrix:NEWLINE            return -1NEWLINENEWLINE        # one set per row so membership checks are O(1)NEWLINE        sets = [set(row) for row in matrix]NEWLINE        # first value of the first row that appears in every row, or -1 if none doesNEWLINE        return next((num for num in matrix[0] if all(num in s for s in sets)), -1)NEWLINE |
#!/usr/bin/env python3NEWLINE# Copyright (c) 2014-2020 The Vadercoin Core developersNEWLINE# Distributed under the MIT software license, see the accompanyingNEWLINE# file COPYING or http://www.opensource.org/licenses/mit-license.php.NEWLINENEWLINE"""NEWLINE ZMQ example using python3's asyncioNEWLINENEWLINE Vadercoin should be started with the command line arguments:NEWLINE vadercoind -testnet -daemon \NEWLINE -zmqpubrawtx=tcp://127.0.0.1:28332 \NEWLINE -zmqpubrawblock=tcp://127.0.0.1:28332 \NEWLINE -zmqpubhashtx=tcp://127.0.0.1:28332 \NEWLINE -zmqpubhashblock=tcp://127.0.0.1:28332 \NEWLINE -zmqpubsequence=tcp://127.0.0.1:28332NEWLINENEWLINE We use the asyncio library here. `self.handle()` installs itself as aNEWLINE future at the end of the function. Since it never returns with the eventNEWLINE loop having an empty stack of futures, this creates an infinite loop. AnNEWLINE alternative is to wrap the contents of `handle` inside `while True`.NEWLINENEWLINE A blocking example using python 2.7 can be obtained from the git history:NEWLINE https://github.com/vadercoin/vadercoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.pyNEWLINE"""NEWLINENEWLINEimport binasciiNEWLINEimport asyncioNEWLINEimport zmqNEWLINEimport zmq.asyncioNEWLINEimport signalNEWLINEimport structNEWLINEimport sysNEWLINENEWLINEif (sys.version_info.major, sys.version_info.minor) < (3, 5):NEWLINE print("This example only works with Python 3.5 and greater")NEWLINE sys.exit(1)NEWLINENEWLINEport = 28332NEWLINENEWLINEclass ZMQHandler():NEWLINE def __init__(self):NEWLINE self.loop = asyncio.get_event_loop()NEWLINE self.zmqContext = zmq.asyncio.Context()NEWLINENEWLINE self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)NEWLINE self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)NEWLINE self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")NEWLINE self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")NEWLINE self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")NEWLINE self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")NEWLINE self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence")NEWLINE self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)NEWLINENEWLINE async def handle(self) :NEWLINE topic, body, seq = await self.zmqSubSocket.recv_multipart()NEWLINE sequence = "Unknown"NEWLINE if len(seq) == 4:NEWLINE sequence = str(struct.unpack('<I', seq)[-1])NEWLINE if topic == b"hashblock":NEWLINE print('- HASH BLOCK ('+sequence+') -')NEWLINE print(binascii.hexlify(body))NEWLINE elif topic == b"hashtx":NEWLINE print('- HASH TX ('+sequence+') -')NEWLINE print(binascii.hexlify(body))NEWLINE elif topic == b"rawblock":NEWLINE print('- RAW BLOCK HEADER ('+sequence+') -')NEWLINE print(binascii.hexlify(body[:80]))NEWLINE elif topic == b"rawtx":NEWLINE print('- RAW TX ('+sequence+') -')NEWLINE print(binascii.hexlify(body))NEWLINE elif topic == b"sequence":NEWLINE hash = binascii.hexlify(body[:32])NEWLINE label = chr(body[32])NEWLINE mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]NEWLINE print('- SEQUENCE ('+sequence+') -')NEWLINE print(hash, label, mempool_sequence)NEWLINE # schedule ourselves to receive the next messageNEWLINE asyncio.ensure_future(self.handle())NEWLINENEWLINE def start(self):NEWLINE self.loop.add_signal_handler(signal.SIGINT, self.stop)NEWLINE self.loop.create_task(self.handle())NEWLINE self.loop.run_forever()NEWLINENEWLINE def stop(self):NEWLINE self.loop.stop()NEWLINE self.zmqContext.destroy()NEWLINENEWLINEdaemon = 
ZMQHandler()NEWLINEdaemon.start()NEWLINE |
#NEWLINE# This file is part of pysnmp software.NEWLINE#NEWLINE# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>NEWLINE# License: http://snmplabs.com/pysnmp/license.htmlNEWLINE#NEWLINEimport randomNEWLINEfrom hashlib import md5NEWLINEfrom hashlib import sha1NEWLINENEWLINEtry:NEWLINE from pysnmpcrypto import aes, PysnmpCryptoErrorNEWLINENEWLINEexcept ImportError:NEWLINE PysnmpCryptoError = AttributeErrorNEWLINE aes = NoneNEWLINENEWLINEfrom pyasn1.type import univNEWLINEfrom pysnmp.proto.secmod.rfc3414.priv import baseNEWLINEfrom pysnmp.proto.secmod.rfc3414.auth import hmacmd5NEWLINEfrom pysnmp.proto.secmod.rfc3414.auth import hmacshaNEWLINEfrom pysnmp.proto.secmod.rfc7860.auth import hmacsha2NEWLINEfrom pysnmp.proto.secmod.rfc3414 import localkeyNEWLINEfrom pysnmp.proto import errindNEWLINEfrom pysnmp.proto import errorNEWLINENEWLINErandom.seed()NEWLINENEWLINENEWLINE# RFC3826NEWLINENEWLINE#NEWLINENEWLINEclass Aes(base.AbstractEncryptionService):NEWLINE SERVICE_ID = (1, 3, 6, 1, 6, 3, 10, 1, 2, 4) # usmAesCfb128ProtocolNEWLINE KEY_SIZE = 16NEWLINENEWLINE local_int = random.randrange(0, 0xffffffffffffffff)NEWLINENEWLINE # 3.1.2.1NEWLINE def _getEncryptionKey(self, privKey, snmpEngineBoots, snmpEngineTime):NEWLINE salt = [NEWLINE self.local_int >> 56 & 0xff,NEWLINE self.local_int >> 48 & 0xff,NEWLINE self.local_int >> 40 & 0xff,NEWLINE self.local_int >> 32 & 0xff,NEWLINE self.local_int >> 24 & 0xff,NEWLINE self.local_int >> 16 & 0xff,NEWLINE self.local_int >> 8 & 0xff,NEWLINE self.local_int & 0xffNEWLINE ]NEWLINENEWLINE if self.local_int == 0xffffffffffffffff:NEWLINE self.local_int = 0NEWLINENEWLINE else:NEWLINE self.local_int += 1NEWLINENEWLINE key, iv = self._getDecryptionKey(NEWLINE privKey, snmpEngineBoots, snmpEngineTime, salt)NEWLINENEWLINE return key, iv, univ.OctetString(salt).asOctets()NEWLINENEWLINE def _getDecryptionKey(self, privKey, snmpEngineBoots,NEWLINE snmpEngineTime, salt):NEWLINENEWLINE snmpEngineBoots, snmpEngineTime, salt = (NEWLINE int(snmpEngineBoots), int(snmpEngineTime), salt)NEWLINENEWLINE iv = [NEWLINE snmpEngineBoots >> 24 & 0xff,NEWLINE snmpEngineBoots >> 16 & 0xff,NEWLINE snmpEngineBoots >> 8 & 0xff,NEWLINE snmpEngineBoots & 0xff,NEWLINE snmpEngineTime >> 24 & 0xff,NEWLINE snmpEngineTime >> 16 & 0xff,NEWLINE snmpEngineTime >> 8 & 0xff,NEWLINE snmpEngineTime & 0xffNEWLINE ]NEWLINENEWLINE iv += saltNEWLINENEWLINE key = privKey[:self.KEY_SIZE].asOctets()NEWLINE iv = univ.OctetString(iv).asOctets()NEWLINENEWLINE return key, ivNEWLINENEWLINE def hashPassphrase(self, authProtocol, privKey):NEWLINE if authProtocol == hmacmd5.HmacMd5.SERVICE_ID:NEWLINE hashAlgo = md5NEWLINENEWLINE elif authProtocol == hmacsha.HmacSha.SERVICE_ID:NEWLINE hashAlgo = sha1NEWLINENEWLINE elif authProtocol in hmacsha2.HmacSha2.HASH_ALGORITHM:NEWLINE hashAlgo = hmacsha2.HmacSha2.HASH_ALGORITHM[authProtocol]NEWLINENEWLINE else:NEWLINE raise error.ProtocolError(NEWLINE 'Unknown auth protocol %s' % (authProtocol,))NEWLINENEWLINE return localkey.hashPassphrase(privKey, hashAlgo)NEWLINENEWLINE def localizeKey(self, authProtocol, privKey, snmpEngineID):NEWLINE if authProtocol == hmacmd5.HmacMd5.SERVICE_ID:NEWLINE hashAlgo = md5NEWLINENEWLINE elif authProtocol == hmacsha.HmacSha.SERVICE_ID:NEWLINE hashAlgo = sha1NEWLINENEWLINE elif authProtocol in hmacsha2.HmacSha2.HASH_ALGORITHM:NEWLINE hashAlgo = hmacsha2.HmacSha2.HASH_ALGORITHM[authProtocol]NEWLINENEWLINE else:NEWLINE raise error.ProtocolError(NEWLINE 'Unknown auth protocol %s' % (authProtocol,))NEWLINENEWLINE localPrivKey = 
localkey.localizeKey(privKey, snmpEngineID, hashAlgo)NEWLINENEWLINE return localPrivKey[:self.KEY_SIZE]NEWLINENEWLINE # 3.2.4.1NEWLINE def encryptData(self, encryptKey, privParameters, dataToEncrypt):NEWLINE snmpEngineBoots, snmpEngineTime, salt = privParametersNEWLINENEWLINE # 3.3.1.1NEWLINE aesKey, iv, salt = self._getEncryptionKey(NEWLINE encryptKey, snmpEngineBoots, snmpEngineTime)NEWLINENEWLINE # 3.3.1.3NEWLINE # PyCrypto seems to require paddingNEWLINE padding = univ.OctetString((0,) * (16 - len(dataToEncrypt) % 16))NEWLINE dataToEncrypt += paddingNEWLINENEWLINE try:NEWLINE ciphertext = aes.encrypt(dataToEncrypt.asOctets(), aesKey, iv)NEWLINENEWLINE except PysnmpCryptoError:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.unsupportedPrivProtocol)NEWLINENEWLINE # 3.3.1.4NEWLINE return univ.OctetString(ciphertext), univ.OctetString(salt)NEWLINENEWLINE # 3.2.4.2NEWLINE def decryptData(self, decryptKey, privParameters, encryptedData):NEWLINE snmpEngineBoots, snmpEngineTime, salt = privParametersNEWLINENEWLINE # 3.3.2.1NEWLINE if len(salt) != 8:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.decryptionError)NEWLINENEWLINE # 3.3.2.3NEWLINE aesKey, iv = self._getDecryptionKey(NEWLINE decryptKey, snmpEngineBoots, snmpEngineTime, salt)NEWLINENEWLINE # PyCrypto seems to require paddingNEWLINE padding = univ.OctetString((0,) * (16 - len(encryptedData) % 16))NEWLINE encryptedData += paddingNEWLINENEWLINE try:NEWLINE # 3.3.2.4-6NEWLINE return aes.decrypt(encryptedData.asOctets(), aesKey, iv)NEWLINENEWLINE except PysnmpCryptoError:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.unsupportedPrivProtocol)NEWLINE |
# Copyright 2016-2020 Blue Marble Analytics LLC.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINENEWLINEfrom __future__ import print_functionNEWLINENEWLINEfrom builtins import strNEWLINEfrom importlib import import_moduleNEWLINEimport os.pathNEWLINEimport sysNEWLINEimport unittestNEWLINENEWLINEfrom tests.common_functions import create_abstract_model, add_components_and_load_dataNEWLINENEWLINETEST_DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), "..", "test_data")NEWLINENEWLINE# Import prerequisite modulesNEWLINEPREREQUISITE_MODULE_NAMES = [NEWLINE "temporal.operations.timepoints",NEWLINE "temporal.operations.horizons",NEWLINE "temporal.investment.periods",NEWLINE]NEWLINENAME_OF_MODULE_BEING_TESTED = "geography.local_capacity_zones"NEWLINEIMPORTED_PREREQ_MODULES = list()NEWLINEfor mdl in PREREQUISITE_MODULE_NAMES:NEWLINE try:NEWLINE imported_module = import_module("." + str(mdl), package="gridpath")NEWLINE IMPORTED_PREREQ_MODULES.append(imported_module)NEWLINE except ImportError:NEWLINE print("ERROR! Module " + str(mdl) + " not found.")NEWLINE sys.exit(1)NEWLINE# Import the module we'll testNEWLINEtry:NEWLINE MODULE_BEING_TESTED = import_module(NEWLINE "." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"NEWLINE )NEWLINEexcept ImportError:NEWLINE print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")NEWLINENEWLINENEWLINEclass TestLocalCapacityZones(unittest.TestCase):NEWLINE """ """NEWLINENEWLINE def test_add_model_components(self):NEWLINE """NEWLINE Test that there are no errors when adding model componentsNEWLINE :return:NEWLINE """NEWLINE create_abstract_model(NEWLINE prereq_modules=IMPORTED_PREREQ_MODULES,NEWLINE module_to_test=MODULE_BEING_TESTED,NEWLINE test_data_dir=TEST_DATA_DIRECTORY,NEWLINE subproblem="",NEWLINE stage="",NEWLINE )NEWLINENEWLINE def test_load_model_data(self):NEWLINE """NEWLINE Test that data are loaded with no errorsNEWLINE :return:NEWLINE """NEWLINE add_components_and_load_data(NEWLINE prereq_modules=IMPORTED_PREREQ_MODULES,NEWLINE module_to_test=MODULE_BEING_TESTED,NEWLINE test_data_dir=TEST_DATA_DIRECTORY,NEWLINE subproblem="",NEWLINE stage="",NEWLINE )NEWLINENEWLINE def test_data_loaded_correctly(self):NEWLINE """NEWLINE Test components initialized with data as expectedNEWLINE :return:NEWLINE """NEWLINE m, data = add_components_and_load_data(NEWLINE prereq_modules=IMPORTED_PREREQ_MODULES,NEWLINE module_to_test=MODULE_BEING_TESTED,NEWLINE test_data_dir=TEST_DATA_DIRECTORY,NEWLINE subproblem="",NEWLINE stage="",NEWLINE )NEWLINE instance = m.create_instance(data)NEWLINENEWLINE # Set: LOCAL_CAPACITY_ZONESNEWLINE expected_local_capacity_zones = sorted(NEWLINE ["Local_Capacity_Zone1", "Local_Capacity_Zone2"]NEWLINE )NEWLINE actual_local_capacity_zones = sorted([z for z in instance.LOCAL_CAPACITY_ZONES])NEWLINE self.assertListEqual(expected_local_capacity_zones, actual_local_capacity_zones)NEWLINE |
from dalek_debug import DalekPrint, DalekDebugOn , DalekDebugSetOutputDeviceNEWLINEimport timeNEWLINEimport dalek_spiNEWLINEimport RPi.GPIO as GPIO # Import GPIO diversNEWLINEdalek_spi.init()NEWLINENEWLINENEWLINE#####################################################NEWLINENEWLINE# This is just for playing with the bits to see if it works :)NEWLINE# do not leave code here that you need NEWLINENEWLINE#####################################################NEWLINEDalekDebugOn()NEWLINE# DalekDebugSetOutputDevice("scrollphat")NEWLINE# DalekPrint("hello phil from the main module")NEWLINENEWLINEDalekPrint("Spin Left 56","SL")NEWLINEDalekPrint("Spin Left {}".format(666),"KKK" )NEWLINEDalekPrint("Turn Right 56")NEWLINEDalekPrint("\n... Shutting Down...\n")NEWLINEDalekPrint("\n\nReturning to Main Menu\n\n", "HM")NEWLINEDalekPrint("","PSS")NEWLINE# while True:NEWLINE NEWLINE# # piSensors = DalekSpi.readDevice1Data() NEWLINE# # DalekPrint(piSensors['frontPing'] )NEWLINE# # DalekPrint(piSensors['compass'] )NEWLINE# DalekSpi.test()NEWLINENEWLINE# # mag = DalekSpi.readDevice1Data()NEWLINE# # DalekPrint(mag)NEWLINE# time.sleep(.2)NEWLINENEWLINE NEWLINE |
"""NEWLINECodemonk link: https://www.hackerearth.com/problem/algorithm/vaishu-and-tower-arrangements-fe7c349eNEWLINENEWLINEVaishu is fond of building wooden towers. Currently, she has N wooden towers. Since, she is a big fan of uniformity, sheNEWLINEarranges the towers in such a way that the first few consecutive towers should be facing downwards and then after aNEWLINEcertain specific point, all should be facing upwards. But, there needs to be at least one tower facing in both upwardNEWLINEand downward directions. Now, Vaishu's notorious brother Vibhu has distorted the arrangement by toggling the directionNEWLINEof few towers and made her pretty sad. He is feeling sorry and is ready to help her. But he is smart and knows that sheNEWLINEdoesn't remember the number of towers that were arranged initially. So, he decided to toggle the direction of fewestNEWLINEnumber of towers to make the uniform arrangement that she likes. Though the final arrangement may not be same as theNEWLINEinitial arrangement Vaishu had. Can you help her brother to toggle direction of minimum number of towers to makeNEWLINEarrangement she likes?NEWLINENEWLINEInput - Output:NEWLINEFirst line of the input contains integer T denoting the number of test cases.NEWLINEFor each test case, there will be two separate lines.NEWLINEFirst line of each test case contains N denoting the number of towers Vaishu had initially.NEWLINEThe next and the last line denotes an array of length N having values 1 and 1, where 1 denotes that tower isNEWLINEfacing downwards and 1 denotes tower is facing upwards.NEWLINEFor each test case, you need to print minimum number of towers her brother needs to toggle so that Vaishu'sNEWLINEuniform arrangement is sustained.NEWLINENEWLINESample input:NEWLINE1NEWLINE5NEWLINE1 -1 1 1 -1NEWLINENEWLINESample Output:NEWLINE2NEWLINE"""NEWLINENEWLINE"""NEWLINEThat's a classic problem that can be solved by calculating 2 arrays, one that contains the maximum amount amount ofNEWLINE1's ending at each index and 1 that contains the maximum amount of -1's starting from each index. Iterating through theNEWLINEwhole array the answer can be given by ans = min(ans, ending[i] + starting[i+1]).NEWLINENEWLINEFinal complexity: O(N)NEWLINE"""NEWLINENEWLINEt = int(input())NEWLINEfor _ in range(t):NEWLINE n = int(input())NEWLINE towers = list(map(int, input().split()))NEWLINE neg = [0] * nNEWLINE pos = [0] * nNEWLINE if towers[0] == 1:NEWLINE pos[0] = 1NEWLINENEWLINE if towers[-1] == -1:NEWLINE neg[-1] = 1NEWLINENEWLINE for i in range(n-2, -1, -1):NEWLINE if towers[i] == -1:NEWLINE neg[i] = neg[i+1] + 1NEWLINE else:NEWLINE neg[i] = neg[i+1]NEWLINENEWLINE ans = nNEWLINE for i in range(n-1):NEWLINE if towers[i] == 1:NEWLINE pos[i] = pos[i-1] + 1NEWLINE else:NEWLINE pos[i] = pos[i-1]NEWLINENEWLINE ans = min(ans, pos[i] + neg[i+1])NEWLINENEWLINE print(ans)NEWLINE |
# Copyright 2018 IguazioNEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINENEWLINEimport timeNEWLINENEWLINEfrom kubernetes import clientNEWLINEfrom kubernetes.client.rest import ApiExceptionNEWLINENEWLINEimport mlrun.errorsNEWLINEfrom mlrun.runtimes.base import BaseRuntimeHandlerNEWLINENEWLINEfrom ..builder import build_runtimeNEWLINEfrom ..db import RunDBErrorNEWLINEfrom ..kfpops import build_opNEWLINEfrom ..model import RunObjectNEWLINEfrom ..utils import get_in, loggerNEWLINEfrom .base import RunErrorNEWLINEfrom .pod import KubeResource, kube_resource_spec_to_pod_specNEWLINEfrom .utils import AsyncLogWriterNEWLINENEWLINENEWLINEclass KubejobRuntime(KubeResource):NEWLINE kind = "job"NEWLINE _is_nested = TrueNEWLINENEWLINE _is_remote = TrueNEWLINENEWLINE @propertyNEWLINE def is_deployed(self):NEWLINE """check if the function is deployed (have a valid container)"""NEWLINE if self.spec.image:NEWLINE return TrueNEWLINENEWLINE if self._is_remote_api():NEWLINE db = self._get_db()NEWLINE try:NEWLINE db.get_builder_status(self, logs=False)NEWLINE except RunDBError:NEWLINE passNEWLINENEWLINE if self.spec.image:NEWLINE return TrueNEWLINE if self.status.state and self.status.state == "ready":NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINE def with_source_archive(self, source, pythonpath=None, pull_at_runtime=True):NEWLINE """load the code from git/tar/zip archive at runtime or buildNEWLINENEWLINE :param source: valid path to git, zip, or tar file, e.g.NEWLINE git://github.com/mlrun/something.gitNEWLINE http://some/url/file.zipNEWLINE :param pythonpath: python search path relative to the archive root or absolute (e.g. './subdir')NEWLINE :param pull_at_runtime: load the archive into the container at job runtime vs on build/deployNEWLINE """NEWLINE self.spec.build.load_source_on_run = pull_at_runtimeNEWLINE self.spec.build.source = sourceNEWLINE if pythonpath:NEWLINE self.spec.pythonpath = pythonpathNEWLINENEWLINE def build_config(NEWLINE self,NEWLINE image="",NEWLINE base_image=None,NEWLINE commands: list = None,NEWLINE secret=None,NEWLINE source=None,NEWLINE extra=None,NEWLINE load_source_on_run=None,NEWLINE ):NEWLINE """specify builder configuration for the deploy operationNEWLINENEWLINE :param image: target image name/pathNEWLINE :param base_image: base image name/pathNEWLINE :param commands: list of docker build (RUN) commands e.g. ['pip install pandas']NEWLINE :param secret: k8s secret for accessing the docker registryNEWLINE :param source: source git/tar archive to load code from in to the context/workdirNEWLINE e.g. 
git://github.com/mlrun/something.git#developmentNEWLINE :param extra: extra Dockerfile linesNEWLINE :param load_source_on_run: load the archive code into the container at runtime vs at build timeNEWLINE """NEWLINE if image:NEWLINE self.spec.build.image = imageNEWLINE if commands:NEWLINE if not isinstance(commands, list):NEWLINE raise ValueError("commands must be a string list")NEWLINE self.spec.build.commands = self.spec.build.commands or []NEWLINE self.spec.build.commands += commandsNEWLINE if extra:NEWLINE self.spec.build.extra = extraNEWLINE if secret:NEWLINE self.spec.build.secret = secretNEWLINE if base_image:NEWLINE self.spec.build.base_image = base_imageNEWLINE if source:NEWLINE self.spec.build.source = sourceNEWLINE if load_source_on_run:NEWLINE self.spec.build.load_source_on_run = load_source_on_runNEWLINENEWLINE def deploy(NEWLINE self,NEWLINE watch=True,NEWLINE with_mlrun=True,NEWLINE skip_deployed=False,NEWLINE is_kfp=False,NEWLINE mlrun_version_specifier=None,NEWLINE ):NEWLINE """deploy function, build container with dependenciesNEWLINENEWLINE :param watch: wait for the deploy to complete (and print build logs)NEWLINE :param with_mlrun: add the current mlrun package to the container buildNEWLINE :param skip_deployed: skip the build if we already have an image for the functionNEWLINE :param mlrun_version_specifier: which mlrun package version to include (if not current)NEWLINE """NEWLINENEWLINE build = self.spec.buildNEWLINENEWLINE if not build.source and not build.commands and not build.extra and with_mlrun:NEWLINE logger.info(NEWLINE "running build to add mlrun package, set "NEWLINE "with_mlrun=False to skip if its already in the image"NEWLINE )NEWLINE self.status.state = ""NEWLINENEWLINE # When we're in pipelines context we must watch otherwise the pipelines pod will exit before the operationNEWLINE # is actually done. 
(when a pipelines pod exits, the pipeline step marked as done)NEWLINE if is_kfp:NEWLINE watch = TrueNEWLINENEWLINE if self._is_remote_api():NEWLINE db = self._get_db()NEWLINE data = db.remote_builder(NEWLINE self, with_mlrun, mlrun_version_specifier, skip_deployedNEWLINE )NEWLINE logger.info(NEWLINE f"Started building image: {data.get('data', {}).get('spec', {}).get('build', {}).get('image')}"NEWLINE )NEWLINE self.status = data["data"].get("status", None)NEWLINE self.spec.image = get_in(data, "data.spec.image")NEWLINE ready = data.get("ready", False)NEWLINE if watch and not ready:NEWLINE state = self._build_watch(watch)NEWLINE ready = state == "ready"NEWLINE self.status.state = stateNEWLINE else:NEWLINE self.save(versioned=False)NEWLINE ready = build_runtime(NEWLINE self, with_mlrun, mlrun_version_specifier, skip_deployed, watchNEWLINE )NEWLINE self.save(versioned=False)NEWLINENEWLINE if watch and not ready:NEWLINE raise mlrun.errors.MLRunRuntimeError("Deploy failed")NEWLINE return readyNEWLINENEWLINE def _build_watch(self, watch=True, logs=True):NEWLINE db = self._get_db()NEWLINE offset = 0NEWLINE try:NEWLINE text, _ = db.get_builder_status(self, 0, logs=logs)NEWLINE except RunDBError:NEWLINE raise ValueError("function or build process not found")NEWLINENEWLINE if text:NEWLINE print(text)NEWLINE if watch:NEWLINE while self.status.state in ["pending", "running"]:NEWLINE offset += len(text)NEWLINE time.sleep(2)NEWLINE text, _ = db.get_builder_status(self, offset, logs=logs)NEWLINE if text:NEWLINE print(text, end="")NEWLINENEWLINE return self.status.stateNEWLINENEWLINE def builder_status(self, watch=True, logs=True):NEWLINE if self._is_remote_api():NEWLINE return self._build_watch(watch, logs)NEWLINENEWLINE else:NEWLINE pod = self.status.build_podNEWLINE if not self.status.state == "ready" and pod:NEWLINE k8s = self._get_k8s()NEWLINE status = k8s.get_pod_status(pod)NEWLINE if logs:NEWLINE if watch:NEWLINE status = k8s.watch(pod)NEWLINE else:NEWLINE resp = k8s.logs(pod)NEWLINE if resp:NEWLINE print(resp.encode())NEWLINENEWLINE if status == "succeeded":NEWLINE self.status.build_pod = NoneNEWLINE self.status.state = "ready"NEWLINE logger.info("build completed successfully")NEWLINE return "ready"NEWLINE if status in ["failed", "error"]:NEWLINE self.status.state = statusNEWLINE logger.error(f" build {status}, watch the build pod logs: {pod}")NEWLINE return statusNEWLINENEWLINE logger.info(f"builder status is: {status}, wait for it to complete")NEWLINE return NoneNEWLINENEWLINE def deploy_step(NEWLINE self,NEWLINE image=None,NEWLINE base_image=None,NEWLINE commands: list = None,NEWLINE secret_name="",NEWLINE with_mlrun=True,NEWLINE skip_deployed=False,NEWLINE ):NEWLINE function_name = self.metadata.name or "function"NEWLINE name = f"deploy_{function_name}"NEWLINE return build_op(NEWLINE name,NEWLINE self,NEWLINE image=image,NEWLINE base_image=base_image,NEWLINE commands=commands,NEWLINE secret_name=secret_name,NEWLINE with_mlrun=with_mlrun,NEWLINE skip_deployed=skip_deployed,NEWLINE )NEWLINENEWLINE def _run(self, runobj: RunObject, execution):NEWLINENEWLINE command, args, extra_env = self._get_cmd_args(runobj)NEWLINENEWLINE if runobj.metadata.iteration:NEWLINE self.store_run(runobj)NEWLINE k8s = self._get_k8s()NEWLINE new_meta = self._get_meta(runobj)NEWLINENEWLINE if self._secrets:NEWLINE if self._secrets.has_vault_source():NEWLINE self._add_vault_params_to_spec(runobj)NEWLINE if self._secrets.has_azure_vault_source():NEWLINE self._add_azure_vault_params_to_spec(NEWLINE 
self._secrets.get_azure_vault_k8s_secret()NEWLINE )NEWLINE k8s_secrets = self._secrets.get_k8s_secrets()NEWLINE if k8s_secrets:NEWLINE self._add_project_k8s_secrets_to_spec(k8s_secrets, runobj)NEWLINENEWLINE pod_spec = func_to_pod(NEWLINE self.full_image_path(), self, extra_env, command, args, self.spec.workdirNEWLINE )NEWLINE pod = client.V1Pod(metadata=new_meta, spec=pod_spec)NEWLINE try:NEWLINE pod_name, namespace = k8s.create_pod(pod)NEWLINE except ApiException as exc:NEWLINE raise RunError(str(exc))NEWLINENEWLINE if pod_name and self.kfp:NEWLINE writer = AsyncLogWriter(self._db_conn, runobj)NEWLINE status = k8s.watch(pod_name, namespace, writer=writer)NEWLINENEWLINE if status in ["failed", "error"]:NEWLINE raise RunError(f"pod exited with {status}, check logs")NEWLINE else:NEWLINE txt = f"Job is running in the background, pod: {pod_name}"NEWLINE logger.info(txt)NEWLINE runobj.status.status_text = txtNEWLINENEWLINE return NoneNEWLINENEWLINENEWLINEdef func_to_pod(image, runtime, extra_env, command, args, workdir):NEWLINE container = client.V1Container(NEWLINE name="base",NEWLINE image=image,NEWLINE env=extra_env + runtime.spec.env,NEWLINE command=[command],NEWLINE args=args,NEWLINE working_dir=workdir,NEWLINE image_pull_policy=runtime.spec.image_pull_policy,NEWLINE volume_mounts=runtime.spec.volume_mounts,NEWLINE resources=runtime.spec.resources,NEWLINE )NEWLINENEWLINE pod_spec = kube_resource_spec_to_pod_spec(runtime.spec, container)NEWLINENEWLINE if runtime.spec.image_pull_secret:NEWLINE pod_spec.image_pull_secrets = [NEWLINE client.V1LocalObjectReference(name=runtime.spec.image_pull_secret)NEWLINE ]NEWLINENEWLINE return pod_specNEWLINENEWLINENEWLINEclass KubeRuntimeHandler(BaseRuntimeHandler):NEWLINE @staticmethodNEWLINE def _are_resources_coupled_to_run_object() -> bool:NEWLINE return TrueNEWLINENEWLINE @staticmethodNEWLINE def _get_object_label_selector(object_id: str) -> str:NEWLINE return f"mlrun/uid={object_id}"NEWLINENEWLINE @staticmethodNEWLINE def _get_default_label_selector() -> str:NEWLINE return "mlrun/class in (build, job)"NEWLINE |
from django.db import modelsNEWLINEfrom django.db.models import signalsNEWLINEfrom media_management_api.media_service.models import UserProfileNEWLINENEWLINEimport osNEWLINEimport base64NEWLINEimport structNEWLINEimport hashlibNEWLINEimport loggingNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINEclass Application(models.Model):NEWLINE client_id = models.CharField(max_length=20, unique=True, blank=False)NEWLINE client_secret = models.CharField(max_length=40, blank=True)NEWLINE description = models.CharField(max_length=1024, blank=True)NEWLINE created = models.DateTimeField(auto_now_add=True)NEWLINE updated = models.DateTimeField(auto_now=True)NEWLINENEWLINE class Meta:NEWLINE verbose_name = 'application'NEWLINE verbose_name_plural = 'applications'NEWLINE ordering = ["client_id"]NEWLINENEWLINEclass Token(models.Model):NEWLINE key = models.CharField(max_length=80, unique=True, blank=False)NEWLINE scope = models.CharField(max_length=1024, blank=True)NEWLINE created = models.DateTimeField(auto_now_add=True)NEWLINE user_profile = models.ForeignKey(UserProfile, on_delete=models.CASCADE, related_name="user_tokens")NEWLINE application = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='user_tokens')NEWLINENEWLINE class Meta:NEWLINE verbose_name = 'token'NEWLINE verbose_name_plural = 'tokens'NEWLINE ordering = ["-created"]NEWLINENEWLINEdef generate_random_client_secret():NEWLINE '''NEWLINE Returns a random hex string that can be used as the client secret.NEWLINE '''NEWLINE m = hashlib.sha1()NEWLINE m.update(os.urandom(4096))NEWLINE return m.hexdigest()NEWLINENEWLINEdef generate_random_access_token(pk):NEWLINE '''NEWLINE Returns a unique, non-guessable string that can be used as the access token.NEWLINE '''NEWLINE m = hashlib.sha1()NEWLINE m.update(os.urandom(4096))NEWLINENEWLINE digest_encoded_bytes = base64.urlsafe_b64encode(m.digest())NEWLINE pk_encoded_bytes = base64.urlsafe_b64encode(struct.pack('I', int(pk))) # ensure uniquenessNEWLINE token = (digest_encoded_bytes + pk_encoded_bytes).strip(b"=\n").decode("utf-8")NEWLINENEWLINE return tokenNEWLINENEWLINEdef create_token_key(sender, instance, **kwargs):NEWLINE '''NEWLINE Sets an access token for new Token instances (called via post_save signal).NEWLINE '''NEWLINE if not instance.key:NEWLINE instance.key = generate_random_access_token(instance.pk)NEWLINE instance.save(update_fields=['key'])NEWLINE logger.debug("Creating token key for sender=%s instance=%s token=%s" % (sender, instance, instance.key))NEWLINENEWLINEdef create_client_secret(sender, instance, **kwargs):NEWLINE '''NEWLINE Sets a client secret for new Application instances (called via post_save signal).NEWLINE '''NEWLINE if not instance.client_secret:NEWLINE instance.client_secret = generate_random_client_secret()NEWLINE instance.save(update_fields=['client_secret'])NEWLINE logger.debug("Created client secret for sender=%s instance=%s client_secret=%s" % (sender, instance, instance.client_secret))NEWLINENEWLINENEWLINEsignals.post_save.connect(create_client_secret, sender=Application)NEWLINEsignals.post_save.connect(create_token_key, sender=Token)NEWLINE |
# test importing of required modules and sit2standpy packageNEWLINENEWLINENEWLINEdef test_numpy():NEWLINE import numpyNEWLINENEWLINE returnNEWLINENEWLINENEWLINEdef test_scipy():NEWLINE import scipyNEWLINENEWLINE returnNEWLINENEWLINENEWLINEdef test_pywt():NEWLINE import pywtNEWLINENEWLINE returnNEWLINENEWLINENEWLINEdef test_pysit2stand():NEWLINE import sit2standpyNEWLINE from sit2standpy import Sit2Stand, detectors, mov_stats, Transition, TransitionQuantifier, \NEWLINE AccelerationFilter, process_timestamps, __version__NEWLINE from sit2standpy.detectors import Stillness, DisplacementNEWLINENEWLINE returnNEWLINE |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINE# ==============================================================================NEWLINE"""Common utility functions for evaluation."""NEWLINEfrom __future__ import absolute_importNEWLINEfrom __future__ import divisionNEWLINEfrom __future__ import print_functionNEWLINENEWLINEimport collectionsNEWLINEimport osNEWLINEimport reNEWLINEimport timeNEWLINENEWLINEimport numpy as npNEWLINEfrom six.moves import rangeNEWLINEimport tensorflow.compat.v1 as tfNEWLINENEWLINEimport tf_slim as slimNEWLINENEWLINEfrom object_detection.core import box_listNEWLINEfrom object_detection.core import box_list_opsNEWLINEfrom object_detection.core import keypoint_opsNEWLINEfrom object_detection.core import standard_fields as fieldsNEWLINEfrom object_detection.metrics import coco_evaluationNEWLINEfrom object_detection.metrics import lvis_evaluationNEWLINEfrom object_detection.protos import eval_pb2NEWLINEfrom object_detection.utils import label_map_utilNEWLINEfrom object_detection.utils import object_detection_evaluationNEWLINEfrom object_detection.utils import opsNEWLINEfrom object_detection.utils import shape_utilsNEWLINEfrom object_detection.utils import visualization_utils as vis_utilsNEWLINENEWLINEEVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics'NEWLINENEWLINE# A dictionary of metric names to classes that implement the metric. 
The classesNEWLINE# in the dictionary must implementNEWLINE# utils.object_detection_evaluation.DetectionEvaluator interface.NEWLINEEVAL_METRICS_CLASS_DICT = {NEWLINE 'coco_detection_metrics':NEWLINE coco_evaluation.CocoDetectionEvaluator,NEWLINE 'coco_keypoint_metrics':NEWLINE coco_evaluation.CocoKeypointEvaluator,NEWLINE 'coco_mask_metrics':NEWLINE coco_evaluation.CocoMaskEvaluator,NEWLINE 'coco_panoptic_metrics':NEWLINE coco_evaluation.CocoPanopticSegmentationEvaluator,NEWLINE 'lvis_mask_metrics':NEWLINE lvis_evaluation.LVISMaskEvaluator,NEWLINE 'oid_challenge_detection_metrics':NEWLINE object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,NEWLINE 'oid_challenge_segmentation_metrics':NEWLINE object_detection_evaluationNEWLINE .OpenImagesInstanceSegmentationChallengeEvaluator,NEWLINE 'pascal_voc_detection_metrics':NEWLINE object_detection_evaluation.PascalDetectionEvaluator,NEWLINE 'weighted_pascal_voc_detection_metrics':NEWLINE object_detection_evaluation.WeightedPascalDetectionEvaluator,NEWLINE 'precision_at_recall_detection_metrics':NEWLINE object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,NEWLINE 'pascal_voc_instance_segmentation_metrics':NEWLINE object_detection_evaluation.PascalInstanceSegmentationEvaluator,NEWLINE 'weighted_pascal_voc_instance_segmentation_metrics':NEWLINE object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,NEWLINE 'oid_V2_detection_metrics':NEWLINE object_detection_evaluation.OpenImagesDetectionEvaluator,NEWLINE}NEWLINENEWLINEEVAL_DEFAULT_METRIC = 'coco_detection_metrics'NEWLINENEWLINENEWLINEdef write_metrics(metrics, global_step, summary_dir):NEWLINE """Write metrics to a summary directory.NEWLINENEWLINE Args:NEWLINE metrics: A dictionary containing metric names and values.NEWLINE global_step: Global step at which the metrics are computed.NEWLINE summary_dir: Directory to write tensorflow summaries to.NEWLINE """NEWLINE tf.logging.info('Writing metrics to tf summary.')NEWLINE summary_writer = tf.summary.FileWriterCache.get(summary_dir)NEWLINE for key in sorted(metrics):NEWLINE summary = tf.Summary(value=[NEWLINE tf.Summary.Value(tag=key, simple_value=metrics[key]),NEWLINE ])NEWLINE summary_writer.add_summary(summary, global_step)NEWLINE tf.logging.info('%s: %f', key, metrics[key])NEWLINE tf.logging.info('Metrics written to tf summary.')NEWLINENEWLINENEWLINE# TODO(rathodv): Add tests.NEWLINEdef visualize_detection_results(result_dict,NEWLINE tag,NEWLINE global_step,NEWLINE categories,NEWLINE summary_dir='',NEWLINE export_dir='',NEWLINE agnostic_mode=False,NEWLINE show_groundtruth=False,NEWLINE groundtruth_box_visualization_color='black',NEWLINE min_score_thresh=.5,NEWLINE max_num_predictions=20,NEWLINE skip_scores=False,NEWLINE skip_labels=False,NEWLINE skip_weights=True,NEWLINE keep_image_id_for_visualization_export=False):NEWLINE """Visualizes detection results and writes visualizations to image summaries.NEWLINENEWLINE This function visualizes an image with its detected bounding boxes and writesNEWLINE to image summaries which can be viewed on tensorboard. It optionally alsoNEWLINE writes images to a directory. In the case of missing entry in the label map,NEWLINE unknown class name in the visualization is shown as "N/A".NEWLINENEWLINE Args:NEWLINE result_dict: a dictionary holding groundtruth and detectionNEWLINE data corresponding to each image being evaluated. 
The following keysNEWLINE are required:NEWLINE 'original_image': a numpy array representing the image with shapeNEWLINE [1, height, width, 3] or [1, height, width, 1]NEWLINE 'detection_boxes': a numpy array of shape [N, 4]NEWLINE 'detection_scores': a numpy array of shape [N]NEWLINE 'detection_classes': a numpy array of shape [N]NEWLINE The following keys are optional:NEWLINE 'groundtruth_boxes': a numpy array of shape [N, 4]NEWLINE 'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]NEWLINE Detections are assumed to be provided in decreasing order of score and forNEWLINE display, and we assume that scores are probabilities between 0 and 1.NEWLINE tag: tensorboard tag (string) to associate with image.NEWLINE global_step: global step at which the visualization are generated.NEWLINE categories: a list of dictionaries representing all possible categories.NEWLINE Each dict in this list has the following keys:NEWLINE 'id': (required) an integer id uniquely identifying this categoryNEWLINE 'name': (required) string representing category nameNEWLINE e.g., 'cat', 'dog', 'pizza'NEWLINE 'supercategory': (optional) string representing the supercategoryNEWLINE e.g., 'animal', 'vehicle', 'food', etcNEWLINE summary_dir: the output directory to which the image summaries are written.NEWLINE export_dir: the output directory to which images are written. If this isNEWLINE empty (default), then images are not exported.NEWLINE agnostic_mode: boolean (default: False) controlling whether to evaluate inNEWLINE class-agnostic mode or not.NEWLINE show_groundtruth: boolean (default: False) controlling whether to showNEWLINE groundtruth boxes in addition to detected boxesNEWLINE groundtruth_box_visualization_color: box color for visualizing groundtruthNEWLINE boxesNEWLINE min_score_thresh: minimum score threshold for a box to be visualizedNEWLINE max_num_predictions: maximum number of detections to visualizeNEWLINE skip_scores: whether to skip score when drawing a single detectionNEWLINE skip_labels: whether to skip label when drawing a single detectionNEWLINE keep_image_id_for_visualization_export: whether to keep image identifier inNEWLINE filename when exported to export_dirNEWLINE Raises:NEWLINE ValueError: if result_dict does not contain the expected keys (i.e.,NEWLINE 'original_image', 'detection_boxes', 'detection_scores',NEWLINE 'detection_classes')NEWLINE """NEWLINE detection_fields = fields.DetectionResultFieldsNEWLINE input_fields = fields.InputDataFieldsNEWLINE if not set([NEWLINE input_fields.original_image,NEWLINE detection_fields.detection_boxes,NEWLINE detection_fields.detection_scores,NEWLINE detection_fields.detection_classes,NEWLINE ]).issubset(set(result_dict.keys())):NEWLINE raise ValueError('result_dict does not contain all expected keys.')NEWLINE if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:NEWLINE raise ValueError('If show_groundtruth is enabled, result_dict must contain 'NEWLINE 'groundtruth_boxes.')NEWLINE tf.logging.info('Creating detection visualizations.')NEWLINE category_index = label_map_util.create_category_index(categories)NEWLINENEWLINE image = np.squeeze(result_dict[input_fields.original_image], axis=0)NEWLINE if image.shape[2] == 1: # If one channel image, repeat in RGB.NEWLINE image = np.tile(image, [1, 1, 3])NEWLINE detection_boxes = result_dict[detection_fields.detection_boxes]NEWLINE detection_scores = result_dict[detection_fields.detection_scores]NEWLINE detection_classes = np.int32((result_dict[NEWLINE 
detection_fields.detection_classes]))NEWLINE detection_keypoints = result_dict.get(detection_fields.detection_keypoints)NEWLINE detection_masks = result_dict.get(detection_fields.detection_masks)NEWLINE detection_boundaries = result_dict.get(detection_fields.detection_boundaries)NEWLINE detection_weights = result_dict[detection_fields.detection_weightPerObject]NEWLINENEWLINE # Plot groundtruth underneath detectionsNEWLINE if show_groundtruth:NEWLINE groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]NEWLINE groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)NEWLINE vis_utils.visualize_boxes_and_labels_on_image_array(NEWLINE image=image,NEWLINE boxes=groundtruth_boxes,NEWLINE classes=None,NEWLINE scores=None,NEWLINE category_index=category_index,NEWLINE keypoints=groundtruth_keypoints,NEWLINE use_normalized_coordinates=False,NEWLINE max_boxes_to_draw=None,NEWLINE groundtruth_box_visualization_color=groundtruth_box_visualization_color)NEWLINE vis_utils.visualize_boxes_and_labels_on_image_array(NEWLINE image,NEWLINE detection_boxes,NEWLINE detection_classes,NEWLINE detection_scores,NEWLINE detection_weights,NEWLINE category_index,NEWLINE instance_masks=detection_masks,NEWLINE instance_boundaries=detection_boundaries,NEWLINE keypoints=detection_keypoints,NEWLINE use_normalized_coordinates=False,NEWLINE max_boxes_to_draw=max_num_predictions,NEWLINE min_score_thresh=min_score_thresh,NEWLINE agnostic_mode=agnostic_mode,NEWLINE skip_scores=skip_scores,NEWLINE skip_labels=skip_labels,NEWLINE skip_weights=skip_weights)NEWLINENEWLINE if export_dir:NEWLINE if keep_image_id_for_visualization_export and result_dict[fields.NEWLINE InputDataFields()NEWLINE .key]:NEWLINE export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(NEWLINE tag, result_dict[fields.InputDataFields().key]))NEWLINE else:NEWLINE export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))NEWLINE vis_utils.save_image_array_as_png(image, export_path)NEWLINENEWLINE summary = tf.Summary(value=[NEWLINE tf.Summary.Value(NEWLINE tag=tag,NEWLINE image=tf.Summary.Image(NEWLINE encoded_image_string=vis_utils.encode_image_array_as_png_str(NEWLINE image)))NEWLINE ])NEWLINE summary_writer = tf.summary.FileWriterCache.get(summary_dir)NEWLINE summary_writer.add_summary(summary, global_step)NEWLINENEWLINE tf.logging.info('Detection visualizations written to summary with tag %s.',NEWLINE tag)NEWLINENEWLINENEWLINEdef _run_checkpoint_once(tensor_dict,NEWLINE evaluators=None,NEWLINE batch_processor=None,NEWLINE checkpoint_dirs=None,NEWLINE variables_to_restore=None,NEWLINE restore_fn=None,NEWLINE num_batches=1,NEWLINE master='',NEWLINE save_graph=False,NEWLINE save_graph_dir='',NEWLINE losses_dict=None,NEWLINE eval_export_path=None,NEWLINE process_metrics_fn=None):NEWLINE """Evaluates metrics defined in evaluators and returns summaries.NEWLINENEWLINE This function loads the latest checkpoint in checkpoint_dirs and evaluatesNEWLINE all metrics defined in evaluators. The metrics are processed in batch by theNEWLINE batch_processor.NEWLINENEWLINE Args:NEWLINE tensor_dict: a dictionary holding tensors representing a batch of detectionsNEWLINE and corresponding groundtruth annotations.NEWLINE evaluators: a list of object of type DetectionEvaluator to be used forNEWLINE evaluation. Note that the metric names produced by different evaluatorsNEWLINE must be unique.NEWLINE batch_processor: a function taking four arguments:NEWLINE 1. 
tensor_dict: the same tensor_dict that is passed in as the firstNEWLINE argument to this function.NEWLINE 2. sess: a tensorflow sessionNEWLINE 3. batch_index: an integer representing the index of the batch amongstNEWLINE all batchesNEWLINE By default, batch_processor is None, which defaults to running:NEWLINE return sess.run(tensor_dict)NEWLINE To skip an image, it suffices to return an empty dictionary in place ofNEWLINE result_dict.NEWLINE checkpoint_dirs: list of directories to load into an EnsembleModel. If itNEWLINE has only one directory, EnsembleModel will not be used --NEWLINE a DetectionModelNEWLINE will be instantiated directly. Not used if restore_fn is set.NEWLINE variables_to_restore: None, or a dictionary mapping variable names found inNEWLINE a checkpoint to model variables. The dictionary would normally beNEWLINE generated by creating a tf.train.ExponentialMovingAverage object andNEWLINE calling its variables_to_restore() method. Not used if restore_fn is set.NEWLINE restore_fn: None, or a function that takes a tf.Session object and correctlyNEWLINE restores all necessary variables from the correct checkpoint file. IfNEWLINE None, attempts to restore from the first directory in checkpoint_dirs.NEWLINE num_batches: the number of batches to use for evaluation.NEWLINE master: the location of the Tensorflow session.NEWLINE save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.NEWLINE save_graph_dir: where to store the Tensorflow graph on disk. If save_graphNEWLINE is True this must be non-empty.NEWLINE losses_dict: optional dictionary of scalar detection losses.NEWLINE eval_export_path: Path for saving a json file that contains the detectionNEWLINE results in json format.NEWLINE process_metrics_fn: a callback called with evaluation results after eachNEWLINE evaluation is done. It could be used e.g. to back up checkpoints withNEWLINE best evaluation scores, or to call an external system to update evaluationNEWLINE results in order to drive best hyper-parameter search. 
Parameters are:NEWLINE int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,NEWLINE str checkpoint_file path.NEWLINENEWLINE Returns:NEWLINE global_step: the count of global steps.NEWLINE all_evaluator_metrics: A dictionary containing metric names and values.NEWLINENEWLINE Raises:NEWLINE ValueError: if restore_fn is None and checkpoint_dirs doesn't have at leastNEWLINE one element.NEWLINE ValueError: if save_graph is True and save_graph_dir is not defined.NEWLINE """NEWLINE if save_graph and not save_graph_dir:NEWLINE raise ValueError('`save_graph_dir` must be defined.')NEWLINE sess = tf.Session(master, graph=tf.get_default_graph())NEWLINE sess.run(tf.global_variables_initializer())NEWLINE sess.run(tf.local_variables_initializer())NEWLINE sess.run(tf.tables_initializer())NEWLINE checkpoint_file = NoneNEWLINE if restore_fn:NEWLINE restore_fn(sess)NEWLINE else:NEWLINE if not checkpoint_dirs:NEWLINE raise ValueError('`checkpoint_dirs` must have at least one entry.')NEWLINE checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])NEWLINE saver = tf.train.Saver(variables_to_restore)NEWLINE saver.restore(sess, checkpoint_file)NEWLINENEWLINE if save_graph:NEWLINE tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')NEWLINENEWLINE counters = {'skipped': 0, 'success': 0}NEWLINE aggregate_result_losses_dict = collections.defaultdict(list)NEWLINE with slim.queues.QueueRunners(sess):NEWLINE try:NEWLINE for batch in range(int(num_batches)):NEWLINE if (batch + 1) % 100 == 0:NEWLINE tf.logging.info('Running eval ops batch %d/%d', batch + 1,NEWLINE num_batches)NEWLINE if not batch_processor:NEWLINE try:NEWLINE if not losses_dict:NEWLINE losses_dict = {}NEWLINE result_dict, result_losses_dict = sess.run([tensor_dict,NEWLINE losses_dict])NEWLINE counters['success'] += 1NEWLINE except tf.errors.InvalidArgumentError:NEWLINE tf.logging.info('Skipping image')NEWLINE counters['skipped'] += 1NEWLINE result_dict = {}NEWLINE else:NEWLINE result_dict, result_losses_dict = batch_processor(NEWLINE tensor_dict, sess, batch, counters, losses_dict=losses_dict)NEWLINE if not result_dict:NEWLINE continueNEWLINE for key, value in iter(result_losses_dict.items()):NEWLINE aggregate_result_losses_dict[key].append(value)NEWLINE for evaluator in evaluators:NEWLINE # TODO(b/65130867): Use image_id tensor once we fix the input dataNEWLINE # decoders to return correct image_id.NEWLINE # TODO(akuznetsa): result_dict contains batches of images, whileNEWLINE # add_single_ground_truth_image_info expects a single image. 
FixNEWLINE if (isinstance(result_dict, dict) andNEWLINE fields.InputDataFields.key in result_dict andNEWLINE result_dict[fields.InputDataFields.key]):NEWLINE image_id = result_dict[fields.InputDataFields.key]NEWLINE else:NEWLINE image_id = batchNEWLINE evaluator.add_single_ground_truth_image_info(NEWLINE image_id=image_id, groundtruth_dict=result_dict)NEWLINE evaluator.add_single_detected_image_info(NEWLINE image_id=image_id, detections_dict=result_dict)NEWLINE tf.logging.info('Running eval batches done.')NEWLINE except tf.errors.OutOfRangeError:NEWLINE tf.logging.info('Done evaluating -- epoch limit reached')NEWLINE finally:NEWLINE # When done, ask the threads to stop.NEWLINE tf.logging.info('# success: %d', counters['success'])NEWLINE tf.logging.info('# skipped: %d', counters['skipped'])NEWLINE all_evaluator_metrics = {}NEWLINE if eval_export_path and eval_export_path is not None:NEWLINE for evaluator in evaluators:NEWLINE if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) orNEWLINE isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):NEWLINE tf.logging.info('Started dumping to json file.')NEWLINE evaluator.dump_detections_to_json_file(NEWLINE json_output_path=eval_export_path)NEWLINE tf.logging.info('Finished dumping to json file.')NEWLINE for evaluator in evaluators:NEWLINE metrics = evaluator.evaluate()NEWLINE evaluator.clear()NEWLINE if any(key in all_evaluator_metrics for key in metrics):NEWLINE raise ValueError('Metric names between evaluators must not collide.')NEWLINE all_evaluator_metrics.update(metrics)NEWLINE global_step = tf.train.global_step(sess, tf.train.get_global_step())NEWLINENEWLINE for key, value in iter(aggregate_result_losses_dict.items()):NEWLINE all_evaluator_metrics['Losses/' + key] = np.mean(value)NEWLINE if process_metrics_fn and checkpoint_file:NEWLINE m = re.search(r'model.ckpt-(\d+)$', checkpoint_file)NEWLINE if not m:NEWLINE tf.logging.error('Failed to parse checkpoint number from: %s',NEWLINE checkpoint_file)NEWLINE else:NEWLINE checkpoint_number = int(m.group(1))NEWLINE process_metrics_fn(checkpoint_number, all_evaluator_metrics,NEWLINE checkpoint_file)NEWLINE sess.close()NEWLINE return (global_step, all_evaluator_metrics)NEWLINENEWLINENEWLINE# TODO(rathodv): Add tests.NEWLINEdef repeated_checkpoint_run(tensor_dict,NEWLINE summary_dir,NEWLINE evaluators,NEWLINE batch_processor=None,NEWLINE checkpoint_dirs=None,NEWLINE variables_to_restore=None,NEWLINE restore_fn=None,NEWLINE num_batches=1,NEWLINE eval_interval_secs=120,NEWLINE max_number_of_evaluations=None,NEWLINE max_evaluation_global_step=None,NEWLINE master='',NEWLINE save_graph=False,NEWLINE save_graph_dir='',NEWLINE losses_dict=None,NEWLINE eval_export_path=None,NEWLINE process_metrics_fn=None):NEWLINE """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.NEWLINENEWLINE This function repeatedly loads a checkpoint and evaluates a desiredNEWLINE set of tensors (provided by tensor_dict) and hands the resulting numpyNEWLINE arrays to a function result_processor which can be used to furtherNEWLINE process/save/visualize the results.NEWLINENEWLINE Args:NEWLINE tensor_dict: a dictionary holding tensors representing a batch of detectionsNEWLINE and corresponding groundtruth annotations.NEWLINE summary_dir: a directory to write metrics summaries.NEWLINE evaluators: a list of object of type DetectionEvaluator to be used forNEWLINE evaluation. 
Note that the metric names produced by different evaluatorsNEWLINE must be unique.NEWLINE batch_processor: a function taking three arguments:NEWLINE 1. tensor_dict: the same tensor_dict that is passed in as the firstNEWLINE argument to this function.NEWLINE 2. sess: a tensorflow sessionNEWLINE 3. batch_index: an integer representing the index of the batch amongstNEWLINE all batchesNEWLINE By default, batch_processor is None, which defaults to running:NEWLINE return sess.run(tensor_dict)NEWLINE checkpoint_dirs: list of directories to load into a DetectionModel or anNEWLINE EnsembleModel if restore_fn isn't set. Also used to determine when to runNEWLINE next evaluation. Must have at least one element.NEWLINE variables_to_restore: None, or a dictionary mapping variable names found inNEWLINE a checkpoint to model variables. The dictionary would normally beNEWLINE generated by creating a tf.train.ExponentialMovingAverage object andNEWLINE calling its variables_to_restore() method. Not used if restore_fn is set.NEWLINE restore_fn: a function that takes a tf.Session object and correctly restoresNEWLINE all necessary variables from the correct checkpoint file.NEWLINE num_batches: the number of batches to use for evaluation.NEWLINE eval_interval_secs: the number of seconds between each evaluation run.NEWLINE max_number_of_evaluations: the max number of iterations of the evaluation.NEWLINE If the value is left as None the evaluation continues indefinitely.NEWLINE max_evaluation_global_step: global step when evaluation stops.NEWLINE master: the location of the Tensorflow session.NEWLINE save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.NEWLINE save_graph_dir: where to save on disk the Tensorflow graph. If store_graphNEWLINE is True this must be non-empty.NEWLINE losses_dict: optional dictionary of scalar detection losses.NEWLINE eval_export_path: Path for saving a json file that contains the detectionNEWLINE results in json format.NEWLINE process_metrics_fn: a callback called with evaluation results after eachNEWLINE evaluation is done. It could be used e.g. to back up checkpoints withNEWLINE best evaluation scores, or to call an external system to update evaluationNEWLINE results in order to drive best hyper-parameter search. Parameters are:NEWLINE int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,NEWLINE str checkpoint_file path.NEWLINENEWLINE Returns:NEWLINE metrics: A dictionary containing metric names and values in the latestNEWLINE evaluation.NEWLINENEWLINE Raises:NEWLINE ValueError: if max_num_of_evaluations is not None or a positive number.NEWLINE ValueError: if checkpoint_dirs doesn't have at least one element.NEWLINE """NEWLINE if max_number_of_evaluations and max_number_of_evaluations <= 0:NEWLINE raise ValueError(NEWLINE '`max_number_of_evaluations` must be either None or a positive number.')NEWLINE if max_evaluation_global_step and max_evaluation_global_step <= 0:NEWLINE raise ValueError(NEWLINE '`max_evaluation_global_step` must be either None or positive.')NEWLINENEWLINE if not checkpoint_dirs:NEWLINE raise ValueError('`checkpoint_dirs` must have at least one entry.')NEWLINENEWLINE last_evaluated_model_path = NoneNEWLINE number_of_evaluations = 0NEWLINE while True:NEWLINE start = time.time()NEWLINE tf.logging.info('Starting evaluation at ' + time.strftime(NEWLINE '%Y-%m-%d-%H:%M:%S', time.gmtime()))NEWLINE model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])NEWLINE if not model_path:NEWLINE tf.logging.info('No model found in %s. 
Will try again in %d seconds',NEWLINE checkpoint_dirs[0], eval_interval_secs)NEWLINE elif model_path == last_evaluated_model_path:NEWLINE tf.logging.info('Found already evaluated checkpoint. Will try again in 'NEWLINE '%d seconds', eval_interval_secs)NEWLINE else:NEWLINE last_evaluated_model_path = model_pathNEWLINE global_step, metrics = _run_checkpoint_once(NEWLINE tensor_dict,NEWLINE evaluators,NEWLINE batch_processor,NEWLINE checkpoint_dirs,NEWLINE variables_to_restore,NEWLINE restore_fn,NEWLINE num_batches,NEWLINE master,NEWLINE save_graph,NEWLINE save_graph_dir,NEWLINE losses_dict=losses_dict,NEWLINE eval_export_path=eval_export_path,NEWLINE process_metrics_fn=process_metrics_fn)NEWLINE write_metrics(metrics, global_step, summary_dir)NEWLINE if (max_evaluation_global_step andNEWLINE global_step >= max_evaluation_global_step):NEWLINE tf.logging.info('Finished evaluation!')NEWLINE breakNEWLINE number_of_evaluations += 1NEWLINENEWLINE if (max_number_of_evaluations andNEWLINE number_of_evaluations >= max_number_of_evaluations):NEWLINE tf.logging.info('Finished evaluation!')NEWLINE breakNEWLINE time_to_next_eval = start + eval_interval_secs - time.time()NEWLINE if time_to_next_eval > 0:NEWLINE time.sleep(time_to_next_eval)NEWLINENEWLINE return metricsNEWLINENEWLINENEWLINEdef _scale_box_to_absolute(args):NEWLINE boxes, image_shape = argsNEWLINE return box_list_ops.to_absolute_coordinates(NEWLINE box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()NEWLINENEWLINENEWLINEdef _resize_detection_masks(arg_tuple):NEWLINE """Resizes detection masks.NEWLINENEWLINE Args:NEWLINE arg_tuple: A (detection_boxes, detection_masks, image_shape, pad_shape)NEWLINE tuple whereNEWLINE detection_boxes is a tf.float32 tensor of size [num_masks, 4] containingNEWLINE the box corners. Row i contains [ymin, xmin, ymax, xmax] of the boxNEWLINE corresponding to mask i. Note that the box corners are inNEWLINE normalized coordinates.NEWLINE detection_masks is a tensor of sizeNEWLINE [num_masks, mask_height, mask_width].NEWLINE image_shape is a tensor of shape [2]NEWLINE pad_shape is a tensor of shape [2] --- this is assumed to be greaterNEWLINE than or equal to image_shape along both dimensions and represents aNEWLINE shape to-be-padded-to.NEWLINENEWLINE Returns:NEWLINE """NEWLINENEWLINE detection_boxes, detection_masks, image_shape, pad_shape = arg_tupleNEWLINENEWLINE detection_masks_reframed = ops.reframe_box_masks_to_image_masks(NEWLINE detection_masks, detection_boxes, image_shape[0], image_shape[1])NEWLINENEWLINE pad_instance_dim = tf.zeros([3, 1], dtype=tf.int32)NEWLINE pad_hw_dim = tf.concat([tf.zeros([1], dtype=tf.int32),NEWLINE pad_shape - image_shape], axis=0)NEWLINE pad_hw_dim = tf.expand_dims(pad_hw_dim, 1)NEWLINE paddings = tf.concat([pad_instance_dim, pad_hw_dim], axis=1)NEWLINE detection_masks_reframed = tf.pad(detection_masks_reframed, paddings)NEWLINENEWLINE # If the masks are currently float, binarize them. Otherwise keep them asNEWLINE # integers, since they have already been thresholded.NEWLINE if detection_masks_reframed.dtype == tf.float32:NEWLINE detection_masks_reframed = tf.greater(detection_masks_reframed, 0.5)NEWLINE return tf.cast(detection_masks_reframed, tf.uint8)NEWLINENEWLINENEWLINEdef resize_detection_masks(detection_boxes, detection_masks,NEWLINE original_image_spatial_shapes):NEWLINE """Resizes per-box detection masks to be relative to the entire image.NEWLINENEWLINE Note that this function only works when the spatial size of all images inNEWLINE the batch is the same. 
If not, this function should be used with batch_size=1.NEWLINENEWLINE Args:NEWLINE detection_boxes: A [batch_size, num_instances, 4] float tensor containingNEWLINE bounding boxes.NEWLINE detection_masks: A [batch_size, num_instances, height, width] float tensorNEWLINE containing binary instance masks per box.NEWLINE original_image_spatial_shapes: a [batch_size, 3] shaped int tensorNEWLINE holding the spatial dimensions of each image in the batch.NEWLINE Returns:NEWLINE masks: Masks resized to the spatial extents given byNEWLINE (original_image_spatial_shapes[0, 0], original_image_spatial_shapes[0, 1])NEWLINE """NEWLINE # modify original image spatial shapes to be max along each dimNEWLINE # in evaluator, should have access to original_image_spatial_shape fieldNEWLINE # in add_Eval_DictNEWLINE max_spatial_shape = tf.reduce_max(NEWLINE original_image_spatial_shapes, axis=0, keep_dims=True)NEWLINE tiled_max_spatial_shape = tf.tile(NEWLINE max_spatial_shape,NEWLINE multiples=[tf.shape(original_image_spatial_shapes)[0], 1])NEWLINE return shape_utils.static_or_dynamic_map_fn(NEWLINE _resize_detection_masks,NEWLINE elems=[detection_boxes,NEWLINE detection_masks,NEWLINE original_image_spatial_shapes,NEWLINE tiled_max_spatial_shape],NEWLINE dtype=tf.uint8)NEWLINENEWLINENEWLINEdef _resize_groundtruth_masks(args):NEWLINE """Resizes groundtruth masks to the original image size."""NEWLINE mask, true_image_shape, original_image_shape, pad_shape = argsNEWLINE true_height = true_image_shape[0]NEWLINE true_width = true_image_shape[1]NEWLINE mask = mask[:, :true_height, :true_width]NEWLINE mask = tf.expand_dims(mask, 3)NEWLINE mask = tf.image.resize_images(NEWLINE mask,NEWLINE original_image_shape,NEWLINE method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,NEWLINE align_corners=True)NEWLINENEWLINE paddings = tf.concat(NEWLINE [tf.zeros([3, 1], dtype=tf.int32),NEWLINE tf.expand_dims(NEWLINE tf.concat([tf.zeros([1], dtype=tf.int32),NEWLINE pad_shape-original_image_shape], axis=0),NEWLINE 1)], axis=1)NEWLINE mask = tf.pad(tf.squeeze(mask, 3), paddings)NEWLINE return tf.cast(mask, tf.uint8)NEWLINENEWLINENEWLINEdef _resize_surface_coordinate_masks(args):NEWLINE detection_boxes, surface_coords, image_shape = argsNEWLINE surface_coords_v, surface_coords_u = tf.unstack(surface_coords, axis=-1)NEWLINE surface_coords_v_reframed = ops.reframe_box_masks_to_image_masks(NEWLINE surface_coords_v, detection_boxes, image_shape[0], image_shape[1])NEWLINE surface_coords_u_reframed = ops.reframe_box_masks_to_image_masks(NEWLINE surface_coords_u, detection_boxes, image_shape[0], image_shape[1])NEWLINE return tf.stack([surface_coords_v_reframed, surface_coords_u_reframed],NEWLINE axis=-1)NEWLINENEWLINENEWLINEdef _scale_keypoint_to_absolute(args):NEWLINE keypoints, image_shape = argsNEWLINE return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])NEWLINENEWLINENEWLINEdef result_dict_for_single_example(image,NEWLINE key,NEWLINE detections,NEWLINE groundtruth=None,NEWLINE class_agnostic=False,NEWLINE scale_to_absolute=False):NEWLINE """Merges all detection and groundtruth information for a single example.NEWLINENEWLINE Note that evaluation tools require classes that are 1-indexed, and so thisNEWLINE function performs the offset. 
If `class_agnostic` is True, all output classesNEWLINE have label 1.NEWLINENEWLINE Args:NEWLINE image: A single 4D uint8 image tensor of shape [1, H, W, C].NEWLINE key: A single string tensor identifying the image.NEWLINE detections: A dictionary of detections, returned fromNEWLINE DetectionModel.postprocess().NEWLINE groundtruth: (Optional) Dictionary of groundtruth items, with fields:NEWLINE 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, inNEWLINE normalized coordinates.NEWLINE 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.NEWLINE 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)NEWLINE 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_instance_masks': 3D int64 tensor of instance masksNEWLINE (Optional).NEWLINE 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor withNEWLINE keypoints (Optional).NEWLINE class_agnostic: Boolean indicating whether the detections are class-agnosticNEWLINE (i.e. binary). Default False.NEWLINE scale_to_absolute: Boolean indicating whether boxes and keypoints should beNEWLINE scaled to absolute coordinates. Note that for IoU based evaluations, itNEWLINE does not matter whether boxes are expressed in absolute or relativeNEWLINE coordinates. Default False.NEWLINENEWLINE Returns:NEWLINE A dictionary with:NEWLINE 'original_image': A [1, H, W, C] uint8 image tensor.NEWLINE 'key': A string tensor with image identifier.NEWLINE 'detection_boxes': [max_detections, 4] float32 tensor of boxes, inNEWLINE normalized or absolute coordinates, depending on the value ofNEWLINE `scale_to_absolute`.NEWLINE 'detection_scores': [max_detections] float32 tensor of scores.NEWLINE 'detection_classes': [max_detections] int64 tensor of 1-indexed classes.NEWLINE 'detection_masks': [max_detections, H, W] float32 tensor of binarizedNEWLINE masks, reframed to full image masks.NEWLINE 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, inNEWLINE normalized or absolute coordinates, depending on the value ofNEWLINE `scale_to_absolute`. (Optional)NEWLINE 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.NEWLINE (Optional)NEWLINE 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)NEWLINE 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_group_of': [num_boxes] int64 tensor. 
(Optional)NEWLINE 'groundtruth_instance_masks': 3D int64 tensor of instance masksNEWLINE (Optional).NEWLINE 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor withNEWLINE keypoints (Optional).NEWLINE """NEWLINENEWLINE if groundtruth:NEWLINE max_gt_boxes = tf.shape(NEWLINE groundtruth[fields.InputDataFields.groundtruth_boxes])[0]NEWLINE for gt_key in groundtruth:NEWLINE # expand groundtruth dict along the batch dimension.NEWLINE groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)NEWLINENEWLINE for detection_key in detections:NEWLINE detections[detection_key] = tf.expand_dims(NEWLINE detections[detection_key][0], axis=0)NEWLINENEWLINE batched_output_dict = result_dict_for_batched_example(NEWLINE image,NEWLINE tf.expand_dims(key, 0),NEWLINE detections,NEWLINE groundtruth,NEWLINE class_agnostic,NEWLINE scale_to_absolute,NEWLINE max_gt_boxes=max_gt_boxes)NEWLINENEWLINE exclude_keys = [NEWLINE fields.InputDataFields.original_image,NEWLINE fields.DetectionResultFields.num_detections,NEWLINE fields.InputDataFields.num_groundtruth_boxesNEWLINE ]NEWLINENEWLINE output_dict = {NEWLINE fields.InputDataFields.original_image:NEWLINE batched_output_dict[fields.InputDataFields.original_image]NEWLINE }NEWLINENEWLINE for key in batched_output_dict:NEWLINE # remove the batch dimension.NEWLINE if key not in exclude_keys:NEWLINE output_dict[key] = tf.squeeze(batched_output_dict[key], 0)NEWLINE return output_dictNEWLINENEWLINENEWLINEdef result_dict_for_batched_example(images,NEWLINE keys,NEWLINE detections,NEWLINE groundtruth=None,NEWLINE class_agnostic=False,NEWLINE scale_to_absolute=False,NEWLINE original_image_spatial_shapes=None,NEWLINE true_image_shapes=None,NEWLINE max_gt_boxes=None,NEWLINE label_id_offset=1):NEWLINE """Merges all detection and groundtruth information for a single example.NEWLINENEWLINE Note that evaluation tools require classes that are 1-indexed, and so thisNEWLINE function performs the offset. If `class_agnostic` is True, all output classesNEWLINE have label 1.NEWLINE The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary areNEWLINE normalized relative to the (potentially padded) input image, while theNEWLINE coordinates in 'detection' dictionary are normalized relative to the trueNEWLINE image shape.NEWLINENEWLINE Args:NEWLINE images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].NEWLINE keys: A [batch_size] string/int tensor with image identifier.NEWLINE detections: A dictionary of detections, returned fromNEWLINE DetectionModel.postprocess().NEWLINE groundtruth: (Optional) Dictionary of groundtruth items, with fields:NEWLINE 'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensorNEWLINE of boxes, in normalized coordinates.NEWLINE 'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor ofNEWLINE 1-indexed classes.NEWLINE 'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor ofNEWLINE bbox area. (Optional)NEWLINE 'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64NEWLINE tensor. (Optional)NEWLINE 'groundtruth_difficult': [batch_size, max_number_of_boxes] int64NEWLINE tensor. (Optional)NEWLINE 'groundtruth_group_of': [batch_size, max_number_of_boxes] int64NEWLINE tensor. 
(Optional)NEWLINE 'groundtruth_instance_masks': 4D int64 tensor of instanceNEWLINE masks (Optional).NEWLINE 'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints,NEWLINE 2] float32 tensor with keypoints (Optional).NEWLINE 'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes,NEWLINE num_keypoints] bool tensor with keypoint visibilities (Optional).NEWLINE 'groundtruth_labeled_classes': [batch_size, num_classes] int64NEWLINE tensor of 1-indexed classes. (Optional)NEWLINE 'groundtruth_dp_num_points': [batch_size, max_number_of_boxes] int32NEWLINE tensor. (Optional)NEWLINE 'groundtruth_dp_part_ids': [batch_size, max_number_of_boxes,NEWLINE max_sampled_points] int32 tensor. (Optional)NEWLINE 'groundtruth_dp_surface_coords_list': [batch_size, max_number_of_boxes,NEWLINE max_sampled_points, 4] float32 tensor. (Optional)NEWLINE class_agnostic: Boolean indicating whether the detections are class-agnosticNEWLINE (i.e. binary). Default False.NEWLINE scale_to_absolute: Boolean indicating whether boxes and keypoints should beNEWLINE scaled to absolute coordinates. Note that for IoU based evaluations, itNEWLINE does not matter whether boxes are expressed in absolute or relativeNEWLINE coordinates. Default False.NEWLINE original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]NEWLINE used to resize the image. When set to None, the image size is retained.NEWLINE true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]NEWLINE containing the size of the unpadded original_image.NEWLINE max_gt_boxes: [batch_size] tensor representing the maximum number ofNEWLINE groundtruth boxes to pad.NEWLINE label_id_offset: offset for class ids.NEWLINENEWLINE Returns:NEWLINE A dictionary with:NEWLINE 'original_image': A [batch_size, H, W, C] uint8 image tensor.NEWLINE 'original_image_spatial_shape': A [batch_size, 2] tensor containing theNEWLINE original image sizes.NEWLINE 'true_image_shape': A [batch_size, 3] tensor containing the size ofNEWLINE the unpadded original_image.NEWLINE 'key': A [batch_size] string tensor with image identifier.NEWLINE 'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,NEWLINE in normalized or absolute coordinates, depending on the value ofNEWLINE `scale_to_absolute`.NEWLINE 'detection_scores': [batch_size, max_detections] float32 tensor of scores.NEWLINE 'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexedNEWLINE classes.NEWLINE 'detection_masks': [batch_size, max_detections, H, W] uint8 tensor ofNEWLINE instance masks, reframed to full image masks. Note that these may beNEWLINE binarized (e.g. {0, 1}), or may contain 1-indexed part labels. (Optional)NEWLINE 'detection_keypoints': [batch_size, max_detections, num_keypoints, 2]NEWLINE float32 tensor containing keypoint coordinates. (Optional)NEWLINE 'detection_keypoint_scores': [batch_size, max_detections, num_keypoints]NEWLINE float32 tensor containing keypoint scores. (Optional)NEWLINE 'detection_surface_coords': [batch_size, max_detection, H, W, 2] float32NEWLINE tensor with normalized surface coordinates (e.g. DensePose UVNEWLINE coordinates). (Optional)NEWLINE 'num_detections': [batch_size] int64 tensor containing number of validNEWLINE detections.NEWLINE 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, inNEWLINE normalized or absolute coordinates, depending on the value ofNEWLINE `scale_to_absolute`. (Optional)NEWLINE 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexedNEWLINE classes. 
(Optional)NEWLINE 'groundtruth_area': [batch_size, num_boxes] float32 tensor of bboxNEWLINE area. (Optional)NEWLINE 'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)NEWLINE 'groundtruth_instance_masks': 4D int64 tensor of instance masksNEWLINE (Optional).NEWLINE 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32NEWLINE tensor with keypoints (Optional).NEWLINE 'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints]NEWLINE bool tensor with keypoint visibilities (Optional).NEWLINE 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensorNEWLINE of 1-indexed classes. (Optional)NEWLINE 'num_groundtruth_boxes': [batch_size] tensor containing the maximum numberNEWLINE of groundtruth boxes per image.NEWLINENEWLINE Raises:NEWLINE ValueError: if original_image_spatial_shape is not 2D int32 tensor of shapeNEWLINE [2].NEWLINE ValueError: if true_image_shapes is not 2D int32 tensor of shapeNEWLINE [3].NEWLINE """NEWLINE input_data_fields = fields.InputDataFieldsNEWLINE if original_image_spatial_shapes is None:NEWLINE original_image_spatial_shapes = tf.tile(NEWLINE tf.expand_dims(tf.shape(images)[1:3], axis=0),NEWLINE multiples=[tf.shape(images)[0], 1])NEWLINE else:NEWLINE if (len(original_image_spatial_shapes.shape) != 2 andNEWLINE original_image_spatial_shapes.shape[1] != 2):NEWLINE raise ValueError(NEWLINE '`original_image_spatial_shape` should be a 2D tensor of shape 'NEWLINE '[batch_size, 2].')NEWLINENEWLINE if true_image_shapes is None:NEWLINE true_image_shapes = tf.tile(NEWLINE tf.expand_dims(tf.shape(images)[1:4], axis=0),NEWLINE multiples=[tf.shape(images)[0], 1])NEWLINE else:NEWLINE if (len(true_image_shapes.shape) != 2NEWLINE and true_image_shapes.shape[1] != 3):NEWLINE raise ValueError('`true_image_shapes` should be a 2D tensor of 'NEWLINE 'shape [batch_size, 3].')NEWLINENEWLINE output_dict = {NEWLINE input_data_fields.original_image:NEWLINE images,NEWLINE input_data_fields.key:NEWLINE keys,NEWLINE input_data_fields.original_image_spatial_shape: (NEWLINE original_image_spatial_shapes),NEWLINE input_data_fields.true_image_shape:NEWLINE true_image_shapesNEWLINE }NEWLINENEWLINE detection_fields = fields.DetectionResultFieldsNEWLINE detection_boxes = detections[detection_fields.detection_boxes]NEWLINE detection_scores = detections[detection_fields.detection_scores]NEWLINE num_detections = tf.cast(detections[detection_fields.num_detections],NEWLINE dtype=tf.int32)NEWLINENEWLINE if class_agnostic:NEWLINE detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)NEWLINE else:NEWLINE detection_classes = (NEWLINE tf.to_int64(detections[detection_fields.detection_classes]) +NEWLINE label_id_offset)NEWLINENEWLINE if scale_to_absolute:NEWLINE output_dict[detection_fields.detection_boxes] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _scale_box_to_absolute,NEWLINE elems=[detection_boxes, original_image_spatial_shapes],NEWLINE dtype=tf.float32))NEWLINE else:NEWLINE output_dict[detection_fields.detection_boxes] = detection_boxesNEWLINE output_dict[detection_fields.detection_classes] = detection_classesNEWLINE output_dict[detection_fields.detection_scores] = detection_scoresNEWLINE output_dict[detection_fields.num_detections] = num_detectionsNEWLINENEWLINE if detection_fields.detection_weightPerObject in detections:NEWLINE 
output_dict[detection_fields.detection_weightPerObject] = detections[detection_fields.detection_weightPerObject]NEWLINENEWLINE if detection_fields.detection_masks in detections:NEWLINE detection_masks = detections[detection_fields.detection_masks]NEWLINE output_dict[detection_fields.detection_masks] = resize_detection_masks(NEWLINE detection_boxes, detection_masks, original_image_spatial_shapes)NEWLINENEWLINE if detection_fields.detection_surface_coords in detections:NEWLINE detection_surface_coords = detections[NEWLINE detection_fields.detection_surface_coords]NEWLINE output_dict[detection_fields.detection_surface_coords] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _resize_surface_coordinate_masks,NEWLINE elems=[detection_boxes, detection_surface_coords,NEWLINE original_image_spatial_shapes],NEWLINE dtype=tf.float32))NEWLINENEWLINE if detection_fields.detection_keypoints in detections:NEWLINE detection_keypoints = detections[detection_fields.detection_keypoints]NEWLINE output_dict[detection_fields.detection_keypoints] = detection_keypointsNEWLINE if scale_to_absolute:NEWLINE output_dict[detection_fields.detection_keypoints] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _scale_keypoint_to_absolute,NEWLINE elems=[detection_keypoints, original_image_spatial_shapes],NEWLINE dtype=tf.float32))NEWLINE if detection_fields.detection_keypoint_scores in detections:NEWLINE output_dict[detection_fields.detection_keypoint_scores] = detections[NEWLINE detection_fields.detection_keypoint_scores]NEWLINE else:NEWLINE output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like(NEWLINE detections[detection_fields.detection_keypoints][:, :, :, 0])NEWLINENEWLINE if groundtruth:NEWLINE if max_gt_boxes is None:NEWLINE if input_data_fields.num_groundtruth_boxes in groundtruth:NEWLINE max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]NEWLINE else:NEWLINE raise ValueError(NEWLINE 'max_gt_boxes must be provided when processing batched examples.')NEWLINENEWLINE if input_data_fields.groundtruth_instance_masks in groundtruth:NEWLINE masks = groundtruth[input_data_fields.groundtruth_instance_masks]NEWLINE max_spatial_shape = tf.reduce_max(NEWLINE original_image_spatial_shapes, axis=0, keep_dims=True)NEWLINE tiled_max_spatial_shape = tf.tile(NEWLINE max_spatial_shape,NEWLINE multiples=[tf.shape(original_image_spatial_shapes)[0], 1])NEWLINE groundtruth[input_data_fields.groundtruth_instance_masks] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _resize_groundtruth_masks,NEWLINE elems=[masks, true_image_shapes,NEWLINE original_image_spatial_shapes,NEWLINE tiled_max_spatial_shape],NEWLINE dtype=tf.uint8))NEWLINENEWLINE output_dict.update(groundtruth)NEWLINENEWLINE image_shape = tf.cast(tf.shape(images), tf.float32)NEWLINE image_height, image_width = image_shape[1], image_shape[2]NEWLINENEWLINE def _scale_box_to_normalized_true_image(args):NEWLINE """Scale the box coordinates to be relative to the true image shape."""NEWLINE boxes, true_image_shape = argsNEWLINE true_image_shape = tf.cast(true_image_shape, tf.float32)NEWLINE true_height, true_width = true_image_shape[0], true_image_shape[1]NEWLINE normalized_window = tf.stack([0.0, 0.0, true_height / image_height,NEWLINE true_width / image_width])NEWLINE return box_list_ops.change_coordinate_frame(NEWLINE box_list.BoxList(boxes), normalized_window).get()NEWLINENEWLINE groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]NEWLINE groundtruth_boxes = shape_utils.static_or_dynamic_map_fn(NEWLINE 
_scale_box_to_normalized_true_image,NEWLINE elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32)NEWLINE output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxesNEWLINENEWLINE if input_data_fields.groundtruth_keypoints in groundtruth:NEWLINE # If groundtruth_keypoints is in the groundtruth dictionary. Update theNEWLINE # coordinates to conform with the true image shape.NEWLINE def _scale_keypoints_to_normalized_true_image(args):NEWLINE """Scale the box coordinates to be relative to the true image shape."""NEWLINE keypoints, true_image_shape = argsNEWLINE true_image_shape = tf.cast(true_image_shape, tf.float32)NEWLINE true_height, true_width = true_image_shape[0], true_image_shape[1]NEWLINE normalized_window = tf.stack(NEWLINE [0.0, 0.0, true_height / image_height, true_width / image_width])NEWLINE return keypoint_ops.change_coordinate_frame(keypoints,NEWLINE normalized_window)NEWLINENEWLINE groundtruth_keypoints = groundtruth[NEWLINE input_data_fields.groundtruth_keypoints]NEWLINE groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn(NEWLINE _scale_keypoints_to_normalized_true_image,NEWLINE elems=[groundtruth_keypoints, true_image_shapes],NEWLINE dtype=tf.float32)NEWLINE output_dict[NEWLINE input_data_fields.groundtruth_keypoints] = groundtruth_keypointsNEWLINENEWLINE if scale_to_absolute:NEWLINE groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes]NEWLINE output_dict[input_data_fields.groundtruth_boxes] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _scale_box_to_absolute,NEWLINE elems=[groundtruth_boxes, original_image_spatial_shapes],NEWLINE dtype=tf.float32))NEWLINE if input_data_fields.groundtruth_keypoints in groundtruth:NEWLINE groundtruth_keypoints = output_dict[NEWLINE input_data_fields.groundtruth_keypoints]NEWLINE output_dict[input_data_fields.groundtruth_keypoints] = (NEWLINE shape_utils.static_or_dynamic_map_fn(NEWLINE _scale_keypoint_to_absolute,NEWLINE elems=[groundtruth_keypoints, original_image_spatial_shapes],NEWLINE dtype=tf.float32))NEWLINENEWLINE # For class-agnostic models, groundtruth classes all become 1.NEWLINE if class_agnostic:NEWLINE groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]NEWLINE groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)NEWLINE output_dict[input_data_fields.groundtruth_classes] = groundtruth_classesNEWLINENEWLINE output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxesNEWLINENEWLINE return output_dictNEWLINENEWLINENEWLINEdef get_evaluators(eval_config, categories, evaluator_options=None):NEWLINE """Returns the evaluator class according to eval_config, valid for categories.NEWLINENEWLINE Args:NEWLINE eval_config: An `eval_pb2.EvalConfig`.NEWLINE categories: A list of dicts, each of which has the following keys -NEWLINE 'id': (required) an integer id uniquely identifying this category.NEWLINE 'name': (required) string representing category name e.g., 'cat', 'dog'.NEWLINE 'keypoints': (optional) dict mapping this category's keypoints to uniqueNEWLINE ids.NEWLINE evaluator_options: A dictionary of metric names (seeNEWLINE EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initializationNEWLINE keyword arguments. 
For example:NEWLINE evalator_options = {NEWLINE 'coco_detection_metrics': {'include_metrics_per_category': True}NEWLINE }NEWLINENEWLINE Returns:NEWLINE An list of instances of DetectionEvaluator.NEWLINENEWLINE Raises:NEWLINE ValueError: if metric is not in the metric class dictionary.NEWLINE """NEWLINE evaluator_options = evaluator_options or {}NEWLINE eval_metric_fn_keys = eval_config.metrics_setNEWLINE if not eval_metric_fn_keys:NEWLINE eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]NEWLINE evaluators_list = []NEWLINE for eval_metric_fn_key in eval_metric_fn_keys:NEWLINE if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:NEWLINE raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))NEWLINE kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_keyNEWLINE in evaluator_options else {})NEWLINE evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](NEWLINE categories,NEWLINE **kwargs_dict))NEWLINENEWLINE if isinstance(eval_config, eval_pb2.EvalConfig):NEWLINE parameterized_metrics = eval_config.parameterized_metricNEWLINE for parameterized_metric in parameterized_metrics:NEWLINE assert parameterized_metric.HasField('parameterized_metric')NEWLINE if parameterized_metric.WhichOneof(NEWLINE 'parameterized_metric') == EVAL_KEYPOINT_METRIC:NEWLINE keypoint_metrics = parameterized_metric.coco_keypoint_metricsNEWLINE # Create category to keypoints mapping dict.NEWLINE category_keypoints = {}NEWLINE class_label = keypoint_metrics.class_labelNEWLINE category = NoneNEWLINE for cat in categories:NEWLINE if cat['name'] == class_label:NEWLINE category = catNEWLINE breakNEWLINE if not category:NEWLINE continueNEWLINE keypoints_for_this_class = category['keypoints']NEWLINE category_keypoints = [{NEWLINE 'id': keypoints_for_this_class[kp_name], 'name': kp_nameNEWLINE } for kp_name in keypoints_for_this_class]NEWLINE # Create keypoint evaluator for this category.NEWLINE evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC](NEWLINE category['id'], category_keypoints, class_label,NEWLINE keypoint_metrics.keypoint_label_to_sigmas))NEWLINE return evaluators_listNEWLINENEWLINENEWLINEdef get_eval_metric_ops_for_evaluators(eval_config,NEWLINE categories,NEWLINE eval_dict):NEWLINE """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.NEWLINENEWLINE Args:NEWLINE eval_config: An `eval_pb2.EvalConfig`.NEWLINE categories: A list of dicts, each of which has the following keys -NEWLINE 'id': (required) an integer id uniquely identifying this category.NEWLINE 'name': (required) string representing category name e.g., 'cat', 'dog'.NEWLINE eval_dict: An evaluation dictionary, returned fromNEWLINE result_dict_for_single_example().NEWLINENEWLINE Returns:NEWLINE A dictionary of metric names to tuple of value_op and update_op that can beNEWLINE used as eval metric ops in tf.EstimatorSpec.NEWLINE """NEWLINE eval_metric_ops = {}NEWLINE evaluator_options = evaluator_options_from_eval_config(eval_config)NEWLINE evaluators_list = get_evaluators(eval_config, categories, evaluator_options)NEWLINE for evaluator in evaluators_list:NEWLINE eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(NEWLINE eval_dict))NEWLINE return eval_metric_opsNEWLINENEWLINENEWLINEdef evaluator_options_from_eval_config(eval_config):NEWLINE """Produces a dictionary of evaluation options for each eval metric.NEWLINENEWLINE Args:NEWLINE eval_config: An `eval_pb2.EvalConfig`.NEWLINENEWLINE Returns:NEWLINE evaluator_options: A dictionary of metric names (seeNEWLINE 
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initializationNEWLINE keyword arguments. For example:NEWLINE evalator_options = {NEWLINE 'coco_detection_metrics': {'include_metrics_per_category': True}NEWLINE }NEWLINE """NEWLINE eval_metric_fn_keys = eval_config.metrics_setNEWLINE evaluator_options = {}NEWLINE for eval_metric_fn_key in eval_metric_fn_keys:NEWLINE if eval_metric_fn_key in (NEWLINE 'coco_detection_metrics', 'coco_mask_metrics', 'lvis_mask_metrics'):NEWLINE evaluator_options[eval_metric_fn_key] = {NEWLINE 'include_metrics_per_category': (NEWLINE eval_config.include_metrics_per_category)NEWLINE }NEWLINENEWLINE if (hasattr(eval_config, 'all_metrics_per_category') andNEWLINE eval_config.all_metrics_per_category):NEWLINE evaluator_options[eval_metric_fn_key].update({NEWLINE 'all_metrics_per_category': eval_config.all_metrics_per_categoryNEWLINE })NEWLINE # For coco detection eval, if the eval_config proto contains theNEWLINE # "skip_predictions_for_unlabeled_class" field, include this field inNEWLINE # evaluator_options.NEWLINE if eval_metric_fn_key == 'coco_detection_metrics' and hasattr(NEWLINE eval_config, 'skip_predictions_for_unlabeled_class'):NEWLINE evaluator_options[eval_metric_fn_key].update({NEWLINE 'skip_predictions_for_unlabeled_class':NEWLINE (eval_config.skip_predictions_for_unlabeled_class)NEWLINE })NEWLINE for super_category in eval_config.super_categories:NEWLINE if 'super_categories' not in evaluator_options[eval_metric_fn_key]:NEWLINE evaluator_options[eval_metric_fn_key]['super_categories'] = {}NEWLINE key = super_categoryNEWLINE value = eval_config.super_categories[key].split(',')NEWLINE evaluator_options[eval_metric_fn_key]['super_categories'][key] = valueNEWLINE if eval_metric_fn_key == 'lvis_mask_metrics' and hasattr(NEWLINE eval_config, 'export_path'):NEWLINE evaluator_options[eval_metric_fn_key].update({NEWLINE 'export_path': eval_config.export_pathNEWLINE })NEWLINENEWLINE elif eval_metric_fn_key == 'precision_at_recall_detection_metrics':NEWLINE evaluator_options[eval_metric_fn_key] = {NEWLINE 'recall_lower_bound': (eval_config.recall_lower_bound),NEWLINE 'recall_upper_bound': (eval_config.recall_upper_bound)NEWLINE }NEWLINE return evaluator_optionsNEWLINENEWLINENEWLINEdef has_densepose(eval_dict):NEWLINE return (fields.DetectionResultFields.detection_masks in eval_dict andNEWLINE fields.DetectionResultFields.detection_surface_coords in eval_dict)NEWLINE |
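To make the coordinate handling above concrete, here is a minimal plain-NumPy sketch (not the object_detection API) of what `_scale_box_to_absolute` does per image: normalized `[ymin, xmin, ymax, xmax]` boxes are scaled by the image height and width to obtain absolute pixel coordinates. The helper name and the sample values below are illustrative assumptions, not part of the library.

import numpy as np

def scale_boxes_to_absolute(boxes, image_height, image_width):
    # boxes: [N, 4] float array of normalized [ymin, xmin, ymax, xmax].
    scale = np.array([image_height, image_width, image_height, image_width],
                     dtype=np.float32)
    return boxes * scale

boxes = np.array([[0.1, 0.2, 0.5, 0.8]], dtype=np.float32)
print(scale_boxes_to_absolute(boxes, image_height=480, image_width=640))
# -> [[ 48. 128. 240. 512.]]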
def prime_factors(N):NEWLINE    """Return the prime factors of N in ascending order, with multiplicity."""NEWLINE    factors = []NEWLINE    i = 2NEWLINE    # Trial division: once i * i exceeds N, any remaining N > 1 is itself prime.NEWLINE    while i * i <= N:NEWLINE        while N % i == 0:NEWLINE            factors.append(i)NEWLINE            N //= iNEWLINE        i += 1NEWLINE    if N > 1:NEWLINE        factors.append(N)NEWLINE    return factorsNEWLINE |
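A quick usage check for `prime_factors` (assuming the function above is in scope): the product of the returned factors should reconstruct the input.

from functools import reduce
from operator import mul

for n in (2, 12, 97, 360, 1024):
    factors = prime_factors(n)
    assert reduce(mul, factors, 1) == n
    print(n, factors)
# e.g. 360 -> [2, 2, 2, 3, 3, 5], 97 -> [97], 1024 -> [2] * 10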
# Generated by Django 3.2.9 on 2021-11-16 10:40NEWLINENEWLINEfrom django.db import migrations, modelsNEWLINENEWLINENEWLINEclass Migration(migrations.Migration):NEWLINENEWLINE dependencies = [NEWLINE ('Blog', '0007_post_image'),NEWLINE ]NEWLINENEWLINE operations = [NEWLINE migrations.CreateModel(NEWLINE name='category',NEWLINE fields=[NEWLINE ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),NEWLINE ('name', models.CharField(max_length=255)),NEWLINE ],NEWLINE ),NEWLINE migrations.AddField(NEWLINE model_name='post',NEWLINE name='category',NEWLINE field=models.ManyToManyField(to='Blog.category'),NEWLINE ),NEWLINE ]NEWLINE |
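For context, a hedged sketch of the Blog models that this migration implies; fields added by earlier Blog migrations (such as the image field from 0007) are assumptions and only the pieces this migration introduces are shown. The class is lowercased `category` to match the migration's `name='category'`, though Django convention would capitalize it.

from django.db import models

class category(models.Model):  # lowercase to match CreateModel(name='category')
    name = models.CharField(max_length=255)

class Post(models.Model):
    # ... fields from earlier Blog migrations (title, body, image, ...) assumed
    category = models.ManyToManyField('Blog.category')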
#NEWLINE# Licensed to the Apache Software Foundation (ASF) under one or moreNEWLINE# contributor license agreements. See the NOTICE file distributed withNEWLINE# this work for additional information regarding copyright ownership.NEWLINE# The ASF licenses this file to You under the Apache License, Version 2.0NEWLINE# (the "License"); you may not use this file except in compliance withNEWLINE# the License. You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINE#NEWLINENEWLINE"""Unit tests for the DataflowRunner class."""NEWLINENEWLINE# pytype: skip-fileNEWLINENEWLINEfrom __future__ import absolute_importNEWLINENEWLINEimport jsonNEWLINEimport sysNEWLINEimport unittestNEWLINEfrom builtins import objectNEWLINEfrom builtins import rangeNEWLINEfrom datetime import datetimeNEWLINENEWLINE# patches unittest.TestCase to be python3 compatibleNEWLINEimport future.tests.base # pylint: disable=unused-importNEWLINEimport mockNEWLINEimport pytestNEWLINENEWLINEimport apache_beam as beamNEWLINEimport apache_beam.transforms as ptransformNEWLINEfrom apache_beam.options.pipeline_options import DebugOptionsNEWLINEfrom apache_beam.options.pipeline_options import PipelineOptionsNEWLINEfrom apache_beam.pipeline import AppliedPTransformNEWLINEfrom apache_beam.pipeline import PipelineNEWLINEfrom apache_beam.portability import common_urnsNEWLINEfrom apache_beam.portability.api import beam_runner_api_pb2NEWLINEfrom apache_beam.pvalue import PCollectionNEWLINEfrom apache_beam.runners import DataflowRunnerNEWLINEfrom apache_beam.runners import TestDataflowRunnerNEWLINEfrom apache_beam.runners import create_runnerNEWLINEfrom apache_beam.runners.dataflow.dataflow_runner import DataflowPipelineResultNEWLINEfrom apache_beam.runners.dataflow.dataflow_runner import DataflowRuntimeExceptionNEWLINEfrom apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_apiNEWLINEfrom apache_beam.runners.runner import PipelineStateNEWLINEfrom apache_beam.testing.extra_assertions import ExtraAssertionsMixinNEWLINEfrom apache_beam.testing.test_pipeline import TestPipelineNEWLINEfrom apache_beam.transforms import windowNEWLINEfrom apache_beam.transforms.core import WindowingNEWLINEfrom apache_beam.transforms.core import _GroupByKeyOnlyNEWLINEfrom apache_beam.transforms.display import DisplayDataItemNEWLINEfrom apache_beam.typehints import typehintsNEWLINENEWLINE# Protect against environments where apitools library is not available.NEWLINE# pylint: disable=wrong-import-order, wrong-import-positionNEWLINEtry:NEWLINE from apache_beam.runners.dataflow.internal import apiclientNEWLINEexcept ImportError:NEWLINE apiclient = None # type: ignoreNEWLINE# pylint: enable=wrong-import-order, wrong-import-positionNEWLINENEWLINE# SpecialParDo and SpecialDoFn are used in test_remote_runner_display_data.NEWLINE# Due to BEAM-8482, these need to be declared outside of the test method.NEWLINE# TODO: Should not subclass ParDo. 
Switch to PTransform as soon asNEWLINE# composite transforms support display data.NEWLINEclass SpecialParDo(beam.ParDo):NEWLINE def __init__(self, fn, now):NEWLINE super(SpecialParDo, self).__init__(fn)NEWLINE self.fn = fnNEWLINE self.now = nowNEWLINENEWLINE # Make this a list to be accessible within closureNEWLINE def display_data(self):NEWLINE return {'asubcomponent': self.fn,NEWLINE 'a_class': SpecialParDo,NEWLINE 'a_time': self.now}NEWLINENEWLINEclass SpecialDoFn(beam.DoFn):NEWLINE def display_data(self):NEWLINE return {'dofn_value': 42}NEWLINENEWLINE def process(self):NEWLINE passNEWLINENEWLINENEWLINE@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')NEWLINEclass DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):NEWLINE def setUp(self):NEWLINE self.default_properties = [NEWLINE '--dataflow_endpoint=ignored',NEWLINE '--job_name=test-job',NEWLINE '--project=test-project',NEWLINE '--staging_location=ignored',NEWLINE '--temp_location=/dev/null',NEWLINE '--no_auth',NEWLINE '--dry_run=True']NEWLINENEWLINE @mock.patch('time.sleep', return_value=None)NEWLINE def test_wait_until_finish(self, patched_time_sleep):NEWLINE values_enum = dataflow_api.Job.CurrentStateValueValuesEnumNEWLINENEWLINE class MockDataflowRunner(object):NEWLINENEWLINE def __init__(self, states):NEWLINE self.dataflow_client = mock.MagicMock()NEWLINE self.job = mock.MagicMock()NEWLINE self.job.currentState = values_enum.JOB_STATE_UNKNOWNNEWLINE self._states = statesNEWLINE self._next_state_index = 0NEWLINENEWLINE def get_job_side_effect(*args, **kwargs):NEWLINE self.job.currentState = self._states[self._next_state_index]NEWLINE if self._next_state_index < (len(self._states) - 1):NEWLINE self._next_state_index += 1NEWLINE return mock.DEFAULTNEWLINENEWLINE self.dataflow_client.get_job = mock.MagicMock(NEWLINE return_value=self.job, side_effect=get_job_side_effect)NEWLINE self.dataflow_client.list_messages = mock.MagicMock(NEWLINE return_value=([], None))NEWLINENEWLINE with self.assertRaisesRegex(NEWLINE DataflowRuntimeException, 'Dataflow pipeline failed. 
State: FAILED'):NEWLINE failed_runner = MockDataflowRunner([values_enum.JOB_STATE_FAILED])NEWLINE failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)NEWLINE failed_result.wait_until_finish()NEWLINENEWLINE succeeded_runner = MockDataflowRunner([values_enum.JOB_STATE_DONE])NEWLINE succeeded_result = DataflowPipelineResult(NEWLINE succeeded_runner.job, succeeded_runner)NEWLINE result = succeeded_result.wait_until_finish()NEWLINE self.assertEqual(result, PipelineState.DONE)NEWLINENEWLINE # Time array has duplicate items, because some logging implementations alsoNEWLINE # call time.NEWLINE with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):NEWLINE duration_succeeded_runner = MockDataflowRunner(NEWLINE [values_enum.JOB_STATE_RUNNING, values_enum.JOB_STATE_DONE])NEWLINE duration_succeeded_result = DataflowPipelineResult(NEWLINE duration_succeeded_runner.job, duration_succeeded_runner)NEWLINE result = duration_succeeded_result.wait_until_finish(5000)NEWLINE self.assertEqual(result, PipelineState.DONE)NEWLINENEWLINE with mock.patch('time.time', mock.MagicMock(side_effect=[1, 9, 9, 20, 20])):NEWLINE duration_timedout_runner = MockDataflowRunner(NEWLINE [values_enum.JOB_STATE_RUNNING])NEWLINE duration_timedout_result = DataflowPipelineResult(NEWLINE duration_timedout_runner.job, duration_timedout_runner)NEWLINE result = duration_timedout_result.wait_until_finish(5000)NEWLINE self.assertEqual(result, PipelineState.RUNNING)NEWLINENEWLINE with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):NEWLINE with self.assertRaisesRegex(NEWLINE DataflowRuntimeException,NEWLINE 'Dataflow pipeline failed. State: CANCELLED'):NEWLINE duration_failed_runner = MockDataflowRunner(NEWLINE [values_enum.JOB_STATE_CANCELLED])NEWLINE duration_failed_result = DataflowPipelineResult(NEWLINE duration_failed_runner.job, duration_failed_runner)NEWLINE duration_failed_result.wait_until_finish(5000)NEWLINENEWLINE @mock.patch('time.sleep', return_value=None)NEWLINE def test_cancel(self, patched_time_sleep):NEWLINE values_enum = dataflow_api.Job.CurrentStateValueValuesEnumNEWLINENEWLINE class MockDataflowRunner(object):NEWLINENEWLINE def __init__(self, state, cancel_result):NEWLINE self.dataflow_client = mock.MagicMock()NEWLINE self.job = mock.MagicMock()NEWLINE self.job.currentState = stateNEWLINENEWLINE self.dataflow_client.get_job = mock.MagicMock(return_value=self.job)NEWLINE self.dataflow_client.modify_job_state = mock.MagicMock(NEWLINE return_value=cancel_result)NEWLINE self.dataflow_client.list_messages = mock.MagicMock(NEWLINE return_value=([], None))NEWLINENEWLINE with self.assertRaisesRegex(NEWLINE DataflowRuntimeException, 'Failed to cancel job'):NEWLINE failed_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, False)NEWLINE failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)NEWLINE failed_result.cancel()NEWLINENEWLINE succeeded_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, True)NEWLINE succeeded_result = DataflowPipelineResult(NEWLINE succeeded_runner.job, succeeded_runner)NEWLINE succeeded_result.cancel()NEWLINENEWLINE terminal_runner = MockDataflowRunner(values_enum.JOB_STATE_DONE, False)NEWLINE terminal_result = DataflowPipelineResult(NEWLINE terminal_runner.job, terminal_runner)NEWLINE terminal_result.cancel()NEWLINENEWLINE def test_create_runner(self):NEWLINE self.assertTrue(NEWLINE isinstance(create_runner('DataflowRunner'),NEWLINE DataflowRunner))NEWLINE self.assertTrue(NEWLINE 
isinstance(create_runner('TestDataflowRunner'),NEWLINE TestDataflowRunner))NEWLINENEWLINE def test_environment_override_translation(self):NEWLINE self.default_properties.append('--experiments=beam_fn_api')NEWLINE self.default_properties.append('--worker_harness_container_image=FOO')NEWLINE remote_runner = DataflowRunner()NEWLINE with Pipeline(NEWLINE remote_runner,NEWLINE options=PipelineOptions(self.default_properties)) as p:NEWLINE (p | ptransform.Create([1, 2, 3]) # pylint: disable=expression-not-assignedNEWLINE | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])NEWLINE | ptransform.GroupByKey())NEWLINE self.assertEqual(NEWLINE list(remote_runner.proto_pipeline.components.environments.values()),NEWLINE [beam_runner_api_pb2.Environment(NEWLINE urn=common_urns.environments.DOCKER.urn,NEWLINE payload=beam_runner_api_pb2.DockerPayload(NEWLINE container_image='FOO').SerializeToString())])NEWLINENEWLINE def test_remote_runner_translation(self):NEWLINE remote_runner = DataflowRunner()NEWLINE with Pipeline(NEWLINE remote_runner,NEWLINE options=PipelineOptions(self.default_properties)) as p:NEWLINENEWLINE (p | ptransform.Create([1, 2, 3]) # pylint: disable=expression-not-assignedNEWLINE | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])NEWLINE | ptransform.GroupByKey())NEWLINENEWLINE def test_streaming_create_translation(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append("--streaming")NEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINE job_dict = json.loads(str(remote_runner.job))NEWLINE self.assertEqual(len(job_dict[u'steps']), 3)NEWLINENEWLINE self.assertEqual(job_dict[u'steps'][0][u'kind'], u'ParallelRead')NEWLINE self.assertEqual(NEWLINE job_dict[u'steps'][0][u'properties'][u'pubsub_subscription'],NEWLINE '_starting_signal/')NEWLINE self.assertEqual(job_dict[u'steps'][1][u'kind'], u'ParallelDo')NEWLINE self.assertEqual(job_dict[u'steps'][2][u'kind'], u'ParallelDo')NEWLINENEWLINE def test_biqquery_read_streaming_fail(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append("--streaming")NEWLINE with self.assertRaisesRegex(ValueError,NEWLINE r'source is not currently available'):NEWLINE with Pipeline(NEWLINE remote_runner,NEWLINE PipelineOptions(self.default_properties)) as p:NEWLINE _ = p | beam.io.Read(beam.io.BigQuerySource('some.table'))NEWLINENEWLINE # TODO(BEAM-8095): Segfaults in Python 3.7 with xdist.NEWLINE @pytest.mark.no_xdistNEWLINE def test_remote_runner_display_data(self):NEWLINE remote_runner = DataflowRunner()NEWLINE p = Pipeline(remote_runner,NEWLINE options=PipelineOptions(self.default_properties))NEWLINENEWLINE now = datetime.now()NEWLINE # pylint: disable=expression-not-assignedNEWLINE (p | ptransform.Create([1, 2, 3, 4, 5])NEWLINE | 'Do' >> SpecialParDo(SpecialDoFn(), now))NEWLINENEWLINE # TODO(BEAM-366) Enable runner API on this test.NEWLINE p.run(test_runner_api=False)NEWLINE job_dict = json.loads(str(remote_runner.job))NEWLINE steps = [stepNEWLINE for step in job_dict['steps']NEWLINE if len(step['properties'].get('display_data', [])) > 0]NEWLINE step = steps[1]NEWLINE disp_data = step['properties']['display_data']NEWLINE nspace = SpecialParDo.__module__+ '.'NEWLINE expected_data = [{'type': 'TIMESTAMP', 'namespace': nspace+'SpecialParDo',NEWLINE 'value': DisplayDataItem._format_value(now, 'TIMESTAMP'),NEWLINE 'key': 'a_time'},NEWLINE {'type': 'STRING', 'namespace': nspace+'SpecialParDo',NEWLINE 
'value': nspace+'SpecialParDo', 'key': 'a_class',NEWLINE 'shortValue': 'SpecialParDo'},NEWLINE {'type': 'INTEGER', 'namespace': nspace+'SpecialDoFn',NEWLINE 'value': 42, 'key': 'dofn_value'}]NEWLINE self.assertUnhashableCountEqual(disp_data, expected_data)NEWLINENEWLINE def test_no_group_by_key_directly_after_bigquery(self):NEWLINE remote_runner = DataflowRunner()NEWLINE p = Pipeline(remote_runner,NEWLINE options=PipelineOptions([NEWLINE '--dataflow_endpoint=ignored',NEWLINE '--job_name=test-job',NEWLINE '--project=test-project',NEWLINE '--staging_location=ignored',NEWLINE '--temp_location=/dev/null',NEWLINE '--no_auth'NEWLINE ]))NEWLINE rows = p | beam.io.Read(beam.io.BigQuerySource('dataset.faketable'))NEWLINE with self.assertRaises(ValueError,NEWLINE msg=('Coder for the GroupByKey operation'NEWLINE '"GroupByKey" is not a key-value coder: 'NEWLINE 'RowAsDictJsonCoder')):NEWLINE unused_invalid = rows | beam.GroupByKey()NEWLINENEWLINE def test_group_by_key_input_visitor_with_valid_inputs(self):NEWLINE p = TestPipeline()NEWLINE pcoll1 = PCollection(p)NEWLINE pcoll2 = PCollection(p)NEWLINE pcoll3 = PCollection(p)NEWLINE for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:NEWLINE pcoll1.element_type = NoneNEWLINE pcoll2.element_type = typehints.AnyNEWLINE pcoll3.element_type = typehints.KV[typehints.Any, typehints.Any]NEWLINE for pcoll in [pcoll1, pcoll2, pcoll3]:NEWLINE applied = AppliedPTransform(None, transform, "label", [pcoll])NEWLINE applied.outputs[None] = PCollection(None)NEWLINE DataflowRunner.group_by_key_input_visitor().visit_transform(NEWLINE applied)NEWLINE self.assertEqual(pcoll.element_type,NEWLINE typehints.KV[typehints.Any, typehints.Any])NEWLINENEWLINE def test_group_by_key_input_visitor_with_invalid_inputs(self):NEWLINE p = TestPipeline()NEWLINE pcoll1 = PCollection(p)NEWLINE pcoll2 = PCollection(p)NEWLINE for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:NEWLINE pcoll1.element_type = strNEWLINE pcoll2.element_type = typehints.SetNEWLINE err_msg = (NEWLINE r"Input to 'label' must be compatible with KV\[Any, Any\]. 
"NEWLINE "Found .*")NEWLINE for pcoll in [pcoll1, pcoll2]:NEWLINE with self.assertRaisesRegex(ValueError, err_msg):NEWLINE DataflowRunner.group_by_key_input_visitor().visit_transform(NEWLINE AppliedPTransform(None, transform, "label", [pcoll]))NEWLINENEWLINE def test_group_by_key_input_visitor_for_non_gbk_transforms(self):NEWLINE p = TestPipeline()NEWLINE pcoll = PCollection(p)NEWLINE for transform in [beam.Flatten(), beam.Map(lambda x: x)]:NEWLINE pcoll.element_type = typehints.AnyNEWLINE DataflowRunner.group_by_key_input_visitor().visit_transform(NEWLINE AppliedPTransform(None, transform, "label", [pcoll]))NEWLINE self.assertEqual(pcoll.element_type, typehints.Any)NEWLINENEWLINE def test_flatten_input_with_visitor_with_single_input(self):NEWLINE self._test_flatten_input_visitor(typehints.KV[int, int], typehints.Any, 1)NEWLINENEWLINE def test_flatten_input_with_visitor_with_multiple_inputs(self):NEWLINE self._test_flatten_input_visitor(NEWLINE typehints.KV[int, typehints.Any], typehints.Any, 5)NEWLINENEWLINE def _test_flatten_input_visitor(self, input_type, output_type, num_inputs):NEWLINE p = TestPipeline()NEWLINE inputs = []NEWLINE for _ in range(num_inputs):NEWLINE input_pcoll = PCollection(p)NEWLINE input_pcoll.element_type = input_typeNEWLINE inputs.append(input_pcoll)NEWLINE output_pcoll = PCollection(p)NEWLINE output_pcoll.element_type = output_typeNEWLINENEWLINE flatten = AppliedPTransform(None, beam.Flatten(), "label", inputs)NEWLINE flatten.add_output(output_pcoll, None)NEWLINE DataflowRunner.flatten_input_visitor().visit_transform(flatten)NEWLINE for _ in range(num_inputs):NEWLINE self.assertEqual(inputs[0].element_type, output_type)NEWLINENEWLINE def test_gbk_then_flatten_input_visitor(self):NEWLINE p = TestPipeline(NEWLINE runner=DataflowRunner(),NEWLINE options=PipelineOptions(self.default_properties))NEWLINE none_str_pc = p | 'c1' >> beam.Create({None: 'a'})NEWLINE none_int_pc = p | 'c2' >> beam.Create({None: 3})NEWLINE flat = (none_str_pc, none_int_pc) | beam.Flatten()NEWLINE _ = flat | beam.GroupByKey()NEWLINENEWLINE # This may change if type inference changes, but we assert it hereNEWLINE # to make sure the check below is not vacuous.NEWLINE self.assertNotIsInstance(flat.element_type, typehints.TupleConstraint)NEWLINENEWLINE p.visit(DataflowRunner.group_by_key_input_visitor())NEWLINE p.visit(DataflowRunner.flatten_input_visitor())NEWLINENEWLINE # The dataflow runner requires gbk input to be tuples *and* flattenNEWLINE # inputs to be equal to their outputs. 
Assert both hold.NEWLINE self.assertIsInstance(flat.element_type, typehints.TupleConstraint)NEWLINE self.assertEqual(flat.element_type, none_str_pc.element_type)NEWLINE self.assertEqual(flat.element_type, none_int_pc.element_type)NEWLINENEWLINE def test_serialize_windowing_strategy(self):NEWLINE # This just tests the basic path; more complete testsNEWLINE # are in window_test.py.NEWLINE strategy = Windowing(window.FixedWindows(10))NEWLINE self.assertEqual(NEWLINE strategy,NEWLINE DataflowRunner.deserialize_windowing_strategy(NEWLINE DataflowRunner.serialize_windowing_strategy(strategy)))NEWLINENEWLINE def test_side_input_visitor(self):NEWLINE p = TestPipeline()NEWLINE pc = p | beam.Create([])NEWLINENEWLINE transform = beam.Map(NEWLINE lambda x, y, z: (x, y, z),NEWLINE beam.pvalue.AsSingleton(pc),NEWLINE beam.pvalue.AsMultiMap(pc))NEWLINE applied_transform = AppliedPTransform(None, transform, "label", [pc])NEWLINE DataflowRunner.side_input_visitor().visit_transform(applied_transform)NEWLINE self.assertEqual(2, len(applied_transform.side_inputs))NEWLINE for side_input in applied_transform.side_inputs:NEWLINE self.assertEqual(NEWLINE common_urns.side_inputs.MULTIMAP.urn,NEWLINE side_input._side_input_data().access_pattern)NEWLINENEWLINE def test_min_cpu_platform_flag_is_propagated_to_experiments(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append('--min_cpu_platform=Intel Haswell')NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINE self.assertIn('min_cpu_platform=Intel Haswell',NEWLINE remote_runner.job.options.view_as(DebugOptions).experiments)NEWLINENEWLINE def test_streaming_engine_flag_adds_windmill_experiments(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append('--streaming')NEWLINE self.default_properties.append('--enable_streaming_engine')NEWLINE self.default_properties.append('--experiment=some_other_experiment')NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINENEWLINE experiments_for_job = (NEWLINE remote_runner.job.options.view_as(DebugOptions).experiments)NEWLINE self.assertIn('enable_streaming_engine', experiments_for_job)NEWLINE self.assertIn('enable_windmill_service', experiments_for_job)NEWLINE self.assertIn('some_other_experiment', experiments_for_job)NEWLINENEWLINE def test_dataflow_worker_jar_flag_non_fnapi_noop(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append('--experiment=some_other_experiment')NEWLINE self.default_properties.append('--dataflow_worker_jar=test.jar')NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINENEWLINE experiments_for_job = (NEWLINE remote_runner.job.options.view_as(DebugOptions).experiments)NEWLINE self.assertIn('some_other_experiment', experiments_for_job)NEWLINE self.assertNotIn('use_staged_dataflow_worker_jar', experiments_for_job)NEWLINENEWLINE def test_dataflow_worker_jar_flag_adds_use_staged_worker_jar_experiment(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append('--experiment=beam_fn_api')NEWLINE self.default_properties.append('--dataflow_worker_jar=test.jar')NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as 
p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINENEWLINE experiments_for_job = (NEWLINE remote_runner.job.options.view_as(DebugOptions).experiments)NEWLINE self.assertIn('beam_fn_api', experiments_for_job)NEWLINE self.assertIn('use_staged_dataflow_worker_jar', experiments_for_job)NEWLINENEWLINE def test_use_fastavro_experiment_is_added_on_py3_and_onwards(self):NEWLINE remote_runner = DataflowRunner()NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINENEWLINE self.assertEqual(NEWLINE sys.version_info[0] > 2,NEWLINE remote_runner.job.options.view_as(DebugOptions).lookup_experiment(NEWLINE 'use_fastavro', False))NEWLINENEWLINE def test_use_fastavro_experiment_is_not_added_when_use_avro_is_present(self):NEWLINE remote_runner = DataflowRunner()NEWLINE self.default_properties.append('--experiment=use_avro')NEWLINENEWLINE with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:NEWLINE p | ptransform.Create([1]) # pylint: disable=expression-not-assignedNEWLINENEWLINE debug_options = remote_runner.job.options.view_as(DebugOptions)NEWLINENEWLINE self.assertFalse(debug_options.lookup_experiment('use_fastavro', False))NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE unittest.main()NEWLINE |
import socketNEWLINENEWLINEdef Server(host="127.0.0.1", porta=8585):NEWLINE    """NEWLINE    -> TCP serverNEWLINE    :param host: IP address the server binds toNEWLINE    :param porta: Communication portNEWLINE    :return: NoneNEWLINE    """NEWLINE    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)NEWLINE    msg = f"[+]You are connected to {host}."NEWLINE    s.bind((host, porta))NEWLINE    s.listen(1)NEWLINE    while True:NEWLINE        c, e = s.accept()NEWLINE        print("Connected to", e)NEWLINE        c.send(msg.encode('utf-8'))NEWLINE        c.close()NEWLINENEWLINE#Server() |
from redis import RedisNEWLINEimport timeNEWLINEimport threadingNEWLINENEWLINENEWLINEdef notrans():NEWLINE    # Increment a shared counter, wait briefly, then decrement it without a transaction.NEWLINE    conn = Redis("127.0.0.1", 6379)NEWLINE    print(conn.incr('notrans:'))NEWLINE    time.sleep(.1)NEWLINE    conn.incr('notrans:', -1)NEWLINENEWLINENEWLINEdef main():NEWLINE    for _ in range(3):NEWLINE        threading.Thread(target=notrans).start()NEWLINE    time.sleep(.5)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    main() |
import jsonNEWLINENEWLINEfrom django.template.loader import render_to_stringNEWLINEfrom django.urls import reverseNEWLINEfrom django.utils.translation import ugettext_lazy as _NEWLINEfrom wagtail.admin.widgets import AdminChooserNEWLINENEWLINENEWLINEclass AdminModelChooser(AdminChooser):NEWLINE show_edit_link = FalseNEWLINENEWLINE def __init__(self, model, filter_name=None, **kwargs):NEWLINE self.target_model = modelNEWLINE name = self.target_model._meta.verbose_nameNEWLINE self.choose_one_text = _('Choose %s') % nameNEWLINE self.choose_another_text = _('Choose another %s') % nameNEWLINE self.link_to_chosen_text = _('Edit this %s') % nameNEWLINENEWLINE self.filter_name = filter_nameNEWLINENEWLINE super(AdminModelChooser, self).__init__(**kwargs)NEWLINENEWLINE def render_html(self, name, value, attrs):NEWLINE instance, value = self.get_instance_and_id(self.target_model, value)NEWLINENEWLINE original_field_html = super(AdminModelChooser, self).render_html(NEWLINE name, value, attrs)NEWLINENEWLINE return render_to_string("wagtailmodelchooser/model_chooser.html", {NEWLINE 'widget': self,NEWLINE 'model_opts': self.target_model._meta,NEWLINE 'original_field_html': original_field_html,NEWLINE 'attrs': attrs,NEWLINE 'value': value,NEWLINE 'item': instance,NEWLINE })NEWLINENEWLINE def render_js_init(self, id_, name, value):NEWLINE opts = self.target_model._metaNEWLINE kwargs = {'app_label': opts.app_label, 'model_name': opts.model_name}NEWLINE if self.filter_name:NEWLINE kwargs['filter_name'] = self.filter_nameNEWLINENEWLINE return "createModelChooser({id}, {url});".format(NEWLINE id=json.dumps(id_),NEWLINE url=json.dumps(reverse('model_chooser', kwargs=kwargs)),NEWLINE filter_name=json.dumps(self.filter_name))NEWLINE |
# Configuration file for the Sphinx documentation builder.NEWLINE#NEWLINE# This file only contains a selection of the most common options. For a fullNEWLINE# list see the documentation:NEWLINE# https://www.sphinx-doc.org/en/master/usage/configuration.htmlNEWLINENEWLINE# -- Path setup --------------------------------------------------------------NEWLINENEWLINE# If extensions (or modules to document with autodoc) are in another directory,NEWLINE# add these directories to sys.path here. If the directory is relative to theNEWLINE# documentation root, use os.path.abspath to make it absolute, like shown here.NEWLINE#NEWLINEimport osNEWLINEimport sysNEWLINEsys.path.insert(0, os.path.abspath('../'))NEWLINENEWLINENEWLINE# -- Project information -----------------------------------------------------NEWLINENEWLINEproject = 'PolyA'NEWLINEcopyright = '2021, PolyA authors'NEWLINEauthor = 'Kaitlin Carey, Audrey Shingleton, George Lesica, Travis Wheeler'NEWLINENEWLINE# The full version, including alpha/beta/rc tagsNEWLINErelease = '1.1.0'NEWLINENEWLINENEWLINE# -- General configuration ---------------------------------------------------NEWLINENEWLINE# Add any Sphinx extension module names here, as strings. They can beNEWLINE# extensions coming with Sphinx (named 'sphinx.ext.*') or your customNEWLINE# ones.NEWLINEextensions = [NEWLINE 'sphinx.ext.autodoc',NEWLINE 'sphinx.ext.doctest',NEWLINE 'sphinx.ext.mathjax',NEWLINE 'sphinx.ext.napoleon',NEWLINE 'sphinx.ext.todo',NEWLINE]NEWLINENEWLINE# Add any paths that contain templates here, relative to this directory.NEWLINEtemplates_path = ['_templates']NEWLINENEWLINE# List of patterns, relative to source directory, that match files andNEWLINE# directories to ignore when looking for source files.NEWLINE# This pattern also affects html_static_path and html_extra_path.NEWLINEexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']NEWLINENEWLINENEWLINE# -- Options for HTML output -------------------------------------------------NEWLINENEWLINE# The theme to use for HTML and HTML Help pages. See the documentation forNEWLINE# a list of builtin themes.NEWLINE#NEWLINEhtml_theme = 'alabaster'NEWLINENEWLINE# Add any paths that contain custom static files (such as style sheets) here,NEWLINE# relative to this directory. They are copied after the builtin static files,NEWLINE# so a file named "default.css" will overwrite the builtin "default.css".NEWLINEhtml_static_path = ['_static'] |
import asyncioNEWLINEimport ioNEWLINENEWLINEimport discordNEWLINEfrom asyncpg.pool import PoolNEWLINEfrom discord.ext import commandsNEWLINENEWLINEfrom utils.db.cache import BotCacheNEWLINEfrom utils.ext import standards as stdNEWLINENEWLINENEWLINEclass Context(commands.Context):NEWLINE    def __init__(self, **kwargs):NEWLINE        super().__init__(**kwargs)NEWLINE        self.pool = self.bot.dbNEWLINE        self._db = NoneNEWLINENEWLINE    async def safe_send(self, content, *, escape_mentions=True, **kwargs):NEWLINE        if escape_mentions:NEWLINE            content = discord.utils.escape_mentions(content)NEWLINENEWLINE        content = content.replace("`", "")NEWLINENEWLINE        if len(content) > 2000:NEWLINE            fp = io.BytesIO(content.encode())NEWLINE            kwargs.pop('file', None)NEWLINE            return await self.reply(file=discord.File(fp, filename='message_too_long.txt'), **kwargs)NEWLINE        else:NEWLINE            return await self.reply(content, **kwargs)NEWLINENEWLINE    @propertyNEWLINE    def db(self) -> Pool:NEWLINE        return self._db if self._db else self.poolNEWLINENEWLINE    @propertyNEWLINE    def cache(self) -> BotCache:NEWLINE        return self.bot.cacheNEWLINENEWLINE    async def lang(self, utils=False, module=None):NEWLINE        if module is None:NEWLINE            module = self.cog.qualified_nameNEWLINENEWLINE        if isinstance(module, list):NEWLINE            data = {}NEWLINE            for _module in module:NEWLINE                data |= await self.bot.lang(self.guild.id, _module.lower(), utils)NEWLINE        else:NEWLINE            data = await self.bot.lang(self.guild.id, module.lower(), utils)NEWLINENEWLINE        return dataNEWLINENEWLINE    async def release(self):NEWLINE        if self._db is not None:NEWLINE            await self.bot.pool.release(self._db)NEWLINE            self._db = NoneNEWLINENEWLINE    async def error(self, message: str, **kwargs):NEWLINE        return await self.reply(embed=std.getErrorEmbed(message), **kwargs)NEWLINENEWLINE    async def embed(self, message: str, signed=False, **kwargs):NEWLINE        embed = std.getEmbed(message)NEWLINE        if signed:NEWLINE            embed.set_footer(icon_url=self.author.avatar_url, text=f'Requested by {self.author}')NEWLINENEWLINE        return await self.reply(embed=embed, **kwargs)NEWLINENEWLINE    async def prompt(self, message, *, timeout=60.0, delete_after=True, reacquire=True,NEWLINE                     author_id=None):NEWLINE        if not self.channel.permissions_for(self.me).add_reactions:NEWLINE            raise RuntimeError('The bot cannot add reactions.')NEWLINENEWLINE        fmt = f'{message}\n\nReact with {std.yes_emoji} to confirm or {std.no_emoji} ' \NEWLINE              f'to cancel.
'NEWLINENEWLINE author_id = author_id or self.author.idNEWLINE msg = await self.reply('Ping!', embed=discord.Embed(color=std.normal_color, description=fmt))NEWLINENEWLINE confirm = NoneNEWLINENEWLINE def check(payload):NEWLINE nonlocal confirmNEWLINENEWLINE if payload.message_id != msg.id or payload.user_id != author_id:NEWLINE return FalseNEWLINENEWLINE codepoint = str(payload.emoji)NEWLINENEWLINE if codepoint == std.yes_emoji:NEWLINE confirm = TrueNEWLINE return TrueNEWLINE elif codepoint == std.no_emoji:NEWLINE confirm = FalseNEWLINE return TrueNEWLINENEWLINE return FalseNEWLINENEWLINE for emoji in (std.yes_emoji, std.no_emoji):NEWLINE await msg.add_reaction(emoji)NEWLINENEWLINE if reacquire:NEWLINE await self.release()NEWLINENEWLINE try:NEWLINE await self.bot.wait_for('raw_reaction_add', check=check, timeout=timeout)NEWLINE except asyncio.TimeoutError:NEWLINE confirm = NoneNEWLINENEWLINE try:NEWLINE if delete_after:NEWLINE await msg.delete()NEWLINE finally:NEWLINE return confirmNEWLINENEWLINENEWLINEclass FakeContext:NEWLINE def __init__(self, bot, guild):NEWLINE self.bot = botNEWLINE self.guild = guildNEWLINENEWLINE @propertyNEWLINE def cache(self):NEWLINE return self.bot.cacheNEWLINENEWLINE @propertyNEWLINE def me(self):NEWLINE return self.guild.meNEWLINE |
from pylinsql.async_database import ConnectionParametersNEWLINEimport unittestNEWLINENEWLINENEWLINEclass DatabaseTestCase(unittest.IsolatedAsyncioTestCase):NEWLINE params: ConnectionParametersNEWLINENEWLINE def __init__(self, method_name: str):NEWLINE super().__init__(method_name)NEWLINE self.params = ConnectionParameters()NEWLINENEWLINE def assertEmpty(self, obj):NEWLINE self.assertFalse(obj)NEWLINENEWLINE def assertNotEmpty(self, obj):NEWLINE self.assertTrue(obj)NEWLINE |
# coding: utf-8NEWLINE"""NEWLINE Cisco IntersightNEWLINENEWLINE Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501NEWLINENEWLINE The version of the OpenAPI document: 1.0.9-1295NEWLINE Contact: intersight@cisco.comNEWLINE Generated by: https://openapi-generator.techNEWLINE"""NEWLINENEWLINEfrom __future__ import absolute_importNEWLINENEWLINEimport unittestNEWLINENEWLINEimport intersightNEWLINEfrom intersight.models.syslog_policy_list import SyslogPolicyList # noqa: E501NEWLINEfrom intersight.rest import ApiExceptionNEWLINENEWLINENEWLINEclass TestSyslogPolicyList(unittest.TestCase):NEWLINE """SyslogPolicyList unit test stubs"""NEWLINE def setUp(self):NEWLINE passNEWLINENEWLINE def tearDown(self):NEWLINE passNEWLINENEWLINE def testSyslogPolicyList(self):NEWLINE """Test SyslogPolicyList"""NEWLINE # FIXME: construct object with mandatory attributes with example valuesNEWLINE # model = intersight.models.syslog_policy_list.SyslogPolicyList() # noqa: E501NEWLINE passNEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE unittest.main()NEWLINE |
from lib.base import BaseJiraActionNEWLINEfrom lib.formatters import to_attachment_dictNEWLINENEWLINE__all__ = [NEWLINE 'GetJiraIssueAttachmentsAction'NEWLINE]NEWLINENEWLINENEWLINEclass GetJiraIssueAttachmentsAction(BaseJiraAction):NEWLINE def run(self, issue_key):NEWLINE issue = self._client.issue(issue_key)NEWLINENEWLINE result = []NEWLINENEWLINE for attachment in issue.fields.attachment:NEWLINE item = to_attachment_dict(attachment)NEWLINE result.append(item)NEWLINENEWLINE return resultNEWLINE |
# -*- coding: utf-8 -*-NEWLINENEWLINEimport ioNEWLINEimport reNEWLINENEWLINEimport demjsonNEWLINEimport requestsNEWLINEimport pandas as pdNEWLINENEWLINEfrom zvt.api.common import china_stock_code_to_idNEWLINEfrom zvt.api.technical import init_securities, df_to_dbNEWLINEfrom zvt.domain import Provider, StockIndex, StockCategoryNEWLINEfrom zvt.recorders.consts import DEFAULT_SH_ETF_LIST_HEADERNEWLINEfrom zvt.recorders.recorder import RecorderNEWLINENEWLINENEWLINEclass ChinaETFListSpider(Recorder):NEWLINE    data_schema = StockIndexNEWLINENEWLINE    def __init__(self, batch_size=10, force_update=False, sleeping_time=10.0, provider=Provider.EXCHANGE) -> None:NEWLINE        self.provider = providerNEWLINE        super().__init__(batch_size, force_update, sleeping_time)NEWLINENEWLINE    def run(self):NEWLINE        # fetch the Shanghai (SSE) ETF listNEWLINE        url = 'http://query.sse.com.cn/commonQuery.do?sqlId=COMMON_SSE_ZQPZ_ETFLB_L_NEW'NEWLINE        response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)NEWLINE        response_dict = demjson.decode(response.text)NEWLINENEWLINE        df = pd.DataFrame(response_dict.get('result', []))NEWLINE        self.persist_etf_list(df, exchange='sh')NEWLINE        self.logger.info('Shanghai ETF list fetch completed...')NEWLINENEWLINE        # fetch the constituent stocks of Shanghai ETFsNEWLINE        self.download_sh_etf_component(df)NEWLINE        self.logger.info('Shanghai ETF constituents fetch completed...')NEWLINENEWLINE        # fetch the Shenzhen (SZSE) ETF listNEWLINE        url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1945'NEWLINE        response = requests.get(url)NEWLINENEWLINE        df = pd.read_excel(io.BytesIO(response.content), dtype=str)NEWLINE        self.persist_etf_list(df, exchange='sz')NEWLINE        self.logger.info('Shenzhen ETF list fetch completed...')NEWLINENEWLINE        # fetch the constituent stocks of Shenzhen ETFsNEWLINE        self.download_sz_etf_component(df)NEWLINE        self.logger.info('Shenzhen ETF constituents fetch completed...')NEWLINENEWLINE    def persist_etf_list(self, df: pd.DataFrame, exchange: str):NEWLINE        if df is None:NEWLINE            returnNEWLINENEWLINE        df = df.copy()NEWLINE        if exchange == 'sh':NEWLINE            df = df[['FUND_ID', 'FUND_NAME']]NEWLINE        elif exchange == 'sz':NEWLINE            df = df[['证券代码', '证券简称']]NEWLINENEWLINE        df.columns = ['code', 'name']NEWLINE        df['id'] = df['code'].apply(lambda code: f'index_{exchange}_{code}')NEWLINE        df['exchange'] = exchangeNEWLINE        df['type'] = 'index'NEWLINE        df['category'] = StockCategory.etf.valueNEWLINENEWLINE        df = df.dropna(axis=0, how='any')NEWLINE        df = df.drop_duplicates(subset='id', keep='last')NEWLINENEWLINE        init_securities(df, security_type='index', provider=self.provider)NEWLINENEWLINE    def download_sh_etf_component(self, df: pd.DataFrame):NEWLINE        """NEWLINE        ETF_CLASS => 1. single-market ETF 2. cross-market ETF 3. cross-border ETFNEWLINE        5. bond ETF 6. gold ETFNEWLINE        :param df: ETF list dataNEWLINE        :return: NoneNEWLINE        """NEWLINE        query_url = 'http://query.sse.com.cn/infodisplay/queryConstituentStockInfo.do?' \NEWLINE                    'isPagination=false&type={}&etfClass={}'NEWLINENEWLINE        etf_df = df[(df['ETF_CLASS'] == '1') | (df['ETF_CLASS'] == '2')]NEWLINE        etf_df = self.populate_sh_etf_type(etf_df)NEWLINENEWLINE        for _, etf in etf_df.iterrows():NEWLINE            url = query_url.format(etf['ETF_TYPE'], etf['ETF_CLASS'])NEWLINE            response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)NEWLINE            response_dict = demjson.decode(response.text)NEWLINE            response_df = pd.DataFrame(response_dict.get('result', []))NEWLINENEWLINE            etf_code = etf['FUND_ID']NEWLINE            index_id = f'index_sh_{etf_code}'NEWLINE            response_df = response_df[['instrumentId']]NEWLINE            response_df['id'] = response_df['instrumentId'].apply(lambda code: f'{index_id}_{china_stock_code_to_id(code)}')NEWLINE            response_df['stock_id'] = response_df['instrumentId'].apply(lambda code: china_stock_code_to_id(code))NEWLINE            response_df['index_id'] = index_idNEWLINE            response_df.drop('instrumentId', axis=1, inplace=True)NEWLINENEWLINE            df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)NEWLINE            self.logger.info(f'{etf["FUND_NAME"]} - {etf_code} constituents fetch completed...')NEWLINENEWLINE            self.sleep()NEWLINENEWLINE    def download_sz_etf_component(self, df: pd.DataFrame):NEWLINE        query_url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{}.phtml'NEWLINENEWLINE        self.parse_sz_etf_underlying_index(df)NEWLINE        for _, etf in df.iterrows():NEWLINE            underlying_index = etf['拟合指数']NEWLINE            etf_code = etf['证券代码']NEWLINENEWLINE            if len(underlying_index) == 0:NEWLINE                self.logger.info(f'{etf["证券简称"]} - {etf_code} does not track an A-share index, skipping...')NEWLINE                continueNEWLINENEWLINE            url = query_url.format(underlying_index)NEWLINE            response = requests.get(url)NEWLINE            response.encoding = 'gbk'NEWLINENEWLINE            try:NEWLINE                dfs = pd.read_html(response.text, header=1)NEWLINE            except ValueError as error:NEWLINE                self.logger.error(f'HTML parse error: {error}, response: {response.text}')NEWLINE                continueNEWLINENEWLINE            if len(dfs) < 4:NEWLINE                continueNEWLINENEWLINE            response_df = dfs[3].copy()NEWLINE            response_df = response_df.dropna(axis=1, how='any')NEWLINE            response_df['品种代码'] = response_df['品种代码'].apply(lambda x: f'{x:06d}')NEWLINENEWLINE            index_id = f'index_sz_{etf_code}'NEWLINE            response_df = response_df[['品种代码']]NEWLINENEWLINE            response_df['id'] = response_df['品种代码'].apply(lambda code: f'{index_id}_{china_stock_code_to_id(code)}')NEWLINE            response_df['stock_id'] = response_df['品种代码'].apply(lambda code: china_stock_code_to_id(code))NEWLINE            response_df['index_id'] = index_idNEWLINE            response_df.drop('品种代码', axis=1, inplace=True)NEWLINENEWLINE            df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)NEWLINE            self.logger.info(f'{etf["证券简称"]} - {etf_code} constituents fetch completed...')NEWLINENEWLINE            self.sleep()NEWLINENEWLINE    @staticmethodNEWLINE    def populate_sh_etf_type(df: pd.DataFrame):NEWLINE        """NEWLINE        Fill the TYPE corresponding to each Shanghai ETF code into the list dataNEWLINE        :param df: ETF list dataNEWLINE        :return: list data including the TYPE of each ETFNEWLINE        """NEWLINE        query_url = 'http://query.sse.com.cn/infodisplay/queryETFNewAllInfo.do?' \NEWLINE                    'isPagination=false&type={}&pageHelp.pageSize=25'NEWLINENEWLINE        type_df = pd.DataFrame()NEWLINE        for etf_class in [1, 2]:NEWLINE            url = query_url.format(etf_class)NEWLINE            response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)NEWLINE            response_dict = demjson.decode(response.text)NEWLINE            response_df = pd.DataFrame(response_dict.get('result', []))NEWLINE            response_df = response_df[['fundid1', 'etftype']]NEWLINENEWLINE            type_df = pd.concat([type_df, response_df])NEWLINENEWLINE        result_df = df.copy()NEWLINE        result_df = result_df.sort_values(by='FUND_ID').reset_index(drop=True)NEWLINE        type_df = type_df.sort_values(by='fundid1').reset_index(drop=True)NEWLINENEWLINE        result_df['ETF_TYPE'] = type_df['etftype']NEWLINENEWLINE        return result_dfNEWLINENEWLINE    @staticmethodNEWLINE    def parse_sz_etf_underlying_index(df: pd.DataFrame):NEWLINE        """NEWLINE        Parse the underlying index code tracked by each Shenzhen ETFNEWLINE        :param df: ETF list dataNEWLINE        :return: None; the '拟合指数' column is rewritten in place with the parsed index codesNEWLINE        """NEWLINE        def parse_index(text):NEWLINE            if len(text) == 0:NEWLINE                return ''NEWLINENEWLINE            result = re.search(r"(\d+).*", text)NEWLINE            if result is None:NEWLINE                return ''NEWLINE            else:NEWLINE                return result.group(1)NEWLINENEWLINE        df['拟合指数'] = df['拟合指数'].apply(parse_index)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    spider = ChinaETFListSpider(provider=Provider.EXCHANGE)NEWLINE    spider.run()NEWLINE |