content
stringlengths
0
1.55M
""" @FileName: model.py @Description: Implement model @Author: Ryuk @CreateDate: 2020/05/12 @LastEditTime: 2020/05/12 @LastEditors: Please set LastEditors @Version: v0.1 """<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<class_stmt>basicBlock(nn.Module)<block_start>expansion=1<def_stmt>__init__ self in_channels out_channels stride=1<block_start>super(basicBlock self).__init__()<line_sep>self.conv1=nn.Conv2d(in_channels out_channels kernel_size=3 padding=1 bias=<false>)<line_sep>self.bn1=nn.BatchNorm2d(out_channels)<line_sep>self.conv2=nn.Conv2d(out_channels out_channels kernel_size=3 stride=stride padding=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(out_channels)<line_sep># shortcut is a convolution layer with BatchNormalization self.shortcut=nn.Sequential()<if_stmt>stride<ne>1<or>in_channels<ne>self.expansion<times>in_channels<block_start>self.shortcut=nn.Sequential(nn.Conv2d(in_channels self.expansion<times>out_channels kernel_size=1 stride=stride bias=<false>) nn.BatchNorm2d(self.expansion<times>out_channels))<block_end><block_end><def_stmt>forward self input<block_start>x=F.relu(self.bn1(self.conv1(input)))<line_sep>x=self.bn2(self.conv2(x))<line_sep>x<augadd>self.shortcut(input)<line_sep>x=F.relu(x)<line_sep><return>x<block_end><block_end><class_stmt>bottleneckBlock(nn.Module)<block_start>expansion=4<def_stmt>__init__ self in_channels out_channels stride=1<block_start>super(bottleneckBlock self).__init__()<line_sep>self.conv1=nn.Conv2d(in_channels out_channels kernel_size=1 stride=stride bias=<false>)<line_sep>self.bn1=nn.BatchNorm2d(out_channels)<line_sep>self.conv2=nn.Conv2d(out_channels out_channels kernel_size=3 stride=stride bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(out_channels)<line_sep>self.conv3=nn.Conv2d(out_channels self.expansion<times>out_channels kernel_size=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(self.expansion<times>out_channels)<if_stmt>stride<ne>1<or>in_channels<ne>self.expansion<times>out_channels<block_start>self.shortcut=nn.Sequential(nn.Conv2d(in_channels self.expansion<times>out_channels kernel_size=1 stride=stride bias=<false>) nn.BatchNorm2d(self.expansion<times>out_channels))<block_end><block_end><def_stmt>forward self input<block_start>x=F.relu(self.bn1(self.conv1(input)))<line_sep>x=F.relu(self.bn2(self.conv2(x)))<line_sep>x=self.bn3(self.conv3(x))<line_sep>x<augadd>self.shortcut(input)<line_sep>x=F.relu(x)<line_sep><return>x<block_end><block_end><class_stmt>Resnet(nn.Module)<block_start><def_stmt>__init__ self block num_blocks num_classes=6<block_start>super(Resnet self).__init__()<line_sep>self.in_channels=64<line_sep>self.conv1=nn.Conv2d(1 64 kernel_size=3 stride=1 padding=1 bias=<false>)<line_sep>self.bn1=nn.BatchNorm2d(64)<line_sep>self.layer1=self._make_layer(block 64 num_blocks[0] stride=1)<line_sep>self.layer2=self._make_layer(block 128 num_blocks[1] stride=2)<line_sep>self.layer3=self._make_layer(block 256 num_blocks[2] stride=2)<line_sep>self.layer4=self._make_layer(block 512 num_blocks[3] stride=2)<line_sep>self.linear=nn.Linear(512<times>block.expansion num_classes)<block_end><def_stmt>_make_layer self block out_channels num_blocks stride<block_start>strides=[stride]+[1]<times>(num_blocks-1)<line_sep>layers=[]<for_stmt>stride strides<block_start>layers.append(block(self.in_channels out_channels stride))<line_sep>self.in_channels=out_channels<times>block.expansion<block_end><return>nn.Sequential(*layers)<block_end><def_stmt>forward self 
x<block_start>x=F.relu(self.bn1(self.conv1(x)))<line_sep>x=self.layer1(x)<line_sep>x=self.layer2(x)<line_sep>x=self.layer3(x)<line_sep>x=self.layer4(x)<line_sep>x=F.avg_pool2d(x 4)<line_sep>x=x.view(x.size(0) -1)<line_sep>x=self.linear(x)<line_sep><return>x<block_end><block_end><def_stmt>ResNet18 <block_start><return>Resnet(basicBlock [2 2 2 2])<block_end><def_stmt>ResNet152 <block_start><return>Resnet(bottleneckBlock [3 8 36 3])<block_end><def_stmt>main <block_start>x=torch.randn(1 1 50 32)<line_sep>net=ResNet18()<line_sep>print(net(x))<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<def_stmt>setup app<block_start>app.add_crossref_type(directivename="setting" rolename="setting")<block_end>
<import_stmt>os<import_stmt>librosa<import_from_stmt>torch.utils data<import_from_stmt>util.utils sample_fixed_length_data_aligned<class_stmt>Dataset(data.Dataset)<block_start><def_stmt>__init__ self dataset limit=<none> offset=0 sample_length=16384 mode="train"<block_start>"""Construct dataset for training and validation. Args: dataset (str): *.txt, the path of the dataset list file. See "Notes." limit (int): Return at most limit files in the list. If None, all files are returned. offset (int): Return files starting at an offset within the list. Use negative values to offset from the end of the list. sample_length(int): The model only supports fixed-length input. Use sample_length to specify the feature size of the input. mode(str): If mode is "train", return fixed-length signals. If mode is "validation", return original-length signals. Notes: dataset list file: <noisy_1_path><space><clean_1_path> <noisy_2_path><space><clean_2_path> ... <noisy_n_path><space><clean_n_path> e.g. /train/noisy/a.wav /train/clean/a.wav /train/noisy/b.wav /train/clean/b.wav ... Return: (mixture signals, clean signals, filename) """<line_sep>super(Dataset self).__init__()<line_sep>dataset_list=[line.rstrip('\n')<for>line open(os.path.abspath(os.path.expanduser(dataset)) "r")]<line_sep>dataset_list=dataset_list[offset:]<if_stmt>limit<block_start>dataset_list=dataset_list[:limit]<block_end><assert_stmt>mode<in>("train" "validation") "Mode must be one of 'train' or 'validation'."<line_sep>self.length=len(dataset_list)<line_sep>self.dataset_list=dataset_list<line_sep>self.sample_length=sample_length<line_sep>self.mode=mode<block_end><def_stmt>__len__ self<block_start><return>self.length<block_end><def_stmt>__getitem__ self item<block_start>mixture_path,clean_path=self.dataset_list[item].split(" ")<line_sep>filename=os.path.splitext(os.path.basename(mixture_path))[0]<line_sep>mixture,_=librosa.load(os.path.abspath(os.path.expanduser(mixture_path)) sr=<none>)<line_sep>clean,_=librosa.load(os.path.abspath(os.path.expanduser(clean_path)) sr=<none>)<if_stmt>self.mode<eq>"train"# The input of model should be fixed-length in the training. <block_start>mixture,clean=sample_fixed_length_data_aligned(mixture clean self.sample_length)<line_sep><return>mixture.reshape(1 -1) clean.reshape(1 -1) filename<block_end><else_stmt><block_start><return>mixture.reshape(1 -1) clean.reshape(1 -1) filename<block_end><block_end><block_end>
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>math<import_stmt>numpy<as>np<import_stmt>paddle<line_sep># x: [0: 2**bit-1], return: [-1, 1] <def_stmt>label_2_float x bits<block_start><return>2<times>x/(2<power>bits-1.)-1.<block_end>#x: [-1, 1], return: [0, 2**bits-1] <def_stmt>float_2_label x bits<block_start><assert_stmt>abs(x).max()<le>1.0<line_sep>x=(x+1.)<times>(2<power>bits-1)/2<line_sep><return>x.clip(0 2<power>bits-1)<block_end># y: [-1, 1], mu: 2**bits, return: [0, 2**bits-1] # see https://en.wikipedia.org/wiki/%CE%9C-law_algorithm # be careful the input `mu` here, which is +1 than that of the link above <def_stmt>encode_mu_law x mu<block_start>mu=mu-1<line_sep>fx=np.sign(x)<times>np.log(1+mu<times>np.abs(x))/np.log(1+mu)<line_sep><return>np.floor((fx+1)/2<times>mu+0.5)<block_end># from_labels = True: # y: [0: 2**bit-1], mu: 2**bits, return: [-1,1] # from_labels = False: # y: [-1, 1], return: [-1, 1] <def_stmt>decode_mu_law y mu from_labels=<true># TODO: get rid of log2 - makes no sense <block_start><if_stmt>from_labels<block_start>y=label_2_float(y math.log2(mu))<block_end>mu=mu-1<line_sep>x=paddle.sign(y)/mu<times>((1+mu)<power>paddle.abs(y)-1)<line_sep><return>x<block_end>
<import_from_stmt>netCDF4 Dataset<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>math<line_sep>fig,axes=plt.subplots()<line_sep>subcycleNumber=7680<line_sep>operatorMethods=["wachspress" "pwl" "weak"]<for_stmt>operatorMethod operatorMethods# data in <block_start>filenameIn="./output_hex_%s_%i/output.2000.nc"%(operatorMethod subcycleNumber)<line_sep>filein=Dataset(filenameIn "r")<line_sep>nCells=len(filein.dimensions["nCells"])<line_sep>nVertices=len(filein.dimensions["nVertices"])<line_sep>vertexDegree=len(filein.dimensions["vertexDegree"])<line_sep>nTimes=len(filein.dimensions["Time"])<line_sep>cellsOnVertex=filein.variables["cellsOnVertex"][:]<line_sep>cellsOnVertex<augsub>1<line_sep>xVertex=filein.variables["xVertex"][:]<line_sep>yVertex=filein.variables["yVertex"][:]<line_sep>xCell=filein.variables["xCell"][:]<line_sep>yCell=filein.variables["yCell"][:]<line_sep>uVelocity=filein.variables["uVelocity"][-1 :]<line_sep>vVelocity=filein.variables["vVelocity"][-1 :]<line_sep>uVelocities=filein.variables["uVelocity"][: :]<line_sep>filein.close()<line_sep>xmin=np.amin(xVertex)<line_sep>xmax=np.amax(xVertex)<line_sep>ymin=np.amin(yVertex)<line_sep>ymax=np.amax(yVertex)<line_sep>us=[]<for_stmt>iTime range(0 nTimes)<block_start>x=[]<line_sep>u=[]<for_stmt>iVertex range(0 nVertices)<block_start><if_stmt>(math.fabs(yVertex[iVertex]-508068.236886871)<l>1e-8)<block_start>x.append(xVertex[iVertex])<line_sep>u.append(uVelocities[iTime iVertex])<block_end><block_end>x=np.array(x)<line_sep>u=np.array(u)<line_sep>sortedIdxs=x.argsort()<line_sep>x=x[sortedIdxs]<line_sep>u=u[sortedIdxs]<line_sep>us.append(math.sqrt(np.sum(np.power(u 2))))<if_stmt>(iTime<eq>nTimes-1)<block_start>axes.plot(x u label=operatorMethod)<block_end><block_end><block_end>#axes.plot(x, np.zeros(x.shape[0]), zorder=1, c='k') uAir=1.0<line_sep>rhoair=1.3<line_sep>rhow=1026.0<line_sep>cocn=0.00536<line_sep>cair=0.0012<line_sep>Pstar=2.75e4<line_sep>Cstar=20.0<line_sep>e=2<line_sep>alpha=math.sqrt(1.0+math.pow(1.0/e 2))<line_sep>Lx=1280000<line_sep>uu=[]<for_stmt>xx x<block_start>a=xx/Lx<line_sep>v=2.0<times>a<line_sep>dadx=(1.0/Lx)<line_sep>dvdx=2.0<times>dadx<line_sep>oceanStressCoeff=rhow<times>cocn<times>a<line_sep>airStress=rhoair<times>uAir<times>uAir<times>a<times>cair<line_sep>P=Pstar<times>v<times>math.exp(-Cstar<times>(1-a))<line_sep>dPdx=Pstar<times>math.exp(-Cstar<times>(1-a))<times>(dvdx+v<times>Cstar<times>dadx)<line_sep>print(xx a -Cstar<times>(1-a) P dPdx)<line_sep>u=max((airStress-0.5<times>(alpha+1.0)<times>dPdx)/oceanStressCoeff 0.0)<line_sep>uu.append(u)<block_end>axes.plot(x uu zorder=2 c='r')<line_sep>axes.set_xlabel("time")<line_sep>axes.set_ylabel("uVelocity")<line_sep>axes.legend()<line_sep>plt.savefig("1D_velocity_operator.png" dpi=300)<line_sep>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transformations for equirectangular and perspective images. The coordinate system is the same as OpenGL's, where -Z is the camera looking direction, +Y points up and +X points right. Rotations are applied as pre-multiplication in all cases. """<import_stmt>math<import_from_stmt>pano_utils geometry<import_from_stmt>pano_utils math_utils<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tensorflow_addons<as>tfa<def_stmt>equirectangular_sampler images spherical_coordinates<block_start>"""Sample panorama images using a grid of spherical coordinates. Args: images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`. spherical_coordinates: a float32 tensor with shape [BATCH, sampling_height, sampling_width, 2] representing spherical coordinates (colatitude, azimuth) of the sampling grids. Returns: a 4-D tensor of shape `[BATCH, sampling_height, sampling_width, CHANNELS]` representing resampled images. Raises: ValueError: 'images' or 'spherical_coordinates' has the wrong dimensions. """<with_stmt>tf.name_scope(<none> 'equirectangular_sampler' [images spherical_coordinates])<block_start><if_stmt>len(images.shape)<ne>4<block_start><raise>ValueError("'images' has the wrong dimensions.")<block_end><if_stmt>spherical_coordinates.shape[-1]<ne>2<block_start><raise>ValueError("'spherical_coordinates' has the wrong dimensions.")<block_end>shape=images.shape.as_list()<line_sep>height,width=shape[1] shape[2]<line_sep>padded_images=geometry.equirectangular_padding(images [[1 1] [1 1]])<line_sep>colatitude,azimuth=tf.split(spherical_coordinates [1 1] -1)<line_sep># The colatitude of the equirectangular image goes from 0 (the top row) # to pi (the bottom), not inclusively. The azimuth goes from 0 # (the leftmost column) to 2*pi (the rightmost column). # For example, azimuth-colatitude (0, pi/2) is the mid pixel in the first # column of the equirect image. # Convert spherical coordinates to equirectangular coordinates on images. # +1 in the end because of the padding. x_pano=(tf.mod(azimuth/math.pi 2)<times>width/2.0-0.5)+1<line_sep>y_pano=((colatitude/math.pi)<times>height-0.5)+1<line_sep>pano_coordinates=tf.concat([x_pano y_pano] -1)<line_sep>remapped=tfa.image.resampler(padded_images pano_coordinates)<line_sep><return>remapped<block_end><block_end><def_stmt>rectilinear_projection images resolution fov rotations<block_start>"""Convert equirectangular panoramic images to perspective images. First, the panorama images are rotated by the input parameter "rotations". Then, the region with the field of view "fov" centered at camera's look-at -Z axis is projected into perspective images. The -Z axis corresponds to the spherical coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano. Args: images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`. resolution: a 2-D tuple or list containing the resolution of desired output. fov: (float) camera's horizontal field of view in degrees. 
rotations: [BATCH, 3, 3] rotation matrices. Returns: 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]` Raises: ValueError: 'images' has the wrong dimensions. ValueError: 'images' is not a float tensor. ValueError: 'rotations' has the wrong dimensions. """<with_stmt>tf.name_scope(<none> 'rectilinear_projection' [images resolution fov rotations])<block_start><if_stmt>len(images.shape)<ne>4<block_start><raise>ValueError("'images' has the wrong dimensions.")<block_end><if_stmt>images.dtype<ne>tf.float32<and>images.dtype<ne>tf.float64<block_start><raise>ValueError("'images' must be a float tensor.")<block_end><if_stmt>rotations.shape[-2:]<ne>[3 3]<block_start><raise>ValueError("'rotations' has the wrong dimensions.")<block_end>shape=images.shape.as_list()<line_sep>batch=shape[0]<line_sep>cartesian_coordinates=geometry.generate_cartesian_grid(resolution fov)<line_sep># create batch -> [batch, height, width, 3] cartesian_coordinates=tf.tile(tf.expand_dims(cartesian_coordinates axis=0) [batch 1 1 1])<line_sep># The rotation matrices have to be [batch, height, width, 3, 3]. flip_x=tf.constant([[-1. 0. 0.] [0. 1. 0.] [0. 0. 1.]])<line_sep>rotations=tf.matmul(flip_x tf.matmul(rotations flip_x transpose_a=<true>))<line_sep>rotated_coordinates=tf.matmul(rotations[: tf.newaxis tf.newaxis] tf.expand_dims(cartesian_coordinates -1) transpose_a=<true>)<line_sep>axis_convert=tf.constant([[0. 0. 1.] [1. 0. 0.] [0. 1. 0.]])<line_sep>rotated_coordinates=tf.matmul(axis_convert rotated_coordinates)<line_sep>rotated_coordinates=tf.squeeze(rotated_coordinates -1)<line_sep>spherical_coordinates=geometry.cartesian_to_spherical(rotated_coordinates)<line_sep># The azimuth of 'spherical_coordinates' decreases from left to right but # the x should increase from left to right. spherical_coordinates=tf.reverse(spherical_coordinates [2])<line_sep><return>equirectangular_sampler(images spherical_coordinates)<block_end><block_end><def_stmt>rotate_pano images rotations<block_start>"""Rotate Panoramic images. Convert the spherical coordinates (colatitude, azimuth) to Cartesian (x, y, z) then apply SO(3) rotation matrices. Finally, convert them back to spherical coordinates and remap the equirectangular images. Note1: The rotations are applied to the sampling sphere instead of the camera. The camera actually rotates R^T. I_out(x) = I_in(R * x), x are points in the camera frame. Note2: It uses a simple linear interpolation for now instead of slerp, so the pixel values are not accurate but visually plausible. Args: images: a 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`. rotations: [BATCH, 3, 3] rotation matrices. Returns: 4-D tensor of shape `[BATCH, HEIGHT, WIDTH, CHANNELS]`. Raises: ValueError: if the `images` or 'rotations' has the wrong dimensions. """<with_stmt>tf.name_scope(<none> 'rotate_pano' [images rotations])<block_start><if_stmt>len(images.shape)<ne>4<block_start><raise>ValueError("'images' has the wrong dimensions.")<block_end><if_stmt>rotations.shape[-2:]<ne>[3 3]<block_start><raise>ValueError("'rotations' must have 3x3 dimensions.")<block_end>shape=images.shape.as_list()<line_sep>batch,height,width=shape[0] shape[1] shape[2]<line_sep>spherical=tf.expand_dims(geometry.generate_equirectangular_grid([height width]) 0)<line_sep>spherical=tf.tile(spherical [batch 1 1 1])<line_sep>cartesian=geometry.spherical_to_cartesian(spherical)<line_sep>axis_convert=tf.constant([[0. 1. 0.] [0. 0. -1.] [-1. 0. 
0.]])<line_sep>cartesian=tf.matmul(axis_convert tf.expand_dims(cartesian -1))<line_sep>rotated_cartesian=tf.matmul(rotations[: tf.newaxis tf.newaxis] cartesian)<line_sep>rotated_cartesian=tf.squeeze(tf.matmul(axis_convert rotated_cartesian transpose_a=<true>) -1)<line_sep>rotated_spherical=geometry.cartesian_to_spherical(rotated_cartesian)<line_sep><return>equirectangular_sampler(images rotated_spherical)<block_end><block_end><def_stmt>rotate_image_in_3d images input_rotations input_fov output_fov output_shape<block_start>"""Return reprojected perspective view images given a rotated camera. This function applies a homography H = K_output * R^T * K_input' where K_output and K_input are the output and input camera intrinsics, R is the rotation from the input images' frame to the target frame. Args: images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images. input_rotations: [BATCH, 3, 3] rotations matrices from current camera frame to target camera frame. input_fov: [BATCH] a 1-D tensor (float32) of input field of view in degrees. output_fov: (float) output field of view in degrees. output_shape: a 2-D list of output dimension [height, width]. Returns: reprojected images [BATCH, height, width, CHANNELS]. """<with_stmt>tf.name_scope(<none> 'rotate_image_in_3d' [images input_rotations input_fov output_fov output_shape])<block_start><if_stmt>len(images.shape)<ne>4<block_start><raise>ValueError("'images' has the wrong dimensions.")<block_end><if_stmt>input_rotations.shape[-2:]<ne>[3 3]<block_start><raise>ValueError("'input_rotations' must have 3x3 dimensions.")<block_end>shape=images.shape.as_list()<line_sep>batch,height,width=shape[0] shape[1] shape[2]<line_sep>cartesian=geometry.generate_cartesian_grid(output_shape output_fov)<line_sep>cartesian=tf.tile(cartesian[tf.newaxis : : : tf.newaxis] [batch 1 1 1 1])<line_sep>input_rotations=tf.tile(input_rotations[: tf.newaxis tf.newaxis :] [1]+output_shape+[1 1])<line_sep>cartesian=tf.squeeze(tf.matmul(input_rotations cartesian transpose_a=<true>) -1)<line_sep>image_coordinates=-cartesian[: : : :2]/cartesian[: : : -1:]<line_sep>x,y=tf.split(image_coordinates [1 1] -1)<line_sep>w=2<times>tf.tan(math_utils.degrees_to_radians(input_fov/2))<line_sep>h=2<times>tf.tan(math_utils.degrees_to_radians(input_fov/2))<line_sep>w=w[: tf.newaxis tf.newaxis tf.newaxis]<line_sep>h=h[: tf.newaxis tf.newaxis tf.newaxis]<line_sep>nx=x<times>width/w+width/2-0.5<line_sep>ny=-y<times>height/h+height/2-0.5<line_sep><return>tfa.image.resampler(images tf.concat([nx ny] -1))<block_end><block_end><def_stmt>rotate_image_on_pano images rotations fov output_shape<block_start>"""Transform perspective images to equirectangular images after rotations. Return equirectangular panoramic images in which the input perspective images embedded in after the rotation R from the input images' frame to the target frame. The image with the field of view "fov" centered at camera's look-at -Z axis is projected onto the pano. The -Z axis corresponds to the spherical coordinates (pi/2, pi/2) which is (HEIGHT/2, WIDTH/4) on the pano. Args: images: [BATCH, HEIGHT, WIDTH, CHANNEL] perspective view images. rotations: [BATCH, 3, 3] rotations matrices. fov: (float) images' field of view in degrees. output_shape: a 2-D list of output dimension [height, width]. Returns: equirectangular images [BATCH, height, width, CHANNELS]. 
"""<with_stmt>tf.name_scope(<none> 'rotate_image_on_pano' [images rotations fov output_shape])<block_start><if_stmt>len(images.shape)<ne>4<block_start><raise>ValueError("'images' has the wrong dimensions.")<block_end><if_stmt>rotations.shape[-2:]<ne>[3 3]<block_start><raise>ValueError("'rotations' must have 3x3 dimensions.")<block_end>shape=images.shape.as_list()<line_sep>batch,height,width=shape[0] shape[1] shape[2]<line_sep># Generate a mesh grid on a sphere. spherical=geometry.generate_equirectangular_grid(output_shape)<line_sep>cartesian=geometry.spherical_to_cartesian(spherical)<line_sep>cartesian=tf.tile(cartesian[tf.newaxis : : : tf.newaxis] [batch 1 1 1 1])<line_sep>axis_convert=tf.constant([[0. -1. 0.] [0. 0. 1.] [1. 0. 0.]])<line_sep>cartesian=tf.matmul(axis_convert cartesian)<line_sep>cartesian=tf.squeeze(tf.matmul(rotations[: tf.newaxis tf.newaxis] cartesian) -1)<line_sep># Only take one hemisphere. (camera lookat direction) hemisphere_mask=tf.cast(cartesian[: : : -1:]<l>0 tf.float32)<line_sep>image_coordinates=cartesian[: : : :2]/cartesian[: : : -1:]<line_sep>x,y=tf.split(image_coordinates [1 1] -1)<line_sep># Map pixels on equirectangular pano to perspective image. nx=-x<times>width/(2<times>tf.tan(math_utils.degrees_to_radians(fov/2)))+width/2-0.5<line_sep>ny=y<times>height/(2<times>tf.tan(math_utils.degrees_to_radians(fov/2)))+height/2-0.5<line_sep>transformed=hemisphere_mask<times>tfa.image.resampler(images tf.concat([nx ny] -1))<line_sep><return>transformed<block_end><block_end>
<import_from_stmt>gpiozero LEDBoard<import_from_stmt>signal pause<line_sep>leds=LEDBoard(5 6 13 19 26 pwm=<true>)<line_sep>leds.value=(0.2 0.4 0.6 0.8 1.0)<line_sep>pause()<line_sep>
""" This package contains the portions of the library used only when implementing an OpenID consumer. """<line_sep>__all__=['consumer' 'discover']<line_sep>
<import_stmt>os<import_stmt>json<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>scipy.stats<as>stats<line_sep># t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=False) test_result_dir="utils/testresults"<line_sep>all_results={}<line_sep>aggregate_terms=["count" "valid" "missing" "distinct" "sum" "mean" "average" "variance" "variancep" "stdev" "stdevp" "stderr" "median" "q1" "q3" "ci0" "ci1" "min" "max" "argmin" "argmax"]<line_sep>file_paths=["/vizmodeluninat5.json" "/vizmodeluninat10.json" "/vizmodeluninat15.json" "/vizmodeluninat20.json" "/vizmodeluni5.json" "/vizmodeluni10.json" "/vizmodeluni15.json" "/vizmodeluni20.json" "/vizmodelbi5.json" "/vizmodelbi10.json" "/vizmodelbi15.json" "/vizmodelbi20.json"]<def_stmt>analyze_test_suite test_dataset_directory# for subdir, dirs, files in os.walk(test_dataset_directory): # for file in files: # filepath = subdir + os.sep + file # if filepath.endswith( # "json") and not filepath.endswith("lsit.json"): <block_start><for_stmt>filepath file_paths<block_start>filepath=test_result_dir+filepath<line_sep># data = json.load(open(filepath)) # print(filepath) analyze_data(filepath)<block_end><block_end><def_stmt>is_valid_aggregate agg_val<block_start><if_stmt>(agg_val<not><in>aggregate_terms)# print("issh", agg_val) <block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>computer_anova <block_start>print("anova")<block_end><def_stmt>analyze_data filepath<block_start>data=json.load(open(filepath))<line_sep>beam_width=data["beamwidth"]<line_sep>valid_json_array=[]<line_sep>valid_vega_array=[]<line_sep>phantom_count_array=[]<line_sep>x=list(range(0 100))<for_stmt>row data["data"]<block_start>valid_json_count=row["validjsoncount"]/beam_width<line_sep>valid_json_array.append(valid_json_count)<line_sep>valid_vega_count=row["validvegacount"]<line_sep>vs_array=row["vegaspecarray"]<line_sep># mark specs with incorrect aggregation value as invalid vega <for_stmt>vs_row vs_array<block_start><if_stmt>("aggregate"<in>vs_row["encoding"]["y"])<block_start><if_stmt><not>is_valid_aggregate(vs_row["encoding"]["y"]["aggregate"])<block_start>valid_vega_count<augsub>1<block_end><else_stmt><block_start><if_stmt>("aggregate"<in>vs_row["encoding"]["x"])<block_start><if_stmt><not>is_valid_aggregate(vs_row["encoding"]["x"]["aggregate"])<block_start>valid_vega_count<augsub>1<block_end><block_end><block_end><block_end><block_end># print(valid_vega_count, row["validjsoncount"]) valid_vegap_count=valid_vega_count<line_sep>valid_vega_count=valid_vega_count/beam_width<line_sep>valid_vega_array.append(valid_vega_count)<if_stmt>(valid_vega_count<eq>0)<block_start>phantom_count=0<block_end><else_stmt><block_start>phantom_count=row["phantomcount"]/valid_vegap_count<block_end>phantom_count_array.append(phantom_count)<line_sep># print("Count", row["phantomcount"], valid_vegap_count) <block_end># print(x, valid_json_array) # plt.plot(x, valid_json_array) # plt.plot(x, valid_vega_array) # plt.plot(x, phantom_count_array) # plt.show() print(filepath.split("vizmodel")[1] "Json:" round(np.mean(valid_json_array) 3) "Vega" round(np.mean(valid_vega_array) 3) "Mean % Phantom" round(np.mean(phantom_count_array) 3))<line_sep>result={"json:":valid_json_array "vega":valid_vega_array}<block_end>analyze_test_suite(test_result_dir)<line_sep># data = json.load(open("utils/testresults/vizmodelbi15.json")) # print(len(data["data"])) # analyze_data("utils/testresults/vizmodeluninat15.json")
<import_from_stmt>django template<line_sep>register=template.Library()<line_sep>@register.simple_tag<def_stmt>get_ground_truth obj image question<block_start>""" Get the ground truth value for the image/question combination in reader study obj. """<line_sep>ground_truths=obj.statistics["ground_truths"]<line_sep><return>ground_truths[image][question]<block_end>
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>PIL Image<import_stmt>torchvision.transforms<as>T<import_from_stmt>infer Inference<import_from_stmt>utils.nms nms<line_sep>torch.set_grad_enabled(<false>)<def_stmt>class_agnostic_nms boxes scores iou=0.5<block_start><if_stmt>len(boxes)<g>1<block_start>boxes,scores=nms(np.array(boxes) np.array(scores) iou)<line_sep><return>list(boxes) list(scores)<block_end><else_stmt><block_start><return>boxes scores<block_end><block_end><def_stmt>generate_image_crops img num_crops=8<block_start>""" Note: num_crops must be greater than 2 and of multiple of 2 """<assert_stmt>num_crops<g>2<assert_stmt>num_crops%2<eq>0<line_sep># Get the image width and height img_w,img_h=img.size<line_sep>crops=[]<line_sep>coordinates=[]<line_sep>crops.append(img)<line_sep>coordinates.append((0 0 img_w img_h))<line_sep>crop_chunks_x=int(num_crops/2)<line_sep>crop_chunks_y=int(num_crops/crop_chunks_x)<line_sep>x_inc=int(img_w/crop_chunks_y)<line_sep>y_inc=int(img_h/crop_chunks_y)<line_sep>x_space=np.linspace(0 img_w-x_inc crop_chunks_y)<line_sep>y_spcae=np.linspace(0 img_h-y_inc int(num_crops/crop_chunks_y))<if_stmt>num_crops<g>1<block_start><for_stmt>x x_space<block_start><for_stmt>y y_spcae<block_start>x1,y1=x y<line_sep>x2,y2=x1+x_inc y1+y_inc<line_sep>crops.append((img.crop((x1 y1 x2 y2))).resize((img_w img_h)))<line_sep>coordinates.append((x1 y1 x2 y2))<block_end><block_end><block_end><return>crops coordinates (img_w img_h)<block_end><def_stmt>scale_boxes boxes coordinates img_dims<block_start>x1,y1,x2,y2=coordinates<line_sep>img_w,img_h=img_dims<line_sep>w=x2-x1<line_sep>h=y2-y1<for_stmt>b boxes<block_start>b[0],b[1],b[2],b[3]=int((b[0]/img_w)<times>w)+x1 int((b[1]/img_h)<times>h)+y1 int((b[2]/img_w)<times>w)+x1 int((b[3]/img_h)<times>h)+y1<block_end><return>boxes<block_end><class_stmt>ModulatedDetection(Inference)<block_start>""" The class supports the inference using both MDETR & MDef-DETR models. 
"""<def_stmt>__init__ self model confidence_thresh=0.0<block_start>Inference.__init__(self model)<line_sep>self.conf_thresh=confidence_thresh<line_sep>self.transform=T.Compose([T.Resize(800) T.ToTensor() T.Normalize([0.485 0.456 0.406] [0.229 0.224 0.225])])<block_end>@staticmethod<def_stmt>box_cxcywh_to_xyxy x<block_start>x_c,y_c,w,h=x.unbind(1)<line_sep>b=[(x_c-0.5<times>w) (y_c-0.5<times>h) (x_c+0.5<times>w) (y_c+0.5<times>h)]<line_sep><return>torch.stack(b dim=1)<block_end><def_stmt>rescale_bboxes self out_bbox size<block_start>img_w,img_h=size<line_sep>b=self.box_cxcywh_to_xyxy(out_bbox)<line_sep>b=b<times>torch.tensor([img_w img_h img_w img_h] dtype=torch.float32)<line_sep><return>b<block_end><def_stmt>infer_image self image_path **kwargs<block_start>caption=kwargs["caption"]<line_sep># Read the image im=Image.open(image_path)<line_sep>imq=np.array(im)<if_stmt>len(imq.shape)<ne>3<block_start>im=im.convert('RGB')<block_end>img=self.transform(im).unsqueeze(0).cuda()<line_sep># propagate through the models memory_cache=self.model(img [caption] encode_and_save=<true>)<line_sep>outputs=self.model(img [caption] encode_and_save=<false> memory_cache=memory_cache)<line_sep># keep only predictions with self.conf_thresh+ confidence probas=1-outputs['pred_logits'].softmax(-1)[0 : -1].cpu()<line_sep>keep=(probas<g>self.conf_thresh).cpu()<line_sep># convert boxes from [0; 1] to image scales bboxes_scaled=self.rescale_bboxes(outputs['pred_boxes'].cpu()[0 keep] im.size)<line_sep>kept_probs=probas[keep]<line_sep># Convert outputs to the required format bboxes=list(bboxes_scaled.numpy())<line_sep>probs=list(kept_probs.numpy())<line_sep>boxes,scores=[] []<for_stmt>b,conf zip(bboxes probs)<block_start>boxes.append([int(b[0]) int(b[1]) int(b[2]) int(b[3])])<line_sep>scores.append(conf)<block_end># Read image, perform inference, parse results, append the predicted boxes to detections <return>boxes scores<block_end><def_stmt>infer_image_multi_crop self image_path **kwargs<block_start>caption=kwargs["caption"]<line_sep># Read the image im=Image.open(image_path)<line_sep>crops,coordinates,img_dims=generate_image_crops(im)<line_sep>imgs=[self.transform(crop).unsqueeze(0).cuda()<for>crop crops]<line_sep>imgs=torch.cat(imgs)<line_sep># propagate through the models memory_cache=self.model(imgs [caption<for>i range(imgs.shape[0])] encode_and_save=<true>)<line_sep>outputs=self.model(imgs [caption] encode_and_save=<false> memory_cache=memory_cache)<line_sep>all_boxes=[]<line_sep>all_scores=[]<for_stmt>i range(len(crops))# keep only predictions with self.conf_thresh+ confidence <block_start>probas=1-outputs['pred_logits'].softmax(-1)[i : -1].cpu()<line_sep>keep=(probas<g>self.conf_thresh).cpu()<line_sep># convert boxes from [0; 1] to image scales bboxes_scaled=self.rescale_bboxes(outputs['pred_boxes'].cpu()[i keep] im.size)<line_sep>kept_probs=probas[keep]<line_sep># Convert outputs to the required format bboxes=list(bboxes_scaled.numpy())<line_sep>probs=list(kept_probs.numpy())<line_sep>boxes,scores=[] []<for_stmt>b,conf zip(bboxes probs)<block_start>boxes.append([int(b[0]) int(b[1]) int(b[2]) int(b[3])])<line_sep>scores.append(conf)<block_end># Read image, perform inference, parse results, append the predicted boxes to detections boxes=scale_boxes(boxes coordinates[i] img_dims)<line_sep>all_boxes<augadd>boxes<line_sep>all_scores<augadd>scores<block_end>all_boxes=class_agnostic_nms(all_boxes all_scores)<line_sep><return>all_boxes all_scores<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_stmt>HLTrigger.HLTfilters.hltHighLevel_cfi<line_sep>ecaletaCalibHLT=HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(# HLTPaths = ['AlCa_EcalEta'], eventSetupPathsKey='EcalCalEtaCalib' throw=<false>)<line_sep>
# Copyright (c) 2014, <NAME>. Please see the AUTHORS file for details. # All rights reserved. Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file.) <import_from_stmt>subprocess PIPE<import_from_stmt>subprocess Popen<import_stmt>sublime<import_stmt>sublime_plugin<import_from_stmt>Dart.sublime_plugin_lib PluginLogger<import_from_stmt>Dart.sublime_plugin_lib.plat supress_window<import_from_stmt>Dart analyzer<import_from_stmt>Dart.lib.sdk DartFormat<line_sep>_logger=PluginLogger(__name__)<class_stmt>DartFormatCommand(sublime_plugin.WindowCommand)<block_start>'''Formats the selected text in Sublime Text using `dartfmt`. Notes: - Can be used as a build system. '''<def_stmt>run self **kwargs<block_start>view=self.window.active_view()<if_stmt><not>view<block_start><return><block_end>analyzer.g_server.send_format_file(view)<block_end><block_end><class_stmt>DartReplaceRegion(sublime_plugin.TextCommand)<block_start><def_stmt>run self edit region text<block_start>reg=sublime.Region(*region)<line_sep>self.view.replace(edit reg text)<line_sep>self.view.run_command('reindent')<block_end><block_end>
# Generated by Django 2.2.2 on 2019-06-20 11:37 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('part' '0010_auto_20190620_2135') ]<line_sep>operations=[migrations.AddField(model_name='part' name='revision' field=models.CharField(blank=<true> help_text='Part revision or version number' max_length=100) ) ]<block_end>
""" MTGJSON Singular Deck Header Object """<import_from_stmt>typing Any Dict<import_from_stmt>..classes.mtgjson_deck MtgjsonDeckObject<import_from_stmt>..utils to_camel_case<class_stmt>MtgjsonDeckHeaderObject<block_start>""" MTGJSON Singular Deck Header Object """<line_sep>code:str<line_sep>file_name:str<line_sep>name:str<line_sep>release_date:str<line_sep>type:str<def_stmt>__init__ self output_deck:MtgjsonDeckObject<arrow><none><block_start>""" Initialize the header given a deck """<line_sep>self.code=output_deck.code<line_sep>self.file_name=output_deck.file_name<line_sep>self.name=output_deck.name<line_sep>self.release_date=output_deck.release_date<line_sep>self.type=output_deck.type<block_end><def_stmt>to_json self<arrow>Dict[str Any]<block_start>""" Support json.dump() :return: JSON serialized object """<line_sep><return>{to_camel_case(key):value<for>key,value self.__dict__.items()<if>"__"<not><in>key<and><not>callable(value)}<block_end><block_end>
<def_stmt>response hey_bob<block_start>hey_bob=hey_bob.strip()<if_stmt>_is_silence(hey_bob)<block_start><return>'Fine. Be that way!'<block_end><if_stmt>_is_shouting(hey_bob)<block_start><if_stmt>_is_question(hey_bob)<block_start><return>"Calm down, I know what I'm doing!"<block_end><else_stmt><block_start><return>'Whoa, chill out!'<block_end><block_end><elif_stmt>_is_question(hey_bob)<block_start><return>'Sure.'<block_end><else_stmt><block_start><return>'Whatever.'<block_end><block_end><def_stmt>_is_silence hey_bob<block_start><return>hey_bob<eq>''<block_end><def_stmt>_is_shouting hey_bob<block_start><return>hey_bob.isupper()<block_end><def_stmt>_is_question hey_bob<block_start><return>hey_bob.endswith('?')<block_end>
"""Common utilities for alerts and alert digests."""<import_stmt>os<import_from_stmt>math floor log10<import_from_stmt>typing List Optional Union<import_from_stmt>jinja2 Environment FileSystemLoader select_autoescape<import_from_stmt>chaos_genius.alerts.email send_static_alert_email<import_from_stmt>chaos_genius.core.utils.round round_number<import_from_stmt>chaos_genius.settings CHAOSGENIUS_WEBAPP_URL<class_stmt>AlertException(Exception)<block_start>"""A general exception in a specific alert. Stores and prints alert ID and KPI ID. """<def_stmt>__init__ self message:str alert_id:int kpi_id:Optional[int]=<none><block_start>"""Initialize a new alert exception. Args: message: exception message. alert_id: ID of alert where this originated from. kpi_id: ID of KPI associated with the alert. """<if_stmt>kpi_id<block_start>message=f"(KPI: {kpi_id}, Alert: {alert_id}) {message}"<block_end><else_stmt><block_start>message=f"(Alert: {alert_id}) {message}"<block_end>super().__init__(message)<block_end><block_end><def_stmt>webapp_url_prefix <block_start>"""Constructs webapp URL prefix with a trailing slash. If not setup, this will be an invalid URL with an appropriate message. TODO: redirect to docs link showing how to setup instead of invalid URL. """<if_stmt><not>CHAOSGENIUS_WEBAPP_URL<block_start><return>"Webapp URL not setup. Please setup CHAOSGENIUS_WEBAPP_URL in the environment file./"<block_end>forward_slash="/"<if><not>CHAOSGENIUS_WEBAPP_URL[-1]<eq>"/"<else>""<line_sep><return>f"{CHAOSGENIUS_WEBAPP_URL}{forward_slash}"<block_end><def_stmt>change_message_from_percent percent_change:Union[str int float]<arrow>str<block_start>"""Creates a change message from given percentage change. percent_change will be: - "–" in case the last data point was missing or both the points had values 0 - 0 (int) in case there was no change - positive value (int/float) in case there was an increase - negative value (int/float) in case there was a decrease """<if_stmt>isinstance(percent_change str)<block_start><return>percent_change<block_end><elif_stmt>percent_change<eq>0<block_start><return>"No change (–)"<block_end><elif_stmt>percent_change<g>0<block_start><return>f"Increased by ({percent_change}%)"<block_end><else_stmt><block_start><return>f"Decreased by ({percent_change}%)"<block_end><block_end><def_stmt>find_percentage_change curr_val:Union[int float] prev_val:Optional[Union[int float]]<arrow>Union[int float str]<block_start>"""Calculates percentage change between previous and current value."""<if_stmt>prev_val<is><none># previous point wasn't found <block_start><return>"–"<block_end><elif_stmt>curr_val<eq>0<and>prev_val<eq>curr_val# both current and previous value are 0 <block_start><return>"–"<block_end><elif_stmt>prev_val<eq>0# previous value is 0, but current value isn't <block_start>sign_="+"<if>curr_val<g>0<else>"-"<line_sep><return>sign_+"inf"<block_end><else_stmt><block_start>change=curr_val-prev_val<line_sep>percentage_change=(change/prev_val)<times>100<line_sep><return>round_number(percentage_change)<block_end><block_end><def_stmt>send_email_using_template template_name:str recipient_emails:List[str] subject:str files:List[dict] **kwargs <arrow><none><block_start>"""Sends an email using a template."""<line_sep>path=os.path.join(os.path.dirname(__file__) "email_templates")<line_sep>env=Environment(loader=FileSystemLoader(path) autoescape=select_autoescape(["html" "xml"]))<line_sep>template=env.get_template(template_name)<line_sep>send_static_alert_email(recipient_emails subject template.render(**kwargs) 
files)<block_end>HRN_PREFIXES={-9:"n" -6:"µ" -3:"m" 0:"" 3:"K" 6:"M" 9:"B" 12:"T" }<def_stmt>_get_exponent num:float<arrow>int<block_start>"""Returns the power of 10 to which the number is raised to."""<if_stmt>num<eq>0<block_start><return>0<block_end><return>floor(log10(abs(num)))<block_end><def_stmt>human_readable num:float<arrow>str<block_start>"""Returns the human readable format of a number."""<line_sep>exponent=_get_exponent(num)<line_sep>new_exponent=min((3<times>floor(exponent/3)) 12)<line_sep>precision=10<power>(new_exponent)<line_sep>new_val=round(num/precision 3)<line_sep>human_readable_format=str(new_val)+HRN_PREFIXES[new_exponent]<line_sep><return>human_readable_format<block_end>
# coding: utf-8 <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>func linear<import_from_stmt>rnns cell<as>cell<class_stmt>gru(cell.Cell)<block_start>"""The Gated Recurrent Unit."""<def_stmt>__init__ self d ln=<false> scope='gru'<block_start>super(gru self).__init__(d ln=ln scope=scope)<block_end><def_stmt>get_init_state self shape=<none> x=<none> scope=<none><block_start><return>self._get_init_state(self.d shape=shape x=x scope=scope)<block_end><def_stmt>fetch_states self x<block_start><with_stmt>tf.variable_scope("fetch_state_{}".format(self.scope<or>"gru"))<block_start>g=linear(x self.d<times>2 bias=<false> ln=self.ln scope="gate_x")<line_sep>h=linear(x self.d bias=<false> ln=self.ln scope="hide_x")<block_end><return>g h<block_end><def_stmt>__call__ self h_ x# h_: the previous hidden state # x_g/x: the current input state for gate # x_h/x: the current input state for hidden <block_start>""" z = sigmoid(h_, x) r = sigmoid(h_, x) h' = tanh(x, r * h_) h = z * h_ + (1. - z) * h' """<with_stmt>tf.variable_scope("cell_{}".format(self.scope<or>"gru"))<block_start>x_g,x_h=x<line_sep>h_g=linear(h_ self.d<times>2 ln=self.ln scope="gate_h")<line_sep>z,r=tf.split(tf.sigmoid(x_g+h_g) 2 -1)<line_sep>h_h=linear(h_<times>r self.d ln=self.ln scope="hide_h")<line_sep>h=tf.tanh(x_h+h_h)<line_sep>h=z<times>h_+(1.-z)<times>h<block_end><return>h<block_end><block_end>
# MIT License # # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. <def_stmt>pretty_dict d indent=0<block_start>"""Pretty the output format of a dictionary. Parameters ---------- d dict, the input dictionary instance. indent int, indent level, non-negative. Returns ------- res str, the output string """<line_sep>res=""<for_stmt>k,v d.items()<block_start>res<augadd>"\t"<times>indent+str(k)<if_stmt>isinstance(v dict)<block_start>res<augadd>"\n"+pretty_dict(v indent+1)<block_end><else_stmt><block_start>res<augadd>": "+str(v)+"\n"<block_end><block_end><return>res<block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>json<import_stmt>torch<import_from_stmt>.utils skeleton<class_stmt>SkeletonDataset(torch.utils.data.Dataset)<block_start>""" Feeder for skeleton-based action recognition Arguments: data_path: the path to data folder random_choose: If true, randomly choose a portion of the input sequence random_move: If true, randomly perfrom affine transformation window_size: The length of the output sequence repeat: times of repeating the dataset data_subscripts: subscript expression of einsum operation. In the default case, the shape of output data is `(channel, vertex, frames, person)`. To permute the shape to `(channel, frames, vertex, person)`, set `data_subscripts` to 'cvfm->cfvm'. """<def_stmt>__init__ self data_dir random_choose=<false> random_move=<false> window_size=-1 num_track=1 data_subscripts=<none> repeat=1<block_start>self.data_dir=data_dir<line_sep>self.random_choose=random_choose<line_sep>self.random_move=random_move<line_sep>self.window_size=window_size<line_sep>self.num_track=num_track<line_sep>self.data_subscripts=data_subscripts<line_sep>self.files=[os.path.join(self.data_dir f)<for>f os.listdir(self.data_dir)]<times>repeat<block_end><def_stmt>__len__ self<block_start><return>len(self.files)<block_end><def_stmt>__getitem__ self index<block_start><with_stmt>open(self.files[index])<as>f<block_start>data=json.load(f)<block_end>resolution=data['info']['resolution']<line_sep>category_id=data['category_id']<line_sep>annotations=data['annotations']<line_sep>num_frame=data['info']['num_frame']<line_sep>num_keypoints=data['info']['num_keypoints']<line_sep>channel=data['info']['keypoint_channels']<line_sep>num_channel=len(channel)<line_sep># get data data=np.zeros((num_channel num_keypoints num_frame self.num_track) dtype=np.float32)<for_stmt>a annotations<block_start>person_id=a['id']<if>a['person_id']<is><none><else>a['person_id']<line_sep>frame_index=a['frame_index']<if_stmt>person_id<l>self.num_track<and>frame_index<l>num_frame<block_start>data[: : frame_index person_id]=np.array(a['keypoints']).transpose()<block_end><block_end># normalization <if_stmt>self.normalization<block_start><for_stmt>i,c enumerate(channel)<block_start><if_stmt>c<eq>'x'<block_start>data[i]=data[i]/resolution[0]-0.5<block_end><if_stmt>c<eq>'y'<block_start>data[i]=data[i]/resolution[1]-0.5<block_end><if_stmt>c<eq>'score'<or>c<eq>'visibility'<block_start>mask=(data[i]<eq>0)<for_stmt>j range(num_channel)<block_start><if_stmt>c<ne>j<block_start>data[j][mask]=0<block_end><block_end><block_end><block_end><block_end># permute <if_stmt>self.data_subscripts<is><not><none><block_start>data=np.einsum(self.data_subscripts data)<block_end># augmentation <if_stmt>self.random_choose<block_start>data=skeleton.random_choose(data self.window_size)<block_end><elif_stmt>self.window_size<g>0<block_start>data=skeleton.auto_pading(data self.window_size)<block_end><if_stmt>self.random_move<block_start>data=skeleton.random_move(data)<block_end><return>data category_id<block_end><block_end>
<import_from_stmt>rastervision.pipeline.pipeline Pipeline<import_from_stmt>rastervision.pytorch_learner LearnerConfig<class_stmt>LearnerPipeline(Pipeline)<block_start>"""Simple Pipeline that is a wrapper around Learner.main() This supports the ability to use the pytorch_learner package to train models using the RV pipeline package and its runner functionality without the rest of RV. """<line_sep>commands=['train']<line_sep>gpu_commands=['train']<def_stmt>train self<block_start>learner_cfg:LearnerConfig=self.config.learner<line_sep>learner=learner_cfg.build(learner_cfg self.tmp_dir)<line_sep>learner.main()<block_end><block_end>
<import_from_stmt>fate_arch.common._types FederatedMode FederatedCommunicationType EngineType CoordinationProxyService CoordinationCommunicationProtocol<import_from_stmt>fate_arch.common._types BaseType Party DTable<line_sep>
<import_from_stmt>itertools chain<import_stmt>jellyfish# type: ignore <import_stmt>pytest# type: ignore <import_stmt>pytz<import_stmt>us<line_sep># attribute <def_stmt>test_attribute <block_start><for_stmt>state us.STATES_AND_TERRITORIES<block_start><assert_stmt>state<eq>getattr(us.states state.abbr)<block_end><block_end><def_stmt>test_valid_timezones <block_start><for_stmt>state us.STATES_AND_TERRITORIES<block_start><if_stmt>state.capital<block_start><assert_stmt>pytz.timezone(state.capital_tz)<block_end><for_stmt>tz state.time_zones<block_start><assert_stmt>pytz.timezone(tz)<block_end># During migration from SQLite to Python classes, a duplicate # time zone had been found <assert_stmt>len(state.time_zones)<eq>len(set(state.time_zones))<block_end><block_end># maryland lookup <def_stmt>test_fips <block_start><assert_stmt>us.states.lookup("24")<eq>us.states.MD<assert_stmt>us.states.lookup("51")<ne>us.states.MD<block_end><def_stmt>test_abbr <block_start><assert_stmt>us.states.lookup("MD")<eq>us.states.MD<assert_stmt>us.states.lookup("md")<eq>us.states.MD<assert_stmt>us.states.lookup("VA")<ne>us.states.MD<assert_stmt>us.states.lookup("va")<ne>us.states.MD<block_end><def_stmt>test_name <block_start><assert_stmt>us.states.lookup("Maryland")<eq>us.states.MD<assert_stmt>us.states.lookup("maryland")<eq>us.states.MD<assert_stmt>us.states.lookup("Maryland" field="name")<eq>us.states.MD<assert_stmt>us.states.lookup("maryland" field="name")<is><none><assert_stmt>us.states.lookup("murryland")<eq>us.states.MD<assert_stmt>us.states.lookup("Virginia")<ne>us.states.MD<block_end># lookups <def_stmt>test_abbr_lookup <block_start><for_stmt>state us.STATES<block_start><assert_stmt>us.states.lookup(state.abbr)<eq>state<block_end><block_end><def_stmt>test_fips_lookup <block_start><for_stmt>state us.STATES<block_start><assert_stmt>us.states.lookup(state.fips)<eq>state<block_end><block_end><def_stmt>test_name_lookup <block_start><for_stmt>state us.STATES<block_start><assert_stmt>us.states.lookup(state.name)<eq>state<block_end><block_end><def_stmt>test_obsolete_lookup <block_start><for_stmt>state us.OBSOLETE<block_start><assert_stmt>us.states.lookup(state.name)<is><none><block_end><block_end># test metaphone <def_stmt>test_jellyfish_metaphone <block_start><for_stmt>state chain(us.STATES_AND_TERRITORIES us.OBSOLETE)<block_start><assert_stmt>state.name_metaphone<eq>jellyfish.metaphone(state.name)<block_end><block_end># mappings <def_stmt>test_mapping <block_start>states=us.STATES[:5]<assert_stmt>us.states.mapping("abbr" "fips" states=states)<eq>dict((s.abbr s.fips)<for>s states)<block_end><def_stmt>test_obsolete_mapping <block_start>mapping=us.states.mapping("abbr" "fips")<for_stmt>state us.states.OBSOLETE<block_start><assert_stmt>state.abbr<not><in>mapping<block_end><block_end><def_stmt>test_custom_mapping <block_start>mapping=us.states.mapping("abbr" "fips" states=[us.states.DC us.states.MD])<assert_stmt>len(mapping)<eq>2<assert_stmt>"DC"<in>mapping<assert_stmt>"MD"<in>mapping<block_end># known bugs <def_stmt>test_kentucky_uppercase <block_start><assert_stmt>us.states.lookup("kentucky")<eq>us.states.KY<assert_stmt>us.states.lookup("KENTUCKY")<eq>us.states.KY<block_end><def_stmt>test_wayoming <block_start><assert_stmt>us.states.lookup("Wyoming")<eq>us.states.WY<assert_stmt>us.states.lookup("Wayoming")<is><none><block_end><def_stmt>test_dc <block_start><assert_stmt>us.states.DC<not><in>us.STATES<assert_stmt>us.states.lookup("DC")<eq>us.states.DC<assert_stmt>us.states.lookup("District of 
Columbia")<eq>us.states.DC<assert_stmt>"DC"<in>us.states.mapping("abbr" "name")<block_end># shapefiles @pytest.mark.skip<def_stmt>test_head <block_start><import_stmt>requests<for_stmt>state us.STATES_AND_TERRITORIES<block_start><for_stmt>url state.shapefile_urls().values()<block_start>resp=requests.head(url)<assert_stmt>resp.status_code<eq>200<block_end><block_end><block_end># counts <def_stmt>test_obsolete <block_start><assert_stmt>len(us.OBSOLETE)<eq>3<block_end><def_stmt>test_states <block_start><assert_stmt>len(us.STATES)<eq>50<block_end><def_stmt>test_territories <block_start><assert_stmt>len(us.TERRITORIES)<eq>5<block_end><def_stmt>test_contiguous # Lower 48 <block_start><assert_stmt>len(us.STATES_CONTIGUOUS)<eq>48<block_end><def_stmt>test_continental # Lower 48 + Alaska <block_start><assert_stmt>len(us.STATES_CONTINENTAL)<eq>49<block_end><def_stmt>test_dc <block_start><assert_stmt>us.states.DC<not><in>us.STATES<block_end>
<import_stmt>base64<import_from_stmt>typing Dict Optional<import_from_stmt>ciphey.iface Config Decoder ParamSpec T U registry<line_sep>@registry.register<class_stmt>Base64_url(Decoder[str])<block_start><def_stmt>decode self ctext:T<arrow>Optional[U]<block_start>""" Performs Base64 URL decoding """<line_sep>ctext_padding=ctext+"="<times>(4-len(ctext)%4)<try_stmt><block_start><return>base64.urlsafe_b64decode(ctext_padding).decode("utf-8")<block_end><except_stmt>Exception<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>priority <arrow>float# Not expected to show up often, but also very fast to check. <block_start><return>0.05<block_end><def_stmt>__init__ self config:Config<block_start>super().__init__(config)<block_end>@staticmethod<def_stmt>getParams <arrow>Optional[Dict[str ParamSpec]]<block_start><return><none><block_end>@staticmethod<def_stmt>getTarget <arrow>str<block_start><return>"base64_url"<block_end><block_end>
<import_from_stmt>.hilbert HilbertCoreset<import_from_stmt>.sampling UniformSamplingCoreset<import_from_stmt>.sparsevi SparseVICoreset<import_from_stmt>.bpsvi BatchPSVICoreset<line_sep>
<import_stmt>pytest<import_from_stmt>pathlib Path<line_sep>@pytest.fixture(scope="module")<def_stmt>resources_path <block_start><return>Path(__file__).parent/"resources"<block_end>@pytest.fixture(scope="module")<def_stmt>tasks_base_path resources_path<block_start><return>resources_path/"tasks"<block_end>@pytest.fixture(scope="module")<def_stmt>results_base_path resources_path<block_start><return>resources_path/"results"<block_end><def_stmt>pytest_addoption parser<block_start>parser.addoption("--runslow" action="store_true" default=<false> help="run slow tests")<line_sep>parser.addoption("--runintegration" action="store_true" default=<false> help="run integration tests" )<block_end><def_stmt>pytest_collection_modifyitems config items<block_start><if_stmt>config.getoption("--runslow")<and>config.getoption("--runintegration")<block_start><return><block_end><if_stmt><not>config.getoption("--runslow")<block_start>skip_slow=pytest.mark.skip(reason="need --runslow option to run")<for_stmt>item items<block_start><if_stmt>"slow"<in>item.keywords<block_start>item.add_marker(skip_slow)<block_end><block_end><block_end><if_stmt><not>config.getoption("--runintegration")<block_start>skip_integration=pytest.mark.skip(reason="need --runintegration option to run")<for_stmt>item items<block_start><if_stmt>"integration"<in>item.keywords<block_start>item.add_marker(skip_integration)<block_end><block_end><block_end><block_end>
<import_from_stmt>.interpolation_base InterpolationBase<import_from_stmt>.interpolation_cubic natural_cubic_spline_coeffs natural_cubic_coeffs CubicSpline<import_from_stmt>.interpolation_linear linear_interpolation_coeffs LinearInterpolation<import_from_stmt>.interpolation_hermite_cubic_bdiff hermite_cubic_coefficients_with_backward_differences<import_from_stmt>.log_ode logsignature_windows logsig_windows<import_from_stmt>.misc TupleControl<import_from_stmt>.solver cdeint<line_sep>__version__="0.2.5"<line_sep>
<import_from_stmt>.Response Response<class_stmt>LoginResponse(Response)<block_start><def_stmt>__init__ self response<block_start>self.username=<none><line_sep>self.has_anonymous_profile_picture=<none><line_sep>self.profile_pic_url=<none><line_sep>self.profile_pic_id=<none><line_sep>self.full_name=<none><line_sep>self.pk=<none><line_sep>self.is_private=<none><if_stmt>'logged_in_user'<in>response<and>'username'<in>response['logged_in_user']<block_start>self.username=response['logged_in_user']['username']<line_sep>self.has_anonymous_profile_picture=response['logged_in_user']['has_anonymous_profile_picture']<line_sep>self.profile_pic_url=response['logged_in_user']['profile_pic_url']<line_sep>self.full_name=response['logged_in_user']['full_name']<line_sep>self.pk=response['logged_in_user']['pk']<line_sep>self.is_private=response['logged_in_user']['is_private']<block_end><else_stmt><block_start>self.setMessage(response['message'])<block_end>self.setStatus(response['status'])<block_end><def_stmt>getUsername self<block_start><return>self.username<block_end><def_stmt>getHasAnonymousProfilePicture self<block_start><return>self.has_anonymous_profile_picture<block_end><def_stmt>getProfilePicUrl self<block_start><return>self.profile_pic_url<block_end><def_stmt>getProfilePicId self<block_start><return>self.profile_pic_id<block_end><def_stmt>getFullName self<block_start><return>self.full_name<block_end><def_stmt>getUsernameId self<block_start><return>str(self.pk)<block_end><def_stmt>getIsPrivate self<block_start><return>self.is_private<block_end><block_end>
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training Loop."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tensorflow_graphics.projects.cvxnet.lib datasets<import_from_stmt>tensorflow_graphics.projects.cvxnet.lib models<import_from_stmt>tensorflow_graphics.projects.cvxnet.lib utils<line_sep>tf.disable_eager_execution()<line_sep>flags=tf.app.flags<line_sep>logging=tf.logging<line_sep>tf.logging.set_verbosity(tf.logging.INFO)<line_sep>utils.define_flags()<line_sep>FLAGS=flags.FLAGS<def_stmt>main unused_argv<block_start>tf.set_random_seed(2191997)<line_sep>np.random.seed(6281996)<line_sep>logging.info("=> Starting ...")<line_sep># Select dataset. logging.info("=> Preparing datasets ...")<line_sep>data=datasets.get_dataset(FLAGS.dataset "train" FLAGS)<line_sep>batch=tf.data.make_one_shot_iterator(data).get_next()<line_sep># Select model. logging.info("=> Creating {} model".format(FLAGS.model))<line_sep>model=models.get_model(FLAGS.model FLAGS)<line_sep>optimizer=tf.train.AdamOptimizer(FLAGS.lr)<line_sep># Set up the graph train_loss,train_op,global_step=model.compute_loss(batch training=<true> optimizer=optimizer)<line_sep># Training hooks stop_hook=tf.train.StopAtStepHook(last_step=FLAGS.max_steps)<line_sep>summary_writer=tf.summary.FileWriter(FLAGS.train_dir)<line_sep>ops=tf.get_collection(tf.GraphKeys.SUMMARIES)<line_sep>summary_hook=tf.train.SummarySaverHook(save_steps=100 summary_writer=summary_writer summary_op=ops)<line_sep>step_counter_hook=tf.train.StepCounterHook(summary_writer=summary_writer)<line_sep>hooks=[stop_hook step_counter_hook summary_hook]<line_sep>logging.info("=> Start training loop ...")<with_stmt>tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.train_dir hooks=hooks scaffold=<none> save_checkpoint_steps=FLAGS.save_every save_checkpoint_secs=<none> save_summaries_steps=<none> save_summaries_secs=<none> log_step_count_steps=<none> max_wait_secs=3600)<as>mon_sess<block_start><while_stmt><not>mon_sess.should_stop()<block_start>mon_sess.run([batch train_loss global_step train_op])<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.app.run(main)<block_end>
<import_stmt>re<import_from_stmt>urlparse urlparse urljoin<import_from_stmt>framework.plugins.api DiscoveryPlugin<import_from_stmt>framework.plugins.api url_filename<import_from_stmt>externals.moduleman.plugin moduleman_plugin<line_sep>@moduleman_plugin<class_stmt>robots(DiscoveryPlugin)<block_start>name="robots"<line_sep>description="Parses robots.txt looking for new content. Optional: discovery.bl=\".txt,.gif\""<line_sep>category=["default" "active" "discovery"]<line_sep>priority=99<def_stmt>validate self fuzzresult<block_start><return>url_filename(fuzzresult)<eq>"robots.txt"<and>fuzzresult.code<eq>200<block_end><def_stmt>process self fuzzresult# Shamelessly (partially) copied from w3af's plugins/discovery/robotsReader.py <block_start><for_stmt>line fuzzresult.history.fr_content().split('\n')<block_start>line=line.strip()<if_stmt>len(line)<g>0<and>line[0]<ne>'#'<and>(line.upper().find('ALLOW')<eq>0<or>line.upper().find('DISALLOW')<eq>0<or>line.upper().find('SITEMAP')<eq>0)<block_start>url=line[line.find(':')+1:]<line_sep>url=url.strip(" *")<if_stmt>url<and><not>self.blacklisted_extension(url)<block_start>self.queue_url(urljoin(fuzzresult.url url))<block_end><block_end><block_end><block_end><block_end>
<import_from_stmt>time time_ns<import_from_stmt>ctypes POINTER c_int16 c_uint32<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>picosdk.ps2000 ps2000<import_from_stmt>picosdk.functions assert_pico2000_ok<import_from_stmt>picosdk.ctypes_wrapper C_CALLBACK_FUNCTION_FACTORY<import_from_stmt>enum IntEnum<class_stmt>Channel(IntEnum)<block_start>PS2000_CHANNEL_A=0<line_sep>PS2000_CHANNEL_B=1<block_end><class_stmt>PotentialRange(IntEnum)<block_start>PS2000_10MV=0<line_sep>PS2000_20MV=1<line_sep>PS2000_50MV=2<line_sep>PS2000_100MV=3<line_sep>PS2000_200MV=4<line_sep>PS2000_500MV=5<line_sep>PS2000_1V=6<line_sep>PS2000_2V=7<line_sep>PS2000_5V=8<line_sep>PS2000_10V=9<line_sep>PS2000_20V=10<block_end><class_stmt>TimeUnit(IntEnum)<block_start>FEMTOSECOND=0<line_sep>PICOSECOND=1<line_sep>NANOSECOND=2<line_sep>MICROSECOND=3<line_sep>MILLISECOND=4<line_sep>SECOND=5<block_end>CALLBACK=C_CALLBACK_FUNCTION_FACTORY(<none> POINTER(POINTER(c_int16)) c_int16 c_uint32 c_int16 c_int16 c_uint32)<line_sep># reimplement this because the other one only takes ctypes <def_stmt>adc_to_mv values range_ bitness=16<block_start>v_ranges=[10 20 50 100 200 500 1_000 2_000 5_000 10_000 20_000]<line_sep><return>[(x<times>v_ranges[range_])/(2<power>(bitness-1)-1)<for>x values]<block_end><def_stmt>determine_time_unit interval_ns<block_start>unit=0<line_sep>units=['ns' 'us' 'ms' 's']<while_stmt>interval_ns<g>5_000<block_start>interval_ns<augdiv>1000<line_sep>unit<augadd>1<block_end><return>interval_ns units[unit]<block_end><class_stmt>StreamingDevice<block_start><def_stmt>__init__ self gather_values potential_range=PotentialRange.PS2000_50MV<block_start>self.device=ps2000.open_unit()<line_sep>self.potential_range=potential_range<line_sep>self.gather_values=gather_values<line_sep>res=ps2000.ps2000_set_channel(self.device.handle Channel.PS2000_CHANNEL_A <true> <true> potential_range)<line_sep>assert_pico2000_ok(res)<line_sep># start 'fast-streaming' mode res=ps2000.ps2000_run_streaming_ns(self.device.handle 500 TimeUnit.NANOSECOND 100_000 <false> 1 50_000)<line_sep>assert_pico2000_ok(res)<line_sep>self.start_time=time_ns()<line_sep>self.end_time=time_ns()<block_end><def_stmt>close self<block_start>ps2000.ps2000_stop(self.device.handle)<line_sep>self.device.close()<block_end><def_stmt>gather self<block_start>adc_values=[]<def_stmt>get_overview_buffers buffers _overflow _triggered_at _triggered _auto_stop n_values<block_start>adc_values.extend(buffers[0][0:n_values])<block_end>callback=CALLBACK(get_overview_buffers)<while_stmt>len(adc_values)<l>self.gather_values<block_start>ps2000.ps2000_get_streaming_last_values(self.device.handle callback)<block_end>self.end_time=time_ns()<line_sep><return>adc_to_mv(adc_values self.potential_range)<block_end><block_end>stream=StreamingDevice(6_000_000)<line_sep>values=stream.gather()<line_sep>stream.close()<line_sep>print('Values gathered: {}'.format(len(values)))<line_sep>fig,ax=plt.subplots()<line_sep>interval,units=determine_time_unit(stream.end_time-stream.start_time)<line_sep>ax.set_xlabel('time/{}'.format(units))<line_sep>ax.set_ylabel('voltage/mV')<line_sep>ax.plot(np.linspace(0 interval len(values)) values)<line_sep>plt.show()<line_sep>
<import_from_stmt>datetime datetime<import_from_stmt>datetime date<import_from_stmt>datetime timedelta<import_from_stmt>base TestbedTest<import_from_stmt>models Event<import_from_stmt>models Service<import_from_stmt>models Status<class_stmt>HistoryTest(TestbedTest)<block_start><def_stmt>setUp self<block_start>super(HistoryTest self).setUp()<line_sep>Status.load_defaults()<line_sep>self.service=Service(slug="account" name="Account" description="The BEST SERVICE")<line_sep>self.service.put()<block_end><def_stmt>test_history_order self<block_start>start=date(2011 4 13)<line_sep>up=Status.get_by_slug("up")<line_sep>history=self.service.history(5 up start=start)<line_sep>self.assertEquals(len(history) 5)<line_sep>history_days=[h["day"]<for>h history]<line_sep>expected=[date(2011 4 12) date(2011 4 11) date(2011 4 10) date(2011 4 9) date(2011 4 8) ]<line_sep>self.assertEquals(history_days expected)<block_end><def_stmt>test_history_order_early_month self<block_start>start=date(2011 4 2)<line_sep>up=Status.get_by_slug("up")<line_sep>history=self.service.history(5 up start=start)<line_sep>history_days=[h["day"]<for>h history]<line_sep>expected=[date(2011 4 1) date(2011 3 31) date(2011 3 30) date(2011 3 29) date(2011 3 28) ]<line_sep>self.assertEquals(history_days expected)<for_stmt>h history<block_start>self.assertFalse(h["information"])<block_end><block_end><def_stmt>test_history_order_late_month self<block_start>start=date(2011 4 5)<line_sep>up=Status.get_by_slug("up")<line_sep>history=self.service.history(5 up start=start)<line_sep>history_days=[h["day"]<for>h history]<line_sep>expected=[date(2011 4 4) date(2011 4 3) date(2011 4 2) date(2011 4 1) date(2011 3 31) ]<line_sep>self.assertEquals(history_days expected)<block_end><def_stmt>test_history_no_errors_boundary self<block_start>down=Status.get_by_slug("down")<line_sep>up=Status.get_by_slug("up")<line_sep>now=datetime(2011 4 5)<line_sep>event=Event(status=down service=self.service start=now message="HEY")<line_sep>event.put()<line_sep>history=self.service.history(5 up start=date(2011 4 5))<line_sep>self.assertEquals(history[0]["information"] <false>)<block_end><def_stmt>test_history_one_error self<block_start>down=Status.get_by_slug("down")<line_sep>up=Status.get_by_slug("up")<line_sep>now=datetime(2011 4 4 12)<line_sep>event=Event(status=down service=self.service start=now message="HEY")<line_sep>event.put()<line_sep>history=self.service.history(5 up start=date(2011 4 5))<line_sep>self.assertEquals(history[0]["information"] <true>)<line_sep>self.assertEquals(history[0]["name"] "information")<block_end><def_stmt>test_history_one_error_boundary self<block_start>down=Status.get_by_slug("down")<line_sep>up=Status.get_by_slug("up")<line_sep>now=datetime(2011 3 31)<line_sep>event=Event(status=down service=self.service start=now message="HEY")<line_sep>event.put()<line_sep>history=self.service.history(5 up start=date(2011 4 5))<line_sep>self.assertEquals(history[4]["information"] <true>)<line_sep>self.assertEquals(history[4]["name"] "information")<block_end><def_stmt>test_history_count self<block_start>up=Status.get_by_slug("up")<line_sep>history=self.service.history(10 up start=date(2011 4 5))<line_sep>self.assertEquals(len(history) 10)<block_end><def_stmt>test_history_current_status self<block_start>down=Status.get_by_slug("down")<line_sep>up=Status.get_by_slug("up")<line_sep>now=datetime(2011 4 4 12 51)<line_sep>event=Event(status=down service=self.service start=now message="HEY")<line_sep>event.put()<line_sep>history,=self.service.history(1 up 
start=date(2011 4 5))<line_sep>self.assertEquals(history["information"] <true>)<block_end><block_end>
<import_from_stmt>typing List<import_stmt>tensorflow<as>tf<class_stmt>MultiLayerPerceptron(tf.keras.layers.Layer)<block_start>""" A multi layer perceptron """<def_stmt>__init__ self num_hidden:List[int] activations:List prefix:str=''<block_start>""" Initializes the layer :param num_hidden: list of hidden layer sizes :param activations: list of activations for dense layer :param prefix: prefix of hidden layer name """<line_sep>super(MultiLayerPerceptron self).__init__()<assert_stmt>len(num_hidden)<eq>len(activations) "num hidden and activations must contain the same number of elements"<line_sep>self.mlp=[]<for_stmt>i,(hidden_size activation) enumerate(zip(num_hidden activations))<block_start><if_stmt>hidden_size<eq>0<block_start><continue><block_end>layer=tf.keras.layers.Dense(units=hidden_size use_bias=<true> activation=activation name=f'{prefix}hidden_projection_{str(i)}')<line_sep>self.mlp.append(layer)<block_end><block_end><def_stmt>call self inputs **kwargs<block_start>""" Applies multi-layer perceptron on given inputs :return output Shape=inputs.shape[:-1] + [num_hidden[-1]] """<line_sep>x=inputs<for_stmt>layer self.mlp<block_start>x=layer(x)<block_end><return>x<block_end><block_end>
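# Minimal usage sketch (layer sizes and batch shape are assumed values, not part of the original module): stack two dense layers and run a dummy batch through the MLP. example_mlp=MultiLayerPerceptron(num_hidden=[64 32] activations=['relu' <none>])<line_sep>print(example_mlp(tf.zeros([4 128])).shape)<line_sep>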
# Unix SMB/CIFS implementation. Tests for common.py routines # Copyright (C) <NAME> 2011 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Tests for samba.common"""<import_stmt>samba os<import_stmt>samba.tests<import_from_stmt>samba.common *<import_from_stmt>samba.samdb SamDB<class_stmt>CommonTests(samba.tests.TestCase)<block_start><def_stmt>test_normalise_int32 self<block_start>self.assertEquals('17' normalise_int32(17))<line_sep>self.assertEquals('17' normalise_int32('17'))<line_sep>self.assertEquals('-123' normalise_int32('-123'))<line_sep>self.assertEquals('-1294967296' normalise_int32('3000000000'))<block_end><def_stmt>test_dsdb_Dn self<block_start>sam=samba.Ldb(url='dntest.ldb')<line_sep>dn1=dsdb_Dn(sam "DC=foo,DC=bar")<line_sep>dn2=dsdb_Dn(sam "B:8:0000000D:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;DC=samba,DC=example,DC=com")<line_sep>self.assertEquals(dn2.binary "0000000D")<line_sep>self.assertEquals(13 dn2.get_binary_integer())<line_sep>os.unlink('dntest.ldb')<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>zoomus util<import_from_stmt>zoomus.components base<class_stmt>LiveStreamComponentV2(base.BaseComponent)<block_start><def_stmt>update self **kwargs<block_start>""" Use this API to update the meeting's stream information. Expects: - meeting_id: int - stream_url: string (URL) - stream_key: string - page_url: string (URL) """<line_sep>util.require_keys(kwargs "meeting_id")<line_sep><return>self.patch_request("/meetings/{}/livestream".format(kwargs.get("meeting_id")) data=kwargs)<block_end><def_stmt>update_status self **kwargs<block_start>""" Use this API to update the status of a meeting's live stream. Expects: - meeting_id: int - action (start|stop) - settings: dict """<line_sep>util.require_keys(kwargs "meeting_id")<line_sep><return>self.patch_request("/meetings/{}/livestream/status".format(kwargs.get("meeting_id")) data=kwargs )<block_end><block_end>
<import_stmt>pytest<line_sep>@pytest.mark.xfail<def_stmt>test_contrib_is_available <block_start><import_from_stmt>stories.contrib.debug_toolbars.flask StoriesPanel<block_end># noqa: F401
<import_from_stmt>.resnet resnet18 resnet34 resnet50 resnet101 resnet152<import_from_stmt>.resnet resnet18ibna resnet34ibna resnet50ibna resnet101ibna resnet152ibna<import_from_stmt>.transformers *<line_sep>__cnnbackbone_factory={# resnet series 'resnet18':resnet18 'resnet34':resnet34 'resnet50':resnet50 'resnet101':resnet101 'resnet152':resnet152 'resnet18ibna':resnet18ibna 'resnet34ibna':resnet34ibna 'resnet50ibna':resnet50ibna 'resnet101ibna':resnet101ibna 'resnet152ibna':resnet152ibna # vision transformer series 'vit_small_patch16_224':vit_small_patch16_224 'vit_base_patch16_224':vit_base_patch16_224 'vit_base_patch32_224':vit_base_patch32_224 'vit_base_patch16_384':vit_base_patch16_384 'vit_base_patch32_384':vit_base_patch32_384 'vit_large_patch16_224':vit_large_patch16_224 'vit_large_patch32_224':vit_large_patch32_224 'vit_large_patch16_384':vit_large_patch16_384 'vit_large_patch32_384':vit_large_patch32_384 'vit_base_patch16_224_in21k':vit_base_patch16_224_in21k 'vit_base_patch32_224_in21k':vit_base_patch32_224_in21k 'vit_large_patch16_224_in21k':vit_large_patch16_224_in21k 'vit_large_patch32_224_in21k':vit_large_patch32_224_in21k 'vit_huge_patch14_224_in21k':vit_huge_patch14_224_in21k 'vit_deit_tiny_patch16_224':vit_deit_tiny_patch16_224 'vit_deit_small_patch16_224':vit_deit_small_patch16_224 'vit_deit_base_patch16_224':vit_deit_base_patch16_224 'vit_deit_base_patch16_384':vit_deit_base_patch16_384 'vit_deit_tiny_distilled_patch16_224':vit_deit_tiny_distilled_patch16_224 'vit_deit_small_distilled_patch16_224':vit_deit_small_distilled_patch16_224 'vit_deit_base_distilled_patch16_224':vit_deit_base_distilled_patch16_224 'vit_deit_base_distilled_patch16_384':vit_deit_base_distilled_patch16_384 'vit_base_patch16_224_miil_in21k':vit_base_patch16_224_miil_in21k 'vit_base_patch16_224_miil':vit_base_patch16_224_miil }<def_stmt>build_cnnbackbone name pretrained=<true> **kwargs<block_start><return>__cnnbackbone_factory[name](pretrained=pretrained **kwargs)<block_end>
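# Minimal usage sketch (the backbone name and pretrained flag are assumed values): look a constructor up in the factory and build it. example_backbone=build_cnnbackbone('resnet50' pretrained=<false>)<line_sep>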
""" Python methods for importing and exporting '.proto' files from the BBP type definition format. """<line_sep># TODO get custom exceptions for these methods <import_stmt>io<import_stmt>re<import_stmt>logging<import_from_stmt>blackboxprotobuf.lib.exceptions TypedefException<import_stmt>blackboxprotobuf.lib.api<line_sep>PROTO_FILE_TYPE_MAP={"uint":"uint64" "int":"int64" "sint":"sint64" "fixed32":"fixed32" "sfixed32":"sfixed32" "float":"float" "fixed64":"fixed64" "sfixed64":"sfixed64" "double":"double" "bytes":"bytes" "bytes_hex":"bytes" "string":"string" }<line_sep>PACKABLE_TYPES=["uint" "int" "sint" "fixed32" "sfixed32" "float" "fixed64" "sfixed64" "double" ]<line_sep># Inverse of the above, but we have to include more types PROTO_FILE_TYPE_TO_BBP={"double":"double" "float":"float" "int32":"int" "int64":"int" "uint32":"uint" "uint64":"uint" "sint32":"sint" "sint64":"sint" "fixed32":"fixed32" "fixed64":"fixed64" "sfixed32":"sfixed32" "sfixed64":"sfixed64" "bool":"uint" "string":"string" # should be default_binary_type, but can't handle that well here "bytes":"bytes" }<line_sep>NAME_REGEX=re.compile(r"\A[a-zA-Z_][a-zA-Z0-9_]*\Z")<line_sep># add packed types to the list <for_stmt>packable_type PACKABLE_TYPES<block_start>packed_type="packed_"+packable_type<line_sep>PROTO_FILE_TYPE_MAP[packed_type]=PROTO_FILE_TYPE_MAP[packable_type]<block_end><def_stmt>_print_message message_name typedef output_file depth=0<block_start>indent=u" "<times>depth<if_stmt><not>NAME_REGEX.match(message_name)<block_start><raise>TypedefException("Message name: %s is not valid"%message_name)<block_end># sort typedef for better looking output typedef=blackboxprotobuf.lib.api.sort_typedef(typedef)<line_sep>message_name=message_name.strip()<line_sep>output_file.write(u"\n")<line_sep>output_file.write(indent)<line_sep>output_file.write(u"message %s {\n"%message_name)<for_stmt>field_number,field_typedef typedef.items()# TODO Default to all fields as repeated? or optional <block_start>proto_type=<none><line_sep>field_name=<none><line_sep>field_options=""<line_sep># a repeated field with one element is indistinduishable from a # repeated field so we just put repeated if we have proof that it is # repeatable, but this might be wrong sometimes # maybe some sort of protobuf discovery tool can detect this is_repeated=field_typedef.get("seen_repeated" <false>)<if_stmt>"name"<in>field_typedef<and>field_typedef["name"]<ne>""<block_start>field_name=field_typedef["name"]<line_sep>field_name=field_name.strip()<if_stmt><not>NAME_REGEX.match(field_name)<block_start>field_name=<none><block_end><block_end><if_stmt>field_name<is><none><block_start>field_name=u"field%s"%field_number<block_end><if_stmt>field_typedef["type"]<eq>"message"# If we have multiple typedefs, this means is something like the Any # message, and has to be manually reparsed to each type <block_start><if_stmt>"alt_typedefs"<in>field_typedef<block_start>proto_type="bytes"<block_end><else_stmt><block_start>proto_type=field_name+"_type"<line_sep>_print_message(proto_type field_typedef["message_typedef"] output_file depth+1)<block_end><block_end><else_stmt><block_start><if_stmt>field_typedef["type"]<not><in>PROTO_FILE_TYPE_MAP<block_start><raise>TypedefException("Type %s does not have a mapping to protobuf types."%field_typedef["type"])<block_end>proto_type=PROTO_FILE_TYPE_MAP[field_typedef["type"]]<block_end># we're using proto3 syntax. 
Repeated numeric fields are packed by default # if it's repeated and not packed, then make sure we specify it's not packed <if_stmt>is_repeated<and>field_typedef["type"]<in>PACKABLE_TYPES<block_start>field_options=u" [packed=false]"<block_end># if it's a packed type, we'll explicitoly set that too, can't hurt <elif_stmt>field_typedef["type"].startswith("packed_")<block_start>field_options=u" [packed=true]"<line_sep>is_repeated=<true><block_end>output_file.write(indent)<line_sep>output_file.write(u" %s%s %s = %s%s;\n"%("repeated "<if>is_repeated<else>"" proto_type field_name field_number field_options ))<block_end>output_file.write(indent)<line_sep>output_file.write(u"}\n\n")<block_end><def_stmt>export_proto typedef_map output_filename=<none> output_file=<none> package=<none><block_start>"""Export the given type definitons as a '.proto' file. Typedefs are expected as a dictionary of {'message_name': typedef } Write to output_file or output_filename if provided, otherwise return a string output_filename will be overwritten if it exists """<line_sep>return_string=<false><if_stmt>output_filename<is><not><none><block_start>output_file=io.open(output_filename "w+")<block_end><if_stmt>output_file<is><none><block_start>return_string=<true><line_sep>output_file=io.StringIO()<block_end># preamble output_file.write(u'syntax = "proto3";\n\n')<if_stmt>package<block_start>output_file.write(u"package %s;\n\n"%package)<block_end><for_stmt>typedef_name,typedef typedef_map.items()<block_start>_print_message(typedef_name typedef output_file)<block_end><if_stmt>return_string<block_start><return>output_file.getvalue()<block_end># close the file if we opened it <elif_stmt>output_filename<is><not><none><block_start>output_file.close()<block_end><return><none><block_end>MESSAGE_START_REGEX=re.compile(r"^message +([a-zA-Z_0-9]+) *{.*")<line_sep>FIELD_REGEX=re.compile(r"^ *(repeated|optional|required)? *([a-zA-Z0-9_]+) +([a-zA-Z0-9_]+) += +([0-9]+) *(\[[a-z]+=[a-z]*\])?.*;.*$")<line_sep>SYNTAX_REGEX=re.compile(r'^ *syntax += +"(proto\d)" *;.*')<line_sep>ENUM_REGEX=re.compile(r"^ *enum +([a-zA-Z0-9_]+) *{.*")<line_sep>PACKAGE_REGEX=re.compile(r"^ *package +([a-zA-Z0-9_.]+) *;.*")<def_stmt>import_proto config input_string=<none> input_filename=<none> input_file=<none><block_start>typedef_map={}<if_stmt>input_string<is><not><none><block_start>input_file=io.StringIO(input_string)<block_end><if_stmt>input_file<is><none><and>input_filename<is><not><none><block_start>input_file=io.open(input_filename "r")<block_end><if_stmt>input_file<is><none><block_start><raise>ValueError("No file provided to import_proto")<block_end>syntax_version="proto2"<line_sep>package_prefix=""<line_sep>enum_names=[]<line_sep>message_trees=[]<line_sep>message_names=[]<line_sep>line=input_file.readline()<while_stmt>line<block_start>line=line.strip()<if_stmt>line.startswith("syntax")<and>SYNTAX_REGEX.match(line)<block_start>syntax_version=SYNTAX_REGEX.match(line).group(1)<block_end><elif_stmt>line.startswith("package")<and>PACKAGE_REGEX.match(line)<block_start>package_prefix=PACKAGE_REGEX.match(line).group(1)+"."<block_end><elif_stmt>line.startswith("import")<block_start>logging.warn("Proto file has import which is not supported "<concat>"by the parser. 
Ensure the imported files are "<concat>"processed first: %s" line )<block_end><elif_stmt>line.startswith("enum")<and>ENUM_REGEX.match(line)<block_start>enum_name=_parse_enum(line input_file)<line_sep>enum_names.append(enum_name)<block_end><elif_stmt>line.startswith("message")<and>MESSAGE_START_REGEX.match(line)<block_start>message_tree=_preparse_message(line input_file)<line_sep>message_trees.append(message_tree)<block_end>line=input_file.readline()<block_end># TODO parse the message data <for_stmt>tree message_trees<block_start>new_message_names,new_enum_names=_collect_names(package_prefix tree)<line_sep>enum_names<augadd>new_enum_names<line_sep>message_names<augadd>new_message_names<block_end>logging.debug("Got the following enum_names: %s" enum_names)<line_sep>logging.debug("Got the following message_names: %s" message_names)<for_stmt>tree message_trees<block_start>_parse_message(tree typedef_map message_names enum_names package_prefix syntax_version<eq>"proto3" config )<block_end><return>typedef_map<block_end><def_stmt>_parse_enum line input_file<block_start>"""Parse an enum out of the file. Goes from enum declaration to next } Returns the enum's name """<line_sep>enum_name=ENUM_REGEX.match(line).group(1)<line_sep># parse until the next '}' <while_stmt>"}"<not><in>line<block_start>line=input_file.readline()<if_stmt><not>line<block_start><raise>ValueError("Did not find close of enum")<block_end><block_end><return>enum_name<block_end><def_stmt>_preparse_message line input_file<block_start>"""Parse out a message name and the lines that make it up"""<line_sep>message_name=MESSAGE_START_REGEX.match(line).group(1)<line_sep>message_lines=[]<line_sep>inner_enums=[]<line_sep>inner_messages=[]<while_stmt>"}"<not><in>line<block_start>line=input_file.readline()<if_stmt><not>line<block_start><raise>ValueError("Did not find close of message")<block_end>line=line.strip()<if_stmt>line.startswith("enum")<and>ENUM_REGEX.match(line)<block_start>enum_name=_parse_enum(line input_file)<line_sep>inner_enums.append(enum_name)<block_end><elif_stmt>line.startswith("message")<and>MESSAGE_START_REGEX.match(line)<block_start>message_tree=_preparse_message(line input_file)<line_sep>inner_messages.append(message_tree)<block_end># not an inner enum or message <else_stmt><block_start>message_lines.append(line)<block_end><block_end><return>{"name":message_name "data":message_lines "enums":inner_enums "inner_messages":inner_messages }<block_end><def_stmt>_collect_names prefix message_tree<block_start>message_names=[]<line_sep>enum_names=[]<line_sep>name=prefix+message_tree["name"]<line_sep>message_names.append(name)<for_stmt>enum_name message_tree["enums"]<block_start>enum_names.append(prefix+enum_name)<block_end><for_stmt>inner_message message_tree["inner_messages"]<block_start>new_message_names,new_enum_names=_collect_names(name+"." 
inner_message)<line_sep>message_names<augadd>new_message_names<line_sep>enum_names<augadd>new_enum_names<block_end><return>message_names enum_names<block_end><def_stmt>_check_message_name current_path name known_message_names config# Verify message name against preparsed message names and global # known_messages # For example, if we have: # Message.InnerMesage # referenced from: # PackageA.Message2 # we would look up: # PackageA.Message2.Message.InnerMessage # PackageA.Message.InnerMessage # should also work for enums <block_start><if_stmt>name<in>config.known_types<block_start><return><true><block_end># search for anything under a common prefix in known_message_names logging.debug("Testing message name: %s" name)<line_sep>prefix_options=[""]<for_stmt>part current_path.split(".")<block_start><if_stmt>part<block_start>prefix_options=[prefix_options[0]+part+"."]+prefix_options<block_end><block_end>logging.debug("prefix_options: %s" prefix_options)<for_stmt>prefix prefix_options<block_start>logging.debug("Testing message name: %s" prefix+name)<if_stmt>prefix+name<in>known_message_names<block_start><return>prefix+name<block_end># remove the last bit of the prefix <if_stmt>"."<not><in>prefix<block_start><break><block_end>prefix=".".join(prefix.split(".")[:-1])<block_end>logging.debug("Message %s not found from %s Known names are: %s" name current_path known_message_names )<line_sep><return><none><block_end><def_stmt>_parse_message message_tree typdef_map known_message_names enum_names prefix is_proto3 config<block_start>message_typedef={}<line_sep>message_name=prefix+message_tree["name"]<line_sep>prefix=message_name+"."<line_sep># parse the actual message fields <for_stmt>line message_tree["data"]# lines should already be stripped and should not have messages or enums # logging.debug("Line before assert: %s", line) <block_start><assert_stmt>all([<not>line.strip().startswith(x)<for>x ["message " "enum "]])<line_sep># Check if the line matches the field regex match=FIELD_REGEX.match(line)<if_stmt>match<block_start>field_number,field_typedef=_parse_field(match known_message_names enum_names prefix is_proto3 config)<line_sep>message_typedef[field_number]=field_typedef<block_end><block_end># add the messsage to tyep returned typedefs logging.debug("Adding message %s to typedef maps" message_name)<line_sep>typdef_map[message_name]=message_typedef<for_stmt>inner_message message_tree["inner_messages"]# TODO prefix should be added to? 
<block_start>_parse_message(inner_message typdef_map known_message_names enum_names prefix is_proto3 config )<block_end><block_end># parse a field into a dictionary for the typedef <def_stmt>_parse_field match known_message_names enum_names prefix is_proto3 config<block_start>typedef={}<line_sep>field_name=match.group(3)<if_stmt><not>field_name<block_start><raise>ValueError("Could not parse field name from line: %s"%match)<block_end>typedef["name"]=field_name<line_sep>field_number=match.group(4)<if_stmt><not>field_number<block_start><raise>ValueError("Could not parse field number from line: %s"%match)<block_end># figure out repeated field_rule=match.group(1)<line_sep>is_repeated=<false><if_stmt>field_rule<and>"repeated"<in>field_rule<block_start>is_repeated=<true><line_sep>typedef["seen_repeated"]=<true><block_end>field_type=match.group(2)<if_stmt><not>field_type<block_start><raise>ValueError("Could not parse field type from line: %s"%match)<block_end># check normal types bbp_type=PROTO_FILE_TYPE_TO_BBP.get(field_type <none>)<if_stmt><not>bbp_type<block_start>logging.debug("Got non-basic type: %s, checking enums" field_type)<line_sep># check enum names <if_stmt>_check_message_name(prefix field_type enum_names config)# enum = uint <block_start>bbp_type="uint"<block_end><block_end><if_stmt><not>bbp_type# Not enum or normal type, check messages <block_start>message_name=_check_message_name(prefix field_type known_message_names config)<if_stmt>message_name<block_start>bbp_type="message"<line_sep>typedef["message_type_name"]=message_name<block_end><block_end><if_stmt><not>bbp_type# If we don't have a type now, then fail <block_start><raise>ValueError("Could not get a type for field %s: %s"%(field_name field_type))<block_end># figure out packed # default based on repeated + proto3, fallback to options field_options=match.group(5)<line_sep>is_packed=is_repeated<and>is_proto3<and>(field_type<in>PACKABLE_TYPES)<if_stmt>is_packed<and>field_options<and>"packed=false"<in>field_options<block_start>is_packed=<false><block_end><elif_stmt>is_repeated<and>field_options<and>"packed=true"<in>field_options<block_start>is_packed=<true><block_end># make sure the type lines up with packable <if_stmt>is_packed<and>bbp_type<not><in>PACKABLE_TYPES<block_start><raise>ValueError("Field %s set as packable, but not a packable type: %s"%(field_name bbp_type))<block_end><if_stmt>is_packed<block_start>bbp_type="packed_"+bbp_type<block_end>typedef["type"]=bbp_type<line_sep>logging.debug("Parsed field number %s: %s" field_number typedef)<line_sep><return>field_number typedef<block_end>
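# Minimal usage sketch (the typedef values are assumed, not taken from a real capture): render a tiny BBP typedef map as proto3 text with export_proto. example_typedefs={"ExampleMessage":{"1":{"type":"int" "name":"example_id"} "2":{"type":"string" "name":"example_label"}}}<line_sep>print(export_proto(example_typedefs package="example_pkg"))<line_sep>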
expected_output={'mstp':{'mst_instances':{0:{'mst_id':0 'bridge_priority':32768 'bridge_sysid':0 'bridge_address':'00e3.04ff.ad03' 'topology_change_flag':<false> 'topology_detected_flag':<false> 'topology_changes':0 'time_since_topology_change':'142:22:13' 'times':{'hold':1 'topology_change':70 'notification':10 'max_age':40 'hello':10 'forwarding_delay':30 } 'timers':{'hello':0 'topology_change':0 'notification':0 } 'root_of_the_spanning_tree':<true> 'interfaces':{'Port-channel30':{'name':'Port-channel30' 'bridge_assurance_inconsistent':<true> 'vpc_peer_link_inconsistent':<true> 'port_num':4125 'status':'broken' 'cost':500 'port_priority':128 'port_identifier':'128.4125' 'designated_root_priority':32768 'designated_root_address':'0023.04ff.ad03' 'designated_bridge_priority':61440 'designated_bridge_address':'4055.39ff.fee7' 'designated_port_id':'128.4125' 'designated_path_cost':0 'timers':{'message_age':0 'forward_delay':0 'hold':0 } 'port_type':'network' 'number_of_forward_transitions':0 'link_type':'point-to-point' 'internal':<true> 'peer_type':'STP' 'pvst_simulation':<true> 'counters':{'bpdu_sent':110 'bpdu_received':0}}}}} 'hello_time':10 'max_age':40 'forwarding_delay':30}}<line_sep>
"""Base Redis cache handler."""<line_sep># Standard Library <import_stmt>re<import_stmt>json<import_from_stmt>typing Any Optional<line_sep># Third Party <import_from_stmt>pydantic SecretStr<class_stmt>BaseCache<block_start>"""Redis cache handler."""<def_stmt>__init__ self db:int host:str="localhost" port:int=6379 password:Optional[SecretStr]=<none> decode_responses:bool=<true> **kwargs:Any <arrow><none><block_start>"""Initialize Redis connection."""<line_sep>self.db:int=db<line_sep>self.host:str=str(host)<line_sep>self.port:int=port<line_sep>self.password:Optional[SecretStr]=password<line_sep>self.decode_responses:bool=decode_responses<line_sep>self.redis_args:dict=kwargs<block_end><def_stmt>__repr__ self<arrow>str<block_start>"""Represent class state."""<line_sep><return>"HyperglassCache(db={}, host={}, port={}, password={})".format(self.db self.host self.port self.password)<block_end><def_stmt>parse_types self value:str<arrow>Any<block_start>"""Parse a string to standard python types."""<def_stmt>parse_string str_value:str<block_start>is_float=(re.compile(r"^(\d+\.\d+)$") float)<line_sep>is_int=(re.compile(r"^(\d+)$") int)<line_sep>is_bool=(re.compile(r"^(True|true|False|false)$") bool)<line_sep>is_none=(re.compile(r"^(None|none|null|nil|\(nil\))$") <lambda>v:<none>)<line_sep>is_jsonable=(re.compile(r"^[\{\[].*[\}\]]$") json.loads)<for_stmt>pattern,factory (is_float is_int is_bool is_none is_jsonable)<block_start><if_stmt>isinstance(str_value str)<and>bool(re.match(pattern str_value))<block_start>str_value=factory(str_value)<line_sep><break><block_end><block_end><return>str_value<block_end><if_stmt>isinstance(value str)<block_start>value=parse_string(value)<block_end><elif_stmt>isinstance(value bytes)<block_start>value=parse_string(value.decode("utf-8"))<block_end><elif_stmt>isinstance(value list)<block_start>value=[parse_string(i)<for>i value]<block_end><elif_stmt>isinstance(value tuple)<block_start>value=tuple(parse_string(i)<for>i value)<block_end><elif_stmt>isinstance(value dict)<block_start>value={k:self.parse_types(v)<for>k,v value.items()}<block_end><return>value<block_end><block_end>
# Generated by Django 3.2.12 on 2022-02-21 10:25 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("account" "0057_clear_user_addresses") ("account" "0058_update_user_search_document") ]<line_sep>operations=[]<block_end>
<import_from_stmt>gsitk.datasets.datasets DatasetManager<import_from_stmt>nltk.corpus opinion_lexicon<import_from_stmt>collections Counter<def_stmt>prepare_lexicon process=<true> dim=250 save=<false><block_start><if_stmt>process<block_start>dm=DatasetManager()<line_sep>data=dm.prepare_datasets()<line_sep>nega=set(opinion_lexicon.negative())<line_sep>posi=set(opinion_lexicon.positive())<line_sep>lexicon=opinion_lexicon.words()<line_sep>lexicon_dic={x:0<for>x lexicon}<for_stmt>t data['vader']['text']<block_start><for_stmt>w t<block_start><if_stmt>w<in>lexicon_dic<block_start>lexicon_dic[w]<augadd>1<block_end><block_end><block_end><for_stmt>t data['sentiment140']['text']<block_start><for_stmt>w t<block_start><if_stmt>w<in>lexicon_dic<block_start>lexicon_dic[w]<augadd>1<block_end><block_end><block_end>L=Counter(lexicon_dic).most_common(4000)<line_sep>N=[]<line_sep>P=[]<for_stmt>w,_ L<block_start><if_stmt>w<in>nega<block_start>N.append(w)<block_end><elif_stmt>w<in>posi<block_start>P.append(w)<block_end><block_end>l=P[:dim]+N[:dim]<if_stmt>save<block_start><with_stmt>open('senti.lexicon' 'w')<as>f<block_start><for_stmt>d l<block_start>f.write(d)<line_sep>f.write('\n')<block_end><block_end><block_end><return>l<block_end><else_stmt><block_start><with_stmt>open('senti.lexicon' 'r')<as>f<block_start>data=[line.strip()<for>line f]<block_end><return>data<block_end><block_end><import_from_stmt>gensim.models Word2Vec<import_from_stmt>numpy array dot<import_from_stmt>gensim matutils<import_stmt>collections<import_stmt>functools<class_stmt>memoized(object)<block_start><def_stmt>__init__ self func<block_start>self.func=func<line_sep>self.cache={}<block_end><def_stmt>__call__ self *args<block_start><if_stmt><not>isinstance(args collections.Hashable)# uncacheable. a list, for instance. # better to not cache than blow up. 
<block_start><return>self.func(*args)<block_end><if_stmt>args<in>self.cache<block_start><return>self.cache[args]<block_end><else_stmt><block_start>value=self.func(*args)<line_sep>self.cache[args]=value<line_sep><return>value<block_end><block_end><def_stmt>__repr__ self<block_start>'''Return the function's docstring.'''<line_sep><return>self.func.__doc__<block_end><def_stmt>__get__ self obj objtype<block_start>'''Support instance methods.'''<line_sep><return>functools.partial(self.__call__ obj)<block_end><block_end><class_stmt>WordRelatedness<block_start><def_stmt>__init__ self model<block_start>self._model=model<line_sep>self._words=set([w<for>w self._model.vocab])<block_end><def_stmt>check_word self word<block_start><return><true><if>word<in>self._words<else><false><block_end><def_stmt>check_words self words<block_start><return>[w<for>w words<if>self.check_word(w)]<block_end><def_stmt>similar_words self word<block_start><return>self._model.most_similar(word)<if>self.check_word(word)<else>[]<block_end>@memoized<def_stmt>word_similarity self w1 w2<block_start><return>self._model.similarity(w1 w2)<if>self.check_word(w1)<and>self.check_word(w2)<else>0.0<block_end><def_stmt>words_similarity self words1 words2<block_start>w1=self.check_words(words1)<line_sep>w2=self.check_words(words2)<line_sep><return>self._model.n_similarity(w1 w2)<if>w1<and>w2<else>0.0<block_end><def_stmt>word_vector self w<block_start><return>matutils.unitvec(self._model[w])<if>self.check_word(w)<else><none><block_end><def_stmt>words_vector self words<block_start>v_words=[self._model[w]<for>w self.check_words(words)]<line_sep><return>matutils.unitvec(array(v_words).mean(axis=0))<if>v_words<else><none><block_end><def_stmt>consine_similarity self v1 v2<block_start><return>dot(v1 v2)<block_end><block_end><import_from_stmt>gsitk.features.word2vec Word2VecFeatures<import_from_stmt>sklearn.base BaseEstimator TransformerMixin<import_from_stmt>sklearn.feature_extraction DictVectorizer<import_from_stmt>sklearn.pipeline Pipeline<import_stmt>numpy<as>np<import_stmt>nltk<class_stmt>SimVectorizer<block_start><def_stmt>__init__ self senti_lexicon<block_start>w2v_feat=Word2VecFeatures(w2v_model_path='/data/w2vmodel_500d_5mc')<line_sep>sim_model=WordRelatedness(w2v_feat.model)<line_sep>self._sim=sim_model.word_similarity<line_sep>self._lexicon=senti_lexicon<line_sep>self._N=len(self._lexicon)<line_sep># self._vectorizer = DictVectorizer(sparse=False) self._stopwords=set(nltk.corpus.stopwords.words('english'))<block_end><def_stmt>word_process self words<block_start><return>[w<for>w words<if>w<not><in>self._stopwords<and>len(w)<g>2]<block_end><def_stmt>similarity self words feature<block_start><return>max([self._sim(w feature)<for>w words]+[0.0])<block_end><def_stmt>transform self X<block_start>X_transformed=np.zeros((len(X) self._N))<for_stmt>i,x enumerate(X)# if i % 10000 == 0: # print(i) <block_start>words=self.word_process(x)<line_sep>words=set(words)<for_stmt>j,f enumerate(self._lexicon)<block_start>X_transformed[i j]=self.similarity(words f)<block_end><block_end><return>X_transformed<block_end><block_end><import_from_stmt>nltk.corpus opinion_lexicon<import_from_stmt>collections Counter<import_stmt>numpy<as>np<import_stmt>nltk<line_sep>Punc=["." "!" "?" "," ";" ":" "-" "'" "\"" "!!" "!!!" "??" "???" "?!?" "!?!" "?!?!" 
"!?!?"]<line_sep>Negate=["aint" "arent" "cannot" "cant" "couldnt" "darent" "didnt" "doesnt" "ain't" "aren't" "can't" "couldn't" "daren't" "didn't" "doesn't" "dont" "hadnt" "hasnt" "havent" "isnt" "mightnt" "mustnt" "neither" "don't" "hadn't" "hasn't" "haven't" "isn't" "mightn't" "mustn't" "neednt" "needn't" "never" "none" "nope" "nor" "not" "nothing" "nowhere" "oughtnt" "shant" "shouldnt" "uhuh" "wasnt" "werent" "oughtn't" "shan't" "shouldn't" "uh-uh" "wasn't" "weren't" "without" "wont" "wouldnt" "won't" "wouldn't" "rarely" "seldom" "despite"]<line_sep>Booster=["absolutely" "amazingly" "awfully" "completely" "considerably" "decidedly" "deeply" "effing" "enormously" "entirely" "especially" "exceptionally" "extremely" "fabulously" "flipping" "flippin" "fricking" "frickin" "frigging" "friggin" "fully" "fucking" "greatly" "hella" "highly" "hugely" "incredibly" "intensely" "majorly" "more" "most" "particularly" "purely" "quite" "really" "remarkably" "so" "substantially" "thoroughly" "totally" "tremendously" "uber" "unbelievably" "unusually" "utterly" "very" "almost" "barely" "hardly" "just enough" "kind of" "kinda" "kindof" "kind-of" "less" "little" "marginally" "occasionally" "partly" "scarcely" "slightly" "somewhat" "sort of" "sorta" "sortof" "sort-of"]<line_sep>Extra_Lexicon=Punc+Negate+Booster<def_stmt>create_lexicon corpus embedding num=250<block_start>stopwords=set(nltk.corpus.stopwords.words('english'))<line_sep>V=set([w<for>w embedding.vocab])<line_sep>tags=corpus['polarity']<line_sep>texts=corpus['text']<line_sep>P=[t<for>i,t texts.iteritems()<if>int(tags[i])<eq>1]<line_sep>N=[t<for>i,t texts.iteritems()<if>int(tags[i])<eq>-1]<def_stmt>word_count X<block_start>d={}<for_stmt>x X<block_start><for_stmt>w x<block_start><if_stmt>w<not><in>stopwords<and>w<in>V<and>len(w)<g>1<block_start>d[w]=d[w]+1<if>w<in>d<else>1<block_end><block_end><block_end><return>d<block_end>P_dict=word_count(P)<line_sep>N_dict=word_count(N)<line_sep>L_p=Counter(P_dict).most_common(num)<line_sep>L_n=Counter(N_dict).most_common(num)<line_sep>Words_p,Counts_p=zip(*L_p)<line_sep>Words_n,Counts_n=zip(*L_n)<line_sep>P_sum=sum(Counts_p)<line_sep>N_sum=sum(Counts_n)<line_sep>P_score=[x<times>1.0/P_sum<for>x Counts_p]<line_sep>N_score=[x<times>1.0/N_sum<for>x Counts_n]<line_sep><return>Words_p+Words_n P_score+N_score<block_end><def_stmt>prepare_lexicon corpus embedding num=250 extra=<false><block_start>V=set([w<for>w embedding.vocab])<line_sep>neg=set(opinion_lexicon.negative())<line_sep>pos=set(opinion_lexicon.positive())<line_sep>senti_lexicon=opinion_lexicon.words()<line_sep>senti_lexicon=[w<for>w senti_lexicon<if>w<in>V]<line_sep>lexicon_dic={x:0<for>x senti_lexicon}<for_stmt>sent corpus<block_start><for_stmt>w sent<block_start><if_stmt>w<in>lexicon_dic<block_start>lexicon_dic[w]<augadd>1<block_end><block_end><block_end>L=Counter(lexicon_dic).most_common(5000)<line_sep>N=[]<line_sep>N_count=[]<line_sep>P=[]<line_sep>P_count=[]<for_stmt>word,count L<block_start><if_stmt>word<in>neg<block_start>N.append(word)<line_sep>N_count.append(count)<block_end><elif_stmt>word<in>pos<block_start>P.append(word)<line_sep>P_count.append(count)<block_end><block_end>Senti_L=P[:num]+N[:num]<line_sep>P_sum=sum(P_count[:num])<line_sep>P_score=[x<times>1.0/P_sum<for>x P_count[:num]]<line_sep>N_sum=sum(N_count[:num])<line_sep>N_score=[x<times>1.0/N_sum<for>x N_count[:num]]<line_sep>Senti_W=P_score+N_score<if_stmt>extra<block_start>Extra_L=[l<for>l Extra_Lexicon<if>l<in>V]<line_sep>Extra_W=[1.0<for>l Extra_L]<line_sep><return>Senti_L+Extra_L 
Senti_W+Extra_W<block_end><return>Senti_L Senti_W<block_end><class_stmt>SimVectorizer<block_start><def_stmt>__init__ self lexicon weight embedding stopword=<true> weighted=<false><block_start>self._stopwords=set(nltk.corpus.stopwords.words('english'))<line_sep>self._model=embedding<line_sep>self._W=weight<line_sep>self._V=set([w<for>w self._model.vocab])<line_sep>self._L=self.word_vectors(lexicon).T<line_sep>self._filter=<lambda>x:self.vectorization(self.word_process(x))<line_sep>self.sim_vectorization=self._filter<if>stopword<else>self.vectorization<line_sep>self._weighter=<lambda>x:np.multiply(self.sim_vectorization(x) self._W)<line_sep>self.sim_vector=self._weighter<if>weighted<else>self.sim_vectorization<block_end><def_stmt>word_process self words<block_start><return>[w<for>w words<if>w<not><in>self._stopwords<and>len(w)<g>1]<block_end><def_stmt>word_vectors self x<block_start><return>np.array([self._model[w]<for>_,w enumerate(x)<if>w<in>self._V])<block_end><def_stmt>vectorization self x<block_start>v=self.word_vectors(x)<if_stmt>v.shape[0]<eq>0<block_start><return>np.zeros(self._L.shape[1])<block_end>s=np.dot(v self._L)<line_sep><return>s.max(axis=0)<block_end><def_stmt>transform self X<block_start><return>np.array([self.sim_vector(x)<for>_,x enumerate(X)])<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>django.contrib admin<import_from_stmt>django.core.urlresolvers reverse<import_from_stmt>django.utils.html format_html<import_from_stmt>..models DatabaseMaintenanceTask<class_stmt>DatabaseMaintenanceTaskAdmin(admin.ModelAdmin)<block_start>list_select_related=<none><line_sep>search_fields=("database__name" "task__id" "task__task_id")<line_sep>list_filter=["database__team" "status" ]<line_sep>exclude=("task" "can_do_retry")<line_sep>actions=<none><line_sep>list_display=("database" "database_team" "current_step" "friendly_status" "maintenance_action" "link_task" "started_at" "finished_at")<line_sep>readonly_fields=("database" "link_task" "started_at" "finished_at" "current_step" "status" "maintenance_action")<line_sep>ordering=["-started_at"]<def_stmt>friendly_status self maintenance_task<block_start>html_waiting='<span class="label label-warning">Waiting</span>'<line_sep>html_running='<span class="label label-success">Running</span>'<line_sep>html_error='<span class="label label-important">Error</span>'<line_sep>html_success='<span class="label label-info">Success</span>'<line_sep>html_rollback='<span class="label label-info">Rollback</span>'<line_sep>html_status=''<if_stmt>maintenance_task.status<eq>DatabaseMaintenanceTask.WAITING<block_start>html_status=html_waiting<block_end><elif_stmt>maintenance_task.status<eq>DatabaseMaintenanceTask.RUNNING<block_start>html_status=html_running<block_end><elif_stmt>maintenance_task.status<eq>DatabaseMaintenanceTask.ERROR<block_start>html_status=html_error<block_end><elif_stmt>maintenance_task.status<eq>DatabaseMaintenanceTask.SUCCESS<block_start>html_status=html_success<block_end><elif_stmt>maintenance_task.status<eq>DatabaseMaintenanceTask.ROLLBACK<block_start>html_status=html_rollback<block_end><return>format_html(html_status)<block_end>friendly_status.short_description="Status"<def_stmt>database_team self maintenance_task<block_start><return>maintenance_task.database.team.name<block_end>database_team.short_description="Team"<def_stmt>link_task self maintenance_task<block_start>url=reverse('admin:notification_taskhistory_change' args=[maintenance_task.task.id])<line_sep><return>format_html("<a href={}>{}</a>".format(url maintenance_task.task.id))<block_end>link_task.short_description="Task"<def_stmt>has_delete_permission self request obj=<none><block_start><return><false><block_end><def_stmt>has_add_permission self request obj=<none><block_start><return><false><block_end><def_stmt>maintenance_action self maintenance_task<block_start><raise>NotImplementedError()<block_end>maintenance_action.short_description="Action"<block_end>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # United States Government Sponsorship acknowledged. This software is subject to # U.S. export control laws and regulations and has been classified as 'EAR99 NLR' # (No [Export] License Required except when exporting to an embargoed country, # end user, or in support of a prohibited end use). By downloading this software, # the user agrees to comply with all applicable U.S. export laws and regulations. # The user has the responsibility to obtain export licenses, or other export # authority as may be required before exporting this software to any 'EAR99' # embargoed foreign country or citizen of those countries. # # Author: <NAME> #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <import_stmt>logging<import_stmt>operator<import_stmt>sys<import_stmt>isceobj<import_from_stmt>iscesys.ImageUtil.ImageUtil ImageUtil<as>IU<import_from_stmt>mroipac.correlation.correlation Correlation<line_sep>logger=logging.getLogger('isce.insar.runCoherence')<line_sep>## mapping from algorithm method to Correlation instance method name CORRELATION_METHOD={'phase_gradient':operator.methodcaller('calculateEffectiveCorrelation') 'cchz_wave':operator.methodcaller('calculateCorrelation')}<def_stmt>runCoherence self method="phase_gradient"<block_start>logger.info("Calculating Coherence")<line_sep># Initialize the amplitude # resampAmpImage = self.insar.resampAmpImage # ampImage = isceobj.createAmpImage() # IU.copyAttributes(resampAmpImage, ampImage) # ampImage.setAccessMode('read') # ampImage.createImage() #ampImage = self.insar.getResampOnlyAmp().copy(access_mode='read') # Initialize the flattened interferogram topoflatIntFilename=self.insar.topophaseFlatFilename<line_sep>intImage=isceobj.createIntImage()<line_sep># topophaseFlatFilename is a plain path string, so the width has to come from the resampled interferogram image #widthInt = self.insar.topophaseFlatFilename.getWidth() widthInt=self.insar.resampIntImage.getWidth()<line_sep>intImage.setFilename(topoflatIntFilename)<line_sep>intImage.setWidth(widthInt)<line_sep>intImage.setAccessMode('read')<line_sep>intImage.createImage()<line_sep># Create the coherence image cohFilename=topoflatIntFilename.replace('.flat' '.cor')<line_sep>cohImage=isceobj.createOffsetImage()<line_sep>cohImage.setFilename(cohFilename)<line_sep>cohImage.setWidth(widthInt)<line_sep>cohImage.setAccessMode('write')<line_sep>cohImage.createImage()<line_sep>cor=Correlation()<line_sep>cor.configure()<line_sep>cor.wireInputPort(name='interferogram' object=intImage)<line_sep>#cor.wireInputPort(name='amplitude', object=ampImage) cor.wireOutputPort(name='correlation' object=cohImage)<line_sep>cohImage.finalizeImage()<line_sep>intImage.finalizeImage()<line_sep>#ampImage.finalizeImage() <try_stmt><block_start>CORRELATION_METHOD[method](cor)<block_end><except_stmt>KeyError<block_start>print("Unrecognized correlation method")<line_sep>sys.exit(1)<line_sep><pass><block_end><return><none><block_end>
"""Tests for the protocol engine's ActionDispatcher."""<import_from_stmt>decoy Decoy<import_from_stmt>opentrons.protocol_engine.actions ActionDispatcher ActionHandler PlayAction <def_stmt>test_sink decoy:Decoy<arrow><none><block_start>"""It should send all actions to the sink handler."""<line_sep>action=PlayAction()<line_sep>sink=decoy.mock(cls=ActionHandler)<line_sep>subject=ActionDispatcher(sink=sink)<line_sep>subject.dispatch(action)<line_sep>decoy.verify(sink.handle_action(action))<block_end><def_stmt>test_add_handler decoy:Decoy<arrow><none><block_start>"""It should actions to handlers before the sink."""<line_sep>action=PlayAction()<line_sep>handler_1=decoy.mock(cls=ActionHandler)<line_sep>handler_2=decoy.mock(cls=ActionHandler)<line_sep>sink=decoy.mock(cls=ActionHandler)<line_sep>subject=ActionDispatcher(sink=sink)<line_sep>subject.add_handler(handler_1)<line_sep>subject.add_handler(handler_2)<line_sep>subject.dispatch(action)<line_sep>decoy.verify(handler_1.handle_action(action) handler_2.handle_action(action) sink.handle_action(action) )<block_end>
<import_stmt>nmap<import_stmt>requests<def_stmt>nScan ip<block_start>nm=nmap.PortScanner()<line_sep>nm.scan(ip arguments="-F")<for_stmt>host nm.all_hosts()<block_start>ports=[]<line_sep>protocols=[]<line_sep>states=[]<line_sep>po=[]<for_stmt>proto nm[host].all_protocols()<block_start>protocols.append(proto)<line_sep>lport=nm[host][proto].keys()<for_stmt>port lport<block_start>ports.append(port)<line_sep>states.append(nm[host][proto][port]['state'])<line_sep># build the result entry while the matching protocol is still in scope instead of reusing a stale loop variable after the loop n={"Port":str(port) "Name":nm[host][proto][port]['name'] "Reason":nm[host][proto][port]['reason'] "State":nm[host][proto][port]['state']}<line_sep>po.append(n)<block_end><block_end><return>po<block_end><block_end>
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>sqlalchemy CheckConstraint Column Integer Text<import_from_stmt>warehouse db<import_from_stmt>warehouse.utils.attrs make_repr<class_stmt>Classifier(db.ModelBase)<block_start>__tablename__="trove_classifiers"<line_sep>__tableargs__=CheckConstraint("classifier not ilike 'private ::%'" name="ck_disallow_private_top_level_classifier" )<line_sep>__repr__=make_repr("classifier")<line_sep>id=Column(Integer primary_key=<true> nullable=<false>)<line_sep>classifier=Column(Text unique=<true>)<block_end>
LIST_ASSIGNED_USER_ROLE_RESPONSE=""" [ { "id": "IFIFAX2BIRGUSTQ", "label": "Application Administrator", "type": "APP_ADMIN", "status": "ACTIVE", "created": "2019-02-06T16:17:40.000Z", "lastUpdated": "2019-02-06T16:17:40.000Z", "assignmentType": "USER", "_links": { "assignee": { "href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3" } } }, { "id": "JBCUYUC7IRCVGS27IFCE2SKO", "label": "Help Desk Administrator", "type": "HELP_DESK_ADMIN", "status": "ACTIVE", "created": "2019-02-06T16:17:40.000Z", "lastUpdated": "2019-02-06T16:17:40.000Z", "assignmentType": "USER", "_links": { "assignee": { "href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3" } } } ] """<line_sep>LIST_ASSIGNED_GROUP_ROLE_RESPONSE=""" [ { "id": "IFIFAX2BIRGUSTQ", "label": "Application Administrator", "type": "APP_ADMIN", "status": "ACTIVE", "created": "2019-02-27T14:48:59.000Z", "lastUpdated": "2019-02-27T14:48:59.000Z", "assignmentType": "GROUP", "_links": { "assignee": { "href": "http://{yourOktaDomain}/api/v1/groups/00gsr2IepS8YhHRFf0g3" } } }, { "id": "JBCUYUC7IRCVGS27IFCE2SKO", "label": "Help Desk Administrator", "type": "HELP_DESK_ADMIN", "status": "ACTIVE", "created": "2019-02-06T16:17:40.000Z", "lastUpdated": "2019-02-06T16:17:40.000Z", "assignmentType": "GROUP", "_links": { "assignee": { "href": "http://{yourOktaDomain}/api/v1/users/00ur32Vg0fvpyHZeQ0g3" } } } ] """<line_sep>
# -*- coding: utf-8 -*- <import_from_stmt>.base MetricGenerator<line_sep>__all__=['MetricGenerator']<line_sep>
<import_stmt>two<line_sep>two.ct().fun()<line_sep>
""" Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>os PathLike<import_from_stmt>pathlib Path<import_from_stmt>abc ABC abstractmethod<import_from_stmt>collections defaultdict<import_from_stmt>typing Any Callable Dict Optional Sequence Tuple Union TypeVar<import_stmt>torch<import_from_stmt>nndet.io.load save_pickle<import_from_stmt>nndet.utils.tensor to_numpy<import_from_stmt>nndet.utils.info maybe_verbose_iterable<class_stmt>BaseEnsembler(ABC)<block_start>ID="abstract"<def_stmt>__init__ self properties:Dict[str Any] parameters:Dict[str Any] device:Optional[Union[torch.device str]]=<none> **kwargs<block_start>""" Base class to containerize and ensemble the predictions of a single case. Call :method:`process_batch` to add batched predictions of a case to the ensembler and :method:`add_model` to signal the next model if multiple models are used. Args: properties: properties of the patient/case (e.g. tranpose axes) parameters: parameters for ensembling device: device to use for internal computations **kwargs: parameters for ensembling Notes: Call :method:`add_model` before adding predictions. """<line_sep>self.model_current=<none><line_sep>self.model_results={}<line_sep>self.model_weights={}<line_sep>self.properties=properties<line_sep>self.case_result:Optional[Dict]=<none><line_sep>self.parameters=parameters<line_sep>self.parameters.update(kwargs)<if_stmt>device<is><none><block_start>self.device=torch.device("cpu")<block_end><elif_stmt>isinstance(device str)<block_start>self.device=torch.device(device)<block_end><elif_stmt>isinstance(device torch.device)<block_start>self.device=device<block_end><else_stmt><block_start><raise>ValueError(f"Wrong type {type(device)} for device argument.")<block_end><block_end>@classmethod<def_stmt>from_case cls case:Dict properties:Optional[Dict]=<none> parameters:Optional[Dict]=<none> **kwargs <block_start>""" Primary way to instantiate this class. Automatically extracts all properties and uses a default set of parameters for ensembling. Args: case: case which is predicted properties: Additional properties. Defaults to None. parameters: Additional parameters. Defaults to None. """<line_sep><return>cls(properties=properties parameters=parameters **kwargs)<block_end><def_stmt>add_model self name:Optional[str]=<none> model_weight:Optional[float]=<none> <arrow>str<block_start>""" This functions signales the ensembler to add a new model for internal processing Args: name: Name of the model. If None, uses counts the models. model_weight: Optional weight for this model. Defaults to None. 
"""<if_stmt>name<is><none><block_start>name=len(self.model_weights)+1<block_end><if_stmt>name<in>self.model_results<block_start><raise>ValueError(f"Invalid model name, model {name} is already present")<block_end><if_stmt>model_weight<is><none><block_start>model_weight=1.0<block_end>self.model_weights[name]=model_weight<line_sep>self.model_results[name]=defaultdict(list)<line_sep>self.model_current=name<line_sep><return>name<block_end>@abstractmethod@torch.no_grad()<def_stmt>process_batch self result:Dict batch:Dict<block_start>""" Process a single batch Args: result: predictions to save and ensemble batch: input batch used for predictions (for additional meta data) Raises: NotImplementedError: Overwrite this function in subclasses for the specific use case. Warnings: Make sure to move cached values to the CPU after they have been processed. """<line_sep><raise>NotImplementedError<block_end>@abstractmethod@torch.no_grad()<def_stmt>get_case_result self restore:bool=<false><arrow>Dict[str torch.Tensor]<block_start>""" Retrieve the results of a single case Args: restore: restores predictions in original image space Raises: NotImplementedError: Overwrite this function in subclasses for the specific use case. Returns: Dict[str, torch.Tensor]: the result of a single case """<line_sep><raise>NotImplementedError<block_end><def_stmt>update_parameters self **parameters:Dict<block_start>""" Update internal parameters used for ensembling the results Args: parameters: parameters to update """<line_sep>self.parameters.update(parameters)<block_end>@classmethod@abstractmethod<def_stmt>sweep_parameters cls<arrow>Tuple[Dict[str Any] Dict[str Sequence[Any]]]<block_start>""" Return a set of parameters which can be used to sweep ensembling parameters in a postprocessing step Returns: Dict[str, Any]: default state to start with Dict[str, Sequence[Any]]]: Defines the values to search for each parameter """<line_sep><raise>NotImplementedError<block_end><def_stmt>save_state self target_dir:Path name:str **kwargs <block_start>""" Save case result as pickle file. 
Identifier of ensembler will be added to the name Args: target_dir: folder to save result to name: name of case **kwargs: data to save """<line_sep>kwargs["properties"]=self.properties<line_sep>kwargs["parameters"]=self.parameters<line_sep>kwargs["model_current"]=self.model_current<line_sep>kwargs["model_results"]=self.model_results<line_sep>kwargs["model_weights"]=self.model_weights<line_sep>kwargs["case_result"]=self.case_result<with_stmt>open(Path(target_dir)/f"{name}_{self.ID}.pt" "wb")<as>f<block_start>torch.save(kwargs f)<block_end><block_end><def_stmt>load_state self base_dir:PathLike case_id:str<arrow>Dict<block_start>""" Path to result file """<line_sep>ckp=torch.load(str(Path(base_dir)/f"{case_id}_{self.ID}.pt"))<line_sep>self._load(ckp)<line_sep><return>ckp<block_end><def_stmt>_load self state:Dict<block_start><for_stmt>key,item state.items()<block_start>setattr(self key item)<block_end><block_end>@classmethod<def_stmt>from_checkpoint cls base_dir:PathLike case_id:str<block_start>ckp=torch.load(str(Path(base_dir)/f"{case_id}_{cls.ID}.pt"))<line_sep>t=cls(properties=ckp["properties"] parameters=ckp["parameters"] )<line_sep>t._load(ckp)<line_sep><return>t<block_end>@classmethod<def_stmt>get_case_ids cls base_dir:PathLike<block_start><return>[c.stem.rsplit(f"_{cls.ID}" 1)[0]<for>c Path(base_dir).glob(f"*_{cls.ID}.pt")]<block_end><block_end><class_stmt>OverlapMap<block_start><def_stmt>__init__ self data_shape:Sequence[int]<block_start>""" Handler for overlap map Args: data_shape: spatial dimensions of data ( no batch dim and no channel dim!) """<line_sep>self.overlap_map:torch.Tensor=torch.zeros(*data_shape requires_grad=<false> dtype=torch.float)<block_end><def_stmt>add_overlap self crop:Sequence[slice]<block_start>""" Increase values of :param:`self.overlap_map` inside of crop Args: crop: defines crop. 
Negative values are assumed to be outside of the data and thus discarded """<line_sep># discard leading indexes which could be due to batches and channels <if_stmt>len(crop)<g>self.overlap_map.ndim<block_start>crop=crop[-self.overlap_map.ndim:]<block_end># clip crop to data shape slicer=[]<for_stmt>data_shape,crop_dim zip(tuple(self.overlap_map.shape) crop)<block_start>start=max(0 crop_dim.start)<line_sep>stop=min(data_shape crop_dim.stop)<line_sep>slicer.append(slice(start stop crop_dim.step))<block_end>self.overlap_map[tuple(slicer)]<augadd>1<block_end><def_stmt>mean_num_overlap_of_box self box:Sequence[int]<arrow>float<block_start>""" Extract mean number of overlaps from a bounding box area Args: box: defines bounding box (x1, y1, x2, y2, (z1, z2)) Returns: float: mean number of overlaps """<line_sep>slicer=[slice(int(box[0]) int(box[2])) slice(int(box[1]) int(box[3]))]<if_stmt>len(box)<eq>6<block_start>slicer.append(slice(int(box[4]) int(box[5])))<block_end><return>torch.mean(self.overlap_map[tuple(slicer)].float()).item()<block_end><def_stmt>mean_num_overlap_of_boxes self boxes:torch.Tensor<arrow>torch.Tensor<block_start>""" Extract mean number of overlaps from a bounding box area Args: boxes: defines multiple bounding boxes (x1, y1, x2, y2, (z1, z2)) [N, dim * 2] Returns: Tensor: mean number of overlaps per box [N] """<line_sep><return>torch.tensor([self.mean_num_overlap_of_box(box)<for>box boxes]).to(dtype=torch.float device=boxes.device)<block_end><def_stmt>avg self<arrow>torch.Tensor<block_start>""" Compute the median over all overlap counts (a robust average) """<line_sep><return>self.overlap_map.float().median()<block_end><def_stmt>restore_mean self val<block_start>""" Generate a new overlap map filled with the specified value """<line_sep>self.overlap_map=torch.full_like(self.overlap_map float(val))<block_end><block_end><def_stmt>extract_results source_dir:PathLike target_dir:PathLike ensembler_cls:Callable restore:bool **params <arrow><none><block_start>""" Compute case result from ensembler and save it Args: source_dir: directory which contains the saved predictions/state from the ensembler class target_dir: directory to save results ensembler_cls: ensembler class for prediction restore: if true, the results are converted into the original image space """<line_sep>Path(target_dir).mkdir(parents=<true> exist_ok=<true>)<for_stmt>case_id maybe_verbose_iterable(ensembler_cls.get_case_ids(source_dir))<block_start>ensembler=ensembler_cls.from_checkpoint(base_dir=source_dir case_id=case_id)<line_sep>ensembler.update_parameters(**params)<line_sep>pred=to_numpy(ensembler.get_case_result(restore=restore))<line_sep>save_pickle(pred Path(target_dir)/f"{case_id}_{ensembler_cls.ID}.pkl")<block_end><block_end>BaseEnsemblerType=TypeVar('BaseEnsemblerType' bound=BaseEnsembler)<line_sep>
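# --- Hedged usage sketch (not part of the original nnDetection module) ------
# Shows the intended call order of the BaseEnsembler API defined above:
# add_model() before caching predictions, process_batch() once per batch, and
# get_case_result() after every model contributed. The names below
# (MeanEnsembler, _demo_mean_ensembler) are illustrative only, and the plain
# averaging is a toy strategy, not the project's actual ensembling logic.
class MeanEnsembler(BaseEnsembler):
    ID = "mean_demo"

    @torch.no_grad()
    def process_batch(self, result, batch):
        # cache predictions on the CPU under the currently active model
        self.model_results[self.model_current]["pred"].append(
            result["pred"].detach().cpu())

    @torch.no_grad()
    def get_case_result(self, restore: bool = False):
        per_model = [
            torch.cat(res["pred"]).mean(dim=0, keepdim=True) * self.model_weights[name]
            for name, res in self.model_results.items()
        ]
        weight_sum = sum(self.model_weights.values())
        self.case_result = {"pred": torch.stack(per_model).sum(dim=0) / weight_sum}
        return self.case_result

    @classmethod
    def sweep_parameters(cls):
        # nothing to sweep in this toy example
        return {}, {}


def _demo_mean_ensembler():
    ensembler = MeanEnsembler.from_case(case={}, properties={}, parameters={})
    ensembler.add_model("fold0", model_weight=1.0)
    ensembler.process_batch({"pred": torch.rand(2, 3, 8, 8)}, batch={})
    print(ensembler.get_case_result()["pred"].shape)  # torch.Size([1, 3, 8, 8])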
<class_stmt>BBUtil(object)<block_start><def_stmt>__init__ self width height<block_start>super(BBUtil self).__init__()<line_sep>self.width=width<line_sep>self.height=height<block_end><def_stmt>xywh_to_tlwh self bbox_xywh<block_start>x,y,w,h=bbox_xywh<line_sep>xmin=max(int(round(x-(w/2))) 0)<line_sep>ymin=max(int(round(y-(h/2))) 0)<line_sep><return>[xmin ymin int(w) int(h)]<block_end><def_stmt>tlwh_to_xyxy self bbox_tlwh<block_start>x,y,w,h=bbox_tlwh<line_sep>x1=max(int(x) 0)<line_sep>x2=min(int(x+w) self.width-1)<line_sep>y1=max(int(y) 0)<line_sep>y2=min(int(y+h) self.height-1)<line_sep><return>[x1 y1 x2 y2]<block_end><def_stmt>xywh_to_xyxy self bbox_xywh<block_start>x,y,w,h=bbox_xywh<line_sep>x1=max(int(x-w/2) 0)<line_sep>x2=min(int(x+w/2) self.width-1)<line_sep>y1=max(int(y-h/2) 0)<line_sep>y2=min(int(y+h/2) self.height-1)<line_sep><return>[x1 y1 x2 y2]<block_end><def_stmt>xyxy_to_tlwh self bbox_xyxy<block_start>x1,y1,x2,y2=bbox_xyxy<line_sep>t=x1<line_sep>l=y1<line_sep>w=int(x2-x1)<line_sep>h=int(y2-y1)<line_sep><return>[t l w h]<block_end><def_stmt>float_to_int self bbox_xyxy<block_start>x1,y1,x2,y2=bbox_xyxy<line_sep><return>[int(x1<times>self.width) int(y1<times>self.height) int(x2<times>self.width) int(y2<times>self.height)]<block_end><def_stmt>int_to_float self bbox_xyxy<block_start>x1,y1,x2,y2=[float(item)<for>item bbox_xyxy]<line_sep><return>[x1/self.width y1/self.height x2/self.width y2/self.height]<block_end><block_end>
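# --- Hedged usage sketch for BBUtil above (640x480 is an assumed frame size) -
bb = BBUtil(width=640, height=480)
xyxy = bb.xywh_to_xyxy([320, 240, 100, 50])   # center/size -> corner format: [270, 215, 370, 265]
tlwh = bb.xyxy_to_tlwh(xyxy)                  # corner format -> top-left/size: [270, 215, 100, 50]
norm = bb.int_to_float(xyxy)                  # pixel coordinates -> values in [0, 1]
print(xyxy, tlwh, norm)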
""" Copyright (c) 2019-2020 Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<line_sep>__author__="<NAME>"<import_from_stmt>plato.agent.component.nlg.nlg NLG<import_stmt>random<line_sep>""" SlotFillingNLG is a simple template-based nlg, designed to work for Slot-Filling applications. The purpose of this class is to provide a quick way of running Conversational Agents, sanity checks, and to aid debugging. """<class_stmt>SlotFillingNLG(NLG)<block_start><def_stmt>__init__ self args=<none><block_start>""" Nothing to initialize. We need the args to support use by the Generic Agent. """<line_sep>super(SlotFillingNLG self).__init__()<block_end><def_stmt>initialize self args<block_start>""" Nothing to do here :param args: :return: """<line_sep><pass><block_end><def_stmt>generate_output self args=<none><block_start>""" Select the appropriate template given the acts in the arguments and generate the output utterance. :param args: a dictionary of arguments that contain the dialogue acts :return: the output utterance """<if_stmt><not>args<block_start>print('WARNING! SlotFillingNLG called without arguments!')<line_sep><return>''<block_end><if_stmt>'args'<in>args<block_start>dacts=args['args']<block_end><elif_stmt>'dacts'<not><in>args<block_start>print('WARNING! SlotFillingNLG called without dacts!')<line_sep><return>''<block_end><else_stmt><block_start>dacts=args['dacts']<block_end>system=<true><if_stmt>'system'<in>args<block_start>system=bool(args['system'])<block_end>response=''<for_stmt>dact dacts<block_start><if_stmt>dact.intent<eq>'request'<block_start><if_stmt>dact.params<and>dact.params[0].slot<block_start><if_stmt>system<block_start>response<augadd>'Which '+dact.params[0].slot+' do you prefer?'<block_end><else_stmt><block_start>response<augadd>'What is the '+dact.params[0].slot+'?'<block_end><block_end><else_stmt><block_start>response<augadd>'Which one?'<block_end><block_end><elif_stmt>dact.intent<in>['inform' 'offer']<block_start><for_stmt>dact_item dact.params<block_start><if_stmt>system<block_start><if_stmt>dact_item.slot<eq>'name'<and>dact_item.value<eq>'not found'<block_start>response<augadd>'Sorry, I cannot find such an item. 
'<block_end><else_stmt><block_start><if_stmt><not>dact_item.value<block_start>response<augadd>'its '+dact_item.slot+' is unknown, '<block_end><elif_stmt>dact_item.slot<eq>'name'<and>len(dact.params)<g>1<block_start>response<augadd>dact_item.value+' '<block_end><elif_stmt>dact_item.slot<in>['food' 'cuisine']<block_start>response<augadd>'is serving '+dact_item.value+' food, '<block_end><elif_stmt>dact_item.slot<eq>'endorsement'<block_start>response<augadd>'is '+dact_item.value+', '<block_end><else_stmt><block_start>response<augadd>'its '+dact_item.slot+' is '+dact_item.value+', '<block_end><block_end><block_end><else_stmt><block_start><if_stmt>dact.intent<eq>'offer'<block_start><if_stmt>dact_item.value<block_start>response<augadd>dact_item.slot+' is '+dact_item.value+', '<block_end><else_stmt><block_start>response<augadd>dact_item.slot+' is unknown, '<block_end><block_end><else_stmt><block_start>r=random.random()<if_stmt>r<l>0.33<block_start>response<augadd>'I prefer '+dact_item.value+' '+dact_item.slot+', '<block_end><elif_stmt>r<l>0.66<block_start>response<augadd>'um i want '+dact_item.value+' '+dact_item.slot+', '<block_end><else_stmt><block_start>response<augadd>dact_item.value+' '+dact_item.slot+' please, '<block_end><block_end><block_end><block_end><if_stmt>response# Trim trailing comma and space <block_start>response=response[:-2]<block_end><block_end><elif_stmt>dact.intent<eq>'bye'<block_start>response<augadd>'Thank you, goodbye'<block_end><elif_stmt>dact.intent<eq>'deny'<block_start>response<augadd>'No'<block_end><elif_stmt>dact.intent<eq>'negate'<block_start>response<augadd>'No '<if_stmt>dact.params<and>dact.params[0].slot<and>dact.params[0].value<block_start>response<augadd>dact.params[0].slot+' is not '+dact.params[0].value<block_end><block_end><elif_stmt>dact.intent<eq>'ack'<block_start>response<augadd>'Ok'<block_end><elif_stmt>dact.intent<eq>'affirm'<block_start>response<augadd>'Yes '<if_stmt>dact.params<and>dact.params[0].slot<and>dact.params[0].value<block_start>response<augadd>dact.params[0].slot+' is '+dact.params[0].value<block_end><block_end><elif_stmt>dact.intent<eq>'thankyou'<block_start>response<augadd>'Thank you'<block_end><elif_stmt>dact.intent<eq>'reqmore'<block_start>response<augadd>'Can you tell me more?'<block_end><elif_stmt>dact.intent<eq>'repeat'<block_start>response<augadd>'Can you please repeat?'<block_end><elif_stmt>dact.intent<eq>'restart'<block_start>response<augadd>'Can we start over?'<block_end><elif_stmt>dact.intent<eq>'expl-conf'<block_start>response<augadd>'Alright '<if_stmt>dact.params<and>dact.params[0].slot<and>dact.params[0].value<block_start>response<augadd>dact.params[0].slot+' is '+dact.params[0].value<block_end><block_end><elif_stmt>dact.intent<eq>'select'<block_start>response<augadd>'Which one do you prefer '<if_stmt>dact.params<and>dact.params[0].slot<block_start>response<augadd>'for '+dact.params[0].slot<block_end><block_end><elif_stmt>dact.intent<eq>'reqalts'<block_start>response<augadd>'Is there anything else?'<block_end><elif_stmt>dact.intent<in>['confirm' 'confirm-domain']<block_start>response<augadd>'So is '<if_stmt>dact.params<and>dact.params[0].slot<and>dact.params[0].value<block_start>response<augadd>dact.params[0].slot+' '+dact.params[0].value<block_end><block_end><elif_stmt>dact.intent<eq>'canthelp'<block_start>response<augadd>'Sorry, I cannot help you with that.'<block_end><elif_stmt>dact.intent<eq>'welcomemsg'<block_start>response<augadd>'Hello, how may I help 
you?'<block_end><elif_stmt>dact.intent<eq>'hello'<block_start>response='Hi'<block_end><elif_stmt>dact.intent<eq>'welcome'<block_start>response<augadd>random.choice(['Hi, how can I help you today?' 'Speak, human.'])<block_end><elif_stmt>dact.intent<eq>'na'<block_start>response<augadd>'(no system response)'<block_end><else_stmt><block_start>response<augadd>'SlotFillingNLG %s'%dact<block_end>response<augadd>' '<block_end>response=response.replace('addr' 'address')<line_sep>response=response.replace('pricerange' 'price range')<line_sep>response=response.replace('postcode' 'post code')<line_sep>response=response.replace('dontcare' 'any')<line_sep><return>response<block_end><def_stmt>train self data<block_start>""" Nothing to do here. :param data: :return: """<line_sep><pass><block_end><def_stmt>save self path=<none><block_start>""" Nothing to do here. :param path: :return: """<line_sep><pass><block_end><def_stmt>load self path<block_start>""" Nothing to do here. :param path: :return: """<line_sep><pass><block_end><block_end>
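# --- Hedged usage sketch (illustration only) ---------------------------------
# Exercises SlotFillingNLG.generate_output with light-weight stand-in objects.
# Plato's real dialogue-act classes are not imported here; the SimpleNamespace
# dummies merely mimic the .intent / .params / .slot / .value attributes that
# the templates above read.
from types import SimpleNamespace

_request = SimpleNamespace(intent='request',
                           params=[SimpleNamespace(slot='area', value=None)])
_inform = SimpleNamespace(intent='inform',
                          params=[SimpleNamespace(slot='pricerange', value='cheap')])

_nlg = SlotFillingNLG()
print(_nlg.generate_output({'dacts': [_request, _inform], 'system': True}))
# e.g. "Which area do you prefer? its price range is cheap"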
<import_from_stmt>typing Tuple<import_from_stmt>pdfminer.pdfdocument PDFDocument<import_from_stmt>pdfminer.pdfpage PDFPage<import_from_stmt>pdfminer.pdfparser PDFParser<try_stmt><block_start><import_from_stmt>IPython get_ipython<if_stmt>"IPKernelApp"<not><in>get_ipython().config<block_start><raise>ImportError("console")<block_end><block_end><except_stmt>(AttributeError ImportError)<block_start><import_from_stmt>wand.display display<block_end><else_stmt><block_start><import_from_stmt>IPython.display display<block_end><import_from_stmt>wand.color Color<import_from_stmt>wand.drawing Drawing<import_from_stmt>wand.image Image<class_stmt>TreeVisualizer<block_start>""" Object to display bounding boxes on a pdf document """<def_stmt>__init__ self pdf_file<block_start>""" :param pdf_path: directory where documents are stored :return: """<line_sep>self.pdf_file=pdf_file<block_end><def_stmt>display_boxes self tree html_path filename_prefix alternate_colors=<false><block_start>""" Displays each of the bounding boxes passed in 'boxes' on images of the pdf pointed to by pdf_file boxes is a list of 5-tuples (page, top, left, bottom, right) """<line_sep>imgs=[]<line_sep>colors={"section_header":Color("blue") "figure":Color("green") "figure_caption":Color("green") "table_caption":Color("red") "list":Color("yellow") "paragraph":Color("gray") "table":Color("red") "header":Color("brown") }<for_stmt>i,page_num enumerate(tree.keys())<block_start>img=self.pdf_to_img(page_num)<line_sep>draw=Drawing()<line_sep>draw.fill_color=Color("rgba(0, 0, 0, 0.0)")<for_stmt>clust tree[page_num]<block_start><for_stmt>(pnum pwidth pheight top left bottom right) tree[page_num][clust]<block_start>draw.stroke_color=colors[clust]<line_sep>draw.rectangle(left=left top=top right=right bottom=bottom)<line_sep>draw.push()<line_sep>draw.font_size=20<line_sep>draw.font_weight=10<line_sep>draw.fill_color=colors[clust]<if_stmt>int(left)<g>0<and>int(top)<g>0<block_start>draw.text(x=int(left) y=int(top) body=clust)<block_end>draw.pop()<block_end><block_end>draw(img)<line_sep>img.save(filename=html_path+filename_prefix+"_page_"+str(i)+".png")<line_sep>imgs.append(img)<block_end><return>imgs<block_end><def_stmt>display_candidates self tree html_path filename_prefix<block_start>""" Displays the bounding boxes corresponding to candidates on an image of the pdf boxes is a list of 5-tuples (page, top, left, bottom, right) """<line_sep>imgs=self.display_boxes(tree html_path filename_prefix alternate_colors=<true>)<line_sep><return>display(*imgs)<block_end><def_stmt>pdf_to_img self page_num pdf_dim=<none><block_start>""" Converts pdf file into image :param pdf_file: path to the pdf file :param page_num: page number to convert (index starting at 1) :return: wand image object """<if_stmt><not>pdf_dim<block_start>pdf_dim=get_pdf_dim(self.pdf_file)<block_end>page_width,page_height=pdf_dim<line_sep>img=Image(filename="{}[{}]".format(self.pdf_file page_num-1))<line_sep>img.resize(page_width page_height)<line_sep><return>img<block_end><block_end><def_stmt>get_pdf_dim pdf_file<arrow>Tuple[int int]<block_start><with_stmt>open(pdf_file "rb")<as>f<block_start>parser=PDFParser(f)<line_sep>doc=PDFDocument(parser)<line_sep># Look at the 1st page only. page=next(PDFPage.create_pages(doc))<line_sep>_,_,page_width,page_height=page.mediabox<block_end><return>page_width page_height<block_end>
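# --- Hedged usage sketch (illustration only) ---------------------------------
# "paper.pdf" and the existing "output" directory are assumed example paths.
width, height = get_pdf_dim("paper.pdf")      # native size of the first page
viz = TreeVisualizer("paper.pdf")
first_page = viz.pdf_to_img(1)                # wand Image of page 1
first_page.save(filename="output/paper_page_1.png")
print(width, height)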
# A CAN bus. <class_stmt>Bus(object)<block_start>"""A CAN bus. """<def_stmt>__init__ self name comment=<none> baudrate=<none> fd_baudrate=<none> autosar_specifics=<none><block_start>self._name=name<line_sep># If the 'comment' argument is a string, we assume that is an # English comment. This is slightly hacky, because the # function's behavior depends on the type of the passed # argument, but it is quite convenient... <if_stmt>isinstance(comment str)# use the first comment in the dictionary as "The" comment <block_start>self._comments={<none>:comment}<block_end><else_stmt># assume that we have either no comment at all or a # multi-lingual dictionary <block_start>self._comments=comment<block_end>self._baudrate=baudrate<line_sep>self._fd_baudrate=fd_baudrate<line_sep>self._autosar=autosar_specifics<block_end>@property<def_stmt>name self<block_start>"""The bus name as a string. """<line_sep><return>self._name<block_end>@property<def_stmt>comment self<block_start>"""The bus' comment, or ``None`` if unavailable. Note that we implicitly try to return the English comment if multiple languages were specified. """<if_stmt>self._comments<is><none><block_start><return><none><block_end><elif_stmt>self._comments.get(<none>)<is><not><none><block_start><return>self._comments.get(<none>)<block_end><elif_stmt>self._comments.get("FOR-ALL")<is><not><none><block_start><return>self._comments.get("FOR-ALL")<block_end><return>self._comments.get('EN')<block_end>@property<def_stmt>comments self<block_start>"""The dictionary with the descriptions of the bus in multiple languages. ``None`` if unavailable. """<line_sep><return>self._comments<block_end>@property<def_stmt>baudrate self<block_start>"""The bus baudrate, or ``None`` if unavailable. """<line_sep><return>self._baudrate<block_end>@property<def_stmt>fd_baudrate self<block_start>"""The baudrate used for the payload of CAN-FD frames, or ``None`` if unavailable. """<line_sep><return>self._fd_baudrate<block_end>@property<def_stmt>autosar self<block_start>"""An object containing AUTOSAR specific properties of the bus. """<line_sep><return>self._autosar<block_end>@autosar.setter<def_stmt>autosar self value<block_start>self._autosar=value<block_end><def_stmt>__repr__ self<block_start><return>"bus('{}', {})".format(self._name "'"+self.comment+"'"<if>self.comment<is><not><none><else><none>)<block_end><block_end>
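# --- Hedged usage sketch of the Bus container above --------------------------
bus = Bus('PT_CAN',
          comment={None: 'powertrain bus', 'DE': 'Antriebsstrang-Bus'},
          baudrate=500000,
          fd_baudrate=2000000)
print(bus.name, bus.baudrate, bus.fd_baudrate)
print(bus.comment)   # the multi-lingual dict resolves to the None/English entry
print(repr(bus))     # bus('PT_CAN', 'powertrain bus')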
"""Class for representing polynomials of one variable."""<import_stmt>functools<line_sep>@functools.total_ordering<class_stmt>Polynomial(object)<block_start>__slots__=("terms" )<def_stmt>__init__ self terms=()<block_start>terms=list(terms)<while_stmt>terms<and>(terms[-1]<eq>0)<block_start>terms.pop()<block_end>self.terms=tuple(terms)<block_end><def_stmt>__hash__ self<block_start><return>hash(self.terms)<block_end><def_stmt>__eq__ self other<block_start><return>self.terms<eq>other.terms<block_end><def_stmt>__lt__ self other<block_start><if_stmt>len(self.terms)<ne>len(other.terms)<block_start><return>len(self.terms)<l>len(other.terms)<block_end><for_stmt>i reversed(range(len(self.terms)))<block_start>self_term=self.terms[i]<line_sep>other_term=other.terms[i]<if_stmt>self_term<l>other_term<block_start><return><true><block_end><if_stmt>other_term<l>self_term<block_start><return><false><block_end><block_end><return><false><block_end><def_stmt>__str__ self<block_start><if_stmt><not>self.terms<block_start><return>"0"<block_end>s=str(self.terms[0])<for_stmt>i range(1 len(self.terms))<block_start><if_stmt>self.terms[i]<block_start>term=str(self.terms[i])<line_sep>exponent="n^{}".format(i)<if>i<g>1<else>"n"<line_sep>s=term+exponent+" + "+s<block_end><block_end><return>s<block_end><def_stmt>__repr__ self<block_start><return>"Polynomial({!r})".format(self.terms)<block_end><def_stmt>get_coefficient self i<block_start><if_stmt>i<ge>len(self.terms)<block_start><return>0<block_end><return>self.terms[i]<block_end><def_stmt>largest_term self<block_start><if_stmt><not>self.terms<block_start><return>DominantTerm.ZERO<block_end>exponent=len(self.terms)-1<line_sep><return>DominantTerm(multiplier=self.get_coefficient(exponent) exponent=exponent)<block_end><def_stmt>__add__ self other<block_start>terms=[0]<times>max(len(self.terms) len(other.terms))<for_stmt>i range(len(terms))<block_start>terms[i]=self.get_coefficient(i)+other.get_coefficient(i)<block_end><return>Polynomial(terms)<block_end><def_stmt>__mul__ self other<block_start><if_stmt>isinstance(other Polynomial)<block_start>res=Polynomial.ZERO<for_stmt>i range(len(self.terms))<block_start>res<augadd>other<times>self.terms[i]<line_sep>res<augadd>Polynomial([0]<times>i+list(other.terms))<block_end><return>res<block_end><else_stmt><block_start><return>Polynomial((t<times>other)<for>t self.terms)<block_end><block_end><block_end>Polynomial.ZERO=Polynomial()<line_sep>Polynomial.ONE=Polynomial([1])<line_sep>Polynomial.N=Polynomial([0 1])<line_sep>@functools.total_ordering<class_stmt>DominantTerm(object)<block_start>"""A term of the form c*n^e for some unknown n. Instances of this class can be added, multiplied, and compared. A term with a higher exponent is always greater than one with a lower exponent. 
"""<line_sep>__slots__=("multiplier" "exponent")<def_stmt>__init__ self multiplier exponent<block_start>self.multiplier=multiplier<line_sep>self.exponent=exponent<block_end><def_stmt>__eq__ self other<block_start><return>self.multiplier<eq>other.multiplier<and>self.exponent<eq>other.exponent<block_end><def_stmt>__lt__ self other<block_start><return>(self.exponent self.multiplier)<l>(other.exponent other.multiplier)<block_end><def_stmt>__str__ self<block_start><return>"{}n^{}".format(self.multiplier self.exponent)<block_end><def_stmt>__repr__ self<block_start><return>"DominantTerm({}, {})".format(self.multiplier self.exponent)<block_end><def_stmt>__add__ self other<block_start><if_stmt>other.exponent<eq>self.exponent<block_start><return>DominantTerm(self.multiplier+other.multiplier self.exponent)<block_end><if_stmt>other.exponent<g>self.exponent<block_start><return>other<block_end><return>self<block_end><def_stmt>__mul__ self other<block_start><return>DominantTerm(self.multiplier<times>other.multiplier self.exponent+other.exponent)<block_end><block_end>DominantTerm.ZERO=DominantTerm(0 0)<line_sep>DominantTerm.ONE=DominantTerm(1 0)<line_sep>DominantTerm.N=DominantTerm(1 1)<line_sep>
<import_stmt>string random<def_stmt>string_generator size chars<block_start><return>''.join(random.choice(chars)<for>_ range(size))<block_end><def_stmt>get_option option<block_start><if_stmt>option<eq>'alphabet'<block_start>characters=string.ascii_uppercase+string.ascii_lowercase+string.digits<block_end><elif_stmt>option<eq>'numeric'<block_start>characters=string.digits<block_end><else_stmt># fail loudly instead of returning an unbound variable <block_start><raise>ValueError(f"unknown option: {option}")<block_end><return>characters<block_end># choose between the alphanumeric and the digits-only character set option='alphabet'<line_sep># choose the length of the generated string size=10<line_sep>characters=get_option(option)<line_sep>new_number=string_generator(size characters)<line_sep>print(new_number)<line_sep>
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>.feature_engineering FeatureEngineering<class_stmt>Simulator(object)<block_start><def_stmt>__init__ self csv_name train_split dummy_period=<none> train=<true> multiple_trades=<false><block_start><if_stmt>"EUR"<in>csv_name<block_start>df=pd.read_csv(csv_name parse_dates=[[0 1]] header=<none> names=['Date' 'Time' 'Open' 'High' 'Low' 'Close' 'Volume'])<line_sep>df=df[~np.isnan(df['Open'])].set_index('Date_Time')<block_end><else_stmt><block_start>df=pd.read_csv(csv_name usecols=['Date' 'High' 'Low' 'Open' 'Close' 'Volume'])<line_sep>df=df[~np.isnan(df['Open'])].set_index('Date')<block_end>df=FeatureEngineering(df).get_df_processed()<line_sep>##Attributes self.data=df<line_sep>self.date_time=df.index<line_sep>self.count=df.shape[0]<line_sep>self.train_end_index=int(train_split<times>self.count)<line_sep># Attributes related to the observation state: Return # print(self.data.head(1)) data_dropped=self.data.drop(['Volume' 'Open' 'Close' 'High' 'Low'] axis=1)<line_sep>print(data_dropped.head(1))<line_sep>self.states=data_dropped.values<line_sep>self.min_values=data_dropped.min(axis=0).values<line_sep>self.max_values=data_dropped.max(axis=0).values<line_sep># Generate previous Close <if_stmt>dummy_period<is><not><none><block_start>close_prices=pd.DataFrame()<line_sep>close_prices['Close']=self.data["Close"]<for_stmt>i range(1 dummy_period+1)<block_start>close_prices['Close (n - %s)'%i]=self.data['Close'].shift(i)<block_end>self.close=close_prices.values<block_end>self._reset()<block_end><def_stmt>_reset self train=<true><block_start><if_stmt>train<block_start>obs=self.states[0]<line_sep>self.current_index=1<line_sep>self._end=self.train_end_index<block_end><else_stmt><block_start>self.current_index=self.train_end_index+1<line_sep>obs=self.states[self.current_index]<line_sep>self._end=self.count-1<block_end>self._data=self.data.iloc[self.current_index:self._end+1]<line_sep><return>obs<block_end><def_stmt>_step self open_trade duration_trade<block_start><if_stmt>open_trade<block_start>obs=self.states[self.current_index]+[open_trade]+[duration_trade]<block_end><else_stmt><block_start>obs=self.states[self.current_index]<block_end>self.current_index<augadd>1<line_sep>done=self.current_index<g>self._end<line_sep><return>obs done<block_end><block_end>
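# --- Hedged usage sketch (illustration only) ---------------------------------
# "EURUSD_1m.csv" is an assumed file whose layout matches the forex branch in
# __init__ above (Date/Time plus OHLCV columns, no header); FeatureEngineering
# must be importable from the package as in the relative import at the top.
sim = Simulator("EURUSD_1m.csv", train_split=0.8, dummy_period=3)
obs = sim._reset(train=True)
done = False
while not done:
    obs, done = sim._step(open_trade=False, duration_trade=0)   # no position held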
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_from_future_stmt> absolute_import<import_from_stmt>six.moves.urllib.parse urlparse<import_from_stmt>sagemaker KMeans<import_from_stmt>tests.integ datasets<def_stmt>test_record_set sagemaker_session cpu_instance_type<block_start>"""Test the method ``AmazonAlgorithmEstimatorBase.record_set``. In particular, test that the objects uploaded to the S3 bucket are encrypted. """<line_sep>kmeans=KMeans(role="SageMakerRole" instance_count=1 instance_type=cpu_instance_type k=10 sagemaker_session=sagemaker_session )<line_sep>record_set=kmeans.record_set(datasets.one_p_mnist()[0][:100] encrypt=<true>)<line_sep>parsed_url=urlparse(record_set.s3_data)<line_sep>s3_client=sagemaker_session.boto_session.client("s3")<line_sep>head=s3_client.head_object(Bucket=parsed_url.netloc Key=parsed_url.path.lstrip("/"))<assert_stmt>head["ServerSideEncryption"]<eq>"AES256"<block_end>
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Perform optimization tasks on image classification tensorflow_v1 including: * Model training * Model pruning * Sparse transfer learning * pruning sensitivity analysis * ONNX export ########## Command help: usage: classification.py [-h] {train,export,pruning_sensitivity} ... Run tasks on classification models and datasets using the sparseml API positional arguments: {train,export,pruning_sensitivity} optional arguments: -h, --help show this help message and exit ########## train command help: usage: classification.py train [-h] --arch-key ARCH_KEY [--pretrained PRETRAINED] [--pretrained-dataset PRETRAINED_DATASET] [--checkpoint-path CHECKPOINT_PATH] [--model-kwargs MODEL_KWARGS] --dataset DATASET --dataset-path DATASET_PATH [--dataset-kwargs DATASET_KWARGS] [--model-tag MODEL_TAG] [--save-dir SAVE_DIR] [--dataset-parallel-calls DATASET_PARALLEL_CALLS] [--shuffle-buffer-size SHUFFLE_BUFFER_SIZE] [--recipe-path RECIPE_PATH] [--sparse-transfer-learn] [--eval-mode] --train-batch-size TRAIN_BATCH_SIZE --test-batch-size TEST_BATCH_SIZE [--logs-dir LOGS_DIR] [--save-best-after SAVE_BEST_AFTER] [--save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...]] [--init-lr INIT_LR] [--optim-args OPTIM_ARGS] Train and/or prune an image classification model optional arguments: -h, --help show this help message and exit --arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16, mobilenet put as help to see the full list (will raise an exception with the list) --pretrained PRETRAINED The type of pretrained weights to use, default is true to load the default pretrained weights for the model. Otherwise should be set to the desired weights type: [base, optim, optim-perf]. To not load any weights set to one of [none, false] --pretrained-dataset PRETRAINED_DATASET The dataset to load pretrained weights for if pretrained is set. Default is None which will load the default dataset for the architecture. Ex can be set to imagenet, cifar10, etc --checkpoint-path CHECKPOINT_PATH A path to a previous checkpoint to load the state from and resume the state for. If provided, pretrained will be ignored --model-kwargs MODEL_KWARGS kew word arguments to be passed to model constructor, should be given as a json object --dataset DATASET The dataset to use for training, ex: imagenet, imagenette, cifar10, etc. 
Set to imagefolder for a generic dataset setup with an image folder structure setup like imagenet or loadable by a dataset in sparseml.tensorflow_v1.datasets --dataset-path DATASET_PATH The root path to where the dataset is stored --dataset-kwargs DATASET_KWARGS kew word arguments to be passed to dataset constructor, should be given as a json object --model-tag MODEL_TAG A tag to use for the model for saving results under save-dir, defaults to the model arch and dataset used --save-dir SAVE_DIR The path to the directory for saving results --dataset-parallel-calls DATASET_PARALLEL_CALLS the number of parallel workers for dataset loading --shuffle-buffer-size SHUFFLE_BUFFER_SIZE Shuffle buffer size for dataset loading --recipe-path RECIPE_PATH The path to the yaml file containing the modifiers and schedule to apply them with. If set to 'transfer_learning', then will create a schedule to enable sparse transfer learning --sparse-transfer-learn Enable sparse transfer learning modifiers to enforce the sparsity for already sparse layers. The modifiers are added to the ones to be loaded from the recipe- path --eval-mode Puts into evaluation mode so that the model can be evaluated on the desired dataset --train-batch-size TRAIN_BATCH_SIZE The batch size to use while training --test-batch-size TEST_BATCH_SIZE The batch size to use while testing --logs-dir LOGS_DIR The path to the directory for saving logs --save-best-after SAVE_BEST_AFTER start saving the best validation result after the given epoch completes until the end of training --save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...] epochs to save checkpoints at --init-lr INIT_LR The initial learning rate to use while training, the actual initial value used should be set by the sparseml recipe --optim-args OPTIM_ARGS Additional args to be passed to the optimizer passed in as a json object ########## export command help: usage: classification.py export [-h] --arch-key ARCH_KEY [--pretrained PRETRAINED] [--pretrained-dataset PRETRAINED_DATASET] [--checkpoint-path CHECKPOINT_PATH] [--model-kwargs MODEL_KWARGS] --dataset DATASET --dataset-path DATASET_PATH [--dataset-kwargs DATASET_KWARGS] [--model-tag MODEL_TAG] [--save-dir SAVE_DIR] [--num-samples NUM_SAMPLES] [--onnx-opset ONNX_OPSET] Export a model to onnx as well as store sample inputs, outputs, and labels optional arguments: -h, --help show this help message and exit --arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16, mobilenet put as help to see the full list (will raise an exception with the list) --pretrained PRETRAINED The type of pretrained weights to use, default is true to load the default pretrained weights for the model. Otherwise should be set to the desired weights type: [base, optim, optim-perf]. To not load any weights set to one of [none, false] --pretrained-dataset PRETRAINED_DATASET The dataset to load pretrained weights for if pretrained is set. Default is None which will load the default dataset for the architecture. Ex can be set to imagenet, cifar10, etc --checkpoint-path CHECKPOINT_PATH A path to a previous checkpoint to load the state from and resume the state for. If provided, pretrained will be ignored --model-kwargs MODEL_KWARGS kew word arguments to be passed to model constructor, should be given as a json object --dataset DATASET The dataset to use for training, ex: imagenet, imagenette, cifar10, etc. 
Set to imagefolder for a generic dataset setup with an image folder structure setup like imagenet or loadable by a dataset in sparseml.tensorflow_v1.datasets --dataset-path DATASET_PATH The root path to where the dataset is stored --dataset-kwargs DATASET_KWARGS kew word arguments to be passed to dataset constructor, should be given as a json object --model-tag MODEL_TAG A tag to use for the model for saving results under save-dir, defaults to the model arch and dataset used --save-dir SAVE_DIR The path to the directory for saving results --num-samples NUM_SAMPLES The number of samples to export along with the model onnx and pth files (sample inputs and labels as well as the outputs from model execution) --onnx-opset ONNX_OPSET The onnx opset to use for export. Default is 11 ########## pruning_sensitivity command help: usage: classification.py pruning_sensitivity [-h] --arch-key ARCH_KEY [--pretrained PRETRAINED] [--pretrained-dataset PRETRAINED_DATASET] [--checkpoint-path CHECKPOINT_PATH] [--model-kwargs MODEL_KWARGS] --dataset DATASET --dataset-path DATASET_PATH [--dataset-kwargs DATASET_KWARGS] [--model-tag MODEL_TAG] [--save-dir SAVE_DIR] [--dataset-parallel-calls DATASET_PARALLEL_CALLS] [--shuffle-buffer-size SHUFFLE_BUFFER_SIZE] [--approximate] [--steps-per-measurement STEPS_PER_MEASUREMENT] [--batch-size BATCH_SIZE] Run a kernel sparsity (pruning) analysis for a given model optional arguments: -h, --help show this help message and exit --arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16, mobilenet put as help to see the full list (will raise an exception with the list) --pretrained PRETRAINED The type of pretrained weights to use, default is true to load the default pretrained weights for the model. Otherwise should be set to the desired weights type: [base, optim, optim-perf]. To not load any weights set to one of [none, false] --pretrained-dataset PRETRAINED_DATASET The dataset to load pretrained weights for if pretrained is set. Default is None which will load the default dataset for the architecture. Ex can be set to imagenet, cifar10, etc --checkpoint-path CHECKPOINT_PATH A path to a previous checkpoint to load the state from and resume the state for. If provided, pretrained will be ignored --model-kwargs MODEL_KWARGS kew word arguments to be passed to model constructor, should be given as a json object --dataset DATASET The dataset to use for training, ex: imagenet, imagenette, cifar10, etc. 
Set to imagefolder for a generic dataset setup with an image folder structure setup like imagenet or loadable by a dataset in sparseml.tensorflow_v1.datasets --dataset-path DATASET_PATH The root path to where the dataset is stored --dataset-kwargs DATASET_KWARGS kew word arguments to be passed to dataset constructor, should be given as a json object --model-tag MODEL_TAG A tag to use for the model for saving results under save-dir, defaults to the model arch and dataset used --save-dir SAVE_DIR The path to the directory for saving results --dataset-parallel-calls DATASET_PARALLEL_CALLS the number of parallel workers for dataset loading --shuffle-buffer-size SHUFFLE_BUFFER_SIZE Shuffle buffer size for dataset loading --approximate True to approximate without running data through the model, otherwise will run a one shot analysis --steps-per-measurement STEPS_PER_MEASUREMENT The number of steps (batches) to run for each measurement --batch-size BATCH_SIZE The batch size to use while performing analysis ######### EXAMPLES ######### ########## Example command for pruning resnet50 on imagenet dataset: python scripts/tensorflow_v1/classification.py train \ --recipe-path ~/sparseml_recipes/pruning_resnet50.yaml \ --arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012 \ --train-batch-size 256 --test-batch-size 1024 ########## Example command for transfer learning sparse mobilenet_v1 on an image folder dataset: python scripts/tensorflow_v1/classification.py train \ --sparse-transfer-learn \ --recipe-path ~/sparseml_recipes/pruning_mobilenet.yaml \ --arch-key mobilenet_v1 --pretrained optim \ --dataset imagefolder --dataset-path ~/datasets/my_imagefolder_dataset \ --train-batch-size 256 --test-batch-size 1024 ########## Example command for exporting ResNet50: python scripts/tensorflow_v1/classification.py export \ --arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012 ########## Example command for running approximated KS sensitivity analysis on mobilenet: python scripts/tensorflow_v1/classification.py pruning_sensitivity \ --approximate \ --arch-key mobilenet --dataset imagenet \ --dataset-path ~/datasets/ILSVRC2012 ########## Example command for running one shot KS sensitivity analysis on resnet50 for coco: python scripts/tensorflow_v1/classification.py pruning_sensitivity \ --arch-key resnet50 --dataset imagenet \ --dataset-path ~/datasets/ILSVRC2012 """<import_stmt>argparse<import_stmt>json<import_stmt>math<import_stmt>os<import_from_stmt>typing Dict Optional Tuple<import_stmt>numpy<import_from_stmt>sparseml get_main_logger<import_from_stmt>sparseml.tensorflow_v1.datasets Dataset DatasetRegistry create_split_iterators_handle <import_from_stmt>sparseml.tensorflow_v1.models ModelRegistry<import_from_stmt>sparseml.tensorflow_v1.optim ConstantPruningModifier ScheduledModifierManager pruning_loss_sens_magnitude pruning_loss_sens_one_shot pruning_loss_sens_op_vars <import_from_stmt>sparseml.tensorflow_v1.utils GraphExporter accuracy batch_cross_entropy_loss tf_compat write_simple_summary <import_from_stmt>sparseml.utils create_dirs<line_sep>LOGGER=get_main_logger()<line_sep>TRAIN_COMMAND="train"<line_sep>EXPORT_COMMAND="export"<line_sep>PRUNING_SENSITVITY_COMMAND="pruning_sensitivity"<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description="Run tasks on classification models and datasets "<concat>"using the sparseml API")<line_sep>subparsers=parser.add_subparsers(dest="command")<line_sep>train_parser=subparsers.add_parser(TRAIN_COMMAND 
description="Train and/or prune an image classification model" )<line_sep>export_parser=subparsers.add_parser(EXPORT_COMMAND description="Export a model to onnx as well as "<concat>"store sample inputs, outputs, and labels" )<line_sep>pruning_sensitivity_parser=subparsers.add_parser(PRUNING_SENSITVITY_COMMAND description="Run a kernel sparsity (pruning) analysis for a given model" )<line_sep>parsers=[train_parser export_parser pruning_sensitivity_parser ]<for_stmt>par parsers# general arguments # model args <block_start>par.add_argument("--arch-key" type=str required=<true> help="The type of model to use, ex: resnet50, vgg16, mobilenet "<concat>"put as help to see the full list (will raise an exception with the list)" )<line_sep>par.add_argument("--pretrained" type=str default=<true> help="The type of pretrained weights to use, "<concat>"default is true to load the default pretrained weights for the model. "<concat>"Otherwise should be set to the desired weights type: "<concat>"[base, optim, optim-perf]. "<concat>"To not load any weights set to one of [none, false]" )<line_sep>par.add_argument("--pretrained-dataset" type=str default=<none> help="The dataset to load pretrained weights for if pretrained is set. "<concat>"Default is None which will load the default dataset for the architecture."<concat>" Ex can be set to imagenet, cifar10, etc" )<line_sep>par.add_argument("--checkpoint-path" type=str default=<none> help="A path to a previous checkpoint to load the state from and "<concat>"resume the state for. If provided, pretrained will be ignored" )<line_sep>par.add_argument("--model-kwargs" type=json.loads default={} help="kew word arguments to be passed to model constructor, should be "<concat>" given as a json object" )<line_sep># dataset args par.add_argument("--dataset" type=str required=<true> help="The dataset to use for training, "<concat>"ex: imagenet, imagenette, cifar10, etc. "<concat>"Set to imagefolder for a generic dataset setup "<concat>"with an image folder structure setup like imagenet or loadable by a "<concat>"dataset in sparseml.tensorflow_v1.datasets" )<line_sep>par.add_argument("--dataset-path" type=str required=<true> help="The root path to where the dataset is stored" )<line_sep>par.add_argument("--dataset-kwargs" type=json.loads default={} help="kew word arguments to be passed to dataset constructor, should be "<concat>" given as a json object" )<line_sep># logging and saving par.add_argument("--model-tag" type=str default=<none> help="A tag to use for the model for saving results under save-dir, "<concat>"defaults to the model arch and dataset used" )<line_sep>par.add_argument("--save-dir" type=str default="tensorflow_v1_classification" help="The path to the directory for saving results" )<line_sep># task specific arguments <if_stmt>par<in>[train_parser pruning_sensitivity_parser]<block_start>par.add_argument("--dataset-parallel-calls" type=int default=4 help="the number of parallel workers for dataset loading" )<line_sep>par.add_argument("--shuffle-buffer-size" type=int default=1000 help="Shuffle buffer size for dataset loading" )<block_end><if_stmt>par<eq>train_parser<block_start>par.add_argument("--recipe-path" type=str default=<none> help="The path to the yaml file containing the modifiers and "<concat>"schedule to apply them with. 
If set to 'transfer_learning', "<concat>"then will create a schedule to enable sparse transfer learning" )<line_sep>par.add_argument("--sparse-transfer-learn" action="store_true" help=("Enable sparse transfer learning modifiers to enforce the sparsity "<concat>"for already sparse layers. The modifiers are added to the "<concat>"ones to be loaded from the recipe-path") )<line_sep>par.add_argument("--eval-mode" action="store_true" help="Puts into evaluation mode so that the model can be "<concat>"evaluated on the desired dataset" )<line_sep>par.add_argument("--train-batch-size" type=int required=<true> help="The batch size to use while training" )<line_sep>par.add_argument("--test-batch-size" type=int required=<true> help="The batch size to use while testing" )<line_sep>par.add_argument("--logs-dir" type=str default=os.path.join("tensorflow_v1_classification_train" "tensorboard-logs") help="The path to the directory for saving logs" )<line_sep>par.add_argument("--save-best-after" type=int default=-1 help="start saving the best validation result after the given "<concat>"epoch completes until the end of training" )<line_sep>par.add_argument("--save-epochs" type=int default=[] nargs="+" help="epochs to save checkpoints at" )<line_sep>par.add_argument("--init-lr" type=float default=1e-9 help="The initial learning rate to use while training, "<concat>"the actual initial value used should be set by the sparseml recipe" )<line_sep>par.add_argument("--optim-args" type=json.loads default={} help="Additional args to be passed to the optimizer passed in"<concat>" as a json object" )<block_end><if_stmt>par<eq>export_parser<block_start>par.add_argument("--num-samples" type=int default=100 help="The number of samples to export along with the model onnx "<concat>"and pth files (sample inputs and labels as well as the outputs "<concat>"from model execution)" )<line_sep>par.add_argument("--onnx-opset" type=int default=11 help="The onnx opset to use for export. 
Default is 11" )<block_end><if_stmt>par<eq>pruning_sensitivity_parser<block_start>par.add_argument("--approximate" action="store_true" help="True to approximate without running data through the model, "<concat>"otherwise will run a one shot analysis" )<line_sep>par.add_argument("--steps-per-measurement" type=int default=15 help="The number of steps (batches) to run for each measurement" )<line_sep>par.add_argument("--batch-size" type=int default=64 help="The batch size to use while performing analysis" )<block_end><block_end><return>parser.parse_args()<block_end><def_stmt>_setup_save_dirs args<arrow>Tuple[str Optional[str]]# logging and saving setup <block_start>save_dir=os.path.abspath(os.path.expanduser(args.save_dir))<line_sep>logs_dir=(os.path.abspath(os.path.expanduser(os.path.join(args.logs_dir)))<if>args.command<eq>TRAIN_COMMAND<else><none>)<if_stmt><not>args.model_tag<block_start>model_tag="{}_{}".format(args.arch_key.replace("/" ".") args.dataset)<line_sep>model_id=model_tag<line_sep>model_inc=0<line_sep># set location to check for models with same name model_main_dir=logs_dir<or>save_dir<while_stmt>os.path.exists(os.path.join(model_main_dir model_id))<block_start>model_inc<augadd>1<line_sep>model_id="{}__{:02d}".format(model_tag model_inc)<block_end><block_end><else_stmt><block_start>model_id=args.model_tag<block_end>save_dir=os.path.join(save_dir model_id)<line_sep>create_dirs(save_dir)<line_sep># logs dir setup <if_stmt>args.command<eq>TRAIN_COMMAND<block_start>logs_dir=os.path.join(logs_dir model_id)<line_sep>create_dirs(logs_dir)<block_end><else_stmt><block_start>logs_dir=<none><block_end>LOGGER.info("Model id is set to {}".format(model_id))<line_sep><return>save_dir logs_dir<block_end><def_stmt>_create_dataset args train=<true> image_size=<none><arrow>Tuple[Dataset int]<block_start>kwargs=args.dataset_kwargs<if_stmt>"image_size"<in>kwargs<block_start>image_size=kwargs["image_size"]<del_stmt>kwargs["image_size"]<block_end>dataset=DatasetRegistry.create(args.dataset root=args.dataset_path train=train image_size=image_size **kwargs )<line_sep>LOGGER.info("created {} dataset: {}".format("train"<if>train<else>"val" dataset))<line_sep># get num_classes <if_stmt>args.dataset<eq>"imagefolder"<block_start>num_classes=dataset.num_classes<block_end><else_stmt><block_start>dataset_attributes=DatasetRegistry.attributes(args.dataset)<line_sep>num_classes=dataset_attributes["num_classes"]<block_end><return>dataset num_classes<block_end><def_stmt>_build_dataset args dataset:Dataset batch_size:int<arrow>Dataset<block_start><return>dataset.build(batch_size shuffle_buffer_size=args.shuffle_buffer_size prefetch_buffer_size=batch_size num_parallel_calls=args.dataset_parallel_calls )<block_end><def_stmt>_create_model args num_classes inputs training=<false><block_start>outputs=ModelRegistry.create(args.arch_key inputs training=training num_classes=num_classes **args.model_kwargs )<line_sep>LOGGER.info("created model {}".format(args.arch_key))<line_sep><return>outputs<block_end><def_stmt>_load_model args sess checkpoint_path=<none><block_start>sess.run([tf_compat.global_variables_initializer() tf_compat.local_variables_initializer() ])<line_sep>checkpoint_path=checkpoint_path<or>args.checkpoint_path<line_sep>ModelRegistry.load_pretrained(args.arch_key pretrained=args.pretrained pretrained_dataset=args.pretrained_dataset pretrained_path=checkpoint_path sess=sess )<if_stmt>checkpoint_path<block_start>LOGGER.info("Loaded model weights from checkpoint: 
{}".format(checkpoint_path))<block_end><block_end><def_stmt>_save_checkpoint args sess save_dir checkpoint_name<arrow>str<block_start>checkpoint_path=os.path.join(os.path.join(save_dir checkpoint_name "model"))<line_sep>create_dirs(checkpoint_path)<line_sep>saver=ModelRegistry.saver(args.arch_key)<line_sep>saved_name=saver.save(sess checkpoint_path)<line_sep>checkpoint_path=os.path.join(checkpoint_path saved_name)<line_sep>LOGGER.info("Checkpoint saved to {}".format(checkpoint_path))<line_sep><return>checkpoint_path<block_end><def_stmt>_save_recipe recipe_manager:ScheduledModifierManager save_dir:str <block_start>recipe_save_path=os.path.join(save_dir "recipe.yaml")<line_sep>recipe_manager.save(recipe_save_path)<line_sep>LOGGER.info(f"Saved recipe to {recipe_save_path}")<block_end><def_stmt>train args save_dir logs_dir# setup dataset <block_start><with_stmt>tf_compat.device("/cpu:0")<block_start>train_dataset,_=_create_dataset(args train=<true>)<line_sep>val_dataset,num_classes=_create_dataset(args train=<false>)<line_sep># calc steps train_steps=math.ceil(len(train_dataset)/args.train_batch_size)<line_sep>val_steps=math.ceil(len(val_dataset)/args.test_batch_size)<line_sep># build datasets train_dataset=_build_dataset(args train_dataset args.train_batch_size)<line_sep>val_dataset=_build_dataset(args val_dataset args.test_batch_size)<block_end>handle,iterator,(train_iter val_iter)=create_split_iterators_handle([train_dataset val_dataset])<line_sep># set up model graph images,labels=iterator.get_next()<line_sep>training=tf_compat.placeholder(dtype=tf_compat.bool shape=[])<line_sep>outputs=_create_model(args num_classes images training)<line_sep># set up training objects loss=batch_cross_entropy_loss(outputs labels)<line_sep>acc=accuracy(outputs labels)<line_sep>global_step=tf_compat.train.get_or_create_global_step()<line_sep>train_op=tf_compat.train.AdamOptimizer(learning_rate=args.init_lr **args.optim_args).minimize(loss global_step=global_step)<line_sep>update_ops=tf_compat.get_collection(tf_compat.GraphKeys.UPDATE_OPS)<line_sep>LOGGER.info("Created update ops for training")<line_sep># set up sparseml modifier ops add_mods=(ConstantPruningModifier(params="__ALL__")<if>args.sparse_transfer_learn<else><none>)<line_sep>manager=ScheduledModifierManager.from_yaml(file_path=args.recipe_path add_modifiers=add_mods)<line_sep>mod_ops,mod_extras=manager.create_ops(train_steps global_step)<line_sep>_save_recipe(recipe_manager=manager save_dir=save_dir)<with_stmt>tf_compat.Session()<as>sess# set up tensorboard logging <block_start>summary_writer=tf_compat.summary.FileWriter(logs_dir sess.graph)<line_sep>summaries=tf_compat.summary.merge_all()<line_sep>LOGGER.info("Logging to tensorboard at {}".format(logs_dir))<line_sep># initialize variables, load pretrained weights, initialize modifiers train_iter_handle,val_iter_handle=sess.run([train_iter.string_handle() val_iter.string_handle()])<line_sep>LOGGER.info("Initialized graph variables")<line_sep>_load_model(args sess)<line_sep>manager.initialize_session()<line_sep>LOGGER.info("Initialized SparseML modifiers")<line_sep>best_loss=<none><for_stmt>epoch range(manager.max_epochs)# train <block_start>LOGGER.info("Training for epoch {}...".format(epoch))<line_sep>sess.run(train_iter.initializer)<line_sep>train_acc,train_loss=[] []<for_stmt>step range(train_steps)<block_start>_,__,meas_step,meas_loss,meas_acc,meas_summ=sess.run([train_op update_ops global_step loss acc summaries] feed_dict={handle:train_iter_handle training:<true>} 
)<if_stmt>step<ge>train_steps-1# log the general summaries on the last training step <block_start>summary_writer.add_summary(meas_summ meas_step)<block_end># run modifier ops sess.run(mod_ops)<line_sep># summarize write_simple_summary(summary_writer "Train/Loss" meas_loss meas_step)<line_sep>write_simple_summary(summary_writer "Train/Acc" meas_acc<times>100.0 meas_step)<line_sep>train_acc.append(meas_acc)<line_sep>train_loss.append(meas_loss)<block_end>LOGGER.info("Epoch {} - Train Loss: {}, Train Acc: {}".format(epoch numpy.mean(train_loss).item() numpy.mean(train_acc).item()))<line_sep># val LOGGER.info("Validating for epoch {}...".format(epoch))<line_sep>sess.run(val_iter.initializer)<line_sep>val_acc,val_loss=[] []<for_stmt>step range(val_steps)<block_start>meas_loss,meas_acc=sess.run([loss acc] feed_dict={handle:val_iter_handle training:<false>} )<line_sep>val_acc.append(meas_acc)<line_sep>val_loss.append(meas_loss)<line_sep>write_simple_summary(summary_writer "Val/Loss" numpy.mean(val_loss).item() epoch)<line_sep>write_simple_summary(summary_writer "Val/Acc" numpy.mean(val_acc).item() epoch)<block_end>val_loss=numpy.mean(val_loss).item()<line_sep>LOGGER.info("Epoch {} - Val Loss: {}, Val Acc: {}".format(epoch val_loss numpy.mean(train_acc).item()))<if_stmt>epoch<ge>args.save_best_after<and>(best_loss<is><none><or>val_loss<le>best_loss)<block_start>_save_checkpoint(args sess save_dir "checkpoint-best")<line_sep>best_loss=val_loss<block_end><if_stmt>args.save_epochs<and>epoch<in>args.save_epochs<block_start>_save_checkpoint(args sess save_dir "checkpoint-epoch-{}".format(epoch))<block_end><block_end># cleanup graph and save final checkpoint manager.complete_graph()<line_sep>checkpoint_path=_save_checkpoint(args sess save_dir "final-checkpoint")<block_end>LOGGER.info("Running ONNX export flow")<line_sep>export(args save_dir checkpoint_path=checkpoint_path skip_samples=<true> num_classes=num_classes opset=11 )<block_end><def_stmt>export args save_dir checkpoint_path=<none> skip_samples=<false> num_classes=<none> opset=<none> <block_start><assert_stmt><not>skip_samples<or>num_classes<line_sep># dataset creation <if_stmt><not>skip_samples<block_start>val_dataset,num_classes=_create_dataset(args train=<false>)<block_end><with_stmt>tf_compat.Graph().as_default()<block_start>input_shape=ModelRegistry.input_shape(args.arch_key)<line_sep>inputs=tf_compat.placeholder(tf_compat.float32 [<none>]+list(input_shape) name="inputs")<line_sep>outputs=_create_model(args num_classes inputs)<with_stmt>tf_compat.Session()<as>sess<block_start>_load_model(args sess checkpoint_path=checkpoint_path<or>args.checkpoint_path)<line_sep>exporter=GraphExporter(save_dir)<if_stmt><not>skip_samples# Export a batch of samples and expected outputs <block_start>tf_dataset=val_dataset.build(args.num_samples repeat_count=1 num_parallel_calls=1)<line_sep>tf_iter=tf_compat.data.make_one_shot_iterator(tf_dataset)<line_sep>features,_=tf_iter.get_next()<line_sep>inputs_val=sess.run(features)<line_sep>exporter.export_samples([inputs] [inputs_val] [outputs] sess)<block_end># Export model to tensorflow checkpoint format LOGGER.info("exporting tensorflow in {}".format(save_dir))<line_sep>exporter.export_checkpoint(sess=sess)<line_sep># Export model to pb format LOGGER.info("exporting pb in {}".format(exporter.pb_path))<line_sep>exporter.export_pb(outputs=[outputs])<block_end><block_end># Export model to onnx format LOGGER.info("exporting onnx in {}".format(exporter.onnx_path))<line_sep>exporter.export_onnx([inputs] [outputs] 
opset=opset<or>args.onnx_opset)<block_end><def_stmt>pruning_loss_sensitivity args save_dir<block_start>input_shape=ModelRegistry.input_shape(args.arch_key)<line_sep>train_dataset,num_classes=_create_dataset(args train=<true> image_size=input_shape[1])<with_stmt>tf_compat.Graph().as_default()<as>graph# create model graph <block_start>inputs=tf_compat.placeholder(tf_compat.float32 [<none>]+list(input_shape) name="inputs")<line_sep>outputs=_create_model(args num_classes inputs)<with_stmt>tf_compat.Session()<as>sess<block_start>_load_model(args sess checkpoint_path=args.checkpoint_path)<if_stmt>args.approximate<block_start>LOGGER.info("Running weight magnitude loss sensitivity analysis...")<line_sep>analysis=pruning_loss_sens_magnitude(graph sess)<block_end><else_stmt><block_start>op_vars=pruning_loss_sens_op_vars(graph)<line_sep>train_steps=math.ceil(len(train_dataset)/args.batch_size)<line_sep>train_dataset=_build_dataset(args train_dataset args.batch_size)<line_sep>handle,iterator,dataset_iter=create_split_iterators_handle([train_dataset])<line_sep>dataset_iter=dataset_iter[0]<line_sep>images,labels=iterator.get_next()<line_sep>loss=batch_cross_entropy_loss(outputs labels)<line_sep>tensor_names=["inputs:0" labels.name]<line_sep>sess.run(dataset_iter.initializer)<def_stmt>feed_dict_creator step:int<arrow>Dict[str tf_compat.Tensor]<block_start><assert_stmt>step<l>train_steps<line_sep>batch_data=[tens.eval(session=sess)<for>tens dataset_iter.get_next()]<line_sep><return>dict(zip(tensor_names batch_data))<block_end>LOGGER.info("Running one shot loss sensitivity analysis...")<line_sep>analysis=pruning_loss_sens_one_shot(op_vars=op_vars loss_tensor=loss steps_per_measurement=args.steps_per_measurement feed_dict_creator=feed_dict_creator sess=sess )<block_end><block_end><block_end># saving and printing results LOGGER.info("completed...")<line_sep>LOGGER.info("Saving results in {}".format(save_dir))<line_sep>analysis.save_json(os.path.join(save_dir "ks_approx_sensitivity.json"<if>args.approximate<else>"ks_one_shot_sensitivity.json" ))<line_sep>analysis.plot(os.path.join(save_dir os.path.join(save_dir "ks_approx_sensitivity.png"<if>args.approximate<else>"ks_one_shot_sensitivity.png" ) ) plot_integral=<true> )<line_sep>analysis.print_res()<block_end><def_stmt>main args# set up saving and logging dirs <block_start>save_dir,logs_dir=_setup_save_dirs(args)<line_sep># RUN COMMAND SPECIFIC TASTS <if_stmt>args.command<eq>TRAIN_COMMAND<block_start>train(args save_dir logs_dir)<block_end><if_stmt>args.command<eq>EXPORT_COMMAND<block_start>export(args save_dir)<block_end><if_stmt>args.command<eq>PRUNING_SENSITVITY_COMMAND<block_start>pruning_loss_sensitivity(args save_dir)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>args_=parse_args()<line_sep>main(args_)<block_end>
""" Gathers all splitters in one place for convenient imports """<line_sep># flake8: noqa # basic splitter <import_from_stmt>deepchem.splits.splitters Splitter<import_from_stmt>deepchem.splits.splitters RandomSplitter<import_from_stmt>deepchem.splits.splitters RandomStratifiedSplitter<import_from_stmt>deepchem.splits.splitters RandomGroupSplitter<import_from_stmt>deepchem.splits.splitters SingletaskStratifiedSplitter<import_from_stmt>deepchem.splits.splitters IndexSplitter<import_from_stmt>deepchem.splits.splitters SpecifiedSplitter<line_sep># molecule splitter <import_from_stmt>deepchem.splits.splitters ScaffoldSplitter<import_from_stmt>deepchem.splits.splitters MolecularWeightSplitter<import_from_stmt>deepchem.splits.splitters MaxMinSplitter<import_from_stmt>deepchem.splits.splitters FingerprintSplitter<import_from_stmt>deepchem.splits.splitters ButinaSplitter<line_sep># other splitter <import_from_stmt>deepchem.splits.task_splitter merge_fold_datasets<import_from_stmt>deepchem.splits.task_splitter TaskSplitter<line_sep>################################################################# # Removed API ################################################################# <import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<class_stmt>IndiceSplitter<block_start><def_stmt>__init__ self valid_indices=<none> test_indices=<none><block_start><raise>ImportError("IndiceSplitter was renamed to SpecifiedSplitter.\n"<concat>"Please use SpecifiedSplitter instead of IndiceSplitter.")<block_end><block_end>
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>functools<import_from_stmt>typing Callable Optional<import_stmt>torch<import_stmt>torch.nn.functional<import_from_stmt>torch Tensor<import_from_stmt>torch.nn Module Parameter<import_from_stmt>._non_negative_parameterization NonNegativeParameterization<class_stmt>GeneralizedDivisiveNormalization(Module)<block_start>"""Applies generalized divisive normalization for each channel across a batch of data. Implements an activation function that is a multivariate generalization of the following sigmoid-like function: .. math:: y_{i}=\\frac{x_{i}}{(\\beta_{i}+\\sum_{j}\\gamma_{ij}|x_{j}|^{\\alpha_{ij}})^{\\epsilon_{i}}} where :math:`i` and :math:`j` map over channels. This implementation never sums across spatial dimensions. It is similar to local response normalization, but much more flexible, as :math:`\\alpha`, :math:`\\beta`, :math:`\\gamma`, and :math:`\\epsilon` are trainable parameters. The method was originally described in: | “Density Modeling of Images using a Generalized Normalization Transformation” | <NAME>, <NAME>, <NAME> | https://arxiv.org/abs/1511.06281 and expanded in: | “End-to-end Optimized Image Compression” | <NAME>, <NAME>, <NAME> | https://arxiv.org/abs/1611.01704 Args: channels: number of channels in the input. inverse: compute the generalized divisive normalization response. If ``True``, compute the inverse generalized divisive normalization response (one step of fixed point iteration to invert the generalized divisive normalization; the division is replaced by multiplication). alpha_parameter: A ``Tensor`` means that the value of ``alpha`` is fixed. ``None`` means that when the layer is initialized, a ``NonNegativeParameterization`` layer is created to train ``alpha`` (with a minimum value of ``1``). The default is a fixed value of ``1``. beta_parameter: A ``Tensor`` means that the value of ``beta`` is fixed. ``None`` means that when the layer is initialized, a ``NonNegativeParameterization`` layer is created to train ``beta`` (with a minimum value of ``1e-6``). epsilon_parameter: A ``Tensor`` means that the value of ``epsilon`` is fixed. ``None`` means that when the layer is initialized, a ``NonNegativeParameterization`` layer is created to train ``epsilon`` (with a minimum value of 1e-6). The default is a fixed value of ``1``. gamma_parameter: A ``Tensor`` means that the value of ``gamma`` is fixed. ``None`` means that when the layer is initialized, a ``NonNegativeParameterization`` layer is created to train ``gamma``. alpha_initializer: initializes the ``alpha`` parameter. Only used if ``alpha`` is trained. Defaults to ``1``. beta_initializer: initializes the ``beta`` parameter. Only used if ``beta`` is created when initializing the layer. Defaults to ``1``. epsilon_initializer: initializes the ``epsilon`` parameter. Only used if ``epsilon`` is trained. Defaults to ``1``. gamma_initializer: initializes the ``gamma`` parameter. Only used if ``gamma`` is created when initializing the layer. Defaults to the identity multiplied by ``0.1``. A good default value for the diagonal is somewhere between ``0`` and ``0.5``. If set to ``0`` and ``beta`` is initialized as ``1``, the layer is effectively initialized to the identity operation. 
"""<line_sep>alpha:Parameter<line_sep>beta:Parameter<line_sep>epsilon:Parameter<line_sep>gamma:Parameter<def_stmt>__init__ self channels:int inverse:bool=<false> alpha_parameter:Optional[Tensor]=<none> beta_parameter:Optional[Tensor]=<none> epsilon_parameter:Optional[Tensor]=<none> gamma_parameter:Optional[Tensor]=<none> alpha_initializer:Optional[Callable[[Tensor] Tensor]]=<none> beta_initializer:Optional[Callable[[Tensor] Tensor]]=<none> epsilon_initializer:Optional[Callable[[Tensor] Tensor]]=<none> gamma_initializer:Optional[Callable[[Tensor] Tensor]]=<none> <block_start>super(GeneralizedDivisiveNormalization self).__init__()<line_sep>self._channels=torch.tensor(channels dtype=torch.int32)<line_sep>self._inverse=inverse<if_stmt>alpha_parameter<is><none><block_start><if_stmt>alpha_initializer<is><none><block_start>alpha_initializer=functools.partial(<lambda>x:torch.ones(x) )<block_end>self._reparameterized_alpha=NonNegativeParameterization(alpha_initializer(self._channels) minimum=1 )<if_stmt>self._reparameterized_alpha.initial_value<is><not><none><block_start>self.alpha=Parameter(self._reparameterized_alpha.initial_value )<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(alpha_parameter Parameter)<block_start>self.alpha=alpha_parameter<block_end><else_stmt><block_start>alpha_parameter=torch.tensor(alpha_parameter)<line_sep>self.alpha=Parameter(alpha_parameter)<block_end><block_end><if_stmt>beta_parameter<is><none><block_start><if_stmt>beta_initializer<is><none><block_start>beta_initializer=functools.partial(<lambda>x:torch.ones(x) )<block_end>self._reparameterized_beta=NonNegativeParameterization(beta_initializer(self._channels) minimum=1e-6 )<if_stmt>self._reparameterized_beta.initial_value<is><not><none><block_start>self.beta=Parameter(self._reparameterized_beta.initial_value )<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(beta_parameter Parameter)<block_start>self.beta=beta_parameter<block_end><else_stmt><block_start>beta_parameter=torch.tensor(beta_parameter)<line_sep>self.beta=Parameter(beta_parameter)<block_end><block_end><if_stmt>epsilon_parameter<is><none><block_start><if_stmt>epsilon_initializer<is><none><block_start>epsilon_initializer=functools.partial(<lambda>x:torch.ones(x) )<block_end>self._reparameterized_epsilon=NonNegativeParameterization(epsilon_initializer(self._channels) minimum=1e-6 )<if_stmt>self._reparameterized_epsilon.initial_value<is><not><none><block_start>self.epsilon=Parameter(self._reparameterized_epsilon.initial_value )<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(epsilon_parameter Parameter)<block_start>self.epsilon=epsilon_parameter<block_end><else_stmt><block_start>epsilon_parameter=torch.tensor(epsilon_parameter)<line_sep>self.epsilon=Parameter(epsilon_parameter)<block_end><block_end><if_stmt>gamma_parameter<is><none><block_start><if_stmt>gamma_initializer<is><none><block_start>gamma_initializer=functools.partial(<lambda>x:0.1<times>torch.eye(x) )<block_end>self._reparameterized_gamma=NonNegativeParameterization(gamma_initializer(self._channels) minimum=0 )<if_stmt>self._reparameterized_gamma.initial_value<is><not><none><block_start>self.gamma=Parameter(self._reparameterized_gamma.initial_value )<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(gamma_parameter Parameter)<block_start>self.gamma=gamma_parameter<block_end><else_stmt><block_start>gamma_parameter=torch.tensor(gamma_parameter)<line_sep>self.gamma=Parameter(gamma_parameter)<block_end><block_end><block_end><def_stmt>forward 
self x:Tensor<arrow>Tensor<block_start>_,channels,_,_=x.size()<line_sep>y=torch.nn.functional.conv2d(x<power>2 torch.reshape(self._reparameterized_gamma(self.gamma) (channels channels 1 1) ) self._reparameterized_beta(self.beta) )<if_stmt>self._inverse<block_start><return>x<times>torch.sqrt(y)<block_end><return>x<times>torch.rsqrt(y)<block_end><block_end>
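# --- Illustrative usage sketch (not part of the original file) ---
# Shows one forward pass through the GeneralizedDivisiveNormalization module
# defined above, assuming the class and its NonNegativeParameterization
# dependency are importable in the current scope; the channel count and
# tensor shape below are made up for illustration only.
import torch

gdn = GeneralizedDivisiveNormalization(channels=8)                 # forward (normalizing) transform
igdn = GeneralizedDivisiveNormalization(channels=8, inverse=True)  # inverse transform (one fixed-point step)

x = torch.rand(2, 8, 16, 16)   # (batch, channels, height, width)
y = gdn(x)                     # normalized response, same shape as x
x_hat = igdn(y)                # approximate reconstruction of x
assert y.shape == x.shape == x_hat.shape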
<import_stmt>logging<import_from_stmt>milvus_benchmark.env.base BaseEnv<line_sep>logger=logging.getLogger("milvus_benchmark.env.local")<class_stmt>LocalEnv(BaseEnv)<block_start>"""local env class wrapper"""<line_sep>env_mode="local"<def_stmt>__init__ self deploy_mode=<none><block_start>super(LocalEnv self).__init__(deploy_mode)<block_end><def_stmt>start_up self hostname port<block_start>res=<true><try_stmt><block_start>self.set_hostname(hostname)<block_end><except_stmt>Exception<as>e<block_start>logger.error(str(e))<line_sep>res=<false><block_end><return>res<block_end><block_end>
<import_stmt>matplotlib.gridspec<as>gridspec<import_from_stmt>nose.tools assert_equal<def_stmt>test_equal <block_start>gs=gridspec.GridSpec(2 1)<line_sep>assert_equal(gs[0 0] gs[0 0])<line_sep>assert_equal(gs[: 0] gs[: 0])<block_end>
# -*- coding: utf-8 -*- """Example for a password question type. Run example by typing `python -m examples.password` in your console."""<import_from_stmt>pprint pprint<import_stmt>questionary<import_from_stmt>examples custom_style_dope<import_from_stmt>questionary Separator Choice prompt<def_stmt>ask_pystyle **kwargs# create the question object <block_start>question=questionary.password("Enter your git password" style=custom_style_dope **kwargs)<line_sep># prompt the user for an answer <return>question.ask()<block_end><def_stmt>ask_dictstyle **kwargs<block_start>questions=[{"type":"password" "message":"Enter your git password" "name":"password"}]<line_sep><return>prompt(questions style=custom_style_dope **kwargs)<block_end><if_stmt>__name__<eq>"__main__"<block_start>pprint(ask_pystyle())<block_end>
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<line_sep># from builtins import * <import_from_stmt>codecs open<import_from_stmt>os.path realpath dirname join<import_from_stmt>setuptools setup find_packages<import_stmt>sys<import_stmt>re<line_sep>DISTNAME='pymc-learn'<line_sep>DESCRIPTION="Practical Probabilistic Machine Learning in Python"<line_sep>AUTHOR='Pymc-Learn Team'<line_sep>AUTHOR_EMAIL='<EMAIL>'<line_sep>URL="https://github.com/pymc-learn/pymc-learn"<line_sep>classifiers=['Programming Language :: Python' 'Programming Language :: Python :: 2' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3.4' 'Programming Language :: Python :: 3.5' 'Programming Language :: Python :: 3.6' 'Topic :: Scientific/Engineering' 'Topic :: Scientific/Engineering :: Mathematics' 'Operating System :: OS Independent']<line_sep>PROJECT_ROOT=dirname(realpath(__file__))<with_stmt>open(join(PROJECT_ROOT 'README.rst') encoding='utf-8')<as>r<block_start>readme=r.read()<block_end>REQUIREMENTS_FILE=join(PROJECT_ROOT 'requirements.txt')<with_stmt>open(REQUIREMENTS_FILE)<as>f<block_start>install_reqs=f.read().splitlines()<block_end><if_stmt>sys.version_info<l>(3 4)<block_start>install_reqs.append('enum34')<block_end><def_stmt>get_version <block_start>VERSIONFILE=join('pmlearn' '__init__.py')<line_sep>lines=open(VERSIONFILE 'rt').readlines()<line_sep>version_regex=r"^__version__ = ['\"]([^'\"]*)['\"]"<for_stmt>line lines<block_start>mo=re.search(version_regex line re.M)<if_stmt>mo<block_start><return>mo.group(1)<block_end><block_end><raise>RuntimeError('Unable to find version in %s.'%(VERSIONFILE ))<block_end><with_stmt>open('AUTHORS.txt')<as>a# reSt-ify the authors list <block_start>authors=''<for_stmt>author a.read().split('\n')<block_start>authors<augadd>'| '+author+'\n'<block_end><block_end><with_stmt>open('LICENSE')<as>l<block_start>license=l.read()<block_end><if_stmt>__name__<eq>"__main__"<block_start>setup(name=DISTNAME version=get_version() description=DESCRIPTION long_description=readme author=AUTHOR author_email=AUTHOR_EMAIL url=URL license=license packages=find_packages() package_data={'docs':['*']} include_package_data=<true> zip_safe=<false> install_requires=install_reqs classifiers=classifiers)<block_end>
<import_stmt>matplotlib<import_stmt>matplotlib.pyplot<as>plt# for plotting stuff <import_stmt>os<import_stmt>numpy<as>np<line_sep>matplotlib.rcParams['text.usetex']=<true># for type-1 fonts <def_stmt>get_line_coordinates w x1 x2<block_start>y1=(-w[0]-(w[1]<times>x1))/w[2]<line_sep>y2=(-w[0]-(w[1]<times>x2))/w[2]<line_sep><return>y1 y2<block_end><def_stmt>plot_data X y x_sensitive w_arr label_arr lt_arr fname title group=<none># print fp_fn_arr <block_start>plt.figure()<line_sep>num_to_draw=200# we will only draw a small number of points to avoid clutter fs=20# font size for labels and legends x_draw=X[:num_to_draw]<line_sep>y_draw=y[:num_to_draw]<line_sep>x_sensitive_draw=x_sensitive[:num_to_draw]<line_sep>x_lim=[min(x_draw[: -2])-np.absolute(0.3<times>min(x_draw[: -2])) max(x_draw[: -2])+np.absolute(0.5<times>max(x_draw[: -2]))]<line_sep>y_lim=[min(x_draw[: -1])-np.absolute(0.3<times>min(x_draw[: -1])) max(x_draw[: -1])+np.absolute(0.7<times>max(x_draw[: -1]))]<line_sep>X_s_0=x_draw[x_sensitive_draw<eq>0.0]<line_sep>X_s_1=x_draw[x_sensitive_draw<eq>1.0]<line_sep>y_s_0=y_draw[x_sensitive_draw<eq>0.0]<line_sep>y_s_1=y_draw[x_sensitive_draw<eq>1.0]<if_stmt>w_arr<is><not><none># we are plotting the boundaries of a trained classifier <block_start>plt.scatter(X_s_0[y_s_0<eq>1.0][: -2] X_s_0[y_s_0<eq>1.0][: -1] color='green' marker='x' s=70 linewidth=2)<line_sep>plt.scatter(X_s_0[y_s_0<eq>-1.0][: -2] X_s_0[y_s_0<eq>-1.0][: -1] color='red' marker='x' s=70 linewidth=2)<line_sep>plt.scatter(X_s_1[y_s_1<eq>1.0][: -2] X_s_1[y_s_1<eq>1.0][: -1] color='green' marker='o' facecolors='none' s=70 linewidth=2)<line_sep>plt.scatter(X_s_1[y_s_1<eq>-1.0][: -2] X_s_1[y_s_1<eq>-1.0][: -1] color='red' marker='o' facecolors='none' s=70 linewidth=2)<for_stmt>i range(0 len(w_arr))<block_start>w=w_arr[i]<line_sep>l=label_arr[i]<line_sep>lt=lt_arr[i]<line_sep>x1,x2=min(x_draw[: 1]) max(x_draw[: 1])<line_sep>y1,y2=get_line_coordinates(w x1 x2)<line_sep>plt.plot([x1 x2] [y1 y2] lt linewidth=3 label=l)<block_end>plt.title(title fontsize=fs)<block_end><else_stmt># just plotting the data <block_start>plt.scatter(X_s_0[y_s_0<eq>1.0][: -2] X_s_0[y_s_0<eq>1.0][: -1] color='green' marker='x' s=70 linewidth=2 label="group-0 +ve")<line_sep>plt.scatter(X_s_0[y_s_0<eq>-1.0][: -2] X_s_0[y_s_0<eq>-1.0][: -1] color='red' marker='x' s=70 linewidth=2 label="group-0 -ve")<line_sep>plt.scatter(X_s_1[y_s_1<eq>1.0][: -2] X_s_1[y_s_1<eq>1.0][: -1] color='green' marker='o' facecolors='none' s=70 linewidth=2 label="group-1 +ve")<line_sep>plt.scatter(X_s_1[y_s_1<eq>-1.0][: -2] X_s_1[y_s_1<eq>-1.0][: -1] color='red' marker='o' facecolors='none' s=70 linewidth=2 label="group-1 -ve")<block_end><if_stmt><true># turn the ticks on or off <block_start>plt.tick_params(axis='x' which='both' bottom='off' top='off' labelbottom='off')# dont need the ticks to see the data distribution plt.tick_params(axis='y' which='both' left='off' right='off' labelleft='off')<block_end>plt.legend(loc=2 fontsize=fs)<line_sep>plt.xlim(x_lim)<line_sep>plt.ylim(y_lim)<line_sep>plt.savefig(fname)<line_sep>plt.show()<block_end>
<import_stmt>os<import_stmt>mimetypes<line_sep>MIMETYPE_MAP={'.js':'text/javascript' '.mov':'video/quicktime' '.mp4':'video/mp4' '.m4v':'video/x-m4v' '.3gp':'video/3gpp' '.woff':'application/font-woff' '.eot':'application/vnd.ms-fontobject' '.ttf':'application/x-font-truetype' '.otf':'application/x-font-opentype' '.svg':'image/svg+xml' }<line_sep>MIMETYPE_DEFAULT='application/octet-stream'<def_stmt>guess path<block_start><if_stmt><not>path<block_start><return>MIMETYPE_DEFAULT<block_end>base,ext=os.path.splitext(path)<if_stmt>ext.lower()<in>MIMETYPE_MAP<block_start><return>MIMETYPE_MAP[ext.lower()]<block_end>mime_type,encoding=mimetypes.guess_type(path)<if_stmt>mime_type<block_start><return>mime_type<block_end><return>MIMETYPE_DEFAULT<block_end>
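# --- Brief usage sketch (not part of the original module) ---
# Exercises the lookup order implemented in guess(): explicit MIMETYPE_MAP
# entry -> stdlib mimetypes.guess_type() -> MIMETYPE_DEFAULT. The example
# paths are invented for illustration.
assert guess("movie.MOV") == "video/quicktime"     # map lookup is case-insensitive on the extension
assert guess("report.pdf") == "application/pdf"    # not in the map, resolved by the mimetypes stdlib
assert guess("archive.xyz") == MIMETYPE_DEFAULT    # unknown extension falls back to octet-stream
assert guess("") == MIMETYPE_DEFAULT               # empty path short-circuits to the default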
<def_stmt>f <block_start>'''f'''<line_sep><pass><def_stmt>f1 <block_start><pass><block_end><block_end>f2=f<if_stmt><true><block_start><def_stmt>g <block_start><pass><block_end><block_end><else_stmt><block_start><def_stmt>h <block_start><pass><block_end><block_end><class_stmt>C<block_start><def_stmt>i self<block_start><pass><block_end><def_stmt>j self<block_start><def_stmt>j2 self<block_start><pass><block_end><block_end><class_stmt>C2<block_start><def_stmt>k self<block_start><pass><block_end><block_end><block_end>
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # <import_from_stmt>itertools chain<import_from_stmt>itertools combinations<import_from_stmt>unittest.mock Mock<import_from_stmt>django.test TestCase<import_from_stmt>api.common.permissions.openshift_all_access OpenshiftAllAccessPermission<import_from_stmt>api.iam.models User<import_from_stmt>api.provider.models Provider<line_sep>ACCESS_KEYS={Provider.PROVIDER_AWS:{"aws.account":{"read":["*"]}} Provider.PROVIDER_AZURE:{"azure.subscription_guid":{"read":["*"]}} Provider.PROVIDER_OCP:{"openshift.cluster":{"read":["*"]}} }<class_stmt>OCPAllAccessPermissionTest(TestCase)<block_start>"""Test the OCP-on-All access permissions."""<def_stmt>test_has_perm_with_access_on_get self<block_start>"""Test that a user with at least 1 access can execute."""<line_sep>accessPerm=OpenshiftAllAccessPermission()<line_sep>s=ACCESS_KEYS.keys()<for_stmt>key chain.from_iterable(combinations(s r)<for>r range(1 len(s)+1))<block_start><with_stmt>self.subTest(permission=key)<block_start>access={}<for_stmt>k key<block_start>access.update(ACCESS_KEYS[k])<block_end>user=Mock(spec=User access=access admin=<false>)<line_sep>req=Mock(user=user method="GET")<line_sep>result=accessPerm.has_permission(request=req view=<none>)<line_sep>self.assertTrue(result)<block_end><block_end><block_end><block_end>
# Copyright (C) 2016 <NAME>. # This file is part of CodexGigas - https://github.com/codexgigassys/ # See the file 'LICENSE' for copying permission. <import_from_stmt>PlugIns.PlugIn PlugIn<import_from_stmt>Modules.PEFileModule PEFileModule<import_stmt>pefile<import_from_stmt>Utils.InfoExtractor *<class_stmt>ResourceEntriesPlug(PlugIn)<block_start><def_stmt>__init__ self sample=<none><block_start>PlugIn.__init__(self sample)<block_end><def_stmt>getPath self<block_start><return>"particular_header.res_entries"<block_end><def_stmt>getName self<block_start><return>"res_entries"<block_end><def_stmt>getVersion self<block_start><return>6<block_end><def_stmt>process self<block_start>pelib=self._getLibrary(PEFileModule().getName())<if_stmt>(pelib<is><none>)<block_start><return>""<block_end>ret=[]<if_stmt>hasattr(pelib 'DIRECTORY_ENTRY_RESOURCE')<block_start>i=0<for_stmt>resource_type pelib.DIRECTORY_ENTRY_RESOURCE.entries<block_start><if_stmt>resource_type.name<is><not><none><block_start>name="%s"%resource_type.name<block_end><else_stmt><block_start>name="%s"%pefile.RESOURCE_TYPE.get(resource_type.struct.Id)<block_end><if_stmt>name<is><none><block_start>name="%d"%resource_type.struct.Id<block_end><if_stmt>hasattr(resource_type 'directory')<block_start><for_stmt>resource_id resource_type.directory.entries<block_start><if_stmt>hasattr(resource_id 'directory')<block_start><for_stmt>resource_lang resource_id.directory.entries<block_start><try_stmt><block_start>data=pelib.get_data(resource_lang.data.struct.OffsetToData resource_lang.data.struct.Size)<line_sep># fd=open(name,'wb') # fd.write(data) # (data) <block_end><except_stmt>pefile.PEFormatError<block_start><return>"corrupt"<block_end>filetype=MIME_TYPE(data <false>)<line_sep>lang=pefile.LANG.get(resource_lang.data.lang 'unknown')<line_sep>sublang=pefile.get_sublang_name_for_lang(resource_lang.data.lang resource_lang.data.sublang)<line_sep>entry={}<line_sep>entry["name"]=self._normalize(name)<line_sep>entry["rva"]=self._normalize(hex(resource_lang.data.struct.OffsetToData))<line_sep>entry["size"]=self._normalize(hex(resource_lang.data.struct.Size))<line_sep>entry["type"]=self._normalize(filetype)<line_sep>entry["lang"]=self._normalize(lang)<line_sep>entry["sublang"]=self._normalize(sublang)<line_sep>entry["sha1"]=SHA1(data)<line_sep>ret.append(entry)<block_end><block_end><block_end><block_end><block_end><block_end><return>ret<block_end><block_end>
""" Arithmetic progression class >>> ap = ArithmeticProgression(1, .5, 3) >>> list(ap) [1.0, 1.5, 2.0, 2.5] """<import_from_stmt>collections abc<class_stmt>ArithmeticProgression<block_start><def_stmt>__init__ self begin step end<block_start>self.begin=begin<line_sep>self.step=step<line_sep>self.end=end<block_end><def_stmt>__iter__ self<block_start><return>ArithmeticProgressionIterator(self)<block_end><block_end><class_stmt>ArithmeticProgressionIterator(abc.Iterator)<block_start><def_stmt>__init__ self arithmetic_progression<block_start>self._ap=arithmetic_progression<line_sep>self._index=0<block_end><def_stmt>__next__ self<block_start>first=type(self._ap.begin+self._ap.step)(self._ap.begin)<line_sep>result=first+self._ap.step<times>self._index<if_stmt>result<l>self._ap.end<block_start>self._index<augadd>1<line_sep><return>result<block_end><else_stmt><block_start><raise>StopIteration<block_end><block_end><block_end>
<import_from_stmt>logging getLogger<import_from_stmt>libcity.executor.abstract_tradition_executor AbstractTraditionExecutor<import_from_stmt>libcity.utils get_evaluator<class_stmt>MapMatchingExecutor(AbstractTraditionExecutor)<block_start><def_stmt>__init__ self config model<block_start>self.model=model<line_sep>self.config=config<line_sep>self.evaluator=get_evaluator(config)<line_sep>self.exp_id=self.config.get('exp_id' <none>)<line_sep>self.cache_dir='./libcity/cache/{}/model_cache'.format(self.exp_id)<line_sep>self.evaluate_res_dir='./libcity/cache/{}/evaluate_cache'.format(self.exp_id)<line_sep>self._logger=getLogger()<block_end><def_stmt>evaluate self test_data<block_start>""" use model to test data Args: test_data """<line_sep>result=self.model.run(test_data)<line_sep>batch={'route':test_data['route'] 'result':result 'rd_nwk':test_data['rd_nwk']}<line_sep>self.evaluator.collect(batch)<line_sep>self.evaluator.save_result(self.evaluate_res_dir)<block_end><def_stmt>train self train_dataloader eval_dataloader<block_start>""" Traditional models do not require training. Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader """<line_sep><pass><block_end><block_end># do nothing
# Copyright (c) 2021 Graphcore Ltd. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>torch nn<import_from_stmt>espnet.nets.pytorch_backend.conformer.convolution ConvolutionModule<import_from_stmt>src.layers.layer_norm LayerNorm<class_stmt>ConvolutionModule_cpu(ConvolutionModule)<block_start>"""ConvolutionModule in Conformer model. Args: channels_ (int): The number of channels of the conv layers. kernel_size_ (int): Kernel size of the conv layers. """<def_stmt>__init__ self channels_ kernel_size_ activation_=nn.ReLU() bias=<true><block_start>"""Construct a ConvolutionModule object."""<line_sep>super(ConvolutionModule_cpu self).__init__(channels=channels_ kernel_size=kernel_size_ activation=activation_)<line_sep># kernel_size_ should be an odd number for 'SAME' padding <assert_stmt>(kernel_size_-1)%2<eq>0<line_sep>self.pointwise_conv1=nn.Conv1d(channels_ 2<times>channels_ kernel_size=1 stride=1 padding=0 bias=bias )<line_sep>self.depthwise_conv=nn.Conv1d(1<times>channels_ 1<times>channels_ kernel_size_ stride=1 padding=(kernel_size_-1)<floordiv>2 groups=channels_ bias=bias )<line_sep># Replace the original batch_norm with layer_norm self.norm=LayerNorm(1<times>channels_ -2)<line_sep>self.pointwise_conv2=nn.Conv1d(1<times>channels_ channels_ kernel_size=1 stride=1 padding=0 bias=bias )<line_sep>self.activation=activation_<block_end><block_end>
<import_stmt>goless<import_from_stmt>goless.backends current<as>be<import_from_stmt>. BaseTests<class_stmt>RecvCaseTests(BaseTests)<block_start>chansize=1<def_stmt>setUp self<block_start>BaseTests.setUp(self)<line_sep>self.ch=goless.chan(self.chansize)<line_sep>self.ca=goless.rcase(self.ch)<block_end><def_stmt>test_ready self<block_start>self.assertFalse(self.ca.ready())<line_sep>be.run(self.ch.send 1)<line_sep>self.assertTrue(self.ca.ready())<line_sep>be.run(self.ch.recv)<line_sep>self.assertFalse(self.ca.ready())<block_end><def_stmt>test_executes self<block_start>be.run(self.ch.send 'a')<line_sep>x=self.ca.exec_()<line_sep>self.assertEqual(x 'a')<block_end><def_stmt>test_exec_with_no_body self<block_start>be.run(self.ch.send 'a')<line_sep>ca=goless.rcase(self.ch)<line_sep>self.assertEqual(ca.exec_() 'a')<block_end><block_end><class_stmt>RecvCaseUnbufferedTests(RecvCaseTests)<block_start>chansize=0<block_end><class_stmt>SendCaseTests(BaseTests)<block_start>chansize=1<def_stmt>setUp self<block_start>BaseTests.setUp(self)<line_sep>self.ch=goless.chan(self.chansize)<line_sep>self.sendval=1<line_sep>self.ca=goless.scase(self.ch self.sendval)<block_end><def_stmt>test_ready self<block_start><def_stmt>assert_default_readiness <block_start>self.assertEquals(self.ca.ready() self.chansize<g>0)<block_end>assert_default_readiness()<line_sep>be.run(self.ch.send)<line_sep>self.assertFalse(self.ca.ready())<line_sep>be.run(self.ch.recv)<line_sep>assert_default_readiness()<line_sep>be.run(self.ch.send)<line_sep>self.assertFalse(self.ca.ready())<line_sep>be.run(self.ch.recv)<line_sep>assert_default_readiness()<block_end><def_stmt>test_executes self<block_start><def_stmt>recv <block_start>a.append(self.ch.recv())<block_end>a=[]<line_sep>be.run(recv)<line_sep>self.ca.exec_()<line_sep>self.assertEqual(a [self.sendval])<block_end><def_stmt>test_exec_no_onselected self<block_start>be.run(self.ch.recv)<line_sep>self.ca.exec_()<block_end><block_end><class_stmt>SendCaseUnbufferedTests(SendCaseTests)<block_start>chansize=0<block_end><class_stmt>SelectTests(BaseTests)<block_start><def_stmt>setUp self<block_start>BaseTests.setUp(self)<line_sep>self.chan1=goless.chan()<block_end><def_stmt>test_select_uses_default self<block_start>cases=[goless.rcase(self.chan1) goless.dcase()]<line_sep>result,val=goless.select(cases)<line_sep>self.assertIs(result cases[1])<line_sep>self.assertIsNone(val)<block_end><def_stmt>test_select_chooses_ready_selection self<block_start>readychan=goless.chan(1)<line_sep>notreadychan=goless.chan(1)<line_sep>readychan.send(3)<line_sep>cases=[goless.rcase(notreadychan) goless.rcase(readychan) goless.dcase()]<line_sep>result,val=goless.select(cases)<line_sep>self.assertIs(result cases[1])<line_sep>self.assertEqual(val 3)<block_end><def_stmt>test_select_no_default_no_ready_blocks self<block_start>chan1=goless.chan()<line_sep>chan2=goless.chan()<line_sep>a=[]<line_sep>cases=[goless.rcase(chan2) goless.rcase(chan1)]<def_stmt>sel <block_start>a.append(goless.select(cases))<block_end>be.run(sel)<line_sep>self.assertEqual(a [])<line_sep>chan1.send(5)<line_sep>be.yield_()<line_sep>self.assertEqual(len(a) 1)<line_sep>chosen,val=a[0]<line_sep>self.assertEqual(chosen cases[1])<line_sep>self.assertEqual(val 5)<block_end><def_stmt>test_main_tasklet_can_select self<block_start>chan1=goless.chan(1)<line_sep>cases=[goless.scase(chan1 3)]<line_sep>chosen,val=goless.select(cases)<line_sep>self.assertIs(chosen cases[0])<line_sep>self.assertIsNone(val)<block_end><def_stmt>test_raises_if_multiple_default_cases 
self<block_start><with_stmt>self.assertRaises(AssertionError)<block_start>goless.select([goless.dcase() goless.dcase()])<block_end><block_end><def_stmt>test_select_accepts_args self<block_start>chan1=goless.chan(1)<line_sep>scase=goless.scase(chan1 1)<line_sep>chosen,val=goless.select(scase)<line_sep>self.assertIs(chosen scase)<line_sep>self.assertIsNone(val)<block_end><def_stmt>test_select_raises_for_list_and_args self<block_start>chan1=goless.chan(1)<line_sep>chan2=goless.chan(1)<line_sep>chan3=goless.chan(1)<line_sep>cases=[goless.scase(chan1 1) goless.scase(chan2 2)]<with_stmt>self.assertRaises(TypeError)<block_start>goless.select(cases chan3)<block_end><block_end><def_stmt>test_select_with_no_args_should_do_nothing self<block_start>goless.select()<line_sep>goless.select([])<block_end><def_stmt>test_raises_deadlock_if_no_goroutines self<block_start><with_stmt>self.assertRaises(goless.Deadlock)<block_start>goless.select(goless.rcase(goless.chan()))<block_end><block_end><block_end>
""" cuTENSOR Wrapper Use `cupy_backends.cuda.libs.cutensor` directly in CuPy codebase. """<line_sep>available=<true><try_stmt><block_start><import_from_stmt>cupy_backends.cuda.libs.cutensor *# NOQA <block_end><except_stmt>ImportError<as>e<block_start>available=<false><import_from_stmt>cupy._environment _preload_warning<line_sep>_preload_warning('cutensor' e)<block_end>
"""Test that a forward-declared class works when its complete definition is in a library"""<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<class_stmt>ForwardDeclTestCase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>setUp self# Call super's setUp(). <block_start>TestBase.setUp(self)<line_sep># Find the line number to break inside main(). self.source='main.m'<line_sep>self.line=line_number(self.source '// Set breakpoint 0 here.')<line_sep>self.shlib_names=["Container"]<block_end>@skipUnlessDarwin<def_stmt>test_expr self<block_start>self.build()<line_sep># Create a target by the debugger. target=self.dbg.CreateTarget("a.out")<line_sep>self.assertTrue(target VALID_TARGET)<line_sep># Create the breakpoint inside function 'main'. breakpoint=target.BreakpointCreateByLocation(self.source self.line)<line_sep>self.assertTrue(breakpoint VALID_BREAKPOINT)<line_sep># Register our shared libraries for remote targets so they get # automatically uploaded environment=self.registerSharedLibrariesWithTarget(target self.shlib_names)<line_sep># Now launch the process, and do not stop at entry point. process=target.LaunchSimple(<none> environment self.get_process_working_directory())<line_sep>self.assertTrue(process PROCESS_IS_VALID)<line_sep># The stop reason of the thread should be breakpoint. self.expect("thread list" STOPPED_DUE_TO_BREAKPOINT substrs=['stopped' 'stop reason = breakpoint'])<line_sep># The breakpoint should have a hit count of 1. self.expect("breakpoint list -f" BREAKPOINT_HIT_ONCE substrs=[' resolved, hit count = 1'])<line_sep># This should display correctly. self.expect("expression [j getMember]" VARIABLES_DISPLAYED_CORRECTLY substrs=["= 0x"])<block_end><block_end>
<import_from_stmt>builtins int range<import_from_stmt>pyinfrabox ValidationError<import_from_stmt>pyinfrabox.utils *<def_stmt>check_version v path<block_start><if_stmt><not>isinstance(v int)<block_start><raise>ValidationError(path "must be an int")<block_end><if_stmt>v<ne>1<block_start><raise>ValidationError(path "unsupported version")<block_end><block_end><def_stmt>parse_measurement d path<block_start>check_allowed_properties(d path ("name" "unit" "value"))<line_sep>check_required_properties(d path ("name" "unit" "value"))<line_sep>check_text(d['unit'] path+".unit")<line_sep>check_text(d['name'] path+".name")<line_sep>check_text(d['value'] path+".value")<block_end><def_stmt>parse_measurements e path<block_start><if_stmt><not>isinstance(e list)<block_start><raise>ValidationError(path "must be an array")<block_end><for_stmt>i range(0 len(e))<block_start>elem=e[i]<line_sep>path="%s[%s]"%(path i)<line_sep>parse_measurement(elem path)<block_end><block_end><def_stmt>parse_t d path<block_start>check_allowed_properties(d path ("suite" "name" "status" "duration" "message" "stack" "measurements"))<line_sep>check_required_properties(d path ("suite" "name" "status" "duration"))<line_sep>check_text(d['suite'] path+".suite")<line_sep>check_text(d['name'] path+".name")<line_sep>check_text(d['status'] path+".status")<line_sep>check_number(d['duration'] path+".duration")<if_stmt>'message'<in>d<block_start>check_text(d['message'] path+".message")<block_end><if_stmt>'stack'<in>d<block_start>check_text(d['stack'] path+".stack")<block_end><if_stmt>'measurements'<in>d<block_start>parse_measurements(d['measurements'] path+".measurements")<block_end><block_end><def_stmt>parse_ts e path<block_start><if_stmt><not>isinstance(e list)<block_start><raise>ValidationError(path "must be an array")<block_end><if_stmt><not>e<block_start><raise>ValidationError(path "must not be empty")<block_end><for_stmt>i range(0 len(e))<block_start>elem=e[i]<line_sep>p="%s[%s]"%(path i)<line_sep>parse_t(elem p)<block_end><block_end><def_stmt>parse_document d<block_start>check_allowed_properties(d "#" ("version" "tests"))<line_sep>check_required_properties(d "#" ("version" "tests"))<line_sep>check_version(d['version'] "#version")<line_sep>parse_ts(d['tests'] "#tests")<block_end><def_stmt>validate_result d<block_start>parse_document(d)<block_end>
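# --- Illustrative input (not from the original file) ---
# A document shaped the way the parse_* checks above expect it; the concrete
# suite/test names and measurement values are invented for illustration only.
example_result = {
    "version": 1,                      # check_version only accepts 1
    "tests": [                         # must be a non-empty list
        {
            "suite": "unit",           # required text fields
            "name": "test_addition",
            "status": "ok",
            "duration": 12.5,          # required number
            "measurements": [          # optional; each entry needs name/unit/value as text
                {"name": "memory", "unit": "mb", "value": "64"},
            ],
        },
    ],
}
validate_result(example_result)        # raises ValidationError if the schema is violated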
<import_stmt>cv2<line_sep>im=cv2.imread('data/src/lena_square_half.png')<line_sep>th,im_th=cv2.threshold(im 128 255 cv2.THRESH_BINARY)<line_sep>print(th)<line_sep># 128.0 cv2.imwrite('data/dst/opencv_th.jpg' im_th)<line_sep># True th,im_th_tz=cv2.threshold(im 128 255 cv2.THRESH_TOZERO)<line_sep>print(th)<line_sep># 128.0 cv2.imwrite('data/dst/opencv_th_tz.jpg' im_th_tz)<line_sep># True # th, im_th_otsu = cv2.threshold(im, 128, 192, cv2.THRESH_OTSU) # error: OpenCV(4.2.0) /tmp/opencv-20200105-17262-cwpzm4/opencv-4.2.0/modules/imgproc/src/thresh.cpp:1529: error: (-215:Assertion failed) src.type() == CV_8UC1 in function 'threshold' im_gray=cv2.cvtColor(im cv2.COLOR_BGR2GRAY)<line_sep>th,im_gray_th_otsu=cv2.threshold(im_gray 128 192 cv2.THRESH_OTSU)<line_sep>print(th)<line_sep># 117.0 cv2.imwrite('data/dst/opencv_th_otsu.jpg' im_gray_th_otsu)<line_sep># True
<import_stmt>numpy<as>np<def_stmt>cgls A b<block_start>height,width=A.shape<line_sep>x=np.zeros((height))<while_stmt>(<true>)<block_start>sumA=A.sum()<if_stmt>(sumA<l>100)<block_start><break><block_end><if_stmt>(np.linalg.det(A)<l>1)<block_start>A=A+np.eye(height width)<times>sumA<times>0.000000005<block_end><else_stmt><block_start>x=np.linalg.inv(A).dot(b)<line_sep><break><block_end><block_end><return>x<block_end>
# coding: utf-8 <import_stmt>copy<import_stmt>PIL.Image<import_stmt>PIL.ImageDraw<import_from_stmt>handright *<import_from_stmt>tests.util *<line_sep>BACKGROUND_COLOR="white"<line_sep>WIDTH=32<line_sep>HEIGHT=32<line_sep>SIZE=(WIDTH HEIGHT)<line_sep>SEED="Handright"<def_stmt>get_default_template <block_start>template=Template(background=PIL.Image.new(mode="RGB" size=SIZE color=BACKGROUND_COLOR) left_margin=3 top_margin=6 right_margin=3 bottom_margin=6 line_spacing=2 font=get_default_font(2) font_size_sigma=0 )<line_sep><return>template<block_end><def_stmt>test_side_effect <block_start>text=get_short_text()<line_sep>template=get_default_template()<line_sep>template_clone=copy.copy(template)<line_sep>handwrite(text template)<assert_stmt>text<eq>get_short_text()<assert_stmt>template<eq>template_clone<block_end><def_stmt>test_null_text <block_start><assert_stmt>list(handwrite("" get_default_template()))<eq>[]<block_end><def_stmt>test_blank_text <block_start>temp=get_default_template()<line_sep>images=handwrite(" " temp)<assert_stmt>temp.get_background()<eq>next(images)<block_end><def_stmt>test_seed <block_start>text=get_long_text()<line_sep>template=get_default_template()<for_stmt>seed (0 "Handright")<block_start>ims1=handwrite(text template seed=seed)<line_sep>ims2=handwrite(text template seed=seed)<assert_stmt>list(ims1)<eq>list(ims2)<block_end><block_end><def_stmt>test_line_and_page_breaks <block_start>text="哈"<times>4<line_sep>template=Template(background=PIL.Image.new(mode="L" size=(30 30) color="white") font=get_default_font(12) left_margin=3 right_margin=3 top_margin=3 bottom_margin=3 word_spacing_sigma=0 font_size_sigma=0 )<line_sep>images=handwrite(text template)<assert_stmt>len(list(images))<eq>1<block_end><def_stmt>test_line_separators <block_start>text1="a\nb\nc\n"<line_sep>text2="a\rb\rc\r"<line_sep>text3="a\r\nb\r\nc\r\n"<line_sep>text4="a\rb\nc\r\n"<line_sep>text5="a\rb\nc\r"<line_sep>text6="a\r\nb\rc\r"<line_sep>text7="a\r\nb\nc\n"<line_sep>template=get_default_template()<assert_stmt>(list(handwrite(text1 template seed=SEED))<eq>list(handwrite(text2 template seed=SEED))<eq>list(handwrite(text3 template seed=SEED))<eq>list(handwrite(text4 template seed=SEED))<eq>list(handwrite(text5 template seed=SEED))<eq>list(handwrite(text6 template seed=SEED))<eq>list(handwrite(text7 template seed=SEED)))<block_end>
<import_from_stmt>compiler.errors TypedSyntaxError<import_from_stmt>typing ClassVar<import_from_stmt>.common StaticTestBase<class_stmt>FinalTests(StaticTestBase)<block_start><def_stmt>test_final_multiple_typeargs self<block_start>codestr=""" from typing import Final from something import hello x: Final[int, str] = hello() """<with_stmt>self.assertRaisesRegex(TypedSyntaxError r"incorrect number of generic arguments for Final\[T\], expected 1, got 2" )<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_annotation_nesting self<block_start><with_stmt>self.assertRaisesRegex(TypedSyntaxError "Final annotation is only valid in initial declaration")<block_start>self.compile(""" from typing import Final, List x: List[Final[str]] = [] """ modname="foo" )<block_end><with_stmt>self.assertRaisesRegex(TypedSyntaxError "Final annotation is only valid in initial declaration")<block_start>self.compile(""" from typing import Final, List x: List[int | Final] = [] """ modname="foo" )<block_end><block_end><def_stmt>test_final self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef """<line_sep>self.compile(codestr modname="foo")<block_end><def_stmt>test_final_generic self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef """<line_sep>self.compile(codestr modname="foo")<block_end><def_stmt>test_final_generic_types self<block_start>codestr=""" from typing import Final def g(i: int) -> int: return i def f() -> int: x: Final[int] = 0xdeadbeef return g(x) """<line_sep>self.compile(codestr modname="foo")<block_end><def_stmt>test_final_uninitialized self<block_start>codestr=""" from typing import Final x: Final[int] """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Must assign a value when declaring a Final")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassign self<block_start>codestr=""" from typing import Any, Final x: Final[Any] = 0xdeadbeef x = "something" """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassign_explicit_global self<block_start>codestr=""" from typing import Final a: Final[int] = 1337 def fn(): def fn2(): global a a = 0 """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassign_explicit_global_shadowed self<block_start>codestr=""" from typing import Final a: Final[int] = 1337 def fn(): a = 2 def fn2(): global a a = 0 """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassign_nonlocal self<block_start>codestr=""" from typing import Final a: Final[int] = 1337 def fn(): def fn2(): nonlocal a a = 0 """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassign_nonlocal_shadowed self<block_start>codestr=""" from typing import Final a: Final[int] = 1337 def fn(): a = 3 def fn2(): nonlocal a # should be allowed, we're assigning to the shadowed # value a = 0 """<line_sep>self.compile(codestr modname="foo")<block_end><def_stmt>test_final_reassigned_in_tuple self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef y = 3 x, y = 4, 5 
"""<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassigned_in_loop self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef for x in [1, 3, 5]: pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassigned_in_except self<block_start>codestr=""" from typing import Final def f(): e: Final[int] = 3 try: x = 1 + "2" except Exception as e: pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassigned_in_loop_target_tuple self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef for x, y in [(1, 2)]: pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_reassigned_in_ctxmgr self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef with open("lol") as x: pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_generic_reassign self<block_start>codestr=""" from typing import Final x: Final[int] = 0xdeadbeef x = 0x5ca1ab1e """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final variable")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_callable_protocol_retains_inferred_type self<block_start>codestr=""" from typing import Final, Protocol def foo(x: int) -> str: return "A" class CallableProtocol(Protocol): def __call__(self, x: int) -> str: pass f: Final[CallableProtocol] = foo def bar(x: int) -> str: return f(x) """<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.bar<line_sep>self.assertInBytecode(f "INVOKE_FUNCTION")<block_end><block_end><def_stmt>test_final_in_args self<block_start>codestr=""" from typing import Final def f(a: Final) -> None: pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Final annotation is only valid in initial declaration" )<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_returns self<block_start>codestr=""" from typing import Final def f() -> Final[int]: return 1 """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Final annotation is only valid in initial declaration" )<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_decorator self<block_start>codestr=""" from typing import final class C: @final def f(): pass """<line_sep>self.compile(codestr modname="foo")<block_end><def_stmt>test_final_decorator_override self<block_start>codestr=""" from typing import final class C: @final def f(): pass class D(C): def f(): pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final attribute of foo.D:f")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_decorator_override_with_assignment self<block_start>codestr=""" from typing import final class C: @final def f(): pass class D(C): f = print """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final attribute of foo.D:f")<block_start>self.compile(codestr 
modname="foo")<block_end><block_end><def_stmt>test_final_decorator_override_transitivity self<block_start>codestr=""" from typing import final class C: @final def f(): pass class D(C): pass class E(D): def f(): pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Cannot assign to a Final attribute of foo.E:f")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_decorator_class self<block_start>codestr=""" from typing import final @final class C: def f(self): pass def f(): return C().f() """<line_sep>c=self.compile(codestr modname="foo")<line_sep>f=self.find_code(c "f")<line_sep>self.assertInBytecode(f "INVOKE_FUNCTION")<block_end><def_stmt>test_final_decorator_class_inheritance self<block_start>codestr=""" from typing import final @final class C: pass class D(C): pass """<with_stmt>self.assertRaisesRegex(TypedSyntaxError "Class `foo.D` cannot subclass a Final class: `foo.C`")<block_start>self.compile(codestr modname="foo")<block_end><block_end><def_stmt>test_final_decorator_class_nonstatic_subclass self<block_start>codestr=""" from typing import final @final class C: pass """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError "type 'C' is not an acceptable base type")<block_start><class_stmt>D(mod.C)<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>test_final_decorator_class_dynamic self<block_start>"""We should never mark DYNAMIC_TYPE as final."""<line_sep>codestr=""" from typing import final, Generic, NamedTuple @final class NT(NamedTuple): x: int class C(Generic): pass """<line_sep># No TypedSyntaxError "cannot inherit from Final class 'dynamic'" self.compile(codestr)<block_end><def_stmt>test_final_constant_folding_int self<block_start>codestr=""" from typing import Final X: Final[int] = 1337 def plus_1337(i: int) -> int: return i + X """<with_stmt>self.in_module(codestr)<as>mod<block_start>plus_1337=mod.plus_1337<line_sep>self.assertInBytecode(plus_1337 "LOAD_CONST" 1337)<line_sep>self.assertNotInBytecode(plus_1337 "LOAD_GLOBAL")<line_sep>self.assertEqual(plus_1337(3) 1340)<block_end><block_end><def_stmt>test_final_constant_folding_bool self<block_start>codestr=""" from typing import Final X: Final[bool] = True def f() -> bool: return not X """<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.f<line_sep>self.assertInBytecode(f "LOAD_CONST" <true>)<line_sep>self.assertNotInBytecode(f "LOAD_GLOBAL")<line_sep>self.assertFalse(f())<block_end><block_end><def_stmt>test_final_constant_folding_str self<block_start>codestr=""" from typing import Final X: Final[str] = "omg" def f() -> str: return X[1] """<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.f<line_sep>self.assertInBytecode(f "LOAD_CONST" "omg")<line_sep>self.assertNotInBytecode(f "LOAD_GLOBAL")<line_sep>self.assertEqual(f() "m")<block_end><block_end><def_stmt>test_final_constant_folding_disabled_on_nonfinals self<block_start>codestr=""" from typing import Final X: str = "omg" def f() -> str: return X[1] """<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.f<line_sep>self.assertNotInBytecode(f "LOAD_CONST" "omg")<line_sep>self.assertInBytecode(f "LOAD_GLOBAL" "X")<line_sep>self.assertEqual(f() "m")<block_end><block_end><def_stmt>test_final_constant_folding_disabled_on_nonconstant_finals self<block_start>codestr=""" from typing import Final def p() -> str: return "omg" X: Final[str] = p() def f() -> str: return X[1] 
"""<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.f<line_sep>self.assertNotInBytecode(f "LOAD_CONST" "omg")<line_sep>self.assertInBytecode(f "LOAD_GLOBAL" "X")<line_sep>self.assertEqual(f() "m")<block_end><block_end><def_stmt>test_final_constant_folding_shadowing self<block_start>codestr=""" from typing import Final X: Final[str] = "omg" def f() -> str: X = "lol" return X[1] """<with_stmt>self.in_module(codestr)<as>mod<block_start>f=mod.f<line_sep>self.assertInBytecode(f "LOAD_CONST" "lol")<line_sep>self.assertNotInBytecode(f "LOAD_GLOBAL" "omg")<line_sep>self.assertEqual(f() "o")<block_end><block_end><def_stmt>test_final_constant_folding_in_module_scope self<block_start>codestr=""" from typing import Final X: Final[int] = 21 y = X + 3 """<line_sep>c=self.compile(codestr modname="foo.py")<line_sep>self.assertNotInBytecode(c "LOAD_NAME" "X")<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertEqual(mod.y 24)<block_end><block_end><def_stmt>test_final_constant_in_module_scope self<block_start>codestr=""" from typing import Final X: Final[int] = 21 """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertEqual(mod.__final_constants__ ("X" ))<block_end><block_end><def_stmt>test_final_nonconstant_in_module_scope self<block_start>codestr=""" from typing import Final def p() -> str: return "omg" X: Final[str] = p() """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertEqual(mod.__final_constants__ ())<block_end><block_end><def_stmt>test_final_method_in_class_slots self<block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertEqual(mod.C.__final_method_names__ ("foo" ))<block_end><block_end><def_stmt>test_final_method_in_class_slots_with_inheritance self<block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): return self class D(C): @final def bar(self): return self def baz(self): return self class E(D): @final def baz(self): return self class F(D): def baz(self): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertEqual(mod.C.__final_method_names__ ("foo" ))<line_sep>self.assertEqual(mod.D.__final_method_names__ ("bar" "foo"))<line_sep>self.assertEqual(mod.E.__final_method_names__ ("bar" "baz" "foo"))<line_sep>self.assertEqual(mod.F.__final_method_names__ ("bar" "foo"))<block_end><block_end><def_stmt>test_final_method_in_class_nonstatic_subclass_slots self<block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start><class_stmt>D(mod.C)<block_start><pass><block_end>self.assertEqual(D.__final_method_names__ ("foo" ))<block_end><block_end><def_stmt>test_final_method_nonstatic_override_throws_runtime_type_error self<block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError r"'foo' overrides a final method in the static base class")<block_start><class_stmt>D(mod.C)<block_start><def_stmt>foo self<block_start><return>self<block_end><block_end><block_end><block_end><block_end><def_stmt>test_final_method_nonstatic_override_of_static_subclass_throws_runtime_type_error self <block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): 
return self class D(C): pass """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError r"'foo' overrides a final method in the static base class")<block_start><class_stmt>E(mod.D)<block_start><def_stmt>foo self<block_start><return>self<block_end><block_end><block_end><block_end><block_end><def_stmt>test_final_method_nonstatic_subclass_of_static_class_throws_runtime_type_error self <block_start>codestr=""" from typing import final class C: @final def foo(self): return self def bar(self): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError r"'foo' overrides a final method in the static base class")<block_start><class_stmt>D(mod.C)<block_start><pass><block_end><class_stmt>E(D)<block_start><def_stmt>foo self<block_start><return>self<block_end><block_end><block_end><block_end><block_end><def_stmt>test_final_method_with_other_decorator_throws_type_error self <block_start>codestr=""" from typing import final class C: @final @staticmethod def foo(): return self @staticmethod @final def bar(): return self """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError r"'foo' overrides a final method in the static base class")<block_start><class_stmt>D(mod.C)<block_start>@staticmethod<def_stmt>foo <block_start><return>self<block_end><block_end><block_end><with_stmt>self.assertRaisesRegex(TypeError r"'bar' overrides a final method in the static base class")<block_start><class_stmt>D(mod.C)<block_start>@staticmethod<def_stmt>bar <block_start><return>self<block_end><block_end><block_end><block_end><block_end><def_stmt>test_updating_slot_of_final_method_in_subclass_throws_type_error self <block_start>codestr=""" from typing import final class C: @final def foo(self) -> int: return 0 """<with_stmt>self.in_module(codestr)<as>mod<block_start><with_stmt>self.assertRaisesRegex(TypeError r"'foo' overrides a final method in the static base class")<block_start><class_stmt>D(mod.C)<block_start><pass><block_end>D.foo=<lambda>self:0<block_end><block_end><block_end><def_stmt>test_updating_slot_of_final_method_in_base_class_succeeds self <block_start>codestr=""" from typing import final class C: @final def foo(self) -> int: return 0 """<with_stmt>self.in_module(codestr)<as>mod<block_start><class_stmt>D(mod.C)<block_start><pass><block_end>mod.C.foo=<lambda>self:1<line_sep>self.assertEqual(mod.C().foo() 1)<block_end><block_end><def_stmt>test_final_method_in_non_final_class_emits_invoke_function self <block_start>codestr=""" from typing import final class C: def __init__(self, x: int) -> None: self.x = x @final def foo(self) -> int: return self.x def foo(c: C) -> int: return c.foo() """<with_stmt>self.in_module(codestr)<as>mod<block_start><class_stmt>D(mod.C)<block_start><def_stmt>__init__ self<block_start>super().__init__(5)<block_end><block_end>self.assertInBytecode(mod.foo "INVOKE_FUNCTION")<line_sep>self.assertEqual(mod.foo(mod.C(4)) 4)<line_sep>self.assertEqual(mod.foo(D()) 5)<block_end><block_end><def_stmt>test_final_method_in_subclass_of_non_final_class_emits_invoke_function self <block_start>codestr=""" from typing import final class C: def __init__(self, x: int) -> None: self.x = x @final def foo(self) -> int: return self.x class D(C): def __init__(self) -> None: self.x = 4 def foo(d: D) -> int: return d.foo() """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertInBytecode(mod.foo "INVOKE_FUNCTION" ((mod.__name__ "C" "foo") 
1))<line_sep>self.assertEqual(mod.foo(mod.D()) 4)<block_end><block_end><def_stmt>test_final_classmethod_in_non_final_nonstatic_class_emits_invoke_function self <block_start>codestr=""" from typing import ClassVar, final class C: CV: ClassVar[int] = 42 @final @classmethod def foo(cls) -> int: return cls.CV def foo(c: C) -> int: return c.foo() """<with_stmt>self.in_module(codestr)<as>mod<block_start><class_stmt>D(mod.C)<block_start>CV:ClassVar[int]=84<block_end>self.assertInBytecode(mod.foo "INVOKE_FUNCTION" ((mod.__name__ "C" "foo") 1))<line_sep>self.assertEqual(mod.foo(mod.C()) 42)<line_sep>self.assertEqual(mod.foo(D()) 84)<block_end><block_end><def_stmt>test_final_classmethod_in_non_final_static_class_emits_invoke_function self <block_start>codestr=""" from typing import ClassVar, final class C: CV: ClassVar[int] = 42 @final @classmethod def foo(cls) -> int: return cls.CV class D(C): CV: ClassVar[int] = 63 def foo(c: C) -> int: return c.foo() """<with_stmt>self.in_module(codestr)<as>mod<block_start>self.assertInBytecode(mod.foo "INVOKE_FUNCTION" ((mod.__name__ "C" "foo") 1))<line_sep>self.assertEqual(mod.foo(mod.C()) 42)<line_sep>self.assertEqual(mod.foo(mod.D()) 63)<block_end><block_end><block_end>
# # Autogenerated by Thrift Compiler (0.9.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # <import_from_stmt>thrift.Thrift TType TMessageType TException TApplicationException<import_from_stmt>thrift.transport TTransport<import_from_stmt>thrift.protocol TBinaryProtocol TProtocol<try_stmt><block_start><import_from_stmt>thrift.protocol fastbinary<block_end><except_stmt><block_start>fastbinary=<none><block_end><class_stmt>ConnectionParams<block_start>""" Attributes: - client_id - seq_id - user - password - app_id - app_token - repo_base """<line_sep>thrift_spec=(<none> # 0 (1 TType.STRING 'client_id' <none> <none> ) # 1 (2 TType.STRING 'seq_id' <none> <none> ) # 2 (3 TType.STRING 'user' <none> <none> ) # 3 (4 TType.STRING 'password' <none> <none> ) # 4 (5 TType.STRING 'app_id' <none> <none> ) # 5 (6 TType.STRING 'app_token' <none> <none> ) # 6 (7 TType.STRING 'repo_base' <none> <none> ) # 7 )<def_stmt>__init__ self client_id=<none> seq_id=<none> user=<none> password=<none> app_id=<none> app_token=<none> repo_base=<none> <block_start>self.client_id=client_id<line_sep>self.seq_id=seq_id<line_sep>self.user=user<line_sep>self.password=password<line_sep>self.app_id=app_id<line_sep>self.app_token=app_token<line_sep>self.repo_base=repo_base<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.client_id=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.seq_id=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.user=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>4<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.password=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>5<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.app_id=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>6<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.app_token=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>7<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.repo_base=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ 
self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('ConnectionParams')<if_stmt>self.client_id<is><not><none><block_start>oprot.writeFieldBegin('client_id' TType.STRING 1)<line_sep>oprot.writeString(self.client_id)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.seq_id<is><not><none><block_start>oprot.writeFieldBegin('seq_id' TType.STRING 2)<line_sep>oprot.writeString(self.seq_id)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.user<is><not><none><block_start>oprot.writeFieldBegin('user' TType.STRING 3)<line_sep>oprot.writeString(self.user)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.password<is><not><none><block_start>oprot.writeFieldBegin('password' TType.STRING 4)<line_sep>oprot.writeString(self.password)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.app_id<is><not><none><block_start>oprot.writeFieldBegin('app_id' TType.STRING 5)<line_sep>oprot.writeString(self.app_id)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.app_token<is><not><none><block_start>oprot.writeFieldBegin('app_token' TType.STRING 6)<line_sep>oprot.writeString(self.app_token)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.repo_base<is><not><none><block_start>oprot.writeFieldBegin('repo_base' TType.STRING 7)<line_sep>oprot.writeString(self.repo_base)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__hash__ self<block_start>value=17<line_sep>value=(value<times>31)^hash(self.client_id)<line_sep>value=(value<times>31)^hash(self.seq_id)<line_sep>value=(value<times>31)^hash(self.user)<line_sep>value=(value<times>31)^hash(self.password)<line_sep>value=(value<times>31)^hash(self.app_id)<line_sep>value=(value<times>31)^hash(self.app_token)<line_sep>value=(value<times>31)^hash(self.repo_base)<line_sep><return>value<block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>Connection<block_start>""" Attributes: - client_id - seq_id - user - is_app - repo_base - cursor """<line_sep>thrift_spec=(<none> # 0 (1 TType.STRING 'client_id' <none> <none> ) # 1 (2 TType.STRING 'seq_id' <none> <none> ) # 2 (3 TType.STRING 'user' <none> <none> ) # 3 (4 TType.BOOL 'is_app' <none> <none> ) # 4 (5 TType.STRING 'repo_base' <none> <none> ) # 5 (6 TType.I64 'cursor' <none> <none> ) # 6 )<def_stmt>__init__ self client_id=<none> seq_id=<none> user=<none> is_app=<none> repo_base=<none> cursor=<none> <block_start>self.client_id=client_id<line_sep>self.seq_id=seq_id<line_sep>self.user=user<line_sep>self.is_app=is_app<line_sep>self.repo_base=repo_base<line_sep>self.cursor=cursor<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype 
fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.client_id=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.seq_id=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.user=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>4<block_start><if_stmt>ftype<eq>TType.BOOL<block_start>self.is_app=iprot.readBool()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>5<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.repo_base=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>6<block_start><if_stmt>ftype<eq>TType.I64<block_start>self.cursor=iprot.readI64()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('Connection')<if_stmt>self.client_id<is><not><none><block_start>oprot.writeFieldBegin('client_id' TType.STRING 1)<line_sep>oprot.writeString(self.client_id)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.seq_id<is><not><none><block_start>oprot.writeFieldBegin('seq_id' TType.STRING 2)<line_sep>oprot.writeString(self.seq_id)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.user<is><not><none><block_start>oprot.writeFieldBegin('user' TType.STRING 3)<line_sep>oprot.writeString(self.user)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.is_app<is><not><none><block_start>oprot.writeFieldBegin('is_app' TType.BOOL 4)<line_sep>oprot.writeBool(self.is_app)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.repo_base<is><not><none><block_start>oprot.writeFieldBegin('repo_base' TType.STRING 5)<line_sep>oprot.writeString(self.repo_base)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.cursor<is><not><none><block_start>oprot.writeFieldBegin('cursor' TType.I64 6)<line_sep>oprot.writeI64(self.cursor)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__hash__ self<block_start>value=17<line_sep>value=(value<times>31)^hash(self.client_id)<line_sep>value=(value<times>31)^hash(self.seq_id)<line_sep>value=(value<times>31)^hash(self.user)<line_sep>value=(value<times>31)^hash(self.is_app)<line_sep>value=(value<times>31)^hash(self.repo_base)<line_sep>value=(value<times>31)^hash(self.cursor)<line_sep><return>value<block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self 
other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>Tuple<block_start>""" Attributes: - cells """<line_sep>thrift_spec=(<none> # 0 (1 TType.LIST 'cells' (TType.STRING <none>) <none> ) # 1 )<def_stmt>__init__ self cells=<none> <block_start>self.cells=cells<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.cells=[]<line_sep>(_etype3 _size0)=iprot.readListBegin()<for_stmt>_i4 xrange(_size0)<block_start>_elem5=iprot.readString()<line_sep>self.cells.append(_elem5)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('Tuple')<if_stmt>self.cells<is><not><none><block_start>oprot.writeFieldBegin('cells' TType.LIST 1)<line_sep>oprot.writeListBegin(TType.STRING len(self.cells))<for_stmt>iter6 self.cells<block_start>oprot.writeString(iter6)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__hash__ self<block_start>value=17<line_sep>value=(value<times>31)^hash(self.cells)<line_sep><return>value<block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>ResultSet<block_start>""" Attributes: - status - con - num_tuples - num_more_tuples - tuples - field_names - field_types """<line_sep>thrift_spec=(<none> # 0 (1 TType.BOOL 'status' <none> <none> ) # 1 (2 TType.STRUCT 'con' (Connection Connection.thrift_spec) <none> ) # 2 (3 TType.I64 'num_tuples' <none> <none> ) # 3 (4 TType.I64 'num_more_tuples' <none> <none> ) # 4 (5 TType.LIST 'tuples' (TType.STRUCT (Tuple Tuple.thrift_spec)) <none> ) # 5 (6 TType.LIST 'field_names' (TType.STRING <none>) <none> ) # 6 (7 TType.LIST 'field_types' (TType.STRING <none>) <none> ) # 7 )<def_stmt>__init__ self status=<none> con=<none> num_tuples=<none> num_more_tuples=<none> tuples=<none> field_names=<none> field_types=<none> <block_start>self.status=status<line_sep>self.con=con<line_sep>self.num_tuples=num_tuples<line_sep>self.num_more_tuples=num_more_tuples<line_sep>self.tuples=tuples<line_sep>self.field_names=field_names<line_sep>self.field_types=field_types<block_end><def_stmt>read self 
iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.BOOL<block_start>self.status=iprot.readBool()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.con=Connection()<line_sep>self.con.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.I64<block_start>self.num_tuples=iprot.readI64()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>4<block_start><if_stmt>ftype<eq>TType.I64<block_start>self.num_more_tuples=iprot.readI64()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>5<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.tuples=[]<line_sep>(_etype10 _size7)=iprot.readListBegin()<for_stmt>_i11 xrange(_size7)<block_start>_elem12=Tuple()<line_sep>_elem12.read(iprot)<line_sep>self.tuples.append(_elem12)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>6<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.field_names=[]<line_sep>(_etype16 _size13)=iprot.readListBegin()<for_stmt>_i17 xrange(_size13)<block_start>_elem18=iprot.readString()<line_sep>self.field_names.append(_elem18)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>7<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.field_types=[]<line_sep>(_etype22 _size19)=iprot.readListBegin()<for_stmt>_i23 xrange(_size19)<block_start>_elem24=iprot.readString()<line_sep>self.field_types.append(_elem24)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('ResultSet')<if_stmt>self.status<is><not><none><block_start>oprot.writeFieldBegin('status' TType.BOOL 1)<line_sep>oprot.writeBool(self.status)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.con<is><not><none><block_start>oprot.writeFieldBegin('con' TType.STRUCT 2)<line_sep>self.con.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.num_tuples<is><not><none><block_start>oprot.writeFieldBegin('num_tuples' TType.I64 3)<line_sep>oprot.writeI64(self.num_tuples)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.num_more_tuples<is><not><none><block_start>oprot.writeFieldBegin('num_more_tuples' TType.I64 4)<line_sep>oprot.writeI64(self.num_more_tuples)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.tuples<is><not><none><block_start>oprot.writeFieldBegin('tuples' TType.LIST 
5)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.tuples))<for_stmt>iter25 self.tuples<block_start>iter25.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.field_names<is><not><none><block_start>oprot.writeFieldBegin('field_names' TType.LIST 6)<line_sep>oprot.writeListBegin(TType.STRING len(self.field_names))<for_stmt>iter26 self.field_names<block_start>oprot.writeString(iter26)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.field_types<is><not><none><block_start>oprot.writeFieldBegin('field_types' TType.LIST 7)<line_sep>oprot.writeListBegin(TType.STRING len(self.field_types))<for_stmt>iter27 self.field_types<block_start>oprot.writeString(iter27)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.status<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field status is unset!')<block_end><return><block_end><def_stmt>__hash__ self<block_start>value=17<line_sep>value=(value<times>31)^hash(self.status)<line_sep>value=(value<times>31)^hash(self.con)<line_sep>value=(value<times>31)^hash(self.num_tuples)<line_sep>value=(value<times>31)^hash(self.num_more_tuples)<line_sep>value=(value<times>31)^hash(self.tuples)<line_sep>value=(value<times>31)^hash(self.field_names)<line_sep>value=(value<times>31)^hash(self.field_types)<line_sep><return>value<block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>DBException(TException)<block_start>""" Attributes: - error_code - message - details """<line_sep>thrift_spec=(<none> # 0 (1 TType.I32 'error_code' <none> <none> ) # 1 (2 TType.STRING 'message' <none> <none> ) # 2 (3 TType.STRING 'details' <none> <none> ) # 3 )<def_stmt>__init__ self error_code=<none> message=<none> details=<none> <block_start>self.error_code=error_code<line_sep>self.message=message<line_sep>self.details=details<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.I32<block_start>self.error_code=iprot.readI32()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.message=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.details=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self 
oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('DBException')<if_stmt>self.error_code<is><not><none><block_start>oprot.writeFieldBegin('error_code' TType.I32 1)<line_sep>oprot.writeI32(self.error_code)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.message<is><not><none><block_start>oprot.writeFieldBegin('message' TType.STRING 2)<line_sep>oprot.writeString(self.message)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.details<is><not><none><block_start>oprot.writeFieldBegin('details' TType.STRING 3)<line_sep>oprot.writeString(self.details)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__str__ self<block_start><return>repr(self)<block_end><def_stmt>__hash__ self<block_start>value=17<line_sep>value=(value<times>31)^hash(self.error_code)<line_sep>value=(value<times>31)^hash(self.message)<line_sep>value=(value<times>31)^hash(self.details)<line_sep><return>value<block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end>
''' Multivariate statistics exercises ================================ '''<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>#%matplotlib inline np.random.seed(seed=42)# make the example reproducible ''' ### Dot product and Euclidean norm '''<line_sep>a=np.array([2 1])<line_sep>b=np.array([1 1])<def_stmt>euclidian x<block_start><return>np.sqrt(np.dot(x x))<block_end>euclidian(a)<line_sep>euclidian(a-b)<line_sep>np.dot(b a/euclidian(a))<line_sep>X=np.random.randn(100 2)<line_sep>np.dot(X a/euclidian(a))<line_sep>''' ### Covariance matrix and Mahalanobis norm '''<line_sep>N=100<line_sep>mu=np.array([1 1])<line_sep>Cov=np.array([[1 .8] [.8 1]])<line_sep>X=np.random.multivariate_normal(mu Cov N)<line_sep>xbar=np.mean(X axis=0)<line_sep>print(xbar)<line_sep>Xc=(X-xbar)<line_sep>np.mean(Xc axis=0)<line_sep>S=1/(N-1)<times>np.dot(Xc.T Xc)<line_sep>print(S)<line_sep>#import scipy Sinv=np.linalg.inv(S)<def_stmt>mahalanobis x xbar Sinv<block_start>xc=x-xbar<line_sep><return>np.sqrt(np.dot(np.dot(xc Sinv) xc))<block_end>dists=pd.DataFrame([[mahalanobis(X[i :] xbar Sinv) euclidian(X[i :]-xbar)]<for>i range(X.shape[0])] columns=['Mahalanobis' 'Euclidean'])<line_sep>print(dists[:10])<line_sep>x=X[0 :]<import_stmt>scipy.spatial<assert_stmt>(mahalanobis(X[0 :] xbar Sinv)<eq>scipy.spatial.distance.mahalanobis(xbar X[0 :] Sinv))<assert_stmt>(mahalanobis(X[1 :] xbar Sinv)<eq>scipy.spatial.distance.mahalanobis(xbar X[1 :] Sinv))<line_sep>
<import_stmt>sys<line_sep>sys.path.append("../")<line_sep>new_ticks=["Dogs2" "Cats2" "-" " " "Hamsters2" "Fish2" "Spiders2" "" " "]<line_sep>orig_ticks=["Dogs" "Cats" "Hamsters" "Fish" "Spiders"]<import_from_stmt>appJar gui<def_stmt>get btn<block_start>print(app.getOptionBox("Favourite Pets"))<line_sep>print(app.getOptionBox("The Action"))<block_end><def_stmt>tickOption opt<block_start>print("tick box" opt)<line_sep>app.setOptionBox("Favourite Pets" opt app.getCheckBox(opt))<block_end><def_stmt>tickOptionBox opt<block_start>print("menu tick box" opt)<line_sep>optValue=app.getOptionBox("Favourite Pets")[opt]<line_sep>app.setCheckBox(opt optValue callFunction=<false>)<block_end><def_stmt>doAction act<block_start>app.setOptionBox("The Action" app.getOptionBox(act))<block_end><def_stmt>findIndex act<block_start>app.setOptionBox("The Action" app.getScale(act))<block_end><def_stmt>changeOptions btn=<none><block_start>app.changeOptionBox("Favourite Pets" new_ticks)<line_sep>app.setOptionBoxChangeFunction("Favourite Pets" tickOptionBox)<block_end><def_stmt>changeOptionsBack btn=<none><block_start>app.changeOptionBox("Favourite Pets" orig_ticks)<line_sep>app.setOptionBoxChangeFunction("Favourite Pets" tickOptionBox)<block_end>app=gui()<line_sep>app.setFont(20)<line_sep>app.setBg("PapayaWhip")<line_sep>app.addLabelTickOptionBox("Favourite Pets" [])<line_sep>changeOptionsBack()<line_sep>app.addLabelOptionBox("The Action" ["Pet" "Stroke" "Feed" "Bathe" "Walk"])<line_sep>app.addLabelOptionBox("Set Action" ["Pet" "Stroke" "Feed" "Bathe" "Walk"])<line_sep>app.setOptionBoxChangeFunction("Set Action" doAction)<line_sep>app.addScale("index")<line_sep>app.setScaleRange("index" 0 4)<line_sep>app.showScaleValue("index")<line_sep>app.setScaleChangeFunction("index" findIndex)<line_sep>app.startLabelFrame("Tick Us")<line_sep>app.addCheckBox("Dogs")<line_sep>app.addCheckBox("Cats")<line_sep>app.addCheckBox("Hamsters")<line_sep>app.addCheckBox("Fish")<line_sep>app.addCheckBox("People")<line_sep>app.setCheckBoxChangeFunction("Dogs" tickOption)<line_sep>app.setCheckBoxChangeFunction("Cats" tickOption)<line_sep>app.setCheckBoxChangeFunction("Hamsters" tickOption)<line_sep>app.setCheckBoxChangeFunction("Fish" tickOption)<line_sep>app.setCheckBoxChangeFunction("People" tickOption)<line_sep>app.stopLabelFrame()<line_sep>app.addButtons(["GET" "CHANGE" "BACK"] [get changeOptions changeOptionsBack])<line_sep>#app.setCheckBox("Dogs", True) #app.setOptionBox("Favourite Pets", "Dogs") app.go()<line_sep>
<import_stmt>os<import_from_stmt>config Package<import_from_stmt>.libXML2 libXML2<import_from_stmt>.MPI MPI<import_from_stmt>.pcu pcu<class_stmt>StGermain(Package)<block_start><def_stmt>setup_dependencies self<block_start>self.mpi=self.add_dependency(MPI required=<true>)<line_sep>self.libxml2=self.add_dependency(libXML2 required=<true>)<line_sep>self.pcu=self.add_dependency(pcu required=<true>)<block_end><def_stmt>gen_locations self<block_start><yield>('/usr' [] [])<line_sep><yield>('/usr/local' [] [])<block_end><def_stmt>gen_envs self loc<block_start><for_stmt>env Package.gen_envs(self loc)<block_start>self.headers=[os.path.join('StGermain' 'StGermain.h')]<if_stmt>self.find_libraries(loc[2] 'StGermain')<block_start>env.PrependUnique(LIBS=['StGermain'])<line_sep><yield>env<block_end><block_end><block_end><block_end>
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>logging<import_from_stmt>monai.inferers SlidingWindowInferer<import_from_stmt>monai.losses DiceCELoss<import_from_stmt>monai.optimizers Novograd<import_from_stmt>monai.transforms Activationsd AddChanneld AsDiscreted CropForegroundd EnsureTyped LoadImaged RandCropByPosNegLabeld RandShiftIntensityd ScaleIntensityRanged Spacingd ToDeviced ToTensord <import_from_stmt>monailabel.tasks.train.basic_train BasicTrainTask Context<line_sep>logger=logging.getLogger(__name__)<class_stmt>MyTrain(BasicTrainTask)<block_start><def_stmt>__init__ self model_dir network description="Train Segmentation model for spleen" **kwargs <block_start>self._network=network<line_sep>super().__init__(model_dir description **kwargs)<block_end><def_stmt>network self context:Context<block_start><return>self._network<block_end><def_stmt>optimizer self context:Context<block_start><return>Novograd(self._network.parameters() 0.0001)<block_end><def_stmt>loss_function self context:Context<block_start><return>DiceCELoss(to_onehot_y=<true> softmax=<true> squared_pred=<true> batch=<true>)<block_end><def_stmt>train_pre_transforms self context:Context<block_start>t=[LoadImaged(keys=("image" "label")) AddChanneld(keys=("image" "label")) Spacingd(keys=("image" "label") pixdim=(1.0 1.0 1.0) mode=("bilinear" "nearest") ) ScaleIntensityRanged(keys="image" a_min=-57 a_max=164 b_min=0.0 b_max=1.0 clip=<true>) CropForegroundd(keys=("image" "label") source_key="image") ]<if_stmt>context.request.get("to_gpu" <false>)<block_start>t.extend([EnsureTyped(keys=("image" "label")) ToDeviced(keys=("image" "label") device=context.device)])<block_end>t.extend([RandCropByPosNegLabeld(keys=("image" "label") label_key="label" spatial_size=(96 96 96) pos=1 neg=1 num_samples=4 image_key="image" image_threshold=0 ) RandShiftIntensityd(keys="image" offsets=0.1 prob=0.5) ])<line_sep><return>t<block_end><def_stmt>train_post_transforms self context:Context<block_start><return>[ToTensord(keys=("pred" "label")) Activationsd(keys="pred" softmax=<true>) AsDiscreted(keys=("pred" "label") argmax=(<true> <false>) to_onehot=<true> n_classes=2 ) ]<block_end><def_stmt>val_pre_transforms self context:Context<block_start>t=[LoadImaged(keys=("image" "label")) AddChanneld(keys=("image" "label")) Spacingd(keys=("image" "label") pixdim=(1.0 1.0 1.0) mode=("bilinear" "nearest") ) ScaleIntensityRanged(keys="image" a_min=-57 a_max=164 b_min=0.0 b_max=1.0 clip=<true>) CropForegroundd(keys=("image" "label") source_key="image") ]<if_stmt>context.request.get("to_gpu" <false>)<block_start>t.extend([EnsureTyped(keys=("image" "label")) ToDeviced(keys=("image" "label") device=context.device)])<block_end><return>t<block_end><def_stmt>val_inferer self context:Context<block_start><return>SlidingWindowInferer(roi_size=(160 160 160) sw_batch_size=1 overlap=0.25)<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> division print_function absolute_import<import_stmt>os<import_stmt>tensorflow<as>tf<import_stmt>math<line_sep>ROOT_PATH=os.path.abspath('../../')<line_sep>print(ROOT_PATH)<line_sep>SUMMARY_PATH=os.path.join(ROOT_PATH 'output/summary')<line_sep># backbone NET_NAME='resnet50_v1d'<line_sep>RESTORE_FROM_RPN=<false><line_sep>FIXED_BLOCKS=1# allow 0~3 FREEZE_BLOCKS=[<true> <false> <false> <false> <false>]# for gluoncv backbone # neck FPN_MODE='fpn'<line_sep>SHARE_NET=<true><line_sep>USE_P5=<true><line_sep>FPN_CHANNEL=256<line_sep># bbox head NUM_SUBNET_CONV=4<line_sep>LEVEL=['P3' 'P4' 'P5' 'P6' 'P7']<line_sep>BASE_ANCHOR_SIZE_LIST=[32 64 128 256 512]<line_sep>ANCHOR_STRIDE=[8 16 32 64 128]<line_sep>ANCHOR_SCALES=[2<power>0 2<power>(1.0/3.0) 2<power>(2.0/3.0)]<line_sep>ANCHOR_RATIOS=[1 1/2 2. 1/3. 3. 5. 1/5.]<line_sep>ANCHOR_ANGLES=[-90 -75 -60 -45 -30 -15]<line_sep>ANCHOR_SCALE_FACTORS=<none><line_sep>USE_CENTER_OFFSET=<true><line_sep>METHOD='H'<line_sep>ANGLE_RANGE=90# 90 or 180 USE_GN=<false><line_sep>SUBNETS_WEIGHTS_INITIALIZER=tf.random_normal_initializer(mean=0.0 stddev=0.01 seed=<none>)<line_sep>SUBNETS_BIAS_INITIALIZER=tf.constant_initializer(value=0.0)<line_sep>PROBABILITY=0.01<line_sep>FINAL_CONV_BIAS_INITIALIZER=tf.constant_initializer(value=-math.log((1.0-PROBABILITY)/PROBABILITY))<line_sep># loss CLS_WEIGHT=1.0<line_sep>REG_WEIGHT=1.0<line_sep># sample IOU_POSITIVE_THRESHOLD=0.5<line_sep>IOU_NEGATIVE_THRESHOLD=0.4<line_sep># post-processing NMS=<true><line_sep>NMS_IOU_THRESHOLD=0.3<line_sep>MAXIMUM_DETECTIONS=100<line_sep>FILTERED_SCORE=0.05<line_sep>VIS_SCORE=0.4<line_sep># test and eval TEST_SAVE_PATH=os.path.join(ROOT_PATH 'tools/test_result')<line_sep>EVALUATE_R_DIR=os.path.join(ROOT_PATH 'output/evaluate_result_pickle/')<line_sep>USE_07_METRIC=<true><line_sep>EVAL_THRESHOLD=0.5<line_sep>
<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<import_stmt>json<import_stmt>time<line_sep>dynamodb=boto3.resource('dynamodb' region_name='us-east-1')<line_sep>ddb_table=dynamodb.Table('GomokuPlayerInfo')<def_stmt>lambda_handler event context<block_start>print(event)<line_sep># You can also use TicketId to track Matchmaking Event. ticket_id=event['TicketId']<line_sep>player_name=event['PlayerName']<line_sep>response={'IpAddress':'' 'PlayerSessionId':'' 'Port':0}<try_stmt><block_start>match_response=ddb_table.get_item(TableName='GomokuPlayerInfo' Key={'PlayerName':player_name})<if_stmt>'Item'<in>match_response<block_start>print(match_response['Item'])<line_sep>connection_info=json.loads(match_response['Item']['ConnectionInfo'])<if_stmt>connection_info['status']<eq>'matching'<block_start>response['IpAddress']=connection_info['IpAddress']<line_sep>response['Port']=connection_info['Port']<line_sep>response['PlayerSessionId']=connection_info['PlayerSessionId']<line_sep>connection_update={'IpAddress':connection_info['IpAddress'] 'Port':connection_info['Port'] 'PlayerSessionId':connection_info['PlayerSessionId'] 'timestamp':int(time.time()) 'status':'complete'}<line_sep>ddb_table.update_item(TableName="GomokuPlayerInfo" Key={'PlayerName':player_name} UpdateExpression="set ConnectionInfo = :connection_update" ExpressionAttributeValues={':connection_update':""+json.dumps(connection_update) } ReturnValues="UPDATED_NEW")<block_end><block_end><block_end><except_stmt>ClientError<as>e<block_start>print(e.response['Error']['Message'])<block_end>print(response)<line_sep><return>response<block_end>
"""This problem was asked by Pinterest. At a party, there is a single person who everyone knows, but who does not know anyone in return (the "celebrity"). To help figure out who this is, you have access to an O(1) method called knows(a, b), which returns True if person a knows person b, else False. Given a list of N people and the above operation, find a way to identify the celebrity in O(N) time. """<line_sep>
<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>chainer<import_from_stmt>siam_rpn.general.eval_sot_vot eval_sot_vot<import_from_stmt>siam_rpn.siam_rpn SiamRPN<import_from_stmt>siam_rpn.siam_rpn_tracker SiamRPNTracker<import_from_stmt>siam_rpn.siam_mask_tracker SiamMaskTracker<import_from_stmt>siam_rpn.general.vot_tracking_dataset VOTTrackingDataset<import_from_stmt>chainercv.utils apply_to_iterator<import_from_stmt>chainercv.utils ProgressHook<import_from_stmt>chainer iterators<import_from_stmt>siam_rpn.general.predictor_with_gt PredictorWithGT<def_stmt>collate_images_from_same_video data used_ids=<none><block_start>imgs=data.slice[: 'img']<line_sep>polys=data.slice[: 'poly']<line_sep>video_ids=data.slice[: 'video_id']<line_sep>frame_ids=data.slice[: 'frame_id']<if_stmt>used_ids<is><none><block_start>used_ids=np.unique(video_ids)<line_sep>np.sort(used_ids)<block_end>videos=[]<line_sep>video_polys=[]<for_stmt>video_id used_ids<block_start>indices=np.where(video_ids<eq>video_id)[0]<line_sep>the_frame_ids=list(frame_ids.slice[indices])<assert_stmt>all(list(the_frame_ids)<eq>np.arange(len(the_frame_ids)))<line_sep>videos.append(imgs.slice[indices])<line_sep>video_polys.append(polys[indices])<block_end><return>videos video_polys<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--pretrained-model' type=str)<line_sep>parser.add_argument('--gpu' type=int default=-1)<line_sep>parser.add_argument('--mask' action='store_true')<line_sep>args=parser.parse_args()<line_sep>data=VOTTrackingDataset('data')<if_stmt>args.mask<block_start>model=SiamRPN(multi_scale=<false> mask=<true>)<line_sep>chainer.serializers.load_npz(args.pretrained_model model)<line_sep>tracker=SiamMaskTracker(model)<block_end><else_stmt><block_start>model=SiamRPN()<line_sep>chainer.serializers.load_npz(args.pretrained_model model)<line_sep>tracker=SiamRPNTracker(model)<block_end><if_stmt>args.gpu<ge>0<block_start>chainer.cuda.get_device_from_id(args.gpu).use()<line_sep>tracker.to_gpu()<block_end>videos,video_polys=collate_images_from_same_video(data used_ids=<none>)<line_sep>video_dataset=chainer.datasets.TupleDataset(videos video_polys)<line_sep>it=iterators.SerialIterator(video_dataset 1 <false> <false>)<line_sep>in_values,out_values,rest_values=apply_to_iterator(PredictorWithGT(tracker mask=args.mask) it n_input=2 hook=ProgressHook(len(video_dataset)))<line_sep># delete unused iterators explicitly imgs,video_polys=in_values<line_sep>pred_bboxes,pred_statuses,sizes=out_values<del_stmt>imgs<line_sep>video_polys=list(video_polys)<line_sep>pred_bboxes=list(pred_bboxes)<line_sep>pred_statuses=list(pred_statuses)<line_sep>sizes=list(sizes)<line_sep>np.savez('eval_sot_out.npz' pred_bboxes=pred_bboxes pred_statuses=pred_statuses gt_polys=video_polys sizes=sizes)<line_sep>result=eval_sot_vot(pred_bboxes pred_statuses video_polys sizes)<line_sep>print(result['eao'] result['accuracy'] result['robustness'])<block_end>
<import_from_stmt>behave given then<import_from_stmt>smoke.features.steps.openshift Openshift<import_from_stmt>kubernetes client config<line_sep>oc=Openshift()<line_sep>v1=client.CoreV1Api()<line_sep>@then(u'we delete deploymentconfig.apps.openshift.io "jenkins"')<def_stmt>del_dc context<block_start>res=oc.delete("deploymentconfig" "jenkins" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'we delete route.route.openshift.io "jenkins"')<def_stmt>del_route context<block_start>res=oc.delete("route" "jenkins" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete configmap "jenkins-trusted-ca-bundle"')<def_stmt>del_cm context<block_start>res=oc.delete("configmap" "jenkins-trusted-ca-bundle" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete serviceaccount "jenkins"')<def_stmt>del_sa context<block_start>res=oc.delete("serviceaccount" "jenkins" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete rolebinding.authorization.openshift.io "jenkins_edit"')<def_stmt>del_rb context<block_start>res=oc.delete("rolebinding" "jenkins_edit" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete service "jenkins"')<def_stmt>del_svc context<block_start>res=oc.delete("service" "jenkins" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete service "jenkins-jnlp"')<def_stmt>del_svc_jnlp context<block_start>res=oc.delete("service" "jenkins-jnlp" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete all buildconfigs')<def_stmt>del_bc context<block_start>res=oc.delete("bc" "--all" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete all builds')<def_stmt>del_builds context<block_start>res=oc.delete("builds" "--all" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete all deploymentconfig')<def_stmt>del_alldc context<block_start>res=oc.delete("deploymentconfig" "--all" context.current_project)<if_stmt>res<eq><none><block_start><raise>AssertionError<block_end><block_end>@then(u'delete all remaining test resources')@given(u'cleared from all test resources')<def_stmt>del_all_remaining_test_resources context<block_start>delete_command="all,rolebindings.authorization.openshift.io,bc,cm,is,pvc,sa,secret"<line_sep>oc.delete(delete_command "-l app=jenkins-ephemeral" context.current_project)<line_sep>oc.delete(delete_command "-l app=jenkins-persistent" context.current_project)<line_sep>oc.delete(delete_command "-l app=openshift-jee-sample" context.current_project)<line_sep>oc.delete(delete_command "-l app=jenkins-pipeline-example" context.current_project)<block_end>
# -*- coding: utf-8 -*- <import_stmt>pytest<import_from_stmt>giraffez._teradata RequestEnded StatementEnded StatementInfoEnded<import_stmt>giraffez<import_from_stmt>giraffez.constants *<import_from_stmt>giraffez.errors *<import_from_stmt>giraffez.types *<class_stmt>ResultsHelper<block_start>""" Helps to emulate how exceptions are raised when working with the CLIv2 so that the control flow will be adequately represented. """<def_stmt>__init__ self rows<block_start>self.first=<true><line_sep>self.index=0<line_sep>self.rows=rows<block_end><def_stmt>get self<block_start><if_stmt>self.first<block_start>self.first=<false><line_sep><raise>StatementInfoEnded<block_end><if_stmt>self.index<ge>len(self.rows)<block_start><raise>RequestEnded<block_end>row=self.rows[self.index]<line_sep>self.index<augadd>1<line_sep><return>row<block_end><def_stmt>__call__ self<block_start><return>self.get()<block_end><block_end>@pytest.mark.usefixtures('config' 'context')<class_stmt>TestCmd(object)<block_start><def_stmt>test_results self mocker<block_start>connect_mock=mocker.patch('giraffez.cmd.TeradataCmd._connect')<line_sep>mock_columns=mocker.patch("giraffez.cmd.Cursor._columns")<line_sep>cmd=giraffez.Cmd()<line_sep>query="select * from db1.info"<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns.return_value=columns<line_sep>rows=[["value1" "value2" "value3"] ["value1" "value2" "value3"] ["value1" "value2" "value3"] ]<line_sep>expected_rows=[{"col1":"value1" "col2":"value2" "col3":"value3"} {"col1":"value1" "col2":"value2" "col3":"value3"} {"col1":"value1" "col2":"value2" "col3":"value3"} ]<line_sep>cmd.cmd=mocker.MagicMock()<line_sep>cmd.cmd.fetchone.side_effect=ResultsHelper(rows)<line_sep>result=list(cmd.execute(query))<assert_stmt>[x.items()<for>x result]<eq>expected_rows<line_sep>cmd._close()<line_sep># This ensures that the config was proper mocked connect_mock.assert_called_with('db1' 'user123' '<PASSWORD>' <none> <none>)<block_end><def_stmt>test_invalid_credentials self mocker<block_start>connect_mock=mocker.patch('giraffez.cmd.TeradataCmd._connect')<line_sep>connect_mock.side_effect=InvalidCredentialsError("test")<with_stmt>pytest.raises(InvalidCredentialsError)<block_start>cmd=giraffez.Cmd(protect=<true>)<line_sep>cmd._close()<block_end><block_end><block_end>@pytest.mark.usefixtures('config' 'context' 'tmpfiles')<class_stmt>TestInsert(object)<block_start><def_stmt>test_insert_from_file self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>rows=[]<for_stmt>i range(100)<block_start>rows.append("|".join(["value1" "value2" "value3"]))<block_end>f.write("\n".join(rows))<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<block_end><assert_stmt>result.get('count')<eq>100<block_end><def_stmt>test_insert_from_file_quoted self mocker 
tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>rows=[]<for_stmt>i range(99)<block_start>rows.append("|".join(["value1" "value2" "value3"]))<block_end>rows.append("|".join(["value1" '"value2|withpipe"' "value3"]))<line_sep>f.write("\n".join(rows))<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<block_end><assert_stmt>result.get('count')<eq>100<block_end><def_stmt>test_insert_from_file_single_quoted self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>rows=[]<for_stmt>i range(99)<block_start>rows.append("|".join(["value1" "value2" "value3"]))<block_end>rows.append("|".join(["value1" "'value2|withpipe'" "value3"]))<line_sep>f.write("\n".join(rows))<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|" quotechar="'")<block_end><assert_stmt>result.get('count')<eq>100<block_end><def_stmt>test_insert_from_file_nonstandard_quote self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>rows=[]<for_stmt>i range(99)<block_start>rows.append("|".join(["value1" "value2" "value3"]))<block_end>rows.append("|".join(['va"lue1' '$value2|withpipe"and"quote$' "value3"]))<line_sep>f.write("\n".join(rows))<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|" quotechar="$")<block_end><assert_stmt>result.get('count')<eq>100<block_end><def_stmt>test_insert_from_file_error self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>f.write("|".join(["value1" "value2" "value3" 
"value4"]))<line_sep>f.write("\n")<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start>cmd.panic=<false><line_sep>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<block_end><block_end><def_stmt>test_insert_from_file_error_panic self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3"]))<line_sep>f.write("\n")<line_sep>f.write("|".join(["value1" "value2" "value3" "value4"]))<line_sep>f.write("\n")<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start><with_stmt>pytest.raises(GiraffeEncodeError)<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<line_sep>print(result)<block_end><block_end><block_end><def_stmt>test_insert_from_file_invalid_header self mocker tmpfiles<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<line_sep># Invalid column (blank string) <with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3" "" ""]))<line_sep>f.write("\n")<line_sep>f.write("|".join(["value1" "value2" "value3"]))<line_sep>f.write("\n")<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start><with_stmt>pytest.raises(GiraffeError)<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<line_sep>print(result)<block_end><block_end># Invalid column (wrong name) <with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col4"]))<line_sep>f.write("\n")<line_sep>f.write("|".join(["value1" "value2" "value3"]))<line_sep>f.write("\n")<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start><with_stmt>pytest.raises(GiraffeError)<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<line_sep>print(result)<block_end><block_end># Too many columns (duplicate name) <with_stmt>open(tmpfiles.load_file 'w')<as>f<block_start>f.write("|".join(["col1" "col2" "col3" "col3"]))<line_sep>f.write("\n")<line_sep>f.write("|".join(["value1" "value2" "value3"]))<line_sep>f.write("\n")<block_end><with_stmt>giraffez.Cmd()<as>cmd<block_start><with_stmt>pytest.raises(GiraffeEncodeError)<block_start>result=cmd.insert("db1.test" tmpfiles.load_file delimiter="|")<line_sep>print(result)<block_end><block_end><block_end><def_stmt>test_insert_insert_no_specify_fields self mocker<block_start>mock_connect=mocker.patch("giraffez.cmd.TeradataCmd._connect")<line_sep>mock_execute=mocker.patch("giraffez.cmd.TeradataCmd.execute")<line_sep>columns=Columns([("col1" VARCHAR_NN 50 0 0) ("col2" VARCHAR_N 50 0 0) ("col3" VARCHAR_N 50 0 0) ])<line_sep>mock_columns=mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")<line_sep>mock_columns.return_value=columns<line_sep>rows=[("value1" "value3") ("value1" "value3") ("value1" "value3") ]<with_stmt>giraffez.Cmd()<as>cmd<block_start><with_stmt>pytest.raises(GiraffeEncodeError)<block_start>cmd.insert("db1.test" 
rows)<block_end><block_end><block_end><block_end>
# terrascript/oneandone/__init__.py <import_stmt>terrascript<class_stmt>oneandone(terrascript.Provider)<block_start><pass><block_end>
<import_stmt>nltk<import_stmt>json<import_stmt>numpy<as>np<import_from_stmt>nltk word_tokenize<import_stmt>triton_python_backend_utils<as>pb_utils<class_stmt>TritonPythonModel<block_start>"""Your Python model must use the same class name. Every Python model that is created must have "TritonPythonModel" as the class name. """<def_stmt>initialize self args<block_start>"""`initialize` is called only once when the model is being loaded. Implementing `initialize` function is optional. This function allows the model to intialize any state associated with this model. Parameters ---------- args : dict Both keys and values are strings. The dictionary keys and values are: * model_config: A JSON string containing the model configuration * model_instance_kind: A string containing model instance kind * model_instance_device_id: A string containing model instance device ID * model_repository: Model repository path * model_version: Model version * model_name: Model name """<line_sep># You must parse model_config. JSON string is not parsed here self.model_config=model_config=json.loads(args["model_config"])<line_sep># Get OUTPUT0 configuration output0_config=pb_utils.get_output_config_by_name(model_config "OUTPUT0")<line_sep># Get OUTPUT1 configuration output1_config=pb_utils.get_output_config_by_name(model_config "OUTPUT1")<line_sep># Get OUTPUT2 configuration output2_config=pb_utils.get_output_config_by_name(model_config "OUTPUT2")<line_sep># Get OUTPUT3 configuration output3_config=pb_utils.get_output_config_by_name(model_config "OUTPUT3")<line_sep># Convert Triton types to numpy types self.output0_dtype=pb_utils.triton_string_to_numpy(output0_config["data_type"])<line_sep>self.output1_dtype=pb_utils.triton_string_to_numpy(output1_config["data_type"])<line_sep>self.output2_dtype=pb_utils.triton_string_to_numpy(output2_config["data_type"])<line_sep>self.output3_dtype=pb_utils.triton_string_to_numpy(output3_config["data_type"])<line_sep># Get model repository path to read labels self.model_repository=model_repository=args["model_repository"]<line_sep>print(model_repository)<line_sep># Initialize tokenizer nltk.download("punkt")<block_end><def_stmt>tokenize self text<block_start>tokens=word_tokenize(text)<line_sep># split into lower-case word tokens, in numpy array with shape of (seq, 1) words=np.array([w.lower()<for>w tokens] dtype=np.object_).reshape(-1 1)<line_sep># split words into chars, in numpy array with shape of (seq, 1, 1, 16) chars=[[c<for>c t][:16]<for>t tokens]<line_sep>chars=[cs+[""]<times>(16-len(cs))<for>cs chars]<line_sep>chars=np.array(chars dtype=np.object_).reshape(-1 1 1 16)<line_sep><return>words chars<block_end><def_stmt>execute self requests<block_start>""" Parameters ---------- requests : list A list of pb_utils.InferenceRequest Returns ------- list A list of pb_utils.InferenceResponse. The length of this list must be the same as `requests` """<line_sep>output0_dtype=self.output0_dtype<line_sep>output1_dtype=self.output1_dtype<line_sep>output2_dtype=self.output2_dtype<line_sep>output3_dtype=self.output3_dtype<line_sep>responses=[]<line_sep># Every Python backend must iterate over everyone of the requests # and create a pb_utils.InferenceResponse for each of them. 
<for_stmt>request requests# Get INPUT0 <block_start>in_0=pb_utils.get_input_tensor_by_name(request "INPUT0")<line_sep>context=in_0.as_numpy().astype(str)<line_sep>print(context)<line_sep># Get INPUT1 in_0=pb_utils.get_input_tensor_by_name(request "INPUT1")<line_sep>query=in_0.as_numpy().astype(str)<line_sep>print(query)<line_sep>cw,cc=self.tokenize(context[0])<line_sep>qw,qc=self.tokenize(query[0])<line_sep>out_0=np.array(qw dtype=output0_dtype)<line_sep>out_1=np.array(cc dtype=output1_dtype)<line_sep>out_2=np.array(qc dtype=output2_dtype)<line_sep>out_3=np.array(cw dtype=output3_dtype)<line_sep># Create output tensors. You need pb_utils.Tensor objects to create pb_utils.InferenceResponse. out_tensor_0=pb_utils.Tensor("OUTPUT0" out_0)<line_sep>out_tensor_1=pb_utils.Tensor("OUTPUT1" out_1)<line_sep>out_tensor_2=pb_utils.Tensor("OUTPUT2" out_2)<line_sep>out_tensor_3=pb_utils.Tensor("OUTPUT3" out_3)<line_sep>inference_response=pb_utils.InferenceResponse(output_tensors=[out_tensor_0 out_tensor_1 out_tensor_2 out_tensor_3])<line_sep>responses.append(inference_response)<block_end><return>responses<block_end><def_stmt>finalize self<block_start>"""`finalize` is called only once when the model is being unloaded. Implementing `finalize` function is OPTIONAL. This function allows the model to perform any necessary clean ups before exit. """<line_sep>print("Cleaning up...")<block_end><block_end>
<def_stmt>main <block_start><return>'THIS_IS_MAIN_MAIN'<block_end><def_stmt>func <block_start><return>'THIS_IS_MAIN_FUNC'<block_end>
<import_from_stmt>typing List<import_from_stmt>fastapi Depends FastAPI<import_from_stmt>fastapi.testclient TestClient<import_from_stmt>pydantic BaseModel<line_sep>app=FastAPI()<line_sep>client=TestClient(app)<class_stmt>Item(BaseModel)<block_start>data:str<block_end><def_stmt>duplicate_dependency item:Item<block_start><return>item<block_end><def_stmt>dependency item2:Item<block_start><return>item2<block_end><def_stmt>sub_duplicate_dependency item:Item sub_item:Item=Depends(duplicate_dependency)<block_start><return>[item sub_item]<block_end>@app.post("/with-duplicates")<async_keyword><def_stmt>with_duplicates item:Item item2:Item=Depends(duplicate_dependency)<block_start><return>[item item2]<block_end>@app.post("/no-duplicates")<async_keyword><def_stmt>no_duplicates item:Item item2:Item=Depends(dependency)<block_start><return>[item item2]<block_end>@app.post("/with-duplicates-sub")<async_keyword><def_stmt>no_duplicates_sub item:Item sub_items:List[Item]=Depends(sub_duplicate_dependency)<block_start><return>[item sub_items]<block_end>openapi_schema={"openapi":"3.0.2" "info":{"title":"FastAPI" "version":"0.1.0"} "paths":{"/with-duplicates":{"post":{"summary":"With Duplicates" "operationId":"with_duplicates_with_duplicates_post" "requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/Item"}}} "required":<true> } "responses":{"200":{"description":"Successful Response" "content":{"application/json":{"schema":{}}} } "422":{"description":"Validation Error" "content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}} } } }} "/no-duplicates":{"post":{"summary":"No Duplicates" "operationId":"no_duplicates_no_duplicates_post" "requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/Body_no_duplicates_no_duplicates_post"}}} "required":<true> } "responses":{"200":{"description":"Successful Response" "content":{"application/json":{"schema":{}}} } "422":{"description":"Validation Error" "content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}} } } }} "/with-duplicates-sub":{"post":{"summary":"No Duplicates Sub" "operationId":"no_duplicates_sub_with_duplicates_sub_post" "requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/Item"}}} "required":<true> } "responses":{"200":{"description":"Successful Response" "content":{"application/json":{"schema":{}}} } "422":{"description":"Validation Error" "content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}} } } }} } "components":{"schemas":{"Body_no_duplicates_no_duplicates_post":{"title":"Body_no_duplicates_no_duplicates_post" "required":["item" "item2"] "type":"object" "properties":{"item":{"$ref":"#/components/schemas/Item"} "item2":{"$ref":"#/components/schemas/Item"} } } "HTTPValidationError":{"title":"HTTPValidationError" "type":"object" "properties":{"detail":{"title":"Detail" "type":"array" "items":{"$ref":"#/components/schemas/ValidationError"} }} } "Item":{"title":"Item" "required":["data"] "type":"object" "properties":{"data":{"title":"Data" "type":"string"}} } "ValidationError":{"title":"ValidationError" "required":["loc" "msg" "type"] "type":"object" "properties":{"loc":{"title":"Location" "type":"array" "items":{"type":"string"} } "msg":{"title":"Message" "type":"string"} "type":{"title":"Error Type" "type":"string"} } } }} }<def_stmt>test_openapi_schema <block_start>response=client.get("/openapi.json")<assert_stmt>response.status_code<eq>200 
response.text<assert_stmt>response.json()<eq>openapi_schema<block_end><def_stmt>test_no_duplicates_invalid <block_start>response=client.post("/no-duplicates" json={"item":{"data":"myitem"}})<assert_stmt>response.status_code<eq>422 response.text<assert_stmt>response.json()<eq>{"detail":[{"loc":["body" "item2"] "msg":"field required" "type":"value_error.missing" }]}<block_end><def_stmt>test_no_duplicates <block_start>response=client.post("/no-duplicates" json={"item":{"data":"myitem"} "item2":{"data":"myitem2"}} )<assert_stmt>response.status_code<eq>200 response.text<assert_stmt>response.json()<eq>[{"data":"myitem"} {"data":"myitem2"}]<block_end><def_stmt>test_duplicates <block_start>response=client.post("/with-duplicates" json={"data":"myitem"})<assert_stmt>response.status_code<eq>200 response.text<assert_stmt>response.json()<eq>[{"data":"myitem"} {"data":"myitem"}]<block_end><def_stmt>test_sub_duplicates <block_start>response=client.post("/with-duplicates-sub" json={"data":"myitem"})<assert_stmt>response.status_code<eq>200 response.text<assert_stmt>response.json()<eq>[{"data":"myitem"} [{"data":"myitem"} {"data":"myitem"}] ]<block_end>
<import_from_stmt>django forms<import_from_stmt>.models UserProfile<class_stmt>ProfileForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=UserProfile<line_sep>fields=['name' 'photo']<line_sep>widgets={'name':forms.TextInput(attrs={'class':'form-control'}) 'photo':forms.FileInput(attrs={'class':'form-control'}) }<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<def_stmt>runif_check <block_start>fr=h2o.H2OFrame([[r]<for>r range(1 1001)])<line_sep>runif1=fr[0].runif(1234)<line_sep>runif2=fr[0].runif(1234)<line_sep>runif3=fr[0].runif(42)<assert_stmt>(runif1<eq>runif2).all() "Expected runif with the same seeds to return the same values."<assert_stmt><not>(runif1<eq>runif3).all() "Expected runif with different seeds to return different values."<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(runif_check)<block_end><else_stmt><block_start>runif_check()<block_end>
<import_stmt>sys<line_sep>sys.path.insert(1 "../../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<def_stmt>frame_as_list <block_start>prostate=h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))<line_sep>(prostate%10).show()<line_sep>(prostate[4]%10).show()<line_sep>airlines=h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))<line_sep>(airlines["CRSArrTime"]%100).show()<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(frame_as_list)<block_end><else_stmt><block_start>frame_as_list()<block_end>