# Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("scripts", "0012_auto_20190128_1820")]

    operations = [
        migrations.AlterField(
            model_name="scriptdb",
            name="db_typeclass_path",
            field=models.CharField(
                db_index=True,
                help_text="this defines what 'type' of entity this is. This "
                          "variable holds a Python path to a module with a "
                          "valid Evennia Typeclass.",
                max_length=255,
                null=True,
                verbose_name="typeclass",
            ),
        )
    ]
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.

The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.

Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno


def test_autodetect():
    data = 'Émetteur'
    qr = segno.make(data)
    assert qr.mode == 'byte'


def test_encoding():
    encoding = 'iso-8859-15'
    data = 'Émetteur'
    qr = segno.make(data.encode(encoding))
    assert qr.mode == 'byte'
    qr2 = segno.make(data, encoding=encoding)
    assert qr2 == qr


if __name__ == '__main__':
    import pytest
    pytest.main([__file__])
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika


def test_empty(mock_osp):
    """
    Should return None if the file is empty.
    """
    path = mock_osp.add_file(content='', ftype='plain')
    syllabus = Syllabus(path)
    assert syllabus.text is None


def test_plaintext(mock_osp):
    """
    Should extract text from vanilla text files.
    """
    path = mock_osp.add_file(content='text', ftype='plain')
    syllabus = Syllabus(path)
    assert syllabus.text == 'text'


def test_html(mock_osp):
    """
    Should extract text from HTML files.
    """
    path = mock_osp.add_file(content='<p>text</p>', ftype='html')
    syllabus = Syllabus(path)
    assert syllabus.text == 'text'


def test_pdf(mock_osp):
    """
    Should extract text from PDF files.
    """
    path = mock_osp.add_file(content='text', ftype='pdf')
    syllabus = Syllabus(path)
    assert syllabus.text.strip() == 'text'


@requires_tika
def test_office(mock_osp):
    """
    Should extract text from office files.
    """
    path = mock_osp.add_file(content='text', ftype='docx')
    syllabus = Syllabus(path)
    assert syllabus.text.strip() == 'text'
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize

from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants

from simple_network import SimpleNetwork


class Cifar10Validator(Executor):

    def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
        super(Cifar10Validator, self).__init__()

        self._validate_task_name = validate_task_name

        # Setup the model
        self.model = SimpleNetwork()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.model.to(self.device)

        # Preparing the dataset for testing.
        transforms = Compose([
            ToTensor(),
            Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
        self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)

    def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        if task_name == self._validate_task_name:
            model_owner = "?"
            try:
                try:
                    dxo = from_shareable(shareable)
                except:
                    self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
                    return make_reply(ReturnCode.BAD_TASK_DATA)

                # Ensure data_kind is weights.
                if not dxo.data_kind == DataKind.WEIGHTS:
                    self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
                    return make_reply(ReturnCode.BAD_TASK_DATA)

                # Extract weights and ensure they are tensor.
                model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
                weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}

                # Get validation accuracy
                val_accuracy = self.do_validation(weights, abort_signal)
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)

                self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
                                      f" {fl_ctx.get_identity_name()}"
                                      f"'s data: {val_accuracy}")

                dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
                return dxo.to_shareable()
            except:
                self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        else:
            return make_reply(ReturnCode.TASK_UNKNOWN)

    def do_validation(self, weights, abort_signal):
        self.model.load_state_dict(weights)
        self.model.eval()

        correct = 0
        total = 0
        with torch.no_grad():
            for i, (images, labels) in enumerate(self.test_loader):
                if abort_signal.triggered:
                    return 0

                images, labels = images.to(self.device), labels.to(self.device)
                output = self.model(images)

                _, pred_label = torch.max(output, 1)

                correct += (pred_label == labels).sum().item()
                total += images.size()[0]

            metric = correct / float(total)

        return metric
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data


class EditJobTemplateRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'EditJobTemplate')
        self.set_method('GET')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_StderrRedirectPath(self):
        return self.get_query_params().get('StderrRedirectPath')

    def set_StderrRedirectPath(self, StderrRedirectPath):
        self.add_query_param('StderrRedirectPath', StderrRedirectPath)

    def get_ClockTime(self):
        return self.get_query_params().get('ClockTime')

    def set_ClockTime(self, ClockTime):
        self.add_query_param('ClockTime', ClockTime)

    def get_CommandLine(self):
        return self.get_query_params().get('CommandLine')

    def set_CommandLine(self, CommandLine):
        self.add_query_param('CommandLine', CommandLine)

    def get_ArrayRequest(self):
        return self.get_query_params().get('ArrayRequest')

    def set_ArrayRequest(self, ArrayRequest):
        self.add_query_param('ArrayRequest', ArrayRequest)

    def get_PackagePath(self):
        return self.get_query_params().get('PackagePath')

    def set_PackagePath(self, PackagePath):
        self.add_query_param('PackagePath', PackagePath)

    def get_Mem(self):
        return self.get_query_params().get('Mem')

    def set_Mem(self, Mem):
        self.add_query_param('Mem', Mem)

    def get_StdoutRedirectPath(self):
        return self.get_query_params().get('StdoutRedirectPath')

    def set_StdoutRedirectPath(self, StdoutRedirectPath):
        self.add_query_param('StdoutRedirectPath', StdoutRedirectPath)

    def get_Variables(self):
        return self.get_query_params().get('Variables')

    def set_Variables(self, Variables):
        self.add_query_param('Variables', Variables)

    def get_RunasUser(self):
        return self.get_query_params().get('RunasUser')

    def set_RunasUser(self, RunasUser):
        self.add_query_param('RunasUser', RunasUser)

    def get_ReRunable(self):
        return self.get_query_params().get('ReRunable')

    def set_ReRunable(self, ReRunable):
        self.add_query_param('ReRunable', ReRunable)

    def get_Thread(self):
        return self.get_query_params().get('Thread')

    def set_Thread(self, Thread):
        self.add_query_param('Thread', Thread)

    def get_TemplateId(self):
        return self.get_query_params().get('TemplateId')

    def set_TemplateId(self, TemplateId):
        self.add_query_param('TemplateId', TemplateId)

    def get_Priority(self):
        return self.get_query_params().get('Priority')

    def set_Priority(self, Priority):
        self.add_query_param('Priority', Priority)

    def get_Gpu(self):
        return self.get_query_params().get('Gpu')

    def set_Gpu(self, Gpu):
        self.add_query_param('Gpu', Gpu)

    def get_Node(self):
        return self.get_query_params().get('Node')

    def set_Node(self, Node):
        self.add_query_param('Node', Node)

    def get_Task(self):
        return self.get_query_params().get('Task')

    def set_Task(self, Task):
        self.add_query_param('Task', Task)

    def get_Name(self):
        return self.get_query_params().get('Name')

    def set_Name(self, Name):
        self.add_query_param('Name', Name)

    def get_Queue(self):
        return self.get_query_params().get('Queue')

    def set_Queue(self, Queue):
        self.add_query_param('Queue', Queue)
import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os


def random_crop(imgs, out):
    """
    args:
        imgs: shape (B, C, H, W)
        out: output size (e.g. 84)
    """
    n, c, h, w = imgs.shape
    crop_max = h - out + 1
    w1 = np.random.randint(0, crop_max, n)
    h1 = np.random.randint(0, crop_max, n)
    cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
    for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
        cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
    return cropped


class KukaReachVisualEnv(gym.Env):
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 50
    }
    kMaxEpisodeSteps = 700
    kImageSize = {'width': 96, 'height': 96}
    kFinalImageSize = {'width': 84, 'height': 84}

    def __init__(self, is_render=False, is_good_view=False):
        self.is_render = is_render
        self.is_good_view = is_good_view

        if self.is_render:
            p.connect(p.GUI)
        else:
            p.connect(p.DIRECT)

        self.x_low_obs = 0.2
        self.x_high_obs = 0.7
        self.y_low_obs = -0.3
        self.y_high_obs = 0.3
        self.z_low_obs = 0
        self.z_high_obs = 0.55

        self.x_low_action = -0.4
        self.x_high_action = 0.4
        self.y_low_action = -0.4
        self.y_high_action = 0.4
        self.z_low_action = -0.6
        self.z_high_action = 0.3

        self.step_counter = 0

        self.urdf_root_path = pybullet_data.getDataPath()
        # lower limits for null space
        self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
        # upper limits for null space
        self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
        # joint ranges for null space
        self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
        # restposes for null space
        self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
        # joint damping coefficents
        self.joint_damping = [0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001]

        self.init_joint_positions = [
            0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684, -0.006539
        ]

        self.orientation = p.getQuaternionFromEuler([0., -math.pi, math.pi / 2.])

        self.camera_parameters = {
            'width': 960.,
            'height': 720,
            'fov': 60,
            'near': 0.1,
            'far': 100.,
            'eye_position': [0.59, 0, 0.8],
            'target_position': [0.55, 0, 0.05],
            'camera_up_vector': [1, 0, 0],  # I really do not know the parameter's effect.
            'light_direction': [0.5, 0, 1],  # the direction is from the light source position to the origin of the world frame.
        }

        self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[0.55, 0, 0.05],
            distance=.7,
            yaw=90,
            pitch=-70,
            roll=0,
            upAxisIndex=2)

        self.projection_matrix = p.computeProjectionMatrixFOV(
            fov=self.camera_parameters['fov'],
            aspect=self.camera_parameters['width'] / self.camera_parameters['height'],
            nearVal=self.camera_parameters['near'],
            farVal=self.camera_parameters['far'])

        p.configureDebugVisualizer(lightPosition=[5, 0, 5])
        p.resetDebugVisualizerCamera(cameraDistance=1.5,
                                     cameraYaw=0,
                                     cameraPitch=-40,
                                     cameraTargetPosition=[0.55, -0.35, 0.2])

        self.action_space = spaces.Box(
            low=np.array([self.x_low_action, self.y_low_action, self.z_low_action]),
            high=np.array([self.x_high_action, self.y_high_action, self.z_high_action]),
            dtype=np.float32)
        self.observation_space = spaces.Box(
            low=0, high=1,
            shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
        self.seed()
        self.reset()

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        self.step_counter = 0

        p.resetSimulation()
        # p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
        self.terminated = False
        p.setGravity(0, 0, -10)

        # These are the surrounding white lines, used to see whether the arm
        # has moved beyond the boundary of the observation space.
        p.addUserDebugLine(lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
                           lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
                           lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
                           lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
                           lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])

        p.addUserDebugLine(lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
                           lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
                           lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
                           lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
        p.addUserDebugLine(lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
                           lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])

        p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"), basePosition=[0, 0, -0.65])
        self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path, "kuka_iiwa/model.urdf"), useFixedBase=True)
        table_uid = p.loadURDF(os.path.join(self.urdf_root_path, "table/table.urdf"), basePosition=[0.5, 0, -0.65])
        p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
        self.object_id = p.loadURDF(os.path.join(self.urdf_root_path, "random_urdfs/000/000.urdf"),
                                    basePosition=[
                                        random.uniform(self.x_low_obs, self.x_high_obs),
                                        random.uniform(self.y_low_obs, self.y_high_obs),
                                        0.01
                                    ])

        self.num_joints = p.getNumJoints(self.kuka_id)

        for i in range(self.num_joints):
            p.resetJointState(bodyUniqueId=self.kuka_id,
                              jointIndex=i,
                              targetValue=self.init_joint_positions[i],
                              )

        self.robot_pos_obs = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]

        p.stepSimulation()

        (_, _, px, _, _) = p.getCameraImage(width=960,
                                            height=960,
                                            viewMatrix=self.view_matrix,
                                            projectionMatrix=self.projection_matrix,
                                            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        self.images = px

        p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
                                       jointIndex=self.num_joints - 1,
                                       enableSensor=True)

        self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
        self.images = self.images[:, :, :3]  # the 4th channel is alpha channel, we do not need it.

        return self._process_image(self.images)

    def _process_image(self, image):
        """Convert the RGB pic to gray pic and add a channel 1

        Args:
            image ([type]): [description]
        """
        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
            return image
        else:
            return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))

    def step(self, action):
        dv = 0.005
        dx = action[0] * dv
        dy = action[1] * dv
        dz = action[2] * dv

        self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
        self.new_robot_pos = [
            self.current_pos[0] + dx, self.current_pos[1] + dy, self.current_pos[2] + dz
        ]
        self.robot_joint_positions = p.calculateInverseKinematics(
            bodyUniqueId=self.kuka_id,
            endEffectorLinkIndex=self.num_joints - 1,
            targetPosition=[self.new_robot_pos[0], self.new_robot_pos[1], self.new_robot_pos[2]],
            targetOrientation=self.orientation,
            jointDamping=self.joint_damping,
        )
        for i in range(self.num_joints):
            p.resetJointState(bodyUniqueId=self.kuka_id,
                              jointIndex=i,
                              targetValue=self.robot_joint_positions[i],
                              )
        p.stepSimulation()

        # If is_good_view was set when the env was created, the arm moves more
        # slowly so it is easier to watch.
        if self.is_good_view:
            time.sleep(0.05)

        self.step_counter += 1

        return self._reward()

    def _reward(self):
        # Be sure to take the element at index 4; see the pybullet manual for
        # the description of this function's return values.
        self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
        self.object_state = np.array(
            p.getBasePositionAndOrientation(self.object_id)[0]).astype(np.float32)

        square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
        square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
        square_dz = (self.robot_state[2] - self.object_state[2]) ** 2

        # The distance between the end effector and the object is the basis of
        # the reward function.
        self.distance = sqrt(square_dx + square_dy + square_dz)
        # print(self.distance)

        x = self.robot_state[0]
        y = self.robot_state[1]
        z = self.robot_state[2]

        # If the end effector leaves the observation space, the episode is also
        # treated as done, and a small penalty is given.
        terminated = bool(x < self.x_low_obs or x > self.x_high_obs
                          or y < self.y_low_obs or y > self.y_high_obs
                          or z < self.z_low_obs or z > self.z_high_obs)

        if terminated:
            reward = -0.1
            self.terminated = True

        # If the arm idles and still has not reached the object within the
        # maximum number of steps, give a penalty as well.
        elif self.step_counter > self.kMaxEpisodeSteps:
            reward = -0.1
            self.terminated = True

        elif self.distance < 0.1:
            reward = 1
            self.terminated = True
        else:
            reward = 0
            self.terminated = False

        info = {'distance': self.distance}
        (_, _, px, _, _) = p.getCameraImage(width=960,
                                            height=960,
                                            viewMatrix=self.view_matrix,
                                            projectionMatrix=self.projection_matrix,
                                            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        self.images = px
        self.processed_image = self._process_image(self.images)
        # self.observation = self.robot_state
        self.observation = self.object_state
        return self.processed_image, reward, self.terminated, info

    def close(self):
        p.disconnect()

    def _get_force_sensor_value(self):
        force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
                                             jointIndex=self.num_joints - 1)[2][2]
        # the first 2 stands for jointReactionForces, the second 2 stands for Fz,
        # the pybullet methods' return is a tuple, so can not
        # index it with str like dict. I think it can be improved
        # that return value is a dict rather than tuple.
        return force_sensor_value


class CustomSkipFrame(gym.Wrapper):
    """Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)

    Args:
        gym ([type]): [description]
    """

    def __init__(self, env, skip=4):
        super(CustomSkipFrame, self).__init__(env)
        self.observation_space = spaces.Box(
            low=0, high=1,
            shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
        self.skip = skip

    def step(self, action):
        total_reward = 0
        states = []
        state, reward, done, info = self.env.step(action)
        for i in range(self.skip):
            if not done:
                state, reward, done, info = self.env.step(action)
                total_reward += reward
                states.append(state)
            else:
                states.append(state)
        states = np.concatenate(states, 0)[None, :, :, :]
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), reward, done, info

    def reset(self):
        state = self.env.reset()
        states = np.concatenate([state for _ in range(self.skip)], 0)[None, :, :, :]
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])


if __name__ == '__main__':
    # This part is a baseline: let the arm pick actions at random and see what
    # score it can get.
    import matplotlib.pyplot as plt
    env = KukaReachVisualEnv(is_render=False)
    env = CustomSkipFrame(env)

    print(env.observation_space.shape)
    print(env.action_space.shape)
    print(env.action_space.n)

    # for _ in range(20):
    #     action = env.action_space.sample()
    #     print(action)
    #     env.step(action)
    #
    # state = env.reset()
    # print(state.shape)
    # img = state[0][0]
    # plt.imshow(img, cmap='gray')
    # plt.show()
# Author: <NAME> <<EMAIL>>
#         <NAME> <<EMAIL>>
#
# License: BSD-3-Clause

import os.path as op

import numpy as np
from numpy.testing import assert_array_equal
import pytest

from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations

data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')


@testing.requires_testing_data
def test_data():
    """Test reading raw cnt files."""
    with pytest.warns(RuntimeWarning, match='number of bytes'):
        raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
                               eog='auto', misc=['NA1', 'LEFT_EAR'])

    # make sure we use annotations event if we synthesized stim
    assert len(raw.annotations) == 6

    eog_chs = pick_types(raw.info, eog=True, exclude=[])
    assert len(eog_chs) == 2  # test eog='auto'
    assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR']  # test bads

    # the data has "05/10/200 17:35:31" so it is set to None
    assert raw.info['meas_date'] is None


@testing.requires_testing_data
def test_compare_events_and_annotations():
    """Test comparing annotations and events."""
    with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
        raw = read_raw_cnt(fname)
    events = np.array([[333, 0, 7],
                       [1010, 0, 7],
                       [1664, 0, 109],
                       [2324, 0, 7],
                       [2984, 0, 109]])

    annot = read_annotations(fname)
    assert len(annot) == 6
    assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
    assert 'STI 014' not in raw.info['ch_names']
# This sample tests the type checker's reportUnnecessaryCast feature.

from typing import cast, Union


def foo(a: int):
    # This should generate an error if
    # reportUnnecessaryCast is enabled.
    b = cast(int, a)


c: Union[int, str] = "hello"
d = cast(int, c)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer


class Net(Cell):
    def __init__(self,
                 strategy1=None,
                 strategy2=None,
                 strategy3=None,
                 axis=0,
                 init_flag=True,
                 split_tuple=(4, 4),
                 split_string="manual_split",
                 param_shape=(8, 8)):
        super().__init__()
        self.gatherv2 = P.Gather().shard(strategy1)
        self.gatherv2.add_prim_attr(split_string, split_tuple)
        self.mul = P.Mul().shard(strategy2)
        self.reshape = P.Reshape()
        self.matmul = P.MatMul().shard(strategy3)
        self.matmul.add_prim_attr("forward_reduce_scatter", True)
        if init_flag:
            self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
        else:
            self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
        self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
        self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
        self.axis = axis

    def construct(self, x, b):
        out = self.gatherv2(self.param, x, self.axis)
        out = self.mul(out, self.mul_weight)
        out = self.reshape(out, (8, 64))
        out = self.matmul(out, self.matmul_weight)
        return out


_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)


def compile_net(net):
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    _cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
    context.reset_auto_parallel_context()


def test_normal_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    compile_net(net)


def test_normal_split2():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
    strategy1 = ((4, 1), (1, 4))
    strategy2 = ((1, 4, 1), (1, 4, 1))
    strategy3 = ((1, 4), (4, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
    compile_net(net)


def test_normal_split3():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
    strategy1 = ((4, 8), (1, 4))
    strategy2 = ((1, 4, 8), (1, 4, 8))
    strategy3 = ((1, 32), (32, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
    compile_net(net)


def test_normal_split_with_offset():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
    compile_net(net)


def test_auto_parallel_error():
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
    net = Net()
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_axis_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, axis=1)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((4, 1), (8, 1))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error2():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((4, 1), (1, 8))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error3():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error4():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 8), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_strategy_error5():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
    strategy1 = ((4, 1), (1, 4))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_split_tuple_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_parameter_use_tensor_error():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
    strategy1 = ((2, 1), (1, 2))
    strategy2 = ((1, 2, 1), (1, 2, 1))
    strategy3 = ((1, 2), (2, 1))
    net = Net(strategy1, strategy2, strategy3, init_flag=False)
    with pytest.raises(RuntimeError):
        compile_net(net)
from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute

import typing as t

from bot.models import Tag


class TagRoute(BaseRoute):

    def __init__(self, api_client: ApiClient):
        super().__init__(api_client)

    async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
        json = {
            'Name': name,
            'Content': content,
            'GuildId': guild_id,
            'UserId': user_id,
        }
        tag_dict = await self._client.post('tags', data=json, **kwargs)
        if not tag_dict:
            return None
        return Tag.from_dict(tag_dict)

    async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
        json = {'GuildId': guild_id, 'Name': name, 'Content': content}
        tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
        if not tag_dict:
            return None
        return Tag.from_dict(tag_dict)

    async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
        json = {'GuildId': guild_id, 'Name': name, 'UserId': user_id}
        tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
        if not tag_dict:
            return None
        return Tag.from_dict(tag_dict)

    async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
        json = {
            'GuildId': guild_id,
            'Name': name,
        }
        tag_dict = await self._client.get('bot/tags', data=json)
        if not tag_dict:
            return None
        return Tag.from_dict(tag_dict)

    async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
        json = {
            'GuildId': guild_id,
            'Name': name,
        }
        resp = await self._client.get('bot/tags', data=json)
        return None if resp is None else resp['content']

    async def delete_tag(self, guild_id: int, name: str, **kwargs):
        """
        Makes a call to the API to delete a tag w/ the given GuildId and Name.

        If successful, the API will return a dict with the given values:
        - name      The name of the tag.
        - content   The content of the tag.
        - guildId   The guild id the tag was in.
        """
        json = {
            'GuildId': guild_id,
            'Name': name,
        }
        return await self._client.delete('bot/tags', data=json, **kwargs)

    async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
        """
        Makes a call to the API to say a tag w/ the given Name was used.

        If successful, the API will return a dict with the given values:
        - name      The name of the tag.
        - guildId   The guild id the tag is in.
        """
        json = {'GuildId': guild_id, 'Name': name, 'ChannelId': channel_id, 'UserId': user_id}
        return await self._client.post('bot/tags/invoke', data=json)

    async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
        resp = await self._client.get(f'guilds/{guild_id}/tags')
        if not resp:
            return []
        return [Tag.from_dict(i) for i in resp['tags']]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz


def test_swap_network_trotter_hubbard_ansatz_param_bounds():
    ansatz = SwapNetworkTrotterHubbardAnsatz(3, 1, 1.0, 4.0, periodic=False)
    assert list(symbol.name for symbol in ansatz.params()) == [
        'Th_0', 'V_0',
    ]
    assert ansatz.param_bounds() == [(-2.0, 2.0), (-1.0, 1.0)]

    ansatz = SwapNetworkTrotterHubbardAnsatz(1, 4, 1.0, 4.0, periodic=False)
    assert list(symbol.name for symbol in ansatz.params()) == [
        'Tv_0', 'V_0',
    ]
    assert ansatz.param_bounds() == [(-2.0, 2.0), (-1.0, 1.0)]

    ansatz = SwapNetworkTrotterHubbardAnsatz(3, 2, 1.0, 4.0)
    assert list(symbol.name for symbol in ansatz.params()) == [
        'Th_0', 'Tv_0', 'V_0',
    ]
    assert ansatz.param_bounds() == [(-2.0, 2.0), (-2.0, 2.0), (-1.0, 1.0)]
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
#   pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.

FAKE_GPU_INFO = {
    'feature_status': {
        'flash_stage3d': 'enabled',
        'gpu_compositing': 'enabled',
        'video_decode': 'unavailable_software',
        'flash_3d': 'enabled',
        'webgl': 'enabled',
        'video_encode': 'enabled',
        'multiple_raster_threads': 'enabled_on',
        '2d_canvas': 'unavailable_software',
        'rasterization': 'disabled_software',
        'flash_stage3d_baseline': 'enabled'
    },
    'aux_attributes': {
        'optimus': False,
        'sandboxed': True,
        'basic_info_state': 1,
        'adapter_luid': 0.0,
        'driver_version': '331.79',
        'direct_rendering': True,
        'amd_switchable': False,
        'context_info_state': 1,
        'process_crash_count': 0,
        'pixel_shader_version': '4.40',
        'gl_ws_version': '1.4',
        'can_lose_context': False,
        'driver_vendor': 'NVIDIA',
        'max_msaa_samples': '64',
        'software_rendering': False,
        'gl_version': '4.4.0 NVIDIA 331.79',
        'gl_ws_vendor': 'NVIDIA Corporation',
        'vertex_shader_version': '4.40',
        'initialization_time': 1.284043,
        'gl_reset_notification_strategy': 33362,
        'gl_ws_extensions':
            'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
            'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
            'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
            'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
            'GLX_ARB_create_context GLX_ARB_create_context_profile '
            'GLX_EXT_create_context_es_profile '
            'GLX_EXT_create_context_es2_profile '
            'GLX_ARB_create_context_robustness GLX_ARB_multisample '
            'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
            ' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
            'GLX_NV_copy_image GLX_NV_video_capture ',
        'gl_renderer': 'Quadro 600/PCIe/SSE2',
        'driver_date': '',
        'gl_vendor': 'NVIDIA Corporation',
        'gl_extensions':
            'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
            'GL_ARB_base_instance GL_ARB_blend_func_extended '
            'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
            'GL_ARB_clear_texture GL_ARB_color_buffer_float '
            'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
            ' GL_ARB_conservative_depth GL_ARB_compute_shader '
            'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
            'GL_ARB_copy_image GL_ARB_debug_output '
            'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
            'GL_ARB_depth_texture GL_ARB_draw_buffers '
            'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
            'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
            'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
            'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
            'GL_ARB_explicit_uniform_location '
            'GL_ARB_fragment_coord_conventions '
            'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
            'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
            'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
            'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
            'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
            'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
            'GL_ARB_half_float_vertex GL_ARB_imaging '
            'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
            'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
            'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
            'GL_ARB_map_buffer_range GL_ARB_multi_bind '
            'GL_ARB_multi_draw_indirect GL_ARB_multisample '
            'GL_ARB_multitexture GL_ARB_occlusion_query '
            'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
            'GL_ARB_point_parameters GL_ARB_point_sprite '
            'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
            'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
            'GL_ARB_sample_shading GL_ARB_sampler_objects '
            'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
            'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
            'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
            'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
            'GL_ARB_shader_objects GL_ARB_shader_precision '
            'GL_ARB_query_buffer_object '
            'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
            ' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
            'GL_ARB_shading_language_420pack '
            'GL_ARB_shading_language_include '
            'GL_ARB_shading_language_packing GL_ARB_shadow '
            'GL_ARB_stencil_texturing GL_ARB_sync '
            'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
            'GL_ARB_texture_buffer_object '
            'GL_ARB_texture_buffer_object_rgb32 '
            'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
            'GL_ARB_texture_compression_bptc '
            'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
            'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
            'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
            'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
            'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
            'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
            'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
            'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
            'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
            'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
            'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
            'GL_ARB_texture_view GL_ARB_timer_query '
            'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
            'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
            'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
            'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
            'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
            'GL_ARB_vertex_program GL_ARB_vertex_shader '
            'GL_ARB_vertex_type_10f_11f_11f_rev '
            'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
            'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
            'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
            ' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
            'GL_EXT_blend_color GL_EXT_blend_equation_separate '
            'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
            'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
            'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
            'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
            'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
            'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
            'GL_EXT_framebuffer_multisample '
            'GL_EXTX_framebuffer_mixed_formats '
            'GL_EXT_framebuffer_multisample_blit_scaled '
            'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
            'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
            'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
            'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
            'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
            'GL_EXT_point_parameters GL_EXT_provoking_vertex '
            'GL_EXT_rescale_normal GL_EXT_secondary_color '
            'GL_EXT_separate_shader_objects '
            'GL_EXT_separate_specular_color '
            'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
            'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
            ' GL_EXT_texture_array GL_EXT_texture_buffer_object '
            'GL_EXT_texture_compression_dxt1 '
            'GL_EXT_texture_compression_latc '
            'GL_EXT_texture_compression_rgtc '
            'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
            'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
            'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
            'GL_EXT_texture_integer GL_EXT_texture_lod '
            'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
            'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
            'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
            'GL_EXT_texture_storage GL_EXT_texture_swizzle '
            'GL_EXT_timer_query GL_EXT_transform_feedback2 '
            'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
            'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
            'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
            'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
            'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
            'GL_NV_blend_equation_advanced GL_NV_blend_square '
            'GL_NV_compute_program5 GL_NV_conditional_render '
            'GL_NV_copy_depth_to_color GL_NV_copy_image '
            'GL_NV_depth_buffer_float GL_NV_depth_clamp '
            'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
            'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
            'GL_NV_fog_distance GL_NV_fragment_program '
            'GL_NV_fragment_program_option GL_NV_fragment_program2 '
            'GL_NV_framebuffer_multisample_coverage '
            'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
            'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
            'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
            'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
            'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
            'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
            'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
            ' GL_NV_path_rendering GL_NV_pixel_data_range '
            'GL_NV_point_sprite GL_NV_primitive_restart '
            'GL_NV_register_combiners GL_NV_register_combiners2 '
            'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
            'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
            'GL_ARB_sparse_texture GL_NV_texgen_reflection '
            'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
            'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
            'GL_NV_texture_multisample GL_NV_texture_rectangle '
            'GL_NV_texture_shader GL_NV_texture_shader2 '
            'GL_NV_texture_shader3 GL_NV_transform_feedback '
            'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
            'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
            'GL_NV_vertex_attrib_integer_64bit '
            'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
            'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
            'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
            'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
            'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
            'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
    },
    'devices': [
        {
            'device_string': '',
            'vendor_id': 4318.0,
            'device_id': 3576.0,
            'vendor_string': ''
        }
    ],
    'driver_bug_workarounds': ['clear_uniforms_before_first_program_use',
                               'disable_gl_path_rendering',
                               'init_gl_position_in_vertex_shader',
                               'init_vertex_attributes',
                               'remove_pow_with_constant_exponent',
                               'scalarize_vec_and_mat_constructor_args',
                               'use_current_program_after_successful_link',
                               'use_virtualized_gl_contexts']
}
from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage

DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'

_log = logging.getLogger(__name__)


class NVD(object):
    """Access to the National Vulnerability Database.

    https://nvd.nist.gov/
    """

    def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
        self.mirror = mirror.rstrip('/') + '/'
        self.cache_dir = p.expanduser(cache_dir)
        current = date.today().year
        self.available_archives = [y for y in range(current - 5, current + 1)]

    def lock(self):
        self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
        try:
            fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            _log.info('Waiting for NVD lock...')
        fcntl.lockf(self._lock, fcntl.LOCK_EX)

    def __enter__(self):
        """Keeps database connection open while in this context."""
        _log.debug('Opening database in %s', self.cache_dir)
        os.makedirs(self.cache_dir, exist_ok=True)
        self.lock()
        self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
            p.join(self.cache_dir, 'Data.fs')))
        self._connection = self._db.open()
        self._root = self._connection.root()
        try:
            self._root.setdefault('advisory', OOBTree.OOBTree())
            self._root.setdefault('by_product', OOBTree.OOBTree())
            self._root.setdefault('meta', Meta())
            # may trigger exceptions if the database is inconsistent
            list(self._root['by_product'].keys())
            if 'archives' in self._root:
                _log.warn('Pre-1.9.0 database found - rebuilding')
                self.reinit()
        except (TypeError, EOFError):
            _log.warn('Incompatible objects found in database - rebuilding DB')
            self.reinit()
        return self

    def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
        if exc_type is None:
            if self.meta.should_pack():
                _log.debug('Packing database')
                self._db.pack()
            transaction.commit()
        else:
            transaction.abort()
        self._connection.close()
        self._db.close()
        self._lock = None

    def reinit(self):
        """Remove old DB and rebuild it from scratch."""
        self._root = None
        transaction.abort()
        self._connection.close()
        self._db = None
        for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
            os.unlink(f)
        self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
            p.join(self.cache_dir, 'Data.fs')))
        self._connection = self._db.open()
        self._root = self._connection.root()
        self._root['advisory'] = OOBTree.OOBTree()
        self._root['by_product'] = OOBTree.OOBTree()
        self._root['meta'] = Meta()

    @property
    def meta(self):
        return self._root['meta']

    def relevant_archives(self):
        """Returns list of NVD archives to check.

        If there was an update within the last two hours, nothing is done.
        If the last update was recent enough to be covered by the
        'modified' feed, only that is checked. Else, all feeds are
        checked.
        """
        last_update = self.meta.last_update
        if last_update > datetime.now() - timedelta(hours=2):
            return []
        # the "modified" feed is sufficient if used frequently enough
        if last_update > datetime.now() - timedelta(days=7):
            return ['modified']
        return self.available_archives

    def update(self):
        """Download archives (if changed) and add CVEs to database."""
        changed = []
        for a in self.relevant_archives():
            arch = Archive(a)
            changed.append(arch.download(self.mirror, self.meta))
            self.add(arch)
        if any(changed):
            self.meta.last_update = datetime.now()
            self.reindex()

    def add(self, archive):
        advisories = self._root['advisory']
        for (cve_id, adv) in archive.items():
            advisories[cve_id] = adv

    def reindex(self):
        """Regenerate product index."""
        _log.info('Reindexing database')
        del self._root['by_product']
        bp = OOBTree.OOBTree()
        for vuln in self._root['advisory'].values():
            if vuln.nodes:
                for prod in (n.product for n in vuln.nodes):
                    bp.setdefault(prod, [])
                    bp[prod].append(vuln)
        self._root['by_product'] = bp
        transaction.commit()

    def by_id(self, cve_id):
        """Returns vuln or raises KeyError."""
        return self._root['advisory'][cve_id]

    def by_product(self, product):
        """Returns list of matching vulns or empty list."""
        try:
            return self._root['by_product'][product]
        except KeyError:
            return []

    def affected(self, pname, version):
        """Returns list of matching vulnerabilities."""
        res = set()
        for vuln in self.by_product(pname):
            if vuln.match(pname, version):
                res.add(vuln)
        return res


class Archive:
    """Single JSON data structure from NIST NVD."""

    def __init__(self, name):
        """Creates JSON feed object.

        `name` consists of a year or "modified".
        """
        self.name = name
        self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
        self.advisories = {}

    def download(self, mirror, meta):
        """Fetches compressed JSON data from NIST.

        Nothing is done if we have already seen the same version of the
        feed before.

        Returns True if anything has been loaded successfully.
        """
        url = mirror + self.download_uri
        _log.info('Loading %s', url)
        r = requests.get(url, headers=meta.headers_for(url))
        r.raise_for_status()
        if r.status_code == 200:
            _log.debug('Loading JSON feed "%s"', self.name)
            self.parse(gzip.decompress(r.content))
            meta.update_headers_for(url, r.headers)
            return True
        else:
            _log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
            return False

    def parse(self, nvd_json):
        added = 0
        raw = json.loads(nvd_json)
        for item in raw['CVE_Items']:
            try:
                vuln = Vulnerability.parse(item)
                self.advisories[vuln.cve_id] = vuln
                added += 1
            except ValueError:
                _log.debug('Failed to parse NVD item: %s', item)
        _log.debug("Added %s vulnerabilities", added)

    def items(self):
        return self.advisories.items()


class Meta(Persistent):
    """Metadate for database maintenance control"""

    pack_counter = 0
    last_update = datetime(1970, 1, 1)
    etag = None

    def should_pack(self):
        self.pack_counter += 1
        if self.pack_counter > 25:
            self.pack_counter = 0
            return True
        return False

    def headers_for(self, url):
        """Returns dict of additional request headers."""
        if self.etag and url in self.etag:
            return {'If-None-Match': self.etag[url]}
        return {}

    def update_headers_for(self, url, resp_headers):
        """Updates self from HTTP response headers."""
        if 'ETag' in resp_headers:
            if self.etag is None:
                self.etag = OOBTree.OOBTree()
            self.etag[url] = resp_headers['ETag']
<def_stmt>add_cswrapper filename outfilename=<none><block_start><import_from_stmt>fmpy read_model_description extract sharedLibraryExtension platform __version__<import_from_stmt>lxml etree<import_stmt>os<import_from_stmt>shutil copyfile rmtree<if_stmt>outfilename<is><none><block_start>outfilename=filename<block_end>model_description=read_model_description(filename)<if_stmt>model_description.fmiVersion<ne>'2.0'<block_start><raise>Exception("%s is not an FMI 2.0 FMU."%filename)<block_end><if_stmt>model_description.modelExchange<is><none><block_start><raise>Exception("%s does not support Model Exchange."%filename)<block_end>unzipdir=extract(filename)<line_sep>xml=os.path.join(unzipdir 'modelDescription.xml')<line_sep>tree=etree.parse(xml)<line_sep>root=tree.getroot()<line_sep># update description generation_tool=root.attrib.get('generationTool' 'Unknown')+" with FMPy %s Co-Simulation wrapper"%__version__<line_sep>root.attrib['generationTool']=generation_tool<line_sep># remove any existing <CoSimulation> element <for_stmt>e root.findall('CoSimulation')<block_start>root.remove(e)<block_end><for_stmt>i,child enumerate(root)<block_start><if_stmt>child.tag<eq>'ModelExchange'<block_start><break><block_end><block_end>model_identifier='%s_%s_%s'%(model_description.modelExchange.modelIdentifier model_description.numberOfContinuousStates model_description.numberOfEventIndicators)<line_sep>e=etree.Element("CoSimulation")<line_sep>e.attrib['modelIdentifier']=model_identifier<line_sep>root.insert(i+1 e)<line_sep>tree.write(xml pretty_print=<true> encoding='utf-8')<line_sep>shared_library=os.path.join(os.path.dirname(__file__) 'cswrapper'+sharedLibraryExtension)<line_sep>license_file=os.path.join(os.path.dirname(__file__) 'license.txt')<line_sep>licenses_dir=os.path.join(unzipdir 'documentation' 'licenses')<if_stmt><not>os.path.isdir(licenses_dir)<block_start>os.mkdir(licenses_dir)<block_end>copyfile(src=shared_library dst=os.path.join(unzipdir 'binaries' platform model_identifier+sharedLibraryExtension))<line_sep>copyfile(license_file os.path.join(unzipdir 'documentation' 'licenses' 'fmpy-cswrapper.txt'))<line_sep>create_zip_archive(outfilename unzipdir)<line_sep>rmtree(unzipdir ignore_errors=<true>)<block_end><def_stmt>create_zip_archive filename source_dir<block_start><import_stmt>zipfile<import_stmt>os<with_stmt>zipfile.ZipFile(filename 'w' zipfile.ZIP_DEFLATED)<as>zf<block_start>base_path=os.path.normpath(source_dir)<for_stmt>dirpath,dirnames,filenames os.walk(source_dir)<block_start><for_stmt>name sorted(dirnames)<block_start>path=os.path.normpath(os.path.join(dirpath name))<line_sep>zf.write(path os.path.relpath(path base_path))<block_end><for_stmt>name filenames<block_start>path=os.path.normpath(os.path.join(dirpath name))<if_stmt>os.path.isfile(path)<block_start>zf.write(path os.path.relpath(path base_path))<block_end><block_end><block_end><block_end><block_end>
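A hedged usage sketch for the wrapper function above; the FMU path is a placeholder, not a file shipped with FMPy.

# Add a Co-Simulation wrapper to a Model Exchange FMU, writing the result to a new file.
add_cswrapper('CoupledClutches.fmu', outfilename='CoupledClutches_cs.fmu')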
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>helpers unittest in_parse<import_stmt>luigi<import_stmt>luigi.interface<import_stmt>json<import_stmt>collections<class_stmt>DictParameterTask(luigi.Task)<block_start>param=luigi.DictParameter()<block_end><class_stmt>DictParameterTest(unittest.TestCase)<block_start>_dict=collections.OrderedDict([('username' 'me') ('password' '<PASSWORD>')])<def_stmt>test_parse self<block_start>d=luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))<line_sep>self.assertEqual(d DictParameterTest._dict)<block_end><def_stmt>test_serialize self<block_start>d=luigi.DictParameter().serialize(DictParameterTest._dict)<line_sep>self.assertEqual(d '{"username": "me", "password": "<PASSWORD>"}')<block_end><def_stmt>test_parse_and_serialize self<block_start>inputs=['{"username": "me", "password": "<PASSWORD>"}' '{"password": "<PASSWORD>", "username": "me"}']<for_stmt>json_input inputs<block_start>_dict=luigi.DictParameter().parse(json_input)<line_sep>self.assertEqual(json_input luigi.DictParameter().serialize(_dict))<block_end><block_end><def_stmt>test_parse_interface self<block_start>in_parse(["DictParameterTask" "--param" '{"username": "me", "password": "<PASSWORD>"}'] <lambda>task:self.assertEqual(task.param DictParameterTest._dict))<block_end><def_stmt>test_serialize_task self<block_start>t=DictParameterTask(DictParameterTest._dict)<line_sep>self.assertEqual(str(t) 'DictParameterTask(param={"username": "me", "password": "<PASSWORD>"})')<block_end><def_stmt>test_parse_invalid_input self<block_start>self.assertRaises(ValueError <lambda>:luigi.DictParameter().parse('{"invalid"}'))<block_end><def_stmt>test_hash_normalize self<block_start>self.assertRaises(TypeError <lambda>:hash(luigi.DictParameter().parse('{"a": {"b": []}}')))<line_sep>a=luigi.DictParameter().normalize({"a":[{"b":[]}]})<line_sep>b=luigi.DictParameter().normalize({"a":[{"b":[]}]})<line_sep>self.assertEqual(hash(a) hash(b))<block_end><block_end>
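For reference, a small sketch of how a DictParameter task such as the one tested above might be defined and invoked; the module name and parameter values are made up.

import luigi

class ExampleTask(luigi.Task):
    param = luigi.DictParameter()

    def run(self):
        # The parsed parameter behaves like a read-only (hashable) dict.
        print(self.param["username"])

# Typical command-line invocation (assumed module name "my_tasks"):
#   luigi --module my_tasks ExampleTask --param '{"username": "me"}' --local-scheduler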
<import_from_future_stmt> unicode_literals<import_stmt>unittest<import_from_stmt>nose.tools *# PEP8 asserts <import_from_stmt>nose.plugins.attrib attr<import_from_stmt>textblob.sentiments PatternAnalyzer NaiveBayesAnalyzer DISCRETE CONTINUOUS<class_stmt>TestPatternSentiment(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.analyzer=PatternAnalyzer()<block_end><def_stmt>test_kind self<block_start>assert_equal(self.analyzer.kind CONTINUOUS)<block_end><def_stmt>test_analyze self<block_start>p1="I feel great this morning."<line_sep>n1="This is a terrible car."<line_sep>p1_result=self.analyzer.analyze(p1)<line_sep>n1_result=self.analyzer.analyze(n1)<line_sep>assert_true(p1_result[0]<g>0)<line_sep>assert_true(n1_result[0]<l>0)<line_sep>assert_equal(p1_result.polarity p1_result[0])<line_sep>assert_equal(p1_result.subjectivity p1_result[1])<block_end><def_stmt>test_analyze_assessments self<block_start>p1="I feel great this morning."<line_sep>n1="This is a terrible car."<line_sep>p1_result=self.analyzer.analyze(p1 keep_assessments=<true>)<line_sep>n1_result=self.analyzer.analyze(n1 keep_assessments=<true>)<line_sep>p1_assessment=p1_result.assessments[0]<line_sep>n1_assessment=n1_result.assessments[0]<line_sep>assert_true(p1_assessment[1]<g>0)<line_sep>assert_true(n1_assessment[1]<l>0)<line_sep>assert_equal(p1_result.polarity p1_assessment[1])<line_sep>assert_equal(p1_result.subjectivity p1_assessment[2])<block_end><block_end><class_stmt>TestNaiveBayesAnalyzer(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.analyzer=NaiveBayesAnalyzer()<block_end><def_stmt>test_kind self<block_start>assert_equal(self.analyzer.kind DISCRETE)<block_end>@attr('slow')<def_stmt>test_analyze self<block_start>p1='I feel great this morning.'<line_sep>n1='This is a terrible car.'<line_sep>p1_result=self.analyzer.analyze(p1)<line_sep>assert_equal(p1_result[0] 'pos')<line_sep>assert_equal(self.analyzer.analyze(n1)[0] 'neg')<line_sep># The 2nd item should be the probability that it is positive assert_true(isinstance(p1_result[1] float))<line_sep># 3rd item is probability that it is negative assert_true(isinstance(p1_result[2] float))<line_sep>assert_about_equal(p1_result[1]+p1_result[2] 1)<line_sep>assert_equal(p1_result.classification p1_result[0])<line_sep>assert_equal(p1_result.p_pos p1_result[1])<line_sep>assert_equal(p1_result.p_neg p1_result[2])<block_end><block_end><def_stmt>assert_about_equal first second places=4<block_start><return>assert_equal(round(first places) second)<block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
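The analyzers exercised above are normally used through TextBlob itself; a short sketch, assuming the textblob package and its NLTK corpora are installed.

from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer

# Continuous polarity/subjectivity from the default PatternAnalyzer.
print(TextBlob("I feel great this morning.").sentiment)

# Discrete pos/neg classification plus class probabilities.
blob = TextBlob("This is a terrible car.", analyzer=NaiveBayesAnalyzer())
print(blob.sentiment.classification, blob.sentiment.p_pos, blob.sentiment.p_neg)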
""" Manage Linux kernel packages on APT-based systems """<import_stmt>functools<import_stmt>logging<import_stmt>re<try_stmt><block_start><import_from_stmt>salt.utils.versions LooseVersion<as>_LooseVersion<import_from_stmt>salt.exceptions CommandExecutionError<line_sep>HAS_REQUIRED_LIBS=<true><block_end><except_stmt>ImportError<block_start>HAS_REQUIRED_LIBS=<false><block_end>log=logging.getLogger(__name__)<line_sep>__virtualname__="kernelpkg"<def_stmt>__virtual__ <block_start>""" Load this module on Debian-based systems only """<if_stmt><not>HAS_REQUIRED_LIBS<block_start><return>(<false> "Required library could not be imported")<block_end><if_stmt>__grains__.get("os_family" "")<in>("Kali" "Debian")<block_start><return>__virtualname__<block_end><elif_stmt>__grains__.get("os_family" "")<eq>"Cumulus"<block_start><return>__virtualname__<block_end><return>(<false> "Module kernelpkg_linux_apt: no APT based system detected")<block_end><def_stmt>active <block_start>""" Return the version of the running kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.active """<if_stmt>"pkg.normalize_name"<in>__salt__<block_start><return>__salt__["pkg.normalize_name"](__grains__["kernelrelease"])<block_end><return>__grains__["kernelrelease"]<block_end><def_stmt>list_installed <block_start>""" Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed """<line_sep>pkg_re=re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix() _kernel_type()))<line_sep>pkgs=__salt__["pkg.list_pkgs"](versions_as_list=<true>)<if_stmt>pkgs<is><none><block_start>pkgs=[]<block_end>result=list(filter(pkg_re.match pkgs))<if_stmt>result<is><none><block_start><return>[]<block_end>prefix_len=len(_package_prefix())+1<line_sep><return>sorted([pkg[prefix_len:]<for>pkg result] key=functools.cmp_to_key(_cmp_version))<block_end><def_stmt>latest_available <block_start>""" Return the version of the latest kernel from the package repositories. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_available """<line_sep>result=__salt__["pkg.latest_version"]("{}-{}".format(_package_prefix() _kernel_type()))<if_stmt>result<eq>""<block_start><return>latest_installed()<block_end>version=re.match(r"^(\d+\.\d+\.\d+)\.(\d+)" result)<line_sep><return>"{}-{}-{}".format(version.group(1) version.group(2) _kernel_type())<block_end><def_stmt>latest_installed <block_start>""" Return the version of the latest installed kernel. CLI Example: .. code-block:: bash salt '*' kernelpkg.latest_installed .. note:: This function may not return the same value as :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel has been installed and the system has not yet been rebooted. The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function exists to detect this condition. """<line_sep>pkgs=list_installed()<if_stmt>pkgs<block_start><return>pkgs[-1]<block_end><return><none><block_end><def_stmt>needs_reboot <block_start>""" Detect if a new kernel version has been installed but is not running. Returns True if a new kernel is installed, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.needs_reboot """<line_sep><return>_LooseVersion(active())<l>_LooseVersion(latest_installed())<block_end><def_stmt>upgrade reboot=<false> at_time=<none><block_start>""" Upgrade the kernel and optionally reboot the system. reboot : False Request a reboot if a new kernel is available. at_time : immediate Schedule the reboot at some point in the future. This argument is ignored if ``reboot=False``. 
See :py:func:`~salt.modules.system.reboot` for more details on this argument. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade salt '*' kernelpkg.upgrade reboot=True at_time=1 .. note:: An immediate reboot often shuts down the system before the minion has a chance to return, resulting in errors. A minimal delay (1 minute) is useful to ensure the result is delivered to the master. """<line_sep>result=__salt__["pkg.install"](name="{}-{}".format(_package_prefix() latest_available()))<line_sep>_needs_reboot=needs_reboot()<line_sep>ret={"upgrades":result "active":active() "latest_installed":latest_installed() "reboot_requested":reboot "reboot_required":_needs_reboot }<if_stmt>reboot<and>_needs_reboot<block_start>log.warning("Rebooting system due to kernel upgrade")<line_sep>__salt__["system.reboot"](at_time=at_time)<block_end><return>ret<block_end><def_stmt>upgrade_available <block_start>""" Detect if a new kernel version is available in the repositories. Returns True if a new kernel is available, False otherwise. CLI Example: .. code-block:: bash salt '*' kernelpkg.upgrade_available """<line_sep><return>_LooseVersion(latest_available())<g>_LooseVersion(latest_installed())<block_end><def_stmt>remove release<block_start>""" Remove a specific version of the kernel. release The release number of an installed kernel. This must be the entire release number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. CLI Example: .. code-block:: bash salt '*' kernelpkg.remove 4.4.0-70-generic """<if_stmt>release<not><in>list_installed()<block_start><raise>CommandExecutionError("Kernel release '{}' is not installed".format(release))<block_end><if_stmt>release<eq>active()<block_start><raise>CommandExecutionError("Active kernel cannot be removed")<block_end>target="{}-{}".format(_package_prefix() release)<line_sep>log.info("Removing kernel package %s" target)<line_sep>__salt__["pkg.purge"](target)<line_sep><return>{"removed":[target]}<block_end><def_stmt>cleanup keep_latest=<true><block_start>""" Remove all unused kernel packages from the system. keep_latest : True In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. CLI Example: .. code-block:: bash salt '*' kernelpkg.cleanup """<line_sep>removed=[]<line_sep># Loop over all installed kernel packages <for_stmt>kernel list_installed()# Keep the active kernel package <block_start><if_stmt>kernel<eq>active()<block_start><continue><block_end># Optionally keep the latest kernel package <if_stmt>keep_latest<and>kernel<eq>latest_installed()<block_start><continue><block_end># Remove the kernel package removed.extend(remove(kernel)["removed"])<block_end><return>{"removed":removed}<block_end><def_stmt>_package_prefix <block_start>""" Return static string for the package prefix """<line_sep><return>"linux-image"<block_end><def_stmt>_kernel_type <block_start>""" Parse the kernel name and return its type """<line_sep><return>re.match(r"^[\d.-]+-(.+)$" active()).group(1)<block_end><def_stmt>_cmp_version item1 item2<block_start>""" Compare function for package version sorting """<line_sep>vers1=_LooseVersion(item1)<line_sep>vers2=_LooseVersion(item2)<if_stmt>vers1<l>vers2<block_start><return>-1<block_end><if_stmt>vers1<g>vers2<block_start><return>1<block_end><return>0<block_end>
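To make the sorting in list_installed above concrete, a small illustration of the _cmp_version comparator; the kernel release strings are examples only.

import functools

releases = ["4.4.0-70-generic", "4.4.0-101-generic", "4.4.0-9-generic"]
# LooseVersion splits the strings into numeric and textual parts, so the
# build numbers compare numerically rather than lexically.
print(sorted(releases, key=functools.cmp_to_key(_cmp_version)))
# ['4.4.0-9-generic', '4.4.0-70-generic', '4.4.0-101-generic']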
<import_stmt>os tempfile subprocess<import_from_stmt>hammer_vlsi MMMCCorner MMMCCornerType HammerTool HammerToolStep HammerSRAMGeneratorTool SRAMParameters<import_from_stmt>hammer_vlsi.units VoltageValue TemperatureValue<import_from_stmt>hammer_tech Library ExtraLibrary<import_from_stmt>typing NamedTuple Dict Any List<import_from_stmt>abc ABCMeta abstractmethod<class_stmt>SKY130SRAMGenerator(HammerSRAMGeneratorTool)<block_start><def_stmt>tool_config_prefix self<arrow>str<block_start><return>"sram_generator.sky130"<block_end><def_stmt>version_number self version:str<arrow>int<block_start><return>0<block_end># Run generator for a single sram and corner <def_stmt>generate_sram self params:SRAMParameters corner:MMMCCorner<arrow>ExtraLibrary<block_start>tech_cache_dir=os.path.abspath(self.technology.cache_dir)<line_sep>#TODO: this is really an abuse of the corner stuff <if_stmt>corner.type<eq>MMMCCornerType.Setup<block_start>speed_name="slow"<line_sep>speed="SS"<block_end><elif_stmt>corner.type<eq>MMMCCornerType.Hold<block_start>speed_name="fast"<line_sep>speed="FF"<block_end><elif_stmt>corner.type<eq>MMMCCornerType.Extra<block_start>speed_name="typical"<line_sep>speed="TT"<block_end># Different target memories based on port count # if params.family == "1rw": # self.logger.info("Compiling 1rw memories to DFFRAM instances") # base_dir = self.get_setting("technology.sky130.dffram_lib") # fam_code = params.family # sram_name = "RAM{d}x{w}".format( # d=params.depth, # w=params.width) # #TODO: need real libs (perhaps run Liberate here?) # #For now, use the dummy lib for all corners # corner_str = "" # # lib_path = "{b}/{n}.lib".format( # b=base_dir, # n=sram_name) # if not os.path.exists(lib_path): # self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str)) # return ExtraLibrary(prefix=None, library=Library( # name=sram_name, # nldm_liberty_file=lib_path, # lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name), # #TODO: GDS not generated. Unclear which DEF to use? # #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name), # spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name), # #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. 
cells) # #Need to add std cell behavioral Verilog to sim.inputs.input_files # verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name), # corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"}, # supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"}, # provides=[{'lib_type': "sram", 'vt': params.vt}])) # elif params.family == "1rw1r": <if_stmt>params.family<eq>"1rw"<block_start>self.logger.info("Compiling 1rw1r memories to OpenRAM instances")<line_sep>base_dir=self.get_setting("technology.sky130.openram_lib")<line_sep>fam_code=params.family<line_sep>s=round(round(params.width<times>params.depth/8 -3)/1000)# size in kiB w=params.width<line_sep>d=params.depth<line_sep>m=8<line_sep>sram_name=f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"<line_sep>print(f"SRAM_NAME: {sram_name}")<line_sep>#TODO: Hammer SRAMParameters doesn't have this info #TODO: replace this if OpenRAM characterization done for other corners #For now, use typical lib for all corners corner_str="TT_1p8V_25C"<line_sep>#corner_str = "{speed}_{volt}V_{temp}C".format( # speed = speed, # volt = str(corner.voltage.value_in_units("V")).replace(".","p"), # temp = str(int(corner.temp.value_in_units("C"))).replace(".","p")) lib_path="{b}/{n}/{n}_{c}.lib".format(b=base_dir n=sram_name c=corner_str)<if_stmt><not>os.path.exists(lib_path)<block_start>self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))<block_end><return>ExtraLibrary(prefix=<none> library=Library(name=sram_name nldm_liberty_file=lib_path lef_file="{b}/{n}/{n}.lef".format(b=base_dir n=sram_name) gds_file="{b}/{n}/{n}.gds".format(b=base_dir n=sram_name) spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir n=sram_name) verilog_sim="{b}/{n}/{n}.v".format(b=base_dir n=sram_name) corner={'nmos':speed_name 'pmos':speed_name 'temperature':str(corner.temp.value_in_units("C"))+" C"} supplies={'VDD':str(corner.voltage.value_in_units("V"))+" V" 'GND':"0 V"} provides=[{'lib_type':"sram" 'vt':params.vt}]))<block_end><else_stmt><block_start>self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))<line_sep><return>ExtraLibrary(prefix=<none> library=<none>)<block_end><block_end><block_end>tool=SKY130SRAMGenerator<line_sep>
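To illustrate how the OpenRAM macro name above is derived, a small worked example; the width/depth/mux values are made up.

width, depth, mux = 32, 512, 8
size_kb = round(round(width * depth / 8, -3) / 1000)  # bytes, rounded to the nearest kB
sram_name = f"sky130_sram_{size_kb}kbyte_1rw1r_{width}x{depth}_{mux}"
print(sram_name)  # sky130_sram_2kbyte_1rw1r_32x512_8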
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>json<import_stmt>pickle<import_from_stmt>pathlib Path<import_from_stmt>typing Callable Dict List Optional Union<import_stmt>librosa<import_stmt>torch<import_from_stmt>nemo_text_processing.text_normalization.normalize Normalizer<import_from_stmt>tqdm tqdm<import_from_stmt>nemo.collections.asr.parts.preprocessing.features WaveformFeaturizer<import_from_stmt>nemo.collections.tts.torch.helpers BetaBinomialInterpolator beta_binomial_prior_distribution general_padding <import_from_stmt>nemo.collections.tts.torch.tts_data_types DATA_STR2DATA_CLASS MAIN_DATA_TYPES VALID_SUPPLEMENTARY_DATA_TYPES DurationPrior Durations Energy LMTokens LogMel Pitch SpeakerID WithLens <import_from_stmt>nemo.collections.tts.torch.tts_tokenizers BaseTokenizer EnglishCharsTokenizer EnglishPhonemesTokenizer<import_from_stmt>nemo.core.classes Dataset<import_from_stmt>nemo.utils logging<class_stmt>TTSDataset(Dataset)<block_start><def_stmt>__init__ self manifest_filepath:str sample_rate:int text_tokenizer:Union[BaseTokenizer Callable[[str] List[int]]] tokens:Optional[List[str]]=<none> text_normalizer:Optional[Union[Normalizer Callable[[str] str]]]=<none> text_normalizer_call_args:Optional[Dict]=<none> text_tokenizer_pad_id:Optional[int]=<none> sup_data_types:Optional[List[str]]=<none> sup_data_path:Optional[Union[Path str]]=<none> max_duration:Optional[float]=<none> min_duration:Optional[float]=<none> ignore_file:Optional[str]=<none> trim:bool=<false> n_fft=1024 win_length=<none> hop_length=<none> window="hann" n_mels=80 lowfreq=0 highfreq=<none> **kwargs <block_start>"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch). Most supplementary data types will be computed on the fly and saved in the supplementary_folder if they did not exist before. Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section). Args: manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid json. Each line should contain the following: "audio_filepath": <PATH_TO_WAV> "mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional) "duration": <Duration of audio clip in seconds> (Optional) "text": <THE_TRANSCRIPT> (Optional) sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to. text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer. tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer. text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer. 
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function. text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer. sup_data_types (Optional[List[str]]): List of supplementary data types. sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch). max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load audio to compute duration. Defaults to None which does not prune. ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio files) that will be pruned prior to training. Defaults to None which does not prune. trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False. n_fft (Optional[int]): The number of fft samples. Defaults to 1024. win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft. hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4. window (Optional[str]): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none', which corresponds to the equivalent torch window function. n_mels (Optional[int]): The number of mel filters. Defaults to 80. lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0. highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None. Keyword Args: durs_file (Optional[str]): String path to pickled durations location. durs_type (Optional[str]): Type of durations. Currently only "aligner-based" is supported. use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False. pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2'). pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7'). pitch_avg (Optional[float]): The mean that we use to normalize the pitch. pitch_std (Optional[float]): The std that we use to normalize the pitch. pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not. 
"""<line_sep>super().__init__()<line_sep>self.text_normalizer=text_normalizer<line_sep>self.text_normalizer_call=(self.text_normalizer.normalize<if>isinstance(self.text_normalizer Normalizer)<else>self.text_normalizer)<line_sep>self.text_normalizer_call_args=text_normalizer_call_args<if>text_normalizer_call_args<is><not><none><else>{}<line_sep>self.text_tokenizer=text_tokenizer<if_stmt>isinstance(self.text_tokenizer BaseTokenizer)<block_start>self.text_tokenizer_pad_id=text_tokenizer.pad<line_sep>self.tokens=text_tokenizer.tokens<block_end><else_stmt><block_start><if_stmt>text_tokenizer_pad_id<is><none><block_start><raise>ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")<block_end><if_stmt>tokens<is><none><block_start><raise>ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")<block_end>self.text_tokenizer_pad_id=text_tokenizer_pad_id<line_sep>self.tokens=tokens<block_end><if_stmt>isinstance(manifest_filepath str)<block_start>manifest_filepath=[manifest_filepath]<block_end>self.manifest_filepath=manifest_filepath<if_stmt>sup_data_path<is><not><none><block_start>Path(sup_data_path).mkdir(parents=<true> exist_ok=<true>)<line_sep>self.sup_data_path=sup_data_path<block_end>self.sup_data_types=([DATA_STR2DATA_CLASS[d_as_str]<for>d_as_str sup_data_types]<if>sup_data_types<is><not><none><else>[])<line_sep>self.sup_data_types_set=set(self.sup_data_types)<line_sep>self.data=[]<line_sep>audio_files=[]<line_sep>total_duration=0<for_stmt>manifest_file self.manifest_filepath<block_start><with_stmt>open(Path(manifest_file).expanduser() 'r')<as>f<block_start>logging.info(f"Loading dataset from {manifest_file}.")<for_stmt>line tqdm(f)<block_start>item=json.loads(line)<line_sep>file_info={"audio_filepath":item["audio_filepath"] "mel_filepath":item["mel_filepath"]<if>"mel_filepath"<in>item<else><none> "duration":item["duration"]<if>"duration"<in>item<else><none> "text_tokens":<none> "speaker_id":item["speaker"]<if>"speaker"<in>item<else><none> }<if_stmt>"text"<in>item<block_start>text=item["text"]<if_stmt>self.text_normalizer<is><not><none><block_start>text=self.text_normalizer_call(text **self.text_normalizer_call_args)<block_end>text_tokens=self.text_tokenizer(text)<line_sep>file_info["raw_text"]=item["text"]<line_sep>file_info["text_tokens"]=text_tokens<block_end>audio_files.append(file_info)<if_stmt>file_info["duration"]<is><none><block_start>logging.info("Not all audio files have duration information. 
Duration logging will be disabled.")<line_sep>total_duration=<none><block_end><if_stmt>total_duration<is><not><none><block_start>total_duration<augadd>item["duration"]<block_end><block_end><block_end><block_end>logging.info(f"Loaded dataset with {len(audio_files)} files.")<if_stmt>total_duration<is><not><none><block_start>logging.info(f"Dataset contains {total_duration/3600:.2f} hours.")<block_end><if_stmt>ignore_file<block_start>logging.info(f"using {ignore_file} to prune dataset.")<with_stmt>open(Path(ignore_file).expanduser() "rb")<as>f<block_start>wavs_to_ignore=set(pickle.load(f))<block_end><block_end>pruned_duration=0<if>total_duration<is><not><none><else><none><line_sep>pruned_items=0<for_stmt>item audio_files<block_start>audio_path=item['audio_filepath']<line_sep>audio_id=Path(audio_path).stem<line_sep># Prune data according to min/max_duration & the ignore file <if_stmt>total_duration<is><not><none><block_start><if_stmt>(min_duration<and>item["duration"]<l>min_duration)<or>(max_duration<and>item["duration"]<g>max_duration)<block_start>pruned_duration<augadd>item["duration"]<line_sep>pruned_items<augadd>1<line_sep><continue><block_end><block_end><if_stmt>ignore_file<and>(audio_id<in>wavs_to_ignore)<block_start>pruned_items<augadd>1<line_sep>pruned_duration<augadd>item["duration"]<line_sep>wavs_to_ignore.remove(audio_id)<line_sep><continue><block_end>self.data.append(item)<block_end>logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")<if_stmt>pruned_duration<is><not><none><block_start>logging.info(f"Pruned {pruned_duration/3600:.2f} hours. Final dataset contains "<concat>f"{(total_duration-pruned_duration)/3600:.2f} hours.")<block_end>self.sample_rate=sample_rate<line_sep>self.featurizer=WaveformFeaturizer(sample_rate=self.sample_rate)<line_sep>self.trim=trim<line_sep>self.n_fft=n_fft<line_sep>self.n_mels=n_mels<line_sep>self.lowfreq=lowfreq<line_sep>self.highfreq=highfreq<line_sep>self.window=window<line_sep>self.win_length=win_length<or>self.n_fft<line_sep>self.hop_length=hop_length<line_sep>self.hop_len=self.hop_length<or>self.n_fft<floordiv>4<line_sep>self.fb=torch.tensor(librosa.filters.mel(self.sample_rate self.n_fft n_mels=self.n_mels fmin=self.lowfreq fmax=self.highfreq) dtype=torch.float ).unsqueeze(0)<line_sep>window_fn={'hann':torch.hann_window 'hamming':torch.hamming_window 'blackman':torch.blackman_window 'bartlett':torch.bartlett_window 'none':<none> }.get(self.window <none>)<line_sep>self.stft=<lambda>x:torch.stft(input=x n_fft=self.n_fft hop_length=self.hop_len win_length=self.win_length window=window_fn(self.win_length periodic=<false>).to(torch.float)<if>window_fn<else><none> )<for_stmt>data_type self.sup_data_types<block_start><if_stmt>data_type<not><in>VALID_SUPPLEMENTARY_DATA_TYPES<block_start><raise>NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")<block_end>getattr(self f"add_{data_type.name}")(**kwargs)<block_end><block_end><def_stmt>add_log_mel self **kwargs<block_start><pass><block_end><def_stmt>add_durations self **kwargs<block_start>durs_file=kwargs.pop('durs_file')<line_sep>durs_type=kwargs.pop('durs_type')<line_sep>audio_stem2durs=torch.load(durs_file)<line_sep>self.durs=[]<for_stmt>tag [Path(d["audio_filepath"]).stem<for>d self.data]<block_start>durs=audio_stem2durs[tag]<if_stmt>durs_type<eq>"aligner-based"<block_start>self.durs.append(durs)<block_end><else_stmt><block_start><raise>NotImplementedError(f"{durs_type} duration type is not supported. 
Only align-based is supported at this moment.")<block_end><block_end><block_end><def_stmt>add_duration_prior self **kwargs<block_start>self.use_beta_binomial_interpolator=kwargs.pop('use_beta_binomial_interpolator' <false>)<if_stmt>self.use_beta_binomial_interpolator<block_start>self.beta_binomial_interpolator=BetaBinomialInterpolator()<block_end><block_end><def_stmt>add_pitch self **kwargs<block_start>self.pitch_fmin=kwargs.pop("pitch_fmin" librosa.note_to_hz('C2'))<line_sep>self.pitch_fmax=kwargs.pop("pitch_fmax" librosa.note_to_hz('C7'))<line_sep>self.pitch_avg=kwargs.pop("pitch_avg" <none>)<line_sep>self.pitch_std=kwargs.pop("pitch_std" <none>)<line_sep>self.pitch_norm=kwargs.pop("pitch_norm" <false>)<block_end><def_stmt>add_energy self **kwargs<block_start><pass><block_end><def_stmt>add_speaker_id self **kwargs<block_start><pass><block_end><def_stmt>get_spec self audio<block_start><with_stmt>torch.cuda.amp.autocast(enabled=<false>)<block_start>spec=self.stft(audio)<if_stmt>spec.dtype<in>[torch.cfloat torch.cdouble]<block_start>spec=torch.view_as_real(spec)<block_end>spec=torch.sqrt(spec.pow(2).sum(-1)+1e-9)<block_end><return>spec<block_end><def_stmt>get_log_mel self audio<block_start><with_stmt>torch.cuda.amp.autocast(enabled=<false>)<block_start>spec=self.get_spec(audio)<line_sep>mel=torch.matmul(self.fb.to(spec.dtype) spec)<line_sep>log_mel=torch.log(torch.clamp(mel min=torch.finfo(mel.dtype).tiny))<block_end><return>log_mel<block_end><def_stmt>__getitem__ self index<block_start>sample=self.data[index]<line_sep>audio_stem=Path(sample["audio_filepath"]).stem<line_sep>features=self.featurizer.process(sample["audio_filepath"] trim=self.trim)<line_sep>audio,audio_length=features torch.tensor(features.shape[0]).long()<line_sep>text=torch.tensor(sample["text_tokens"]).long()<line_sep>text_length=torch.tensor(len(sample["text_tokens"])).long()<line_sep>log_mel,log_mel_length=<none> <none><if_stmt>LogMel<in>self.sup_data_types_set<block_start>mel_path=sample["mel_filepath"]<if_stmt>mel_path<is><not><none><and>Path(mel_path).exists()<block_start>log_mel=torch.load(mel_path)<block_end><else_stmt><block_start>mel_path=Path(self.sup_data_path)/f"mel_{audio_stem}.pt"<if_stmt>mel_path.exists()<block_start>log_mel=torch.load(mel_path)<block_end><else_stmt><block_start>log_mel=self.get_log_mel(audio)<line_sep>torch.save(log_mel mel_path)<block_end><block_end>log_mel=log_mel.squeeze(0)<line_sep>log_mel_length=torch.tensor(log_mel.shape[1]).long()<block_end>durations=<none><if_stmt>Durations<in>self.sup_data_types_set<block_start>durations=self.durs[index]<block_end>duration_prior=<none><if_stmt>DurationPrior<in>self.sup_data_types_set<block_start><if_stmt>self.use_beta_binomial_interpolator<block_start>mel_len=self.get_log_mel(audio).shape[2]<line_sep>duration_prior=torch.from_numpy(self.beta_binomial_interpolator(mel_len text_length.item()))<block_end><else_stmt><block_start>prior_path=Path(self.sup_data_path)/f"pr_{audio_stem}.pt"<if_stmt>prior_path.exists()<block_start>duration_prior=torch.load(prior_path)<block_end><else_stmt><block_start>mel_len=self.get_log_mel(audio).shape[2]<line_sep>duration_prior=beta_binomial_prior_distribution(text_length mel_len)<line_sep>duration_prior=torch.from_numpy(duration_prior)<line_sep>torch.save(duration_prior prior_path)<block_end><block_end><block_end>pitch,pitch_length=<none> 
<none><if_stmt>Pitch<in>self.sup_data_types_set<block_start>pitch_name=(f"{audio_stem}_pitch_pyin_"<concat>f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"<concat>f"fl{self.win_length}_hs{self.hop_len}.pt")<line_sep>pitch_path=Path(self.sup_data_path)/pitch_name<if_stmt>pitch_path.exists()<block_start>pitch=torch.load(pitch_path).float()<block_end><else_stmt><block_start>pitch,_,_=librosa.pyin(audio.numpy() fmin=self.pitch_fmin fmax=self.pitch_fmax frame_length=self.win_length sr=self.sample_rate fill_na=0.0 )<line_sep>pitch=torch.from_numpy(pitch).float()<line_sep>torch.save(pitch pitch_path)<block_end><if_stmt>self.pitch_avg<is><not><none><and>self.pitch_std<is><not><none><and>self.pitch_norm<block_start>pitch<augsub>self.pitch_avg<line_sep>pitch[pitch<eq>-self.pitch_avg]=0.0# Zero out values that were perviously zero pitch<augdiv>self.pitch_std<block_end>pitch_length=torch.tensor(len(pitch)).long()<block_end>energy,energy_length=<none> <none><if_stmt>Energy<in>self.sup_data_types_set<block_start>energy_path=Path(self.sup_data_path)/f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"<if_stmt>energy_path.exists()<block_start>energy=torch.load(energy_path).float()<block_end><else_stmt><block_start>spec=self.get_spec(audio)<line_sep>energy=torch.linalg.norm(spec.squeeze(0) axis=0).float()<line_sep>torch.save(energy energy_path)<block_end>energy_length=torch.tensor(len(energy)).long()<block_end>speaker_id=<none><if_stmt>SpeakerID<in>self.sup_data_types_set<block_start>speaker_id=torch.tensor(sample["speaker_id"]).long()<block_end><return>(audio audio_length text text_length log_mel log_mel_length durations duration_prior pitch pitch_length energy energy_length speaker_id )<block_end><def_stmt>__len__ self<block_start><return>len(self.data)<block_end><def_stmt>join_data self data_dict<block_start>result=[]<for_stmt>data_type MAIN_DATA_TYPES+self.sup_data_types<block_start>result.append(data_dict[data_type.name])<if_stmt>issubclass(data_type WithLens)<block_start>result.append(data_dict[f"{data_type.name}_lens"])<block_end><block_end><return>tuple(result)<block_end><def_stmt>general_collate_fn self batch<block_start>(_ audio_lengths _ tokens_lengths _ log_mel_lengths durations_list duration_priors_list pitches pitches_lengths energies energies_lengths _ )=zip(*batch)<line_sep>max_audio_len=max(audio_lengths).item()<line_sep>max_tokens_len=max(tokens_lengths).item()<line_sep>max_log_mel_len=max(log_mel_lengths)<if>LogMel<in>self.sup_data_types_set<else><none><line_sep>max_durations_len=max([len(i)<for>i durations_list])<if>Durations<in>self.sup_data_types_set<else><none><line_sep>max_pitches_len=max(pitches_lengths).item()<if>Pitch<in>self.sup_data_types_set<else><none><line_sep>max_energies_len=max(energies_lengths).item()<if>Energy<in>self.sup_data_types_set<else><none><if_stmt>LogMel<in>self.sup_data_types_set<block_start>log_mel_pad=torch.finfo(batch[0][2].dtype).tiny<block_end>duration_priors=(torch.zeros(len(duration_priors_list) max([prior_i.shape[0]<for>prior_i duration_priors_list]) max([prior_i.shape[1]<for>prior_i duration_priors_list]) )<if>DurationPrior<in>self.sup_data_types_set<else>[])<line_sep>audios,tokens,log_mels,durations_list,pitches,energies,speaker_ids=[] [] [] [] [] [] []<for_stmt>i,sample_tuple enumerate(batch)<block_start>(audio audio_len token token_len log_mel log_mel_len durations duration_prior pitch pitch_length energy energy_length speaker_id )=sample_tuple<line_sep>audio=general_padding(audio audio_len.item() 
max_audio_len)<line_sep>audios.append(audio)<line_sep>token=general_padding(token token_len.item() max_tokens_len pad_value=self.text_tokenizer_pad_id)<line_sep>tokens.append(token)<if_stmt>LogMel<in>self.sup_data_types_set<block_start>log_mels.append(general_padding(log_mel log_mel_len max_log_mel_len pad_value=log_mel_pad))<block_end><if_stmt>Durations<in>self.sup_data_types_set<block_start>durations_list.append(general_padding(durations len(durations) max_durations_len))<block_end><if_stmt>DurationPrior<in>self.sup_data_types_set<block_start>duration_priors[i :duration_prior.shape[0] :duration_prior.shape[1]]=duration_prior<block_end><if_stmt>Pitch<in>self.sup_data_types_set<block_start>pitches.append(general_padding(pitch pitch_length.item() max_pitches_len))<block_end><if_stmt>Energy<in>self.sup_data_types_set<block_start>energies.append(general_padding(energy energy_length.item() max_energies_len))<block_end><if_stmt>SpeakerID<in>self.sup_data_types_set<block_start>speaker_ids.append(speaker_id)<block_end><block_end>data_dict={"audio":torch.stack(audios) "audio_lens":torch.stack(audio_lengths) "text":torch.stack(tokens) "text_lens":torch.stack(tokens_lengths) "log_mel":torch.stack(log_mels)<if>LogMel<in>self.sup_data_types_set<else><none> "log_mel_lens":torch.stack(log_mel_lengths)<if>LogMel<in>self.sup_data_types_set<else><none> "durations":torch.stack(durations_list)<if>Durations<in>self.sup_data_types_set<else><none> "duration_prior":duration_priors<if>DurationPrior<in>self.sup_data_types_set<else><none> "pitch":torch.stack(pitches)<if>Pitch<in>self.sup_data_types_set<else><none> "pitch_lens":torch.stack(pitches_lengths)<if>Pitch<in>self.sup_data_types_set<else><none> "energy":torch.stack(energies)<if>Energy<in>self.sup_data_types_set<else><none> "energy_lens":torch.stack(energies_lengths)<if>Energy<in>self.sup_data_types_set<else><none> "speaker_id":torch.stack(speaker_ids)<if>SpeakerID<in>self.sup_data_types_set<else><none> }<line_sep><return>data_dict<block_end><def_stmt>_collate_fn self batch<block_start>data_dict=self.general_collate_fn(batch)<line_sep>joined_data=self.join_data(data_dict)<line_sep><return>joined_data<block_end><block_end><class_stmt>MixerTTSDataset(TTSDataset)<block_start><def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<block_end><def_stmt>_albert self<block_start><import_from_stmt>transformers AlbertTokenizer# noqa pylint: disable=import-outside-toplevel self.lm_model_tokenizer=AlbertTokenizer.from_pretrained('albert-base-v2')<line_sep>self.lm_padding_value=self.lm_model_tokenizer._convert_token_to_id('<pad>')<line_sep>space_value=self.lm_model_tokenizer._convert_token_to_id('▁')<line_sep>self.id2lm_tokens={}<for_stmt>i,d enumerate(self.data)<block_start>raw_text=d["raw_text"]<assert_stmt>isinstance(self.text_tokenizer EnglishPhonemesTokenizer)<or>isinstance(self.text_tokenizer EnglishCharsTokenizer)<line_sep>preprocess_text_as_tts_input=self.text_tokenizer.text_preprocessing_func(raw_text)<line_sep>lm_tokens_as_ids=self.lm_model_tokenizer.encode(preprocess_text_as_tts_input add_special_tokens=<false>)<if_stmt>self.text_tokenizer.pad_with_space<block_start>lm_tokens_as_ids=[space_value]+lm_tokens_as_ids+[space_value]<block_end>self.id2lm_tokens[i]=lm_tokens_as_ids<block_end><block_end><def_stmt>add_lm_tokens self **kwargs<block_start>lm_model=kwargs.pop('lm_model')<if_stmt>lm_model<eq>"albert"<block_start>self._albert()<block_end><else_stmt><block_start><raise>NotImplementedError(f"{lm_model} lm model is not supported. 
Only albert is supported at this moment.")<block_end><block_end><def_stmt>__getitem__ self index<block_start>(audio audio_length text text_length log_mel log_mel_length durations duration_prior pitch pitch_length energy energy_length speaker_id )=super().__getitem__(index)<line_sep>lm_tokens=<none><if_stmt>LMTokens<in>self.sup_data_types_set<block_start>lm_tokens=torch.tensor(self.id2lm_tokens[index]).long()<block_end><return>(audio audio_length text text_length log_mel log_mel_length durations duration_prior pitch pitch_length energy energy_length speaker_id lm_tokens )<block_end><def_stmt>_collate_fn self batch<block_start>batch=list(zip(*batch))<line_sep>data_dict=self.general_collate_fn(list(zip(*batch[:13])))<line_sep>lm_tokens_list=batch[13]<if_stmt>LMTokens<in>self.sup_data_types_set<block_start>lm_tokens=torch.full((len(lm_tokens_list) max([lm_tokens.shape[0]<for>lm_tokens lm_tokens_list])) fill_value=self.lm_padding_value )<for_stmt>i,lm_tokens_i enumerate(lm_tokens_list)<block_start>lm_tokens[i :lm_tokens_i.shape[0]]=lm_tokens_i<block_end>data_dict[LMTokens.name]=lm_tokens<block_end>joined_data=self.join_data(data_dict)<line_sep><return>joined_data<block_end><block_end>
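A hedged instantiation sketch for the datasets above; the manifest path, tokenizer choice, supplementary-data names and cache folder are placeholders and may need to be adapted to the installed NeMo version.

import torch
from nemo.collections.tts.torch.tts_tokenizers import EnglishCharsTokenizer

dataset = TTSDataset(
    manifest_filepath="train_manifest.json",      # placeholder manifest
    sample_rate=22050,
    text_tokenizer=EnglishCharsTokenizer(),
    sup_data_types=["pitch", "duration_prior"],   # computed lazily and cached in sup_data_path
    sup_data_path="sup_data",
    use_beta_binomial_interpolator=True,
)
loader = torch.utils.data.DataLoader(
    dataset, batch_size=8, collate_fn=dataset._collate_fn, num_workers=4
)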
# -*- coding: utf-8 -*- # vispy: gallery 10 # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. <import_stmt>sys<import_stmt>numpy<as>np<import_from_stmt>vispy app gloo visuals<import_from_stmt>vispy.visuals.filters Clipper ColorFilter<import_from_stmt>vispy.visuals.shaders MultiProgram<import_from_stmt>vispy.visuals.collections PointCollection<import_from_stmt>vispy.visuals.transforms STTransform<import_from_stmt>vispy.scene SceneCanvas<import_from_stmt>vispy.scene.visuals create_visual_node<class_stmt>LineVisual(visuals.Visual)<block_start>"""Example of a very simple GL-line visual. This shows the minimal set of methods that need to be reimplemented to make a new visual class. """<def_stmt>__init__ self pos=<none> color=(1 1 1 1)<block_start>vcode=""" attribute vec2 a_pos; void main() { gl_Position = $transform(vec4(a_pos, 0., 1.)); gl_PointSize = 10.; } """<line_sep>fcode=""" void main() { gl_FragColor = $color; } """<line_sep>visuals.Visual.__init__(self vcode=vcode fcode=fcode)<line_sep>self.pos_buf=gloo.VertexBuffer()<line_sep># The Visual superclass contains a MultiProgram, which is an object # that behaves like a normal shader program (you can assign shader # code, upload values, set template variables, etc.) but internally # manages multiple ModularProgram instances, one per view. # The MultiProgram is accessed via the `shared_program` property, so # the following modifications to the program will be applied to all # views: self.shared_program['a_pos']=self.pos_buf<line_sep>self.shared_program.frag['color']=color<line_sep>self._need_upload=<false><line_sep># Visual keeps track of draw mode, index buffer, and GL state. These # are shared between all views. self._draw_mode='line_strip'<line_sep>self.set_gl_state('translucent' depth_test=<false>)<if_stmt>pos<is><not><none><block_start>self.set_data(pos)<block_end><block_end><def_stmt>set_data self pos<block_start>self._pos=pos<line_sep>self._need_upload=<true><block_end><def_stmt>_prepare_transforms self view=<none><block_start>view.view_program.vert['transform']=view.transforms.get_transform()<block_end><def_stmt>_prepare_draw self view=<none><block_start>"""This method is called immediately before each draw. The *view* argument indicates which view is about to be drawn. """<if_stmt>self._need_upload# Note that pos_buf is shared between all views, so we have no need # to use the *view* argument in this example. This will be true # for most visuals. <block_start>self.pos_buf.set_data(self._pos)<line_sep>self._need_upload=<false><block_end><block_end><block_end><class_stmt>PointVisual(LineVisual)<block_start>"""Another simple visual class. Due to the simplicity of these example classes, it was only necessary to subclass from LineVisual and set the draw mode to 'points'. A more fully-featured PointVisual class might not follow this approach. """<def_stmt>__init__ self pos=<none> color=(1 1 1 1)<block_start>LineVisual.__init__(self pos color)<line_sep>self._draw_mode='points'<block_end><block_end><class_stmt>PlotLineVisual(visuals.CompoundVisual)<block_start>"""An example compound visual that draws lines and points. To the user, the compound visual behaves exactly like a normal visual--it has a transform system, draw() and bounds() methods, etc. Internally, the compound visual automatically manages proxying these transforms and methods to its sub-visuals. 
"""<def_stmt>__init__ self pos=<none> line_color=(1 1 1 1) point_color=(1 1 1 1)<block_start>self._line=LineVisual(pos color=line_color)<line_sep>self._point=PointVisual(pos color=point_color)<line_sep>visuals.CompoundVisual.__init__(self [self._line self._point])<block_end><block_end><class_stmt>PointCollectionVisual(visuals.Visual)<block_start>"""Thin wrapper around a point collection. Note: This is currently broken! """<def_stmt>__init__ self<block_start>prog=MultiProgram(vcode='' fcode='')<line_sep>self.points=PointCollection("agg" color="shared" program=prog)<line_sep>visuals.Visual.__init__(self program=prog)<block_end><def_stmt>_prepare_draw self view<block_start><if_stmt>self.points._need_update<block_start>self.points._update()<block_end>self._draw_mode=self.points._mode<line_sep>self._index_buffer=self.points._indices_buffer<block_end><def_stmt>append self *args **kwargs<block_start>self.points.append(*args **kwargs)<block_end><def_stmt>_prepare_transforms self view=<none><block_start><pass><block_end>@property<def_stmt>color self<block_start><return>self.points['color']<block_end>@color.setter<def_stmt>color self c<block_start>self.points['color']=c<block_end><block_end><class_stmt>PanZoomTransform(STTransform)<block_start><def_stmt>__init__ self canvas=<none> aspect=<none> **kwargs<block_start>self._aspect=aspect<line_sep>self.attach(canvas)<line_sep>STTransform.__init__(self **kwargs)<block_end><def_stmt>attach self canvas<block_start>""" Attach this tranform to a canvas """<line_sep>self._canvas=canvas<line_sep>canvas.events.mouse_wheel.connect(self.on_mouse_wheel)<line_sep>canvas.events.mouse_move.connect(self.on_mouse_move)<block_end><def_stmt>on_mouse_move self event<block_start><if_stmt>event.is_dragging<block_start>dxy=event.pos-event.last_event.pos<line_sep>button=event.press_event.button<if_stmt>button<eq>1<block_start>self.move(dxy)<block_end><elif_stmt>button<eq>2<block_start>center=event.press_event.pos<if_stmt>self._aspect<is><none><block_start>self.zoom(np.exp(dxy<times>(0.01 -0.01)) center)<block_end><else_stmt><block_start>s=dxy[1]<times>-0.01<line_sep>self.zoom(np.exp(np.array([s s])) center)<block_end><block_end><block_end><block_end><def_stmt>on_mouse_wheel self event<block_start>self.zoom(np.exp(event.delta<times>(0.01 -0.01)) event.pos)<block_end><block_end>canvas=app.Canvas(keys='interactive' size=(900 600) show=<true> title="Visual Canvas")<line_sep>pos=np.random.normal(size=(1000 2) loc=0 scale=50).astype('float32')<line_sep>pos[0]=[0 0]<line_sep># Make a line visual line=LineVisual(pos=pos)<line_sep>line.transforms.canvas=canvas<line_sep>line.transform=STTransform(scale=(2 1) translate=(20 20))<line_sep>panzoom=PanZoomTransform(canvas)<line_sep>line.transforms.scene_transform=panzoom<line_sep>panzoom.changed.connect(<lambda>ev:canvas.update())<line_sep># Attach color filter to all views (current and future) of the visual line.attach(ColorFilter((1 1 0.5 0.7)))<line_sep># Attach a clipper just to this view. The Clipper filter requires a # transform that maps from the framebuffer coordinate system to the # clipping coordinates. 
tr=line.transforms.get_transform('framebuffer' 'canvas')<line_sep>line.attach(Clipper((20 20 260 260) transform=tr) view=line)<line_sep># Make a view of the line that will draw its shadow shadow=line.view()<line_sep>shadow.transforms.canvas=canvas<line_sep>shadow.transform=STTransform(scale=(2 1) translate=(25 25))<line_sep>shadow.transforms.scene_transform=panzoom<line_sep>shadow.attach(ColorFilter((0 0 0 0.6)) view=shadow)<line_sep>tr=shadow.transforms.get_transform('framebuffer' 'canvas')<line_sep>shadow.attach(Clipper((20 20 260 260) transform=tr) view=shadow)<line_sep># And make a second view of the line with different clipping bounds view=line.view()<line_sep>view.transforms.canvas=canvas<line_sep>view.transform=STTransform(scale=(2 0.5) translate=(450 150))<line_sep>tr=view.transforms.get_transform('framebuffer' 'canvas')<line_sep>view.attach(Clipper((320 20 260 260) transform=tr) view=view)<line_sep># Make a compound visual plot=PlotLineVisual(pos (0.5 1 0.5 0.2) (0.5 1 1 0.3))<line_sep>plot.transforms.canvas=canvas<line_sep>plot.transform=STTransform(translate=(80 450) scale=(1.5 1))<line_sep>tr=plot.transforms.get_transform('framebuffer' 'canvas')<line_sep>plot.attach(Clipper((20 320 260 260) transform=tr) view=plot)<line_sep># And make a view on the compound view2=plot.view()<line_sep>view2.transforms.canvas=canvas<line_sep>view2.transform=STTransform(scale=(1.5 1) translate=(450 400))<line_sep>tr=view2.transforms.get_transform('framebuffer' 'canvas')<line_sep>view2.attach(Clipper((320 320 260 260) transform=tr) view=view2)<line_sep># And a shadow for the view shadow2=plot.view()<line_sep>shadow2.transforms.canvas=canvas<line_sep>shadow2.transform=STTransform(scale=(1.5 1) translate=(455 405))<line_sep>shadow2.attach(ColorFilter((0 0 0 0.6)) view=shadow2)<line_sep>tr=shadow2.transforms.get_transform('framebuffer' 'canvas')<line_sep>shadow2.attach(Clipper((320 320 260 260) transform=tr) view=shadow2)<line_sep># Example of a collection visual collection=PointCollectionVisual()<line_sep>collection.transforms.canvas=canvas<line_sep>collection.transform=STTransform(translate=(750 150))<line_sep>collection.append(np.random.normal(loc=0 scale=20 size=(10000 3)) itemsize=5000)<line_sep>collection.color=(1 0.5 0.5 1) (0.5 0.5 1 1)<line_sep>shadow3=collection.view()<line_sep>shadow3.transforms.canvas=canvas<line_sep>shadow3.transform=STTransform(scale=(1 1) translate=(752 152))<line_sep>shadow3.attach(ColorFilter((0 0 0 0.6)) view=shadow3)<line_sep># tr = shadow3.transforms.get_transform('framebuffer', 'canvas') # shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2) order=[shadow line view plot shadow2 view2 shadow3 collection]<line_sep>@canvas.connect<def_stmt>on_draw event<block_start>canvas.context.clear((0.3 0.3 0.3 1.0))<for_stmt>v order<block_start>v.draw()<block_end><block_end><def_stmt>on_resize event# Set canvas viewport and reconfigure visual transforms to match. 
<block_start>vp=(0 0 canvas.physical_size[0] canvas.physical_size[1])<line_sep>canvas.context.set_viewport(*vp)<for_stmt>v order<block_start>v.transforms.configure(canvas=canvas viewport=vp)<block_end><block_end>canvas.events.resize.connect(on_resize)<line_sep>on_resize(<none>)<line_sep>Line=create_visual_node(LineVisual)<line_sep>canvas2=SceneCanvas(keys='interactive' title='Scene Canvas' show=<true>)<line_sep>v=canvas2.central_widget.add_view(margin=10)<line_sep>v.border_color=(1 1 1 1)<line_sep>v.bgcolor=(0.3 0.3 0.3 1)<line_sep>v.camera='panzoom'<line_sep>line2=Line(pos parent=v.scene)<def_stmt>mouse ev<block_start>print(ev)<block_end>v.events.mouse_press.connect(mouse)<if_stmt>__name__<eq>'__main__'<block_start><if_stmt>sys.flags.interactive<ne>1<block_start>app.run()<block_end><block_end>
"""Auxiliary methods."""<import_stmt>os<import_stmt>json<import_from_stmt>errno EEXIST<import_stmt>numpy<as>np<import_stmt>seaborn<as>sns<import_stmt>cPickle<as>pickle<import_stmt>matplotlib.pyplot<as>plt<line_sep>sns.set()<line_sep>DEFAULT_LOG_DIR='log'<line_sep>ATOB_WEIGHTS_FILE='atob_weights.h5'<line_sep>D_WEIGHTS_FILE='d_weights.h5'<class_stmt>MyDict(dict)<block_start>""" Dictionary that allows to access elements with dot notation. ex: >> d = MyDict({'key': 'val'}) >> d.key 'val' >> d.key2 = 'val2' >> d {'key2': 'val2', 'key': 'val'} """<line_sep>__getattr__=dict.get<line_sep>__setattr__=dict.__setitem__<block_end><def_stmt>convert_to_rgb img is_binary=<false><block_start>"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""<if_stmt>len(img.shape)<ne>3<block_start><raise>Exception("""Image must have 3 dimensions (channels x height x width). """<concat>"""Given {0}""".format(len(img.shape)))<block_end>img_ch,_,_=img.shape<if_stmt>img_ch<ne>3<and>img_ch<ne>1<block_start><raise>Exception("""Unsupported number of channels. """<concat>"""Must be 1 or 3, given {0}.""".format(img_ch))<block_end>imgp=img<if_stmt>img_ch<eq>1<block_start>imgp=np.repeat(img 3 axis=0)<block_end><if_stmt><not>is_binary<block_start>imgp=imgp<times>127.5+127.5<line_sep>imgp<augdiv>255.<block_end><return>np.clip(imgp.transpose((1 2 0)) 0 1)<block_end><def_stmt>compose_imgs a b is_a_binary=<true> is_b_binary=<false><block_start>"""Place a and b side by side to be plotted."""<line_sep>ap=convert_to_rgb(a is_binary=is_a_binary)<line_sep>bp=convert_to_rgb(b is_binary=is_b_binary)<if_stmt>ap.shape<ne>bp.shape<block_start><raise>Exception("""A and B must have the same size. """<concat>"""{0} != {1}""".format(ap.shape bp.shape))<block_end># ap.shape and bp.shape must have the same size here h,w,ch=ap.shape<line_sep>composed=np.zeros((h 2<times>w ch))<line_sep>composed[: :w :]=ap<line_sep>composed[: w: :]=bp<line_sep><return>composed<block_end><def_stmt>get_log_dir log_dir expt_name<block_start>"""Compose the log_dir with the experiment name."""<if_stmt>log_dir<is><none><block_start><raise>Exception('log_dir can not be None.')<block_end><if_stmt>expt_name<is><not><none><block_start><return>os.path.join(log_dir expt_name)<block_end><return>log_dir<block_end><def_stmt>mkdir mypath<block_start>"""Create a directory if it does not exist."""<try_stmt><block_start>os.makedirs(mypath)<block_end><except_stmt>OSError<as>exc<block_start><if_stmt>exc.errno<eq>EEXIST<and>os.path.isdir(mypath)<block_start><pass><block_end><else_stmt><block_start><raise><block_end><block_end><block_end><def_stmt>create_expt_dir params<block_start>"""Create the experiment directory and return it."""<line_sep>expt_dir=get_log_dir(params.log_dir params.expt_name)<line_sep># Create directories if they do not exist mkdir(params.log_dir)<line_sep>mkdir(expt_dir)<line_sep># Save the parameters json.dump(params open(os.path.join(expt_dir 'params.json') 'wb') indent=4 sort_keys=<true>)<line_sep><return>expt_dir<block_end><def_stmt>plot_loss loss label filename log_dir<block_start>"""Plot a loss function and save it in a file."""<line_sep>plt.figure(figsize=(5 4))<line_sep>plt.plot(loss label=label)<line_sep>plt.legend()<line_sep>plt.savefig(os.path.join(log_dir filename))<line_sep>plt.clf()<block_end><def_stmt>log losses atob it_val N=4 log_dir=DEFAULT_LOG_DIR expt_name=<none> is_a_binary=<true> is_b_binary=<false><block_start>"""Log losses and atob results."""<line_sep>log_dir=get_log_dir(log_dir expt_name)<line_sep># Save the losses 
for further inspection pickle.dump(losses open(os.path.join(log_dir 'losses.pkl') 'wb'))<line_sep>########################################################################### # PLOT THE LOSSES # ########################################################################### plot_loss(losses['d'] 'discriminator' 'd_loss.png' log_dir)<line_sep>plot_loss(losses['d_val'] 'discriminator validation' 'd_val_loss.png' log_dir)<line_sep>plot_loss(losses['p2p'] 'Pix2Pix' 'p2p_loss.png' log_dir)<line_sep>plot_loss(losses['p2p_val'] 'Pix2Pix validation' 'p2p_val_loss.png' log_dir)<line_sep>########################################################################### # PLOT THE A->B RESULTS # ########################################################################### plt.figure(figsize=(10 6))<for_stmt>i range(N<times>N)<block_start>a,_=next(it_val)<line_sep>bp=atob.predict(a)<line_sep>img=compose_imgs(a[0] bp[0] is_a_binary=is_a_binary is_b_binary=is_b_binary)<line_sep>plt.subplot(N N i+1)<line_sep>plt.imshow(img)<line_sep>plt.axis('off')<block_end>plt.savefig(os.path.join(log_dir 'atob.png'))<line_sep>plt.clf()<line_sep># Make sure all the figures are closed. plt.close('all')<block_end><def_stmt>save_weights models log_dir=DEFAULT_LOG_DIR expt_name=<none><block_start>"""Save the weights of the models into a file."""<line_sep>log_dir=get_log_dir(log_dir expt_name)<line_sep>models.atob.save_weights(os.path.join(log_dir ATOB_WEIGHTS_FILE) overwrite=<true>)<line_sep>models.d.save_weights(os.path.join(log_dir D_WEIGHTS_FILE) overwrite=<true>)<block_end><def_stmt>load_weights atob d log_dir=DEFAULT_LOG_DIR expt_name=<none><block_start>"""Load the weights into the corresponding models."""<line_sep>log_dir=get_log_dir(log_dir expt_name)<line_sep>atob.load_weights(os.path.join(log_dir ATOB_WEIGHTS_FILE))<line_sep>d.load_weights(os.path.join(log_dir D_WEIGHTS_FILE))<block_end><def_stmt>load_weights_of m weights_file log_dir=DEFAULT_LOG_DIR expt_name=<none><block_start>"""Load the weights of the model m."""<line_sep>log_dir=get_log_dir(log_dir expt_name)<line_sep>m.load_weights(os.path.join(log_dir weights_file))<block_end><def_stmt>load_losses log_dir=DEFAULT_LOG_DIR expt_name=<none><block_start>"""Load the losses of the given experiment."""<line_sep>log_dir=get_log_dir(log_dir expt_name)<line_sep>losses=pickle.load(open(os.path.join(log_dir 'losses.pkl') 'rb'))<line_sep><return>losses<block_end><def_stmt>load_params params<block_start>""" Load the parameters of an experiment and return them. The params passed as argument will be merged with the new params dict. If there is a conflict with a key, the params passed as argument prevails. """<line_sep>expt_dir=get_log_dir(params.log_dir params.expt_name)<line_sep>expt_params=json.load(open(os.path.join(expt_dir 'params.json') 'rb'))<line_sep># Update the loaded parameters with the current parameters. This will # override conflicting keys as expected. expt_params.update(params)<line_sep><return>expt_params<block_end>
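# Usage sketch in standard Python (illustrative arrays, not part of the original module): composing a binary input mask and a generated image side by side, which is the same operation `log` performs internally before plotting.
import numpy as np

a = np.random.randint(0, 2, size=(1, 64, 64)).astype('float32')       # binary, 1 channel
b = np.random.uniform(-1.0, 1.0, size=(3, 64, 64)).astype('float32')  # tanh-scaled, 3 channels
img = compose_imgs(a, b, is_a_binary=True, is_b_binary=False)
print(img.shape)  # (64, 128, 3), values clipped to [0, 1]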
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>json<import_stmt>copy<import_stmt>numpy<as>np<import_from_stmt>paddleslim.nas GPNAS<line_sep># Demo of using GP-NAS for Track2 of the [CVPR 2021 NAS international competition](https://www.cvpr21-nas.com/competition)
# [CVPR 2021 NAS international competition Track2 AI Studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on GP-NAS (Gaussian Process based Neural Architecture Search), PaddleSlim's in-house NAS algorithm
# An improved version built on this demo is eligible for double prize money
<def_stmt>preprare_trainning_data file_name t_flag## t_flag ==1 using all training data
## t_flag ==2 using half training data
<block_start><with_stmt>open(file_name 'r')<as>f<block_start>arch_dict=json.load(f)<block_end>Y_all=[]<line_sep>X_all=[]<for_stmt>sub_dict arch_dict.items()<block_start>Y_all.append(sub_dict[1]['acc']<times>100)<line_sep>X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4 16)[2])<block_end>X_all,Y_all=np.array(X_all) np.array(Y_all)<line_sep>X_train,Y_train,X_test,Y_test=X_all[0::t_flag] Y_all[0::t_flag] X_all[1::t_flag] Y_all[1::t_flag]<line_sep><return>X_train Y_train X_test Y_test<block_end><if_stmt>__name__<eq>'__main__'<block_start>stage1_file='./datasets/Track2_stage1_trainning.json'<line_sep>stage2_file='./datasets/Track2_stage2_few_show_trainning.json'<line_sep>X_train_stage1,Y_train_stage1,X_test_stage1,Y_test_stage1=preprare_trainning_data(stage1_file 1)<line_sep>X_train_stage2,Y_train_stage2,X_test_stage2,Y_test_stage2=preprare_trainning_data(stage2_file 2)<line_sep>gpnas=GPNAS()<line_sep>w=gpnas.get_initial_mean(X_test_stage1 Y_test_stage1)<line_sep>init_cov=gpnas.get_initial_cov(X_train_stage1)<line_sep>error_list=np.array(Y_test_stage2.reshape(len(Y_test_stage2) 1)-gpnas.get_predict(X_test_stage2))<line_sep>print('RMSE training on stage1 testing on stage2:' np.sqrt(np.dot(error_list.T error_list)/len(error_list)))<line_sep>gpnas.get_posterior_mean(X_train_stage2[0::3] Y_train_stage2[0::3])<line_sep>gpnas.get_posterior_mean(X_train_stage2[1::3] Y_train_stage2[1::3])<line_sep>gpnas.get_posterior_cov(X_train_stage2[1::3] Y_train_stage2[1::3])<line_sep>error_list=np.array(Y_test_stage2.reshape(len(Y_test_stage2) 1)-gpnas.get_predict_jiont(X_test_stage2 X_train_stage2[::1] Y_train_stage2[::1]))<line_sep>print('RMSE using stage1 as prior:' np.sqrt(np.dot(error_list.T error_list)/len(error_list)))<block_end>
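# Worked illustration in standard Python (toy data, not part of the original demo) of the t_flag slicing used by preprare_trainning_data: t_flag == 1 reuses nearly the whole set for both splits, while t_flag == 2 sends even-indexed samples to training and odd-indexed samples to test.
import numpy as np

X_all = np.arange(10)
t_flag = 2
X_train, X_test = X_all[0::t_flag], X_all[1::t_flag]
print(X_train)  # [0 2 4 6 8]
print(X_test)   # [1 3 5 7 9]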
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>torch.nn.qat<as>nnqat<import_stmt>torch.nn.intrinsic<import_stmt>torch.nn.functional<as>F<class_stmt>LinearReLU(nnqat.Linear)<block_start>r""" A LinearReLU module fused from Linear and ReLU modules, attached with FakeQuantize modules for output activation and weight, used in quantization aware training. We adopt the same interface as :class:`torch.nn.Linear`. Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to default. Attributes: activation_post_process: fake quant module for output activation weight: fake quant module for weight Examples:: >>> m = nn.qat.LinearReLU(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """<line_sep>_FLOAT_MODULE=torch.nn.intrinsic.LinearReLU<def_stmt>__init__ self in_features out_features bias=<true> qconfig=<none><block_start>super(LinearReLU self).__init__(in_features out_features bias qconfig)<block_end><def_stmt>forward self input<block_start><return>self.activation_post_process(F.relu(F.linear(input self.weight_fake_quant(self.weight) self.bias)))<block_end>@classmethod<def_stmt>from_float cls mod qconfig=<none><block_start><return>super(LinearReLU cls).from_float(mod qconfig)<block_end><block_end>
<import_stmt>unittest<import_stmt>astar<class_stmt>BasicTests(unittest.TestCase)<block_start><def_stmt>test_bestpath self<block_start>"""ensure that we take the shortest path, and not the path with fewer elements. the path with fewer elements is A -> B with a distance of 100 the shortest path is A -> C -> D -> B with a distance of 60 """<line_sep>nodes={'A':[('B' 100) ('C' 20)] 'C':[('D' 20)] 'D':[('B' 20)]}<def_stmt>neighbors n<block_start><for_stmt>n1,d nodes[n]<block_start><yield>n1<block_end><block_end><def_stmt>distance n1 n2<block_start><for_stmt>n,d nodes[n1]<block_start><if_stmt>n<eq>n2<block_start><return>d<block_end><block_end><block_end><def_stmt>cost n goal<block_start><return>1<block_end>path=list(astar.find_path('A' 'B' neighbors_fnct=neighbors heuristic_cost_estimate_fnct=cost distance_between_fnct=distance))<line_sep>self.assertEqual(4 len(path))<for_stmt>i,n enumerate('ACDB')<block_start>self.assertEqual(n path[i])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
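# Standalone usage sketch in standard Python, mirroring the toy graph and keyword arguments from the test above.
import astar

nodes = {'A': [('B', 100), ('C', 20)], 'C': [('D', 20)], 'D': [('B', 20)]}

path = astar.find_path(
    'A', 'B',
    neighbors_fnct=lambda n: (n1 for n1, d in nodes.get(n, [])),
    heuristic_cost_estimate_fnct=lambda n, goal: 1,
    distance_between_fnct=lambda n1, n2: dict(nodes[n1])[n2],
)
print(list(path))  # ['A', 'C', 'D', 'B']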
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
<import_stmt>logging<import_from_stmt>typing Callable Optional List Tuple<import_stmt>pandas<as>pd<import_from_stmt>autogluon.tabular TabularPredictor<as>AutogluonTabularPredictor<import_from_stmt>gluonts.core.component validated<import_from_stmt>gluonts.dataset.common Dataset<import_from_stmt>gluonts.dataset.util to_pandas<import_from_stmt>gluonts.time_feature TimeFeature get_lags_for_frequency time_features_from_frequency_str <import_from_stmt>gluonts.model.estimator Estimator<import_from_stmt>.predictor TabularPredictor mean_abs_scaling get_features_dataframe <line_sep>logger=logging.getLogger(__name__)<class_stmt>TabularEstimator(Estimator)<block_start>"""An estimator that trains an Autogluon Tabular model for time series forecasting. Additional keyword arguments to the constructor, other than the ones documented below, will be passed on to Autogluon Tabular's ``fit`` method used for training the model. Parameters ---------- freq Frequency of the data to handle prediction_length Prediction length lag_indices List of indices of the lagged observations to use as features. If None, this will be set automatically based on the frequency. time_features List of time features to be used. If None, this will be set automatically based on the frequency. scaling Function to be used to scale time series. This should take a pd.Series object as input, and return a scaled pd.Series and the scale (float). By default, this divides a series by the mean of its absolute value. batch_size Batch size of the resulting predictor; this is just used at prediction time, and does not affect training in any way. disable_auto_regression Whether to forcefully disable auto-regression in the model. If ``True``, this will remove any lag index which is smaller than ``prediction_length``. This will make predictions more efficient, but may impact their accuracy. quantiles_to_predict Whether to forecast in a quantile way. If assigned with quantile values, this will train the model using a quantile prediction model. If None, then the model will be trained in a regular way. 
"""<line_sep>@validated()<def_stmt>__init__ self freq:str prediction_length:int lag_indices:Optional[List[int]]=<none> time_features:Optional[List[TimeFeature]]=<none> scaling:Callable[[pd.Series] Tuple[pd.Series float]]=mean_abs_scaling batch_size:Optional[int]=32 disable_auto_regression:bool=<false> last_k_for_val:Optional[int]=<none> quantiles_to_predict:Optional[List[float]]=<none> eval_metric:str="mean_absolute_error" **kwargs <arrow><none><block_start>super().__init__()<line_sep>self.freq=freq<line_sep>self.prediction_length=prediction_length<line_sep>self.lag_indices=(lag_indices<if>lag_indices<is><not><none><else>get_lags_for_frequency(self.freq))<line_sep>self.time_features=(time_features<if>time_features<is><not><none><else>time_features_from_frequency_str(self.freq))<line_sep>self.batch_size=batch_size<line_sep>self.disable_auto_regression=disable_auto_regression<line_sep>self.scaling=scaling<line_sep>self.last_k_for_val=last_k_for_val<line_sep>self.eval_metric=eval_metric<line_sep>self.quantiles_to_predict=quantiles_to_predict<if_stmt>self.disable_auto_regression<block_start>self.lag_indices=[lag_idx<for>lag_idx self.lag_indices<if>lag_idx<ge>self.prediction_length]<block_end>default_kwargs={"time_limit":60 # "excluded_model_types": ["KNN", "XT", "RF"], "presets":["high_quality_fast_inference_only_refit" "optimize_for_deployment" ] "auto_stack":<true> }<line_sep>self.kwargs={**default_kwargs **kwargs}<block_end><def_stmt>train self training_data:Dataset validation_data:Optional[Dataset]=<none> <arrow>TabularPredictor<block_start>kwargs_override={}<line_sep>dfs=[get_features_dataframe(series=self.scaling(to_pandas(entry))[0] time_features=self.time_features lag_indices=self.lag_indices )<for>entry training_data]<if_stmt>validation_data<is><not><none><or>self.last_k_for_val<is><not><none><block_start>kwargs_override["auto_stack"]=<false><line_sep>logger.warning("Auto Stacking is turned off "<concat>"as validation dataset is provided before input into Tabular Predictor.")<block_end><if_stmt>validation_data<is><not><none><block_start>logger.log(20 "Validation dataset is directly provided.")<line_sep>validation_dfs=[get_features_dataframe(series=self.scaling(to_pandas(entry))[0] time_features=self.time_features lag_indices=self.lag_indices )<for>entry validation_data]<line_sep>train_df=pd.concat(dfs)<line_sep>val_df=pd.concat(validation_dfs)<block_end><elif_stmt>self.last_k_for_val<is><not><none><block_start>logger.log(20 f"last_k_for_val is provided, choosing last {self.last_k_for_val} of each time series as validation set." )<line_sep>train_dfs=[tmp_df.iloc[:-self.last_k_for_val :]<for>tmp_df dfs]<line_sep>validation_dfs=[tmp_df.iloc[-self.last_k_for_val: :]<for>tmp_df dfs]<line_sep>train_df=pd.concat(train_dfs)<line_sep>val_df=pd.concat(validation_dfs)<block_end><else_stmt><block_start>logger.log(20 "No validation dataset is provided, will let TabularPredictor do the splitting automatically,"<concat>"Note that this might break the time order of time series data." 
)<line_sep>train_df=pd.concat(dfs)<line_sep>val_df=<none><block_end><if_stmt>self.quantiles_to_predict<is><not><none><block_start>ag_model=AutogluonTabularPredictor(label="target" problem_type="quantile" quantile_levels=self.quantiles_to_predict ).fit(train_df tuning_data=val_df **{**self.kwargs **kwargs_override} )<block_end><else_stmt><block_start>ag_model=AutogluonTabularPredictor(label="target" problem_type="regression" eval_metric=self.eval_metric ).fit(train_df tuning_data=val_df **{**self.kwargs **kwargs_override} )<block_end><return>TabularPredictor(ag_model=ag_model freq=self.freq prediction_length=self.prediction_length time_features=self.time_features lag_indices=self.lag_indices scaling=self.scaling batch_size=self.batch_size quantiles_to_predict=self.quantiles_to_predict )<block_end><block_end>
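# Hedged usage sketch in standard Python: training the estimator on a GluonTS repository dataset. The dataset name and time_limit are illustrative values, and it assumes the companion TabularPredictor behaves like a regular GluonTS predictor (its predict method takes a dataset and yields forecasts).
from gluonts.dataset.repository.datasets import get_dataset

dataset = get_dataset("m4_hourly")
estimator = TabularEstimator(
    freq=dataset.metadata.freq,
    prediction_length=dataset.metadata.prediction_length,
    time_limit=120,  # forwarded to AutogluonTabularPredictor.fit via **kwargs
)
predictor = estimator.train(dataset.train)
forecasts = list(predictor.predict(dataset.test))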
<import_stmt>numpy<as>np<import_from_stmt>collections defaultdict Counter<import_stmt>random<import_stmt>json<import_from_stmt>tqdm tqdm<def_stmt>transX dataset<block_start>rel2id=json.load(open(dataset+'/relation2ids'))<line_sep>ent2id=json.load(open(dataset+'/ent2ids'))<with_stmt>open('../Fast-TransX/'+dataset+'_base/entity2id.txt' 'w')<as>g1<block_start>num_ents=len(ent2id.keys())<line_sep>g1.write(str(num_ents)+'\n')<for_stmt>k,v ent2id.items()<block_start>g1.write(k+'\t'+str(v)+'\n')<block_end><block_end><with_stmt>open('../Fast-TransX/'+dataset+'_base/relation2id.txt' 'w')<as>g1<block_start>num_rels=len(rel2id.keys())<line_sep>g1.write(str(num_rels)+'\n')<for_stmt>k,v rel2id.items()<block_start>g1.write(k+'\t'+str(v)+'\n')<block_end><block_end>file_name=dataset+'/path_graph'<line_sep>train_triples=[]<with_stmt>open(file_name)<as>f<block_start>lines=f.readlines()<for_stmt>line tqdm(lines)<block_start>e1=line.split('\t')[0]<line_sep>e2=line.rstrip().split('\t')[2]<line_sep>rel=line.split('\t')[1]<line_sep>train_triples.append([e1 rel e2])<line_sep>train_triples.append([e2 rel+'_inv' e1])<block_end><block_end><with_stmt>open('../Fast-TransX/'+dataset+'_base/train2id.txt' 'w')<as>g3<block_start>num_triples=len(train_triples)<line_sep>g3.write(str(num_triples)+'\n')<for_stmt>triple train_triples<block_start>e1,rel,e2=triple<line_sep>g3.write(str(ent2id[e1])+'\t'+str(ent2id[e2])+'\t'+str(rel2id[rel])+'\n')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>transX('Wiki')<block_end>
<import_from_stmt>amadeus.client.decorator Decorator<class_stmt>TripParserStatus(Decorator object)<block_start><def_stmt>__init__ self client job_id<block_start>Decorator.__init__(self client)<line_sep>self.job_id=job_id<block_end><def_stmt>get self **params<block_start>''' Returns the parsing status and the link to the result in case of successful parsing. .. code-block:: python amadeus.travel.trip_parser_jobs.status('XXX').get :rtype: amadeus.Response :raises amadeus.ResponseError: if the request could not be completed '''<line_sep><return>self.client.get('/v2/travel/trip-parser-jobs/{0}'.format(self.job_id) **params)<block_end><block_end>
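# Minimal usage sketch in standard Python; the credentials and the job id 'XXX' are placeholders, following the call shown in the docstring above.
from amadeus import Client, ResponseError

amadeus = Client(client_id='YOUR_API_KEY', client_secret='YOUR_API_SECRET')
try:
    response = amadeus.travel.trip_parser_jobs.status('XXX').get()
    print(response.data)
except ResponseError as error:
    print(error)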
<import_stmt>py<import_stmt>pytest<import_from_stmt>iniconfig IniConfig ParseError __all__<as>ALL<import_from_stmt>iniconfig iscommentline<import_from_stmt>textwrap dedent<line_sep>check_tokens={'section':('[section]' [(0 'section' <none> <none>)]) 'value':('value = 1' [(0 <none> 'value' '1')]) 'value in section':('[section]\nvalue=1' [(0 'section' <none> <none>) (1 'section' 'value' '1')]) 'value with continuation':('names =\n Alice\n Bob' [(0 <none> 'names' 'Alice\nBob')]) 'value with aligned continuation':('names = Alice\n'<concat>' Bob' [(0 <none> 'names' 'Alice\nBob')]) 'blank line':('[section]\n\nvalue=1' [(0 'section' <none> <none>) (2 'section' 'value' '1')]) 'comment':('# comment' []) 'comment on value':('value = 1' [(0 <none> 'value' '1')]) 'comment on section':('[section] #comment' [(0 'section' <none> <none>)]) 'comment2':('; comment' []) 'comment2 on section':('[section] ;comment' [(0 'section' <none> <none>)]) 'pseudo section syntax in value':('name = value []' [(0 <none> 'name' 'value []')]) 'assignment in value':('value = x = 3' [(0 <none> 'value' 'x = 3')]) 'use of colon for name-values':('name: y' [(0 <none> 'name' 'y')]) 'use of colon without space':('value:y=5' [(0 <none> 'value' 'y=5')]) 'equality gets precedence':('value=xyz:5' [(0 <none> 'value' 'xyz:5')]) }<line_sep>@pytest.fixture(params=sorted(check_tokens))<def_stmt>input_expected request<block_start><return>check_tokens[request.param]<block_end>@pytest.fixture<def_stmt>input input_expected<block_start><return>input_expected[0]<block_end>@pytest.fixture<def_stmt>expected input_expected<block_start><return>input_expected[1]<block_end><def_stmt>parse input# only for testing purposes - _parse() does not use state except path <block_start>ini=object.__new__(IniConfig)<line_sep>ini.path="sample"<line_sep><return>ini._parse(input.splitlines(<true>))<block_end><def_stmt>parse_a_error input<block_start><return>py.test.raises(ParseError parse input)<block_end><def_stmt>test_tokenize input expected<block_start>parsed=parse(input)<assert_stmt>parsed<eq>expected<block_end><def_stmt>test_parse_empty <block_start>parsed=parse("")<assert_stmt><not>parsed<line_sep>ini=IniConfig("sample" "")<assert_stmt><not>ini.sections<block_end><def_stmt>test_ParseError <block_start>e=ParseError("filename" 0 "hello")<assert_stmt>str(e)<eq>"filename:1: hello"<block_end><def_stmt>test_continuation_needs_perceeding_token <block_start>excinfo=parse_a_error(' Foo')<assert_stmt>excinfo.value.lineno<eq>0<block_end><def_stmt>test_continuation_cant_be_after_section <block_start>excinfo=parse_a_error('[section]\n Foo')<assert_stmt>excinfo.value.lineno<eq>1<block_end><def_stmt>test_section_cant_be_empty <block_start>excinfo=parse_a_error('[]')<assert_stmt>excinfo.value.lineno<eq>0<block_end>@py.test.mark.parametrize('line' ['!!' 
])<def_stmt>test_error_on_weird_lines line<block_start>parse_a_error(line)<block_end><def_stmt>test_iniconfig_from_file tmpdir<block_start>path=tmpdir/'test.txt'<line_sep>path.write('[metadata]\nname=1')<line_sep>config=IniConfig(path=path)<assert_stmt>list(config.sections)<eq>['metadata']<line_sep>config=IniConfig(path "[diff]")<assert_stmt>list(config.sections)<eq>['diff']<with_stmt>pytest.raises(TypeError)<block_start>IniConfig(data=path.read())<block_end><block_end><def_stmt>test_iniconfig_section_first tmpdir<block_start><with_stmt>pytest.raises(ParseError)<as>excinfo<block_start>IniConfig("x" data='name=1')<block_end><assert_stmt>excinfo.value.msg<eq>"no section header defined"<block_end><def_stmt>test_iniconig_section_duplicate_fails <block_start><with_stmt>pytest.raises(ParseError)<as>excinfo<block_start>IniConfig("x" data='[section]\n[section]')<block_end><assert_stmt>'duplicate section'<in>str(excinfo.value)<block_end><def_stmt>test_iniconfig_duplicate_key_fails <block_start><with_stmt>pytest.raises(ParseError)<as>excinfo<block_start>IniConfig("x" data='[section]\nname = Alice\nname = bob')<block_end><assert_stmt>'duplicate name'<in>str(excinfo.value)<block_end><def_stmt>test_iniconfig_lineof <block_start>config=IniConfig("x.ini" data=('[section]\n'<concat>'value = 1\n'<concat>'[section2]\n'<concat>'# comment\n'<concat>'value =2'))<assert_stmt>config.lineof('missing')<is><none><assert_stmt>config.lineof('section')<eq>1<assert_stmt>config.lineof('section2')<eq>3<assert_stmt>config.lineof('section' 'value')<eq>2<assert_stmt>config.lineof('section2' 'value')<eq>5<assert_stmt>config['section'].lineof('value')<eq>2<assert_stmt>config['section2'].lineof('value')<eq>5<block_end><def_stmt>test_iniconfig_get_convert <block_start>config=IniConfig("x" data='[section]\nint = 1\nfloat = 1.1')<assert_stmt>config.get('section' 'int')<eq>'1'<assert_stmt>config.get('section' 'int' convert=int)<eq>1<block_end><def_stmt>test_iniconfig_get_missing <block_start>config=IniConfig("x" data='[section]\nint = 1\nfloat = 1.1')<assert_stmt>config.get('section' 'missing' default=1)<eq>1<assert_stmt>config.get('section' 'missing')<is><none><block_end><def_stmt>test_section_get <block_start>config=IniConfig("x" data='[section]\nvalue=1')<line_sep>section=config['section']<assert_stmt>section.get('value' convert=int)<eq>1<assert_stmt>section.get('value' 1)<eq>"1"<assert_stmt>section.get('missing' 2)<eq>2<block_end><def_stmt>test_missing_section <block_start>config=IniConfig("x" data='[section]\nvalue=1')<with_stmt>pytest.raises(KeyError)<block_start>config["other"]<block_end><block_end><def_stmt>test_section_getitem <block_start>config=IniConfig("x" data='[section]\nvalue=1')<assert_stmt>config['section']['value']<eq>'1'<assert_stmt>config['section']['value']<eq>'1'<block_end><def_stmt>test_section_iter <block_start>config=IniConfig("x" data='[section]\nvalue=1')<line_sep>names=list(config['section'])<assert_stmt>names<eq>['value']<line_sep>items=list(config['section'].items())<assert_stmt>items<eq>[('value' '1')]<block_end><def_stmt>test_config_iter <block_start>config=IniConfig("x.ini" data=dedent(''' [section1] value=1 [section2] value=2 '''))<line_sep>l=list(config)<assert_stmt>len(l)<eq>2<assert_stmt>l[0].name<eq>'section1'<assert_stmt>l[0]['value']<eq>'1'<assert_stmt>l[1].name<eq>'section2'<assert_stmt>l[1]['value']<eq>'2'<block_end><def_stmt>test_config_contains <block_start>config=IniConfig("x.ini" data=dedent(''' [section1] value=1 [section2] value=2 
'''))<assert_stmt>'xyz'<not><in>config<assert_stmt>'section1'<in>config<assert_stmt>'section2'<in>config<block_end><def_stmt>test_iter_file_order <block_start>config=IniConfig("x.ini" data=""" [section2] #cpython dict ordered before section value = 1 value2 = 2 # dict ordered before value [section] a = 1 b = 2 """)<line_sep>l=list(config)<line_sep>secnames=[x.name<for>x l]<assert_stmt>secnames<eq>['section2' 'section']<assert_stmt>list(config['section2'])<eq>['value' 'value2']<assert_stmt>list(config['section'])<eq>['a' 'b']<block_end><def_stmt>test_example_pypirc <block_start>config=IniConfig("pypirc" data=dedent(''' [distutils] index-servers = pypi other [pypi] repository: <repository-url> username: <username> password: <password> [other] repository: http://example.com/pypi username: <username> password: <password> '''))<line_sep>distutils,pypi,other=list(config)<assert_stmt>distutils["index-servers"]<eq>"pypi\nother"<assert_stmt>pypi['repository']<eq>'<repository-url>'<assert_stmt>pypi['username']<eq>'<username>'<assert_stmt>pypi['password']<eq>'<password>'<assert_stmt>['repository' 'username' 'password']<eq>list(other)<block_end><def_stmt>test_api_import <block_start><assert_stmt>ALL<eq>['IniConfig' 'ParseError']<block_end>@pytest.mark.parametrize("line" ["#qwe" " #qwe" ";qwe" " ;qwe" ])<def_stmt>test_iscommentline_true line<block_start><assert_stmt>iscommentline(line)<block_end>
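# Quick usage sketch in standard Python of the API exercised by the tests above (illustrative ini data).
from iniconfig import IniConfig

config = IniConfig("x.ini", data="[section]\nvalue = 1\nflag = yes")
section = config["section"]
print(section.get("value", convert=int))                # 1
print(config.get("section", "missing", default="n/a"))  # n/a
print(config.lineof("section", "flag"))                 # 3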
<import_stmt>sys<line_sep>sys.path.insert(0 '..')<import_from_stmt>data.whale_data exchnage_accounts<import_from_stmt>data.html_helper check_if_address_name_exists<import_from_stmt>data.whale_eth_tx_data *<import_from_stmt>data.whale_token_tx_data identify_investor_type_token<line_sep>holding_account="holding_account"<line_sep>deposit_account='deposit_account'<line_sep>withdraw_account="withdraw_account"<line_sep>in_type="IN"<line_sep>out_type="OUT"<line_sep>all_acc_types=dict()<for_stmt>acc exchnage_accounts<block_start>all_acc_types[acc]=exchange_type<block_end><def_stmt>update_y_array X y timestamp amount<block_start>target_index=0<for_stmt>i range(len(X))<block_start>x_time=X[i]<if_stmt>timestamp<l>x_time<block_start>target_index=i<line_sep><break><block_end><block_end><for_stmt>i range(target_index len(y))<block_start>y[i]<augadd>amount<block_end><return>y<block_end><def_stmt>perform_bfs_on_accounts out_txs top_holder_type acc m_type='OUT'<block_start>print("\t"+m_type)<line_sep>unique_out=set()<for_stmt>out out_txs<block_start>unique_out.add(out[3])<block_end>unique_out=list(unique_out)[:5]<for_stmt>out unique_out<block_start>print("\t"+out)<if_stmt>out<not><in>all_acc_types<block_start>investor_type=identify_investor_type(out)<if_stmt>investor_type<eq>affliate_type<block_start>investor_type=identify_investor_type_token(out)<block_end>print("\t\t{}".format(investor_type))<block_end><else_stmt><block_start>investor_type=all_acc_types[out]<block_end><if_stmt>investor_type<eq>exchange_type<block_start>top_holder_type[acc]=deposit_account<if>m_type<eq>"OUT"<else>withdraw_account<block_end>all_acc_types[out]=investor_type<block_end><if_stmt>acc<not><in>top_holder_type<block_start>top_holder_type[acc]=holding_account<block_end><return>top_holder_type<block_end><def_stmt>calculate_holding_amount X escape_accounts txs<block_start>top_holder_type=dict()<for_stmt>acc txs<block_start>tx=txs[acc]<if_stmt>acc<in>escape_accounts<block_start><continue><block_end># if the current account has never sent tokens out, ignore it
out_txs=[item<for>item tx<if>item[2]<eq>'OUT']<if_stmt>len(out_txs)<eq>0<block_start>print("\tholding account")<line_sep>top_holder_type[acc]=holding_account<line_sep><continue><block_end><block_end># build all trace Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y=[0]<times>len(X)<for_stmt>holder txs<block_start><if_stmt>holder<in>escape_accounts<block_start><continue><block_end><if_stmt>holder<not><in>top_holder_type<block_start>print("{} not identified! ".format(holder))<line_sep><continue><block_end>holder_type=top_holder_type[holder]<line_sep>holder_txs=txs[holder]<line_sep>print("{} {}".format(holder holder_type))<for_stmt>tx holder_txs<block_start>[timestamp from_a tx_type to_a amount]=tx<if_stmt>holder_type<eq>holding_account<block_start><if_stmt>tx_type<eq>in_type<block_start>amount_trace_y=update_y_array(X amount_trace_y timestamp amount)<block_end><else_stmt><block_start>amount_trace_y=update_y_array(X amount_trace_y timestamp -amount)<block_end><block_end><block_end><block_end><return>amount_trace_y<block_end>
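# Worked example in standard Python (illustrative data) of update_y_array above: every y entry from the first timestamp in X that is strictly greater than `timestamp` onwards is shifted by `amount`.
X = [10, 20, 30, 40]
y = [0, 0, 0, 0]
y = update_y_array(X, y, timestamp=15, amount=5)
print(y)  # [0, 5, 5, 5]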
"""Computation of ensemble anomalies based on a desired value."""<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>scipy stats<line_sep># User-defined packages <import_from_stmt>read_netcdf read_iris save_n_2d_fields<import_from_stmt>sel_season_area sel_area sel_season<def_stmt>ens_anom filenames dir_output name_outputs varname numens season area extreme<block_start>"""Ensemble anomalies. Computation of the ensemble anomalies based on the desired value from the input variable (it can be the percentile, mean, maximum, standard deviation or trend) OUTPUT: NetCDF files of ensemble mean of climatology, selected value and anomaly maps. """<line_sep>print('The name of the output files will be <variable>_{0}.txt'.format(name_outputs))<line_sep>print('Number of ensemble members: {0}'.format(numens))<line_sep>outfiles=[]<line_sep># Reading the netCDF file of 3Dfield, for all the ensemble members var_ens=[]<for_stmt>ens range(numens)<block_start>ifile=filenames[ens]<line_sep># print('ENSEMBLE MEMBER %s' %ens) var,varunits,lat,lon,dates,_=read_iris(ifile)<line_sep># Convertion from kg m-2 s-1 to mm/day <if_stmt>varunits<eq>'kg m-2 s-1'<block_start>var=var<times>86400# there are 86400 seconds in a day varunits='mm/day'<block_end># Selecting a season (DJF,DJFM,NDJFM,JJA) var_season,_=sel_season(var dates season)<line_sep># Selecting only [latS-latN, lonW-lonE] box region var_area,lat_area,lon_area=sel_area(lat lon var_season area)<line_sep>var_ens.append(var_area)<block_end><if_stmt>varunits<eq>'kg m-2 s-1'<block_start>print('\nPrecipitation rate units were converted from kg m-2 s-1 '<concat>'to mm/day')<block_end>print('The variable is {0} ({1})'.format(varname varunits))<line_sep>print('Original var shape: (time x lat x lon)={0}'.format(var.shape))<line_sep>print('var shape after selecting season {0} and area {1}: '<concat>'(time x lat x lon)={2}'.format(season area var_area.shape))<if_stmt>extreme<eq>'mean'# Compute the time mean over the entire period, for each ens member <block_start>varextreme_ens=[np.nanmean(var_ens[i] axis=0)<for>i range(numens)]<block_end><elif_stmt>len(extreme.split("_"))<eq>2# Compute the chosen percentile over the period, for each ens member <block_start>quant=int(extreme.partition("th")[0])<line_sep>varextreme_ens=[np.nanpercentile(var_ens[i] quant axis=0)<for>i range(numens)]<block_end><elif_stmt>extreme<eq>'maximum'# Compute the maximum value over the period, for each ensemble member <block_start>varextreme_ens=[np.nanmax(var_ens[i] axis=0)<for>i range(numens)]<block_end><elif_stmt>extreme<eq>'std'# Compute the standard deviation over the period, for each ens member <block_start>varextreme_ens=[np.nanstd(var_ens[i] axis=0)<for>i range(numens)]<block_end><elif_stmt>extreme<eq>'trend'# Compute the linear trend over the period, for each ensemble member <block_start>trendmap=np.empty((var_ens[0].shape[1] var_ens[0].shape[2]))<line_sep>trendmap_ens=[]<for_stmt>i range(numens)<block_start><for_stmt>jla range(var_ens[0].shape[1])<block_start><for_stmt>jlo range(var_ens[0].shape[2])<block_start>slope,_,_,_,_=stats.linregress(range(var_ens[0].shape[0]) var_ens[i][: jla jlo])<line_sep>trendmap[jla jlo]=slope<block_end><block_end>trendmap_ens.append(trendmap.copy())<block_end>varextreme_ens=trendmap_ens<block_end>varextreme_ens_np=np.array(varextreme_ens)<line_sep>print('Anomalies are computed with respect to the {0}'.format(extreme))<line_sep># Compute and save the anomalies with respect to the ensemble ens_anomalies=varextreme_ens_np-np.nanmean(varextreme_ens_np 
axis=0)<line_sep>varsave='ens_anomalies'<line_sep>ofile=os.path.join(dir_output 'ens_anomalies_{0}.nc'.format(name_outputs))<line_sep># print(ofile) print('ens_anomalies shape: (numens x lat x lon)={0}'.format(ens_anomalies.shape))<line_sep>save_n_2d_fields(lat_area lon_area ens_anomalies varsave varunits ofile)<line_sep>outfiles.append(ofile)<line_sep># Compute and save the climatology vartimemean_ens=[np.mean(var_ens[i] axis=0)<for>i range(numens)]<line_sep>ens_climatologies=np.array(vartimemean_ens)<line_sep>varsave='ens_climatologies'<line_sep>ofile=os.path.join(dir_output 'ens_climatologies_{0}.nc'.format(name_outputs))<line_sep>save_n_2d_fields(lat_area lon_area ens_climatologies varsave varunits ofile)<line_sep>outfiles.append(ofile)<line_sep>ens_extreme=varextreme_ens_np<line_sep>varsave='ens_extreme'<line_sep>ofile=os.path.join(dir_output 'ens_extreme_{0}.nc'.format(name_outputs))<line_sep>save_n_2d_fields(lat_area lon_area ens_extreme varsave varunits ofile)<line_sep>outfiles.append(ofile)<line_sep><return>outfiles<block_end>
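# Hedged call sketch in standard Python; the member file names, season/area codes and the output tag are placeholders, not values taken from the module.
member_files = ['pr_member_%d.nc' % i for i in range(3)]
outfiles = ens_anom(
    filenames=member_files,
    dir_output='./output',
    name_outputs='pr_DJF_EU_75th',
    varname='pr',
    numens=len(member_files),
    season='DJF',
    area='EU',
    extreme='75th_percentile',  # parsed above as the 75th percentile over the selected season
)
print(outfiles)  # anomaly, climatology and extreme-value NetCDF paths, in that order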
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
<import_stmt>os<import_stmt>pytest<import_from_stmt>llnl.util.filesystem mkdirp touch<import_stmt>spack.config<import_from_stmt>spack.fetch_strategy CacheURLFetchStrategy NoCacheError<import_from_stmt>spack.stage Stage<line_sep>@pytest.mark.parametrize('_fetch_method' ['curl' 'urllib'])<def_stmt>test_fetch_missing_cache tmpdir _fetch_method<block_start>"""Ensure a missing cache file raises an error."""<line_sep>testpath=str(tmpdir)<with_stmt>spack.config.override('config:url_fetch_method' _fetch_method)<block_start>fetcher=CacheURLFetchStrategy(url='file:///not-a-real-cache-file')<with_stmt>Stage(fetcher path=testpath)<block_start><with_stmt>pytest.raises(NoCacheError match=r'No cache')<block_start>fetcher.fetch()<block_end><block_end><block_end><block_end>@pytest.mark.parametrize('_fetch_method' ['curl' 'urllib'])<def_stmt>test_fetch tmpdir _fetch_method<block_start>"""Ensure a fetch after expanding is effectively a no-op."""<line_sep>testpath=str(tmpdir)<line_sep>cache=os.path.join(testpath 'cache.tar.gz')<line_sep>touch(cache)<line_sep>url='file:///{0}'.format(cache)<with_stmt>spack.config.override('config:url_fetch_method' _fetch_method)<block_start>fetcher=CacheURLFetchStrategy(url=url)<with_stmt>Stage(fetcher path=testpath)<as>stage<block_start>source_path=stage.source_path<line_sep>mkdirp(source_path)<line_sep>fetcher.fetch()<block_end><block_end><block_end>
# Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.orchestration.experimental.core.service_jobs."""<import_from_stmt>absl.testing.absltest mock<import_stmt>tensorflow<as>tf<import_from_stmt>tfx.orchestration.experimental.core service_jobs<import_from_stmt>tfx.orchestration.experimental.core test_utils<class_stmt>ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self._mock_service_job_manager=mock.create_autospec(service_jobs.ServiceJobManager instance=<true>)<line_sep>self._mock_service_job_manager.ensure_node_services.return_value=(service_jobs.ServiceStatus.SUCCESS)<line_sep>self._mock_service_job_manager.stop_node_services.return_value=<true><line_sep>self._mock_service_job_manager.is_pure_service_node.return_value=<true><line_sep>self._mock_service_job_manager.is_mixed_service_node.return_value=<false><line_sep>self._wrapper=service_jobs.ExceptionHandlingServiceJobManagerWrapper(self._mock_service_job_manager)<block_end><def_stmt>test_calls_forwarded_to_underlying_instance self<block_start>self.assertEqual(service_jobs.ServiceStatus.SUCCESS self._wrapper.ensure_node_services(mock.Mock() 'node1'))<line_sep>self.assertTrue(self._wrapper.stop_node_services(mock.Mock() 'node2'))<line_sep>self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock() 'node3'))<line_sep>self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock() 'node4'))<line_sep>self._mock_service_job_manager.ensure_node_services.assert_called_once_with(mock.ANY 'node1')<line_sep>self._mock_service_job_manager.stop_node_services.assert_called_once_with(mock.ANY 'node2')<line_sep>self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(mock.ANY 'node3')<line_sep>self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(mock.ANY 'node4')<block_end><def_stmt>test_ensure_node_services_exception_handling self<block_start>self._mock_service_job_manager.ensure_node_services.side_effect=RuntimeError('test error')<line_sep>self.assertEqual(service_jobs.ServiceStatus.FAILED self._wrapper.ensure_node_services(mock.Mock() 'node1'))<line_sep>self._mock_service_job_manager.ensure_node_services.assert_called_once_with(mock.ANY 'node1')<block_end><def_stmt>test_stop_node_services_exception_handling self<block_start>self._mock_service_job_manager.stop_node_services.side_effect=RuntimeError('test error')<line_sep>self.assertFalse(self._wrapper.stop_node_services(mock.Mock() 'node2'))<line_sep>self._mock_service_job_manager.stop_node_services.assert_called_once_with(mock.ANY 'node2')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
<import_from_future_stmt> absolute_import division print_function<import_stmt>matplotlib<import_stmt>numpy<as>np<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<line_sep>matplotlib.use('pdf')<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>abc abstractmethod ABCMeta<import_from_stmt>dragonn.metrics ClassificationResult<import_from_stmt>sklearn.svm SVC<as>scikit_SVC<import_from_stmt>sklearn.tree DecisionTreeClassifier<as>scikit_DecisionTree<import_from_stmt>sklearn.ensemble RandomForestClassifier<import_from_stmt>keras.models load_model<import_from_stmt>dragonn.runtime_metrics *<import_from_stmt>dragonn.custom_losses *<import_stmt>warnings<line_sep>warnings.filterwarnings('ignore')<def_stmt>load_dragonn_model model_string<block_start>custom_objects={"recall":recall "sensitivity":recall "specificity":specificity "fpr":fpr "fnr":fnr "fdr":fdr "precision":precision "f1":f1 "spearman_corr":spearman_corr "ambig_binary_crossentropy":ambig_binary_crossentropy "ambig_mean_squared_error":ambig_mean_squared_error}<line_sep>model=load_model(model_string custom_objects=custom_objects)<line_sep><return>model<block_end><class_stmt>Model(object)<block_start>__metaclass__=ABCMeta<line_sep>@abstractmethod<def_stmt>__init__ self **hyperparameters<block_start><pass><block_end>@abstractmethod<def_stmt>train self X y validation_data<block_start><pass><block_end>@abstractmethod<def_stmt>predict self X<block_start><pass><block_end><def_stmt>test self X y<block_start><return>ClassificationResult(y self.predict(X))<block_end><def_stmt>score self X y metric<block_start><return>self.test(X y)[metric]<block_end><block_end><class_stmt>SequenceDNN(Model)<block_start>""" Sequence DNN models. Parameters ---------- seq_length : int, optional length of input sequence. keras_model : instance of keras.models.Sequential, optional seq_length or keras_model must be specified. num_tasks : int, optional number of tasks. Default: 1. num_filters : list[int] | tuple[int] number of convolutional filters in each layer. Default: (15,). conv_width : list[int] | tuple[int] width of each layer's convolutional filters. Default: (15,). pool_width : int width of max pooling after the last layer. Default: 35. L1 : float strength of L1 penalty. dropout : float dropout probability in every convolutional layer. Default: 0. verbose: int Verbosity level during training. Valid values: 0, 1, 2. Returns ------- Compiled DNN model. 
"""<def_stmt>__init__ self seq_length=<none> keras_model=<none> use_RNN=<false> num_tasks=1 num_filters=(15 15 15) conv_width=(15 15 15) pool_width=35 GRU_size=35 TDD_size=15 L1=0 dropout=0.0 num_epochs=100 verbose=1<block_start><import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers.core Activation Dense Dropout Flatten Permute Reshape <import_from_stmt>keras.layers.convolutional Convolution2D MaxPooling2D<import_from_stmt>keras.layers.recurrent GRU<import_from_stmt>keras.regularizers l1<line_sep>self.num_tasks=num_tasks<line_sep>self.num_epochs=num_epochs<line_sep>self.verbose=verbose<line_sep>self.train_metrics=[]<line_sep>self.valid_metrics=[]<if_stmt>keras_model<is><not><none><and>seq_length<is><none><block_start>self.model=keras_model<line_sep>self.num_tasks=keras_model.layers[-1].output_shape[-1]<block_end><elif_stmt>seq_length<is><not><none><and>keras_model<is><none><block_start>self.model=Sequential()<assert_stmt>len(num_filters)<eq>len(conv_width)<for_stmt>i,(nb_filter nb_col) enumerate(zip(num_filters conv_width))<block_start>conv_height=4<if>i<eq>0<else>1<line_sep>self.model.add(Convolution2D(nb_filter=nb_filter nb_row=conv_height nb_col=nb_col activation='linear' init='he_normal' input_shape=(1 4 seq_length) W_regularizer=l1(L1) b_regularizer=l1(L1)))<line_sep>self.model.add(Activation('relu'))<line_sep>self.model.add(Dropout(dropout))<block_end>self.model.add(MaxPooling2D(pool_size=(1 pool_width)))<if_stmt>use_RNN<block_start>num_max_pool_outputs=self.model.layers[-1].output_shape[-1]<line_sep>self.model.add(Reshape((num_filters[-1] num_max_pool_outputs)))<line_sep>self.model.add(Permute((2 1)))<line_sep>self.model.add(GRU(GRU_size return_sequences=<true>))<line_sep>self.model.add(TimeDistributedDense(TDD_size activation='relu'))<block_end>self.model.add(Flatten())<line_sep>self.model.add(Dense(output_dim=self.num_tasks))<line_sep>self.model.add(Activation('sigmoid'))<line_sep>self.model.compile(optimizer='adam' loss='binary_crossentropy')<block_end><else_stmt><block_start><raise>ValueError("Exactly one of seq_length or keras_model must be specified!")<block_end><block_end><def_stmt>train self X y validation_data early_stopping_metric='Loss' early_stopping_patience=5 save_best_model_to_prefix=<none><block_start><if_stmt>y.dtype<ne>bool<block_start><assert_stmt>set(np.unique(y))<eq>{0 1}<line_sep>y=y.astype(bool)<block_end>multitask=y.shape[1]<g>1<if_stmt><not>multitask<block_start>num_positives=y.sum()<line_sep>num_sequences=len(y)<line_sep>num_negatives=num_sequences-num_positives<block_end><if_stmt>self.verbose<ge>1<block_start>print('Training model (* indicates new best result)...')<block_end>X_valid,y_valid=validation_data<line_sep>early_stopping_wait=0<line_sep>best_metric=np.inf<if>early_stopping_metric<eq>'Loss'<else>-np.inf<for_stmt>epoch range(1 self.num_epochs+1)<block_start>self.model.fit(X y batch_size=128 nb_epoch=1 class_weight={<true>:num_sequences/num_positives <false>:num_sequences/num_negatives}<if><not>multitask<else><none> verbose=self.verbose<ge>2)<line_sep>epoch_train_metrics=self.test(X y)<line_sep>epoch_valid_metrics=self.test(X_valid y_valid)<line_sep>self.train_metrics.append(epoch_train_metrics)<line_sep>self.valid_metrics.append(epoch_valid_metrics)<if_stmt>self.verbose<ge>1<block_start>print('Epoch {}:'.format(epoch))<line_sep>print('Train {}'.format(epoch_train_metrics))<line_sep>print('Valid {}'.format(epoch_valid_metrics) 
end='')<block_end>current_metric=epoch_valid_metrics[early_stopping_metric].mean()<if_stmt>(early_stopping_metric<eq>'Loss')<eq>(current_metric<le>best_metric)<block_start><if_stmt>self.verbose<ge>1<block_start>print(' *')<block_end>best_metric=current_metric<line_sep>best_epoch=epoch<line_sep>early_stopping_wait=0<if_stmt>save_best_model_to_prefix<is><not><none><block_start>self.save(save_best_model_to_prefix)<block_end><block_end><else_stmt><block_start><if_stmt>self.verbose<ge>1<block_start>print()<block_end><if_stmt>early_stopping_wait<ge>early_stopping_patience<block_start><break><block_end>early_stopping_wait<augadd>1<block_end><block_end><if_stmt>self.verbose<ge>1<block_start>print('Finished training after {} epochs.'.format(epoch))<if_stmt>save_best_model_to_prefix<is><not><none><block_start>print("The best model's architecture and weights (from epoch {0}) "<concat>'were saved to {1}.arch.json and {1}.weights.h5'.format(best_epoch save_best_model_to_prefix))<block_end><block_end><block_end><def_stmt>predict self X<block_start><return>self.model.predict(X batch_size=128 verbose=<false>)<block_end><def_stmt>get_sequence_filters self<block_start>""" Returns 3D array of 2D sequence filters. """<line_sep><return>self.model.layers[0].get_weights()[0].squeeze(axis=1)<block_end>@staticmethod<def_stmt>_plot_scores X output_directory peak_width score_func score_name<block_start><import_from_stmt>dragonn.plot plot_bases_on_ax<line_sep>scores=score_func(X).squeeze(axis=2)# (num_task, num_samples, num_bases, sequence_length) <try_stmt><block_start>os.makedirs(output_directory)<block_end><except_stmt>OSError<block_start><pass><block_end>num_tasks=len(scores)<for_stmt>task_index,task_scores enumerate(scores)<block_start><for_stmt>sequence_index,sequence_scores enumerate(task_scores)# sequence_scores is num_bases x sequence_length <block_start>basewise_max_sequence_scores=sequence_scores.max(axis=0)<line_sep>plt.clf()<line_sep>figure,(top_axis bottom_axis)=plt.subplots(2)<line_sep>top_axis.plot(range(1 len(basewise_max_sequence_scores)+1) basewise_max_sequence_scores)<line_sep>top_axis.set_title('{} scores (motif highlighted)'.format(score_name))<line_sep>peak_position=basewise_max_sequence_scores.argmax()<line_sep>top_axis.axvspan(peak_position-peak_width peak_position+peak_width color='grey' alpha=0.1)<line_sep>peak_sequence_scores=sequence_scores[: peak_position-peak_width:peak_position+peak_width].T<line_sep># Set non-max letter_heights to zero letter_heights=np.zeros_like(peak_sequence_scores)<line_sep>letter_heights[np.arange(len(letter_heights)) peak_sequence_scores.argmax(axis=1)]=basewise_max_sequence_scores[peak_position-peak_width:peak_position+peak_width]<line_sep>plot_bases_on_ax(letter_heights bottom_axis)<line_sep>bottom_axis.set_xticklabels(tuple(map(str np.arange(peak_position-peak_width peak_position+peak_width+1))))<line_sep>bottom_axis.tick_params(axis='x' labelsize='small')<line_sep>plt.xlabel('Position')<line_sep>plt.ylabel('Score')<line_sep>plt.savefig(os.path.join(output_directory 'sequence_{}{}'.format(sequence_index '_task_{}'.format(task_index)<if>num_tasks<g>1<else>'')))<line_sep>plt.close()<block_end><block_end><block_end><def_stmt>plot_deeplift self X output_directory peak_width=10<block_start>self._plot_scores(X output_directory peak_width score_func=self.deeplift score_name='DeepLift')<block_end><def_stmt>plot_in_silico_mutagenesis self X output_directory peak_width=10<block_start>self._plot_scores(X output_directory peak_width score_func=self.in_silico_mutagenesis 
score_name='ISM')<block_end><def_stmt>plot_architecture self output_file<block_start><import_from_stmt>dragonn.visualize_util plot<as>plot_keras_model<line_sep>plot_keras_model(self.model output_file show_shape=<true>)<block_end><def_stmt>save self save_best_model_to_prefix<block_start>arch_fname=save_best_model_to_prefix+'.arch.json'<line_sep>weights_fname=save_best_model_to_prefix+'.weights.h5'<line_sep>open(arch_fname 'w').write(self.model.to_json())<line_sep>self.model.save_weights(weights_fname overwrite=<true>)<block_end>@staticmethod<def_stmt>load model_hdf5_fname=<none> arch_fname=<none> weights_fname=<none><block_start><if_stmt>model_hdf5_fname<ne><none><block_start><import_from_stmt>keras.models load_model<line_sep>sequence_dnn=SequenceDNN(keras_model=load_model(model_hdf5_fname))<block_end><else_stmt><block_start><import_from_stmt>keras.models model_from_json<line_sep>model_json_string=open(arch_fname).read()<line_sep>sequence_dnn=SequenceDNN(keras_model=model_from_json(model_json_string))<if_stmt>weights_fname<is><not><none><block_start>sequence_dnn.model.load_weights(weights_fname)<block_end><block_end><return>sequence_dnn<block_end><block_end><class_stmt>MotifScoreRNN(Model)<block_start><def_stmt>__init__ self input_shape gru_size=10 tdd_size=4<block_start><import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers.core Activation Dense Flatten TimeDistributedDense <import_from_stmt>keras.layers.recurrent GRU<line_sep>self.model=Sequential()<line_sep>self.model.add(GRU(gru_size return_sequences=<true> input_shape=input_shape))<if_stmt>tdd_size<is><not><none><block_start>self.model.add(TimeDistributedDense(tdd_size))<block_end>self.model.add(Flatten())<line_sep>self.model.add(Dense(1))<line_sep>self.model.add(Activation('sigmoid'))<line_sep>print('Compiling model...')<line_sep>self.model.compile(optimizer='adam' loss='binary_crossentropy')<block_end><def_stmt>train self X y validation_data<block_start><import_from_stmt>keras.callbacks EarlyStopping<line_sep>print('Training model...')<line_sep>multitask=y.shape[1]<g>1<if_stmt><not>multitask<block_start>num_positives=y.sum()<line_sep>num_sequences=len(y)<line_sep>num_negatives=num_sequences-num_positives<block_end>self.model.fit(X y batch_size=128 nb_epoch=100 validation_data=validation_data class_weight={<true>:num_sequences/num_positives <false>:num_sequences/num_negatives}<if><not>multitask<else><none> callbacks=[EarlyStopping(monitor='val_loss' patience=10)] verbose=<true>)<block_end><def_stmt>predict self X<block_start><return>self.model.predict(X batch_size=128 verbose=<false>)<block_end><block_end><class_stmt>gkmSVM(Model)<block_start><def_stmt>__init__ self prefix='./gkmSVM' word_length=11 mismatches=3 C=1 threads=1 cache_memory=100 verbosity=4<block_start>self.word_length=word_length<line_sep>self.mismatches=mismatches<line_sep>self.C=C<line_sep>self.threads=threads<line_sep>self.prefix='_'.join(map(str (prefix word_length mismatches C)))<line_sep>options_list=zip(['-l' '-d' '-c' '-T' '-m' '-v'] map(str (word_length mismatches C threads cache_memory verbosity)))<line_sep>self.options=' '.join([' '.join(option)<for>option options_list])<block_end>@property<def_stmt>model_file self<block_start>model_fname='{}.model.txt'.format(self.prefix)<line_sep><return>model_fname<if>os.path.isfile(model_fname)<else><none><block_end>@staticmethod<def_stmt>encode_sequence_into_fasta_file sequence_iterator ofname<block_start>"""writes sequences into fasta file """<with_stmt>open(ofname "w")<as>wf<block_start><for_stmt>i,seq 
enumerate(sequence_iterator)<block_start>print('>{}'.format(i) file=wf)<line_sep>print(seq file=wf)<block_end><block_end><block_end><def_stmt>train self X y validation_data=<none><block_start>""" Trains gkm-svm, saves model file. """<line_sep>y=y.squeeze()<line_sep>pos_sequence=X[y]<line_sep>neg_sequence=X[~y]<line_sep>pos_fname="%s.pos_seq.fa"%self.prefix<line_sep>neg_fname="%s.neg_seq.fa"%self.prefix<line_sep># create temporary fasta files self.encode_sequence_into_fasta_file(pos_sequence pos_fname)<line_sep>self.encode_sequence_into_fasta_file(neg_sequence neg_fname)<line_sep># run command command=' '.join(('gkmtrain' self.options pos_fname neg_fname self.prefix))<line_sep>process=subprocess.Popen(command stdout=subprocess.PIPE shell=<true>)<line_sep>process.wait()# wait for it to finish # remove fasta files os.system("rm %s"%pos_fname)<line_sep>os.system("rm %s"%neg_fname)<block_end><def_stmt>predict self X<block_start><if_stmt>self.model_file<is><none><block_start><raise>RuntimeError("GkmSvm hasn't been trained!")<block_end># write test fasta file test_fname="%s.test.fa"%self.prefix<line_sep>self.encode_sequence_into_fasta_file(X test_fname)<line_sep># test gkmsvm temp_ofp=tempfile.NamedTemporaryFile()<line_sep>threads_option='-T %s'%(str(self.threads))<line_sep>command=' '.join(['gkmpredict' test_fname self.model_file temp_ofp.name threads_option])<line_sep>process=subprocess.Popen(command shell=<true>)<line_sep>process.wait()# wait for it to finish os.system("rm %s"%test_fname)# remove fasta file # get classification results temp_ofp.seek(0)<line_sep>y=np.array([line.split()[-1]<for>line temp_ofp] dtype=float)<line_sep>temp_ofp.close()<line_sep><return>np.expand_dims(y 1)<block_end><block_end><class_stmt>SVC(Model)<block_start><def_stmt>__init__ self<block_start>self.classifier=scikit_SVC(probability=<true> kernel='linear')<block_end><def_stmt>train self X y validation_data=<none><block_start>self.classifier.fit(X y)<block_end><def_stmt>predict self X<block_start><return>self.classifier.predict_proba(X)[: 1:]<block_end><block_end><class_stmt>DecisionTree(Model)<block_start><def_stmt>__init__ self<block_start>self.classifier=scikit_DecisionTree()<block_end><def_stmt>train self X y validation_data=<none><block_start>self.classifier.fit(X y)<block_end><def_stmt>predict self X<block_start>predictions=np.asarray(self.classifier.predict_proba(X))[<ellipsis> 1]<if_stmt>len(predictions.shape)<eq>2# multitask <block_start>predictions=predictions.T<block_end><else_stmt># single-task <block_start>predictions=np.expand_dims(predictions 1)<block_end><return>predictions<block_end><block_end><class_stmt>RandomForest(DecisionTree)<block_start><def_stmt>__init__ self<block_start>self.classifier=RandomForestClassifier(n_estimators=100)<block_end><block_end>
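# Hedged smoke-test sketch in standard Python with random data (not part of the original module): inputs follow the (num_sequences, 1, 4, seq_length) one-hot convention that SequenceDNN above expects.
import numpy as np

seq_length = 500
num_sequences = 200
X = np.zeros((num_sequences, 1, 4, seq_length), dtype='float32')
X[np.arange(num_sequences)[:, None], 0,
  np.random.randint(0, 4, (num_sequences, seq_length)),
  np.arange(seq_length)] = 1.0  # random one-hot "DNA"
y = np.random.rand(num_sequences, 1) > 0.5

model = SequenceDNN(seq_length=seq_length, num_filters=(15,), conv_width=(15,), num_epochs=5)
model.train(X[:150], y[:150], validation_data=(X[150:], y[150:]))
print(model.test(X[150:], y[150:]))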
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Diabetic retinopathy diagnosis BDL Benchmark."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>os<import_from_stmt>typing Callable<import_from_stmt>typing Dict<import_from_stmt>typing Optional<import_from_stmt>typing Sequence<import_from_stmt>typing Text<import_from_stmt>typing Tuple<import_from_stmt>typing Union<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>tensorflow<as>tf<import_from_stmt>absl logging<import_from_stmt>..core transforms<import_from_stmt>..core.benchmark Benchmark<import_from_stmt>..core.benchmark BenchmarkInfo<import_from_stmt>..core.benchmark DataSplits<import_from_stmt>..core.constants DATA_DIR<import_from_stmt>..core.levels Level<line_sep>tfk=tf.keras<line_sep>_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR=os.path.join(DATA_DIR "downloads" "manual" "diabetic_retinopathy_diagnosis")<class_stmt>DiabeticRetinopathyDiagnosisBecnhmark(Benchmark)<block_start>"""Diabetic retinopathy diagnosis benchmark class."""<def_stmt>__init__ self level:Union[Text Level] batch_size:int=64 data_dir:Optional[Text]=<none> download_and_prepare:bool=<false> <block_start>"""Constructs a benchmark object. Args: level: `Level` or `str, downstream task level. batch_size: (optional) `int`, number of datapoints per mini-batch. data_dir: (optional) `str`, path to parent data directory. download_and_prepare: (optional) `bool`, if the data is not available it downloads and preprocesses it. """<line_sep>self.__level=level<if>isinstance(level Level)<else>Level.from_str(level)<try_stmt><block_start>self.__ds=self.load(level=level batch_size=batch_size data_dir=data_dir<or>DATA_DIR)<block_end><except_stmt>AssertionError<block_start><if_stmt><not>download_and_prepare<block_start><raise><block_end><else_stmt><block_start>logging.info("Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"<concat>" is now running...")<line_sep>self.download_and_prepare()<block_end><block_end><block_end>@classmethod<def_stmt>evaluate cls estimator:Callable[[np.ndarray] Tuple[np.ndarray np.ndarray]] dataset:tf.data.Dataset output_dir:Optional[Text]=<none> name:Optional[Text]=<none> <arrow>Dict[Text float]<block_start>"""Evaluates an `estimator` on the `mode` benchmark dataset. Args: estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation function, which returns `mean_x` and predictive `uncertainty_x`. dataset: `tf.data.Dataset`, on which dataset to performance evaluation. output_dir: (optional) `str`, directory to save figures. name: (optional) `str`, the name of the method. 
"""<import_stmt>inspect<import_stmt>tqdm<import_stmt>tensorflow_datasets<as>tfds<import_from_stmt>sklearn.metrics roc_auc_score<import_from_stmt>sklearn.metrics accuracy_score<import_stmt>matplotlib.pyplot<as>plt<line_sep># Containers used for caching performance evaluation y_true=list()<line_sep>y_pred=list()<line_sep>y_uncertainty=list()<line_sep># Convert to NumPy iterator if necessary ds=dataset<if>inspect.isgenerator(dataset)<else>tfds.as_numpy(dataset)<for_stmt>x,y tqdm.tqdm(ds)# Sample from probabilistic model <block_start>mean,uncertainty=estimator(x)<line_sep># Cache predictions y_true.append(y)<line_sep>y_pred.append(mean)<line_sep>y_uncertainty.append(uncertainty)<block_end># Use vectorized NumPy containers y_true=np.concatenate(y_true).flatten()<line_sep>y_pred=np.concatenate(y_pred).flatten()<line_sep>y_uncertainty=np.concatenate(y_uncertainty).flatten()<line_sep>fractions=np.asarray([0.5 0.6 0.7 0.8 0.9 1.0])<line_sep># Metrics for evaluation metrics=zip(["accuracy" "auc"] cls.metrics())<line_sep><return>{metric:cls._evaluate_metric(y_true y_pred y_uncertainty fractions <lambda>y_true y_pred:metric_fn(y_true y_pred).numpy() name )<for>(metric metric_fn) metrics}<block_end>@staticmethod<def_stmt>_evaluate_metric y_true:np.ndarray y_pred:np.ndarray y_uncertainty:np.ndarray fractions:Sequence[float] metric_fn:Callable[[np.ndarray np.ndarray] float] name=<none> <arrow>pd.DataFrame<block_start>"""Evaluate model predictive distribution on `metric_fn` at data retain `fractions`. Args: y_true: `numpy.ndarray`, the ground truth labels, with shape [N]. y_pred: `numpy.ndarray`, the model predictions, with shape [N]. y_uncertainty: `numpy.ndarray`, the model uncertainties, with shape [N]. fractions: `iterable`, the percentages of data to retain for calculating `metric_fn`. metric_fn: `lambda(y_true, y_pred) -> float`, a metric function that provides a score given ground truths and predictions. name: (optional) `str`, the name of the method. Returns: A `pandas.DataFrame` with columns ["retained_data", "mean", "std"], that summarizes the scores at different data retained fractions. 
"""<line_sep>N=y_true.shape[0]<line_sep># Sorts indexes by ascending uncertainty I_uncertainties=np.argsort(y_uncertainty)<line_sep># Score containers mean=np.empty_like(fractions)<line_sep># TODO(filangel): do bootstrap sampling and estimate standard error std=np.zeros_like(fractions)<for_stmt>i,frac enumerate(fractions)# Keep only the %-frac of lowest uncertainties <block_start>I=np.zeros(N dtype=bool)<line_sep>I[I_uncertainties[:int(N<times>frac)]]=<true><line_sep>mean[i]=metric_fn(y_true[I] y_pred[I])<block_end># Store df=pd.DataFrame(dict(retained_data=fractions mean=mean std=std))<line_sep>df.name=name<line_sep><return>df<block_end>@property<def_stmt>datasets self<arrow>tf.data.Dataset<block_start>"""Pointer to the processed datasets."""<line_sep><return>self.__ds<block_end>@property<def_stmt>info self<arrow>BenchmarkInfo<block_start>"""Text description of the benchmark."""<line_sep><return>BenchmarkInfo(description="" urls="" setup="" citation="")<block_end>@property<def_stmt>level self<arrow>Level<block_start>"""The downstream task level."""<line_sep><return>self.__level<block_end>@staticmethod<def_stmt>loss <arrow>tfk.losses.Loss<block_start>"""Loss used for training binary classifiers."""<line_sep><return>tfk.losses.BinaryCrossentropy()<block_end>@staticmethod<def_stmt>metrics <arrow>tfk.metrics.Metric<block_start>"""Evaluation metrics used for monitoring training."""<line_sep><return>[tfk.metrics.BinaryAccuracy() tfk.metrics.AUC()]<block_end>@staticmethod<def_stmt>class_weight <arrow>Sequence[float]<block_start>"""Class weights used for rebalancing the dataset, by skewing the `loss` accordingly."""<line_sep><return>[1.0 4.0]<block_end>@classmethod<def_stmt>load cls level:Union[Text Level]="realworld" batch_size:int=64 data_dir:Optional[Text]=<none> as_numpy:bool=<false> <arrow>DataSplits<block_start>"""Loads the datasets for the benchmark. Args: level: `Level` or `str, downstream task level. batch_size: (optional) `int`, number of datapoints per mini-batch. data_dir: (optional) `str`, path to parent data directory. as_numpy: (optional) `bool`, if True returns python generators with `numpy.ndarray` outputs. Returns: A namedtuple with properties: * train: `tf.data.Dataset`, train dataset. * validation: `tf.data.Dataset`, validation dataset. * test: `tf.data.Dataset`, test dataset. 
"""<import_stmt>tensorflow_datasets<as>tfds<import_from_stmt>.tfds_adapter DiabeticRetinopathyDiagnosis<line_sep># Fetch datasets <try_stmt><block_start>ds_train,ds_validation,ds_test=DiabeticRetinopathyDiagnosis(data_dir=data_dir<or>DATA_DIR config=level).as_dataset(split=["train" "validation" "test"] shuffle_files=<true> batch_size=batch_size)<block_end><except_stmt>AssertionError<as>ae<block_start><raise>AssertionError(str(ae)+" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"<concat>" first and then retry.")<block_end># Parse task level level=level<if>isinstance(level Level)<else>Level.from_str(level)<line_sep># Dataset tranformations transforms_train,transforms_eval=cls._preprocessors()<line_sep># Apply transformations ds_train=ds_train.map(transforms_train num_parallel_calls=tf.data.experimental.AUTOTUNE)<line_sep>ds_validation=ds_validation.map(transforms_eval num_parallel_calls=tf.data.experimental.AUTOTUNE)<line_sep>ds_test=ds_test.map(transforms_eval num_parallel_calls=tf.data.experimental.AUTOTUNE)<line_sep># Prefetches datasets to memory ds_train=ds_train.prefetch(tf.data.experimental.AUTOTUNE)<line_sep>ds_validation=ds_validation.prefetch(tf.data.experimental.AUTOTUNE)<line_sep>ds_test=ds_test.prefetch(tf.data.experimental.AUTOTUNE)<if_stmt>as_numpy# Convert to NumPy iterators <block_start>ds_train=tfds.as_numpy(ds_train)<line_sep>ds_validation=tfds.as_numpy(ds_validation)<line_sep>ds_test=tfds.as_numpy(ds_test)<block_end><return>DataSplits(ds_train ds_validation ds_test)<block_end>@classmethod<def_stmt>download_and_prepare cls levels=<none><arrow><none><block_start>"""Downloads dataset from Kaggle, extracts zip files and processes it using `tensorflow_datasets`. Args: levels: (optional) `iterable` of `str`, specifies which levels from {'medium', 'realworld'} to prepare, if None it prepares all the levels. Raises: OSError: if `~/.kaggle/kaggle.json` is not set up. """<line_sep># Disable GPU for data download, extraction and preparation <import_stmt>os<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]="-1"<line_sep>cls._download()<line_sep># cls._extract() #cls._prepare(levels) <block_end>@staticmethod<def_stmt>_download <arrow><none><block_start>"""Downloads data from Kaggle using `tensorflow_datasets`. Raises: OSError: if `~/.kaggle/kaggle.json` is not set up. 
"""<import_stmt>subprocess<as>sp<import_stmt>tensorflow_datasets<as>tfds<line_sep># Append `/home/$USER/.local/bin` to path os.environ["PATH"]<augadd>":/home/{}/.local/bin/".format(os.environ["USER"])<line_sep># Download all files from Kaggle drd=tfds.download.kaggle.KaggleCompetitionDownloader("diabetic-retinopathy-detection")<try_stmt><block_start><for_stmt>dfile drd.competition_files<block_start>drd.download_file(dfile output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)<block_end><block_end><except_stmt>sp.CalledProcessError<as>cpe<block_start><raise>OSError(str(cpe)+"."+" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"<concat>" https://www.kaggle.com/<username>/account -> 'Create New API Key'."<concat>" Also accept the dataset license by going to"<concat>" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"<concat>" and look for the button 'I Understand and Accept' (make sure when reloading the"<concat>" page that the button does not pop up again).")<block_end><block_end>@staticmethod<def_stmt>_extract <arrow><none><block_start>"""Extracts zip files downloaded from Kaggle."""<import_stmt>glob<import_stmt>tqdm<import_stmt>zipfile<import_stmt>tempfile<line_sep># Extract train and test original images <for_stmt>split ["train" "test"]# Extract "<split>.zip.00*"" files to "<split>" <block_start><with_stmt>tempfile.NamedTemporaryFile()<as>tmp# Concatenate "<split>.zip.00*" to "<split>.zip" <block_start><for_stmt>fname tqdm.tqdm(sorted(glob.glob(os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR "{split}.zip.00*".format(split=split)))))# Unzip "<split>.zip" to "<split>" <block_start><with_stmt>open(fname "rb")<as>ztmp<block_start>tmp.write(ztmp.read())<block_end><block_end><with_stmt>zipfile.ZipFile(tmp)<as>zfile<block_start><for_stmt>image tqdm.tqdm(iterable=zfile.namelist() total=len(zfile.namelist()))<block_start>zfile.extract(member=image path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)<block_end><block_end><block_end># Delete "<split>.zip.00*" files <for_stmt>splitzip os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)<block_start><if_stmt>"{split}.zip.00".format(split=split)<in>splitzip<block_start>os.remove(os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR splitzip))<block_end><block_end><block_end># Extract "sample.zip", "trainLabels.csv.zip" <for_stmt>fname ["sample" "trainLabels.csv"]<block_start>zfname=os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR "{fname}.zip".format(fname=fname))<with_stmt>zipfile.ZipFile(zfname)<as>zfile<block_start>zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)<block_end>os.remove(zfname)<block_end><block_end>@staticmethod<def_stmt>_prepare levels=<none><arrow><none><block_start>"""Generates the TFRecord objects for medium and realworld experiments."""<import_stmt>multiprocessing<import_from_stmt>absl logging<import_from_stmt>.tfds_adapter DiabeticRetinopathyDiagnosis<line_sep># Hangle each level individually <for_stmt>level levels<or>["medium" "realworld"]<block_start>dtask=DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR config=level)<line_sep>logging.debug("=== Preparing TFRecords for {} ===".format(level))<line_sep>dtask.download_and_prepare()<block_end><block_end>@classmethod<def_stmt>_preprocessors cls<arrow>Tuple[transforms.Transform transforms.Transform]<block_start>"""Applies transformations to the raw data."""<import_stmt>tensorflow_datasets<as>tfds<line_sep># Transformation hyperparameters mean=np.asarray([0.42606387 0.29752496 0.21309826])<line_sep>stddev=np.asarray([0.27662534 
0.20280295 0.1687619])<class_stmt>Parse(transforms.Transform)<block_start>"""Parses datapoints from raw `tf.data.Dataset`."""<def_stmt>__call__ self x y=<none><block_start>"""Returns `as_supervised` tuple."""<line_sep><return>x["image"] x["label"]<block_end><block_end><class_stmt>CastX(transforms.Transform)<block_start>"""Casts image to `dtype`."""<def_stmt>__init__ self dtype<block_start>"""Constructs a type caster."""<line_sep>self.dtype=dtype<block_end><def_stmt>__call__ self x y<block_start>"""Returns casted image (to `dtype`) and its (unchanged) label as tuple."""<line_sep><return>tf.cast(x self.dtype) y<block_end><block_end><class_stmt>To01X(transforms.Transform)<block_start>"""Rescales image to [min, max]=[0, 1]."""<def_stmt>__call__ self x y<block_start>"""Returns rescaled image and its (unchanged) label as tuple."""<line_sep><return>x/255.0 y<block_end><block_end># Get augmentation schemes [augmentation_config no_augmentation_config]=cls._ImageDataGenerator_config()<line_sep># Transformations for train dataset transforms_train=transforms.Compose([Parse() CastX(tf.float32) To01X() transforms.Normalize(mean stddev) # TODO(filangel): hangle batch with ImageDataGenerator # transforms.RandomAugment(**augmentation_config), ])<line_sep># Transformations for validation/test dataset transforms_eval=transforms.Compose([Parse() CastX(tf.float32) To01X() transforms.Normalize(mean stddev) # TODO(filangel): hangle batch with ImageDataGenerator # transforms.RandomAugment(**no_augmentation_config), ])<line_sep><return>transforms_train transforms_eval<block_end>@staticmethod<def_stmt>_ImageDataGenerator_config <block_start>"""Returns the configs for the `tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the random augmentation of the dataset, following the implementation of https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93 e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""<line_sep>augmentation_config=dict(featurewise_center=<false> samplewise_center=<false> featurewise_std_normalization=<false> samplewise_std_normalization=<false> zca_whitening=<false> rotation_range=180.0 width_shift_range=0.05 height_shift_range=0.05 shear_range=0. zoom_range=0.10 channel_shift_range=0. fill_mode="constant" cval=0. horizontal_flip=<true> vertical_flip=<true> data_format="channels_last" )<line_sep>no_augmentation_config=dict(featurewise_center=<false> samplewise_center=<false> featurewise_std_normalization=<false> samplewise_std_normalization=<false> zca_whitening=<false> rotation_range=0.0 width_shift_range=0.0 height_shift_range=0.0 shear_range=0. zoom_range=0.0 channel_shift_range=0. fill_mode="nearest" cval=0. horizontal_flip=<false> vertical_flip=<false> data_format="channels_last" )<line_sep><return>augmentation_config no_augmentation_config<block_end><block_end>
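# A minimal, hypothetical usage sketch of the benchmark class defined above; it is not part of the original module. It assumes the `datasets` and `evaluate` APIs behave as implemented above, and `random_estimator` is a placeholder standing in for a real uncertainty-aware model. <import_stmt>numpy<as>np<def_stmt>random_estimator x<block_start>"""Placeholder estimator: constant mean prediction with random per-example uncertainty."""<line_sep>n=len(x)<line_sep><return>np.full(n 0.5) np.random.uniform(size=n)<block_end>benchmark=DiabeticRetinopathyDiagnosisBecnhmark(level="medium" batch_size=64)<line_sep>scores=benchmark.evaluate(estimator=random_estimator dataset=benchmark.datasets.test name="random-baseline")<line_sep>print(scores["auc"])<line_sep>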
<import_stmt>setuptools<line_sep>setuptools.setup(name="monet_memory_optimized_training" version="0.0.1" description="Memory Optimized Network Training Framework" url="https://github.com/philkr/lowrank_conv" packages=setuptools.find_packages(include=['monet' 'monet.*' 'models' 'checkmate' 'gist']) classifiers=["Programming Language :: Python :: 3" "License :: OSI Approved :: MIT License" "Operating System :: OS Independent" ] python_requires='>=3.6' )<line_sep>
# Copyright (c) 2009-2021 The Regents of the University of Michigan # This file is part of the HOOMD-blue project, released under the BSD 3-Clause # License. """MPI communicator."""<import_from_stmt>hoomd _hoomd<import_stmt>hoomd<import_stmt>contextlib<class_stmt>Communicator(object)<block_start>"""MPI communicator. Args: mpi_comm: Accepts an mpi4py communicator. Use this argument to perform many independent hoomd simulations where you communicate between those simulations using mpi4py. ranks_per_partition (int): (MPI) Number of ranks to include in a partition. `Communicator` initialize MPI communications for a `hoomd.Simulation`. To use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or ``mpiexec``). By default, `Communicator` uses all ranks provided by the launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which decomposes the state onto that many domains. Set ``ranks_per_partition`` to an integer to partition launched ranks into ``num_launch_ranks / ranks_per_partition`` communicators, each with their own `partition` index. Use this to perform many simulations in parallel, for example by using `partition` as an index into an array of state points to execute. """<def_stmt>__init__ self mpi_comm=<none> ranks_per_partition=<none># check ranks_per_partition <block_start><if_stmt>ranks_per_partition<is><not><none><block_start><if_stmt><not>hoomd.version.mpi_enabled<block_start><raise>RuntimeError("The ranks_per_partition option is only available in MPI.\n")<block_end><block_end>mpi_available=hoomd.version.mpi_enabled<line_sep>self.cpp_mpi_conf=<none><line_sep># create the specified configuration <if_stmt>mpi_comm<is><none><block_start>self.cpp_mpi_conf=_hoomd.MPIConfiguration()<block_end><else_stmt><block_start><if_stmt><not>mpi_available<block_start><raise>RuntimeError("mpi_comm is not supported in serial builds")<block_end>handled=<false><line_sep># pass in pointer to MPI_Comm object provided by mpi4py <try_stmt><block_start><import_stmt>mpi4py<if_stmt>isinstance(mpi_comm mpi4py.MPI.Comm)<block_start>addr=mpi4py.MPI._addressof(mpi_comm)<line_sep>self.cpp_mpi_conf=_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)<line_sep>handled=<true><block_end><block_end><except_stmt>ImportError# silently ignore when mpi4py is missing <block_start><pass><block_end># undocumented case: handle plain integers as pointers to MPI_Comm # objects <if_stmt><not>handled<and>isinstance(mpi_comm int)<block_start>self.cpp_mpi_conf=_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)<line_sep>handled=<true><block_end><if_stmt><not>handled<block_start><raise>RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm))<block_end><block_end><if_stmt>ranks_per_partition<is><not><none># check validity <block_start><if_stmt>(self.cpp_mpi_conf.getNRanksGlobal()%ranks_per_partition)<block_start><raise>RuntimeError('Total number of ranks is not a multiple of '<concat>'ranks_per_partition.')<block_end># split the communicator into partitions self.cpp_mpi_conf.splitPartitions(ranks_per_partition)<block_end><block_end>@property<def_stmt>num_ranks self<block_start>"""int: The number of ranks in this partition. When initialized with ``ranks_per_partition=None``, `num_ranks` is equal to the ``num_launch_ranks`` set by the MPI launcher. When using partitions, `num_ranks` is equal to ``ranks_per_partition``. Note: Returns 1 in builds with ENABLE_MPI=off. 
"""<if_stmt>hoomd.version.mpi_enabled<block_start><return>self.cpp_mpi_conf.getNRanks()<block_end><else_stmt><block_start><return>1<block_end><block_end>@property<def_stmt>rank self<block_start>"""int: The current rank within the partition. Note: Returns 0 in builds with ENABLE_MPI=off. """<if_stmt>hoomd.version.mpi_enabled<block_start><return>self.cpp_mpi_conf.getRank()<block_end><else_stmt><block_start><return>0<block_end><block_end>@property<def_stmt>num_partitions self<block_start>"""int: The number of partitions in this execution. Create partitions with the ``ranks_per_partition`` argument on initialization. Then, the number of partitions is ``num_launch_ranks / ranks_per_partition``. Note: Returns 1 in builds with ENABLE_MPI=off. """<if_stmt>hoomd.version.mpi_enabled<block_start><return>self.cpp_mpi_conf.getNPartitions()<block_end><else_stmt><block_start><return>1<block_end><block_end>@property<def_stmt>partition self<block_start>"""int: The current partition. Note: Returns 0 in builds with ENABLE_MPI=off. """<if_stmt>hoomd.version.mpi_enabled<block_start><return>self.cpp_mpi_conf.getPartition()<block_end><else_stmt><block_start><return>0<block_end><block_end><def_stmt>barrier_all self<block_start>"""Perform a MPI barrier synchronization across all ranks. Note: Does nothing in builds with ENABLE_MPI=off. """<if_stmt>hoomd.version.mpi_enabled<block_start>_hoomd.mpi_barrier_world()<block_end><block_end><def_stmt>barrier self<block_start>"""Perform a barrier synchronization across all ranks in the partition. Note: Does nothing in builds with ENABLE_MPI=off. """<if_stmt>hoomd.version.mpi_enabled<block_start>self.cpp_mpi_conf.barrier()<block_end><block_end>@contextlib.contextmanager<def_stmt>localize_abort self<block_start>"""Localize MPI_Abort to this partition. HOOMD calls ``MPI_Abort`` to tear down all running MPI processes whenever there is an uncaught exception. By default, this will abort the entire MPI execution. When using partitions, an uncaught exception on one partition will therefore abort all of them. Use the return value of :py:meth:`localize_abort()` as a context manager to tell HOOMD that all operations within the context will use only that MPI communicator so that an uncaught exception in one partition will only abort that partition and leave the others running. """<line_sep><global>_current_communicator<line_sep>prev=_current_communicator<line_sep>_current_communicator=self<line_sep><yield><none><line_sep>_current_communicator=prev<block_end><block_end># store the "current" communicator to be used for MPI_Abort calls. This defaults # to the world communicator, but users can opt in to a more specific # communicator using the Device.localize_abort context manager _current_communicator=Communicator()<line_sep>
__author__='Rio'<line_sep>
''' Given an array of intervals, merge all overlapping intervals, and return an array of the non-overlapping intervals that cover all the intervals in the input. Input: intervals = [[1,3],[2,6],[8,10],[15,18]] Output: [[1,6],[8,10],[15,18]] Explanation: Since intervals [1,3] and [2,6] overlap, merge them into [1,6]. '''<def_stmt>merge intervals# sort the intervals by their start value <block_start>intervals.sort()<line_sep># stack that accumulates merged, non-overlapping intervals intervals_stack=[]<for_stmt>pair intervals<block_start><if_stmt>len(intervals_stack)<eq>0<block_start>intervals_stack.append(pair)# the first interval goes straight onto the stack <block_end># otherwise merge with the last stacked interval when the two overlap <else_stmt><block_start>current_pair=intervals_stack[-1]<if_stmt>current_pair[1]<ge>pair[0]<block_start>intervals_stack.pop()<if_stmt>current_pair[1]<l>pair[1]<block_start>new_pair=[current_pair[0] pair[1]]<line_sep>intervals_stack.append(new_pair)<block_end><else_stmt><block_start>new_pair=[current_pair[0] current_pair[1]]<line_sep>intervals_stack.append(new_pair)<block_end><block_end><else_stmt><block_start>intervals_stack.append(pair)<block_end><block_end><block_end># merged result <return>intervals_stack<block_end><if_stmt>__name__<eq>'__main__'<block_start>R=int(input("Enter the number of rows:"))<line_sep>C=int(input("Enter the number of columns:"))<line_sep>interval=[[int(input("Enter the elements: "))<for>x range(C)]<for>y range(R)]<line_sep>print("Overlapping interval: " interval)<line_sep>print("Non-overlapping intervals: " merge(interval))<block_end>""" Time complexity : O(n log n), dominated by the sort; the merge pass itself is O(n) Space complexity : O(n), for the stack of merged intervals INPUT:- Enter the number of rows:4 Enter the number of columns:2 Enter the elements: 1 Enter the elements: 3 Enter the elements: 2 Enter the elements: 6 Enter the elements: 8 Enter the elements: 10 Enter the elements: 15 Enter the elements: 18 OUTPUT:- Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]] Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]] """
print(18<times>1234)<line_sep>print(18<times>1234<times>2)<line_sep>print(0<times>1)<line_sep>print(1<times>0)<line_sep>print(0.0<times>1.0)<line_sep>print(1.0<times>0.0)<line_sep>
"""Command to run Nile scripts."""<import_stmt>logging<import_from_stmt>importlib.machinery SourceFileLoader<import_from_stmt>nile.nre NileRuntimeEnvironment<def_stmt>run path network<block_start>"""Run nile scripts passing on the NRE object."""<line_sep>logger=logging.getLogger()<line_sep>logger.disabled=<true><line_sep>script=SourceFileLoader("script" path).load_module()<line_sep>nre=NileRuntimeEnvironment(network)<line_sep>script.run(nre)<block_end>
array=[]<for_stmt>_ range(int(input()))<block_start>command=input().strip().split(" ")<line_sep>cmd_type=command[0]<if_stmt>(cmd_type<eq>"print")<block_start>print(array)<block_end><elif_stmt>(cmd_type<eq>"sort")<block_start>array.sort()<block_end><elif_stmt>(cmd_type<eq>"reverse")<block_start>array.reverse()<block_end><elif_stmt>(cmd_type<eq>"pop")<block_start>array.pop()<block_end><elif_stmt>(cmd_type<eq>"remove")<block_start>array.remove(int(command[1]))<block_end><elif_stmt>(cmd_type<eq>"append")<block_start>array.append(int(command[1]))<block_end><elif_stmt>(cmd_type<eq>"insert")<block_start>array.insert(int(command[1]) int(command[2]))<block_end><block_end>
<import_from_stmt>..downloader Downloader<import_stmt>os<import_stmt>pytest<line_sep>@pytest.fixture<def_stmt>cwd_to_tmpdir tmpdir<block_start>os.chdir(str(tmpdir))<block_end><def_stmt>test_audiobook_download cwd_to_tmpdir monkeypatch<block_start>audiobook_url="https://www.scribd.com/audiobook/237606860/100-Ways-to-Motivate-Yourself-Change-Your-Life-Forever"<line_sep>audiobook_downloader=Downloader(audiobook_url)<line_sep>audio=audiobook_downloader.download()<assert_stmt>audio[0]<eq>"100_Ways_to_Motivate_Yourself__Change_Your_Life_Forever_preview.mp3"<assert_stmt>os.path.getsize(audio[0])<eq>2127830<block_end><def_stmt>test_text_document_download cwd_to_tmpdir<block_start>text_doc_url="https://www.scribd.com/document/96882378/Trademark-License-Agreement"<line_sep>text_downloader=Downloader(text_doc_url)<line_sep>md_doc=text_downloader.download(is_image_document=<false>)<assert_stmt>os.path.getsize(md_doc.input_content)<in>range(1000 2000)<line_sep>md_doc.to_pdf()<assert_stmt>os.path.getsize(md_doc.pdf_path)<in>range(20000 31000)<block_end><def_stmt>test_img_document_download cwd_to_tmpdir<block_start>img_doc_url="https://www.scribd.com/doc/136711944/Signature-Scanning-and-Verification-in-Finacle"<line_sep>img_downloader=Downloader(img_doc_url)<line_sep>imgs=img_downloader.download(is_image_document=<true>)<assert_stmt>len(imgs.input_content)<eq>2<line_sep>imgs.to_pdf()<assert_stmt>os.path.getsize(imgs.pdf_path)<in>range(140000 150000)<block_end><def_stmt>test_book_download cwd_to_tmpdir monkeypatch<block_start>book_url="https://www.scribd.com/read/262694921/Acting-The-First-Six-Lessons"<line_sep>book_downloader=Downloader(book_url)<line_sep># We don't want to clutter stdout with book contents if this test fails monkeypatch.setattr("builtins.print" <lambda>x:<none>)<line_sep>md_book=book_downloader.download()<assert_stmt>os.path.getsize(md_book.input_content)<in>range(10000 20000)<line_sep>md_book.to_pdf()<assert_stmt>os.path.getsize(md_book.pdf_path)<in>range(200000 2500000)<block_end>
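# A hypothetical direct-usage sketch distilled from the tests above; it is not part of the original test module. It reuses the `Downloader` imported at the top of this file, and the URL is one already exercised by the tests. doc=Downloader("https://www.scribd.com/document/96882378/Trademark-License-Agreement").download(is_image_document=<false>)<line_sep>doc.to_pdf()<line_sep>print(doc.pdf_path)<line_sep>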
# -*- coding:utf-8 -*- # edit by fuzongfei <import_stmt>base64<import_stmt>datetime<line_sep># Create your views here. <import_stmt>json<import_from_stmt>django.http Http404 HttpResponse<import_from_stmt>django.utils timezone<import_from_stmt>django_filters.rest_framework DjangoFilterBackend<import_from_stmt>rest_framework filters<import_from_stmt>rest_framework.exceptions PermissionDenied<import_from_stmt>rest_framework.generics ListAPIView GenericAPIView CreateAPIView UpdateAPIView DestroyAPIView<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework.viewsets ViewSet<import_from_stmt>libs permissions<import_from_stmt>libs.Pagination Pagination<import_from_stmt>libs.RenderColumns render_dynamic_columns<import_from_stmt>libs.response JsonResponseV1<import_from_stmt>sqlorders models serializers<import_from_stmt>sqlorders.filters SqlOrderListFilter GetTasksListFilter<class_stmt>GetDBEnvironment(ListAPIView)<block_start>queryset=models.DbEnvironment.objects.all()<line_sep>serializer_class=serializers.DbEnvironmentSerializer<line_sep># 获取工单环境 <def_stmt>get self request *args **kwargs<block_start>serializer=self.get_serializer(self.get_queryset() many=<true>)<line_sep><return>JsonResponseV1(data=serializer.data)<block_end><block_end><class_stmt>GetDbSchemas(APIView)# 获取指定环境指定用途的schemas列表 <block_start><def_stmt>get self request<block_start>serializer=serializers.DbSchemasSerializer(data=request.query_params)<if_stmt>serializer.is_valid()<block_start><return>JsonResponseV1(data=serializer.query)<block_end><return>JsonResponseV1(message=serializer.errors code='0001')<block_end><block_end><class_stmt>IncepSyntaxCheckView(APIView)<block_start><def_stmt>post self request *args **kwargs<block_start>serializer=serializers.IncepSyntaxCheckSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>s,data=serializer.check()<line_sep>render_columns=[{'key':'order_id' 'value':'序号'} {'key':'stage' 'value':'阶段'} {'key':'stage_status' 'value':'阶段状态'} {'key':'error_level' 'value':'错误级别'} {'key':'error_message' 'value':'错误信息' 'width':'35%'} {'key':'sql' 'value':'SQL内容' 'width':'25%' 'ellipsis':<true>} {'key':'affected_rows' 'value':'影响/扫描行数'}]<line_sep>columns=render_dynamic_columns(render_columns)<line_sep>message='语法检查未发现异常,可以提交'<if_stmt><not>s<block_start>message='语法检查发现异常,详情请查看输出,更正后在提交'<block_end>d={'status':0<if>s<else>1 'data':data}<line_sep>data={'columns':columns 'data':d}<line_sep><return>JsonResponseV1(data=data message=message)<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>SqlOrdersCommit(GenericAPIView)<block_start>permission_classes=(permissions.CanCommitOrdersPermission )<line_sep>serializer_class=serializers.SqlOrdersCommitSerializer<def_stmt>post self request *args **kwargs<block_start>serializer=self.get_serializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(message="提交成功")<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>SqlOrdersList(ListAPIView)<block_start>permission_classes=(permissions.CanViewOrdersPermission )<line_sep>queryset=models.DbOrders.objects.all()<line_sep>serializer_class=serializers.SqlOrdersListSerializer<line_sep>pagination_class=Pagination<line_sep>filter_backends=[DjangoFilterBackend filters.SearchFilter filters.OrderingFilter]<line_sep>filter_class=SqlOrderListFilter<line_sep>ordering=['-created_at']<line_sep>search_fields=['title' 
'database' 'remark' 'applicant' 'progress' 'contents']<def_stmt>get self request *args **kwargs<block_start>queryset=self.filter_queryset(self.get_queryset())<line_sep>page=self.paginate_queryset(queryset)<line_sep>serializer=self.get_serializer(page many=<true>)<line_sep>render_columns=[{'key':'progress' 'value':'进度' 'width':'8%'} {'key':'applicant' 'value':'申请人'} {'key':'department' 'value':'部门'} {'key':'env_name' 'value':'环境'} {'key':'escape_title' 'value':'标题' 'width':'18%' 'ellipsis':<true>} {'key':'sql_type' 'value':'类型'} {'key':'remark' 'value':'备注'} {'key':'version' 'value':'版本'} {'key':'host' 'value':'实例/库'} {'key':'auditor' 'value':'审核人'} {'key':'reviewer' 'value':'复核人'} ]<line_sep>columns=render_dynamic_columns(render_columns)<line_sep>data={'columns':columns 'data':serializer.data}<line_sep><return>self.get_paginated_response(data)<block_end><block_end><class_stmt>SqlOrdersDetail(ListAPIView)<block_start>"""SQL工单详情"""<line_sep>permission_classes=(permissions.CanViewOrdersPermission )<line_sep>queryset=models.DbOrders.objects.all()<line_sep>serializer_class=serializers.SqlOrderDetailSerializer<line_sep>lookup_field='order_id'<def_stmt>get self request *args **kwargs<block_start>queryset=self.get_object()<line_sep>serializer=self.get_serializer(queryset context={"request":request})<line_sep><return>JsonResponseV1(data=serializer.data)<block_end><block_end><class_stmt>OpSqlOrderView(ViewSet)<block_start>"""更新SQL工单状态,如:审核,关闭等"""<line_sep>permission_classes=(permissions.CanViewOrdersPermission )<def_stmt>get_obj self pk<block_start><try_stmt><block_start>obj=models.DbOrders.objects.get(pk=pk)<line_sep><return>obj<block_end><except_stmt>models.DbOrders.DoesNotExist<block_start><raise>Http404<block_end><block_end><def_stmt>approve self request pk<block_start>serializer=serializers.OpSqlOrderSerializer(instance=self.get_obj(pk) data=request.data context={"request":request "handler":"_approve"})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(data=serializer.data message="操作成功")<block_end><return>JsonResponseV1(message=serializer.errors code='0001')<block_end><def_stmt>feedback self request pk<block_start>serializer=serializers.OpSqlOrderSerializer(instance=self.get_obj(pk) data=request.data context={"request":request "handler":"_feedback"})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(data=serializer.data message="操作成功")<block_end><return>JsonResponseV1(message=serializer.errors code='0001')<block_end><def_stmt>close self request pk<block_start>serializer=serializers.OpSqlOrderSerializer(instance=self.get_obj(pk) data=request.data context={"request":request "handler":"_close"})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(data=serializer.data message="操作成功")<block_end><return>JsonResponseV1(message=serializer.errors code='0001')<block_end><def_stmt>review self request pk<block_start>serializer=serializers.OpSqlOrderSerializer(instance=self.get_obj(pk) data=request.data context={"request":request "handler":"_review"})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(data=serializer.data message="操作成功")<block_end><return>JsonResponseV1(message=serializer.errors code='0001')<block_end><block_end><class_stmt>GenerateTasksView(APIView)<block_start>permission_classes=(permissions.CanExecuteOrdersPermission )<def_stmt>post self request *args 
**kwargs<block_start>serializer=serializers.GenerateSqlOrdersTasksSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>data=serializer.save(request)<line_sep><return>JsonResponseV1(data=data)<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>GetTaskIdView(APIView)<block_start><def_stmt>get self request *args **kwargs<block_start>"""根据order id返回taskid"""<line_sep>order_id=kwargs.get('order_id')<line_sep>task_id=models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id<line_sep><return>JsonResponseV1(data=task_id)<block_end><block_end><class_stmt>GetTasksPreviewView(ListAPIView)<block_start>permission_classes=(permissions.CanViewOrdersPermission )<line_sep>queryset=models.DbOrdersExecuteTasks.objects.all()<line_sep>serializer_class=serializers.SqlOrdersTasksListSerializer<line_sep>pagination_class=Pagination<line_sep>filter_backends=[DjangoFilterBackend filters.SearchFilter filters.OrderingFilter]<line_sep>filter_class=GetTasksListFilter<line_sep>search_fields=['sql']<line_sep>ordering=['created_time']<def_stmt>get self request *args **kwargs<block_start>task_id=kwargs.get('task_id')<line_sep>queryset=self.filter_queryset(self.get_queryset().filter(task_id=task_id))<line_sep># 数据隐藏按钮打开了 # 仅允许申请人、审核人、复核人和超权用户查看数据 obj=models.DbOrders.objects.get(pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id)<if_stmt>obj.is_hide<eq>'ON'<and><not>request.user.is_superuser<block_start>allowed_view_users=[obj.applicant]<line_sep>allowed_view_users.extend([x['user']<for>x json.loads(obj.auditor)])<line_sep>allowed_view_users.extend([x['user']<for>x json.loads(obj.reviewer)])<if_stmt>request.user.username<not><in>allowed_view_users<block_start><raise>PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')<block_end><block_end>origin_queryset=self.queryset.filter(task_id=task_id)<line_sep>total=origin_queryset.count()<line_sep>progress_0=origin_queryset.filter(progress=0).count()<line_sep>progress_1=origin_queryset.filter(progress=1).count()<line_sep>progress_3=origin_queryset.filter(progress=3).count()<line_sep>page=self.paginate_queryset(queryset)<line_sep>serializer=self.get_serializer(page context={'request':request} many=<true>)<line_sep>render_columns=[{'key':'num' 'value':'序号'} # 自定义num,前台显示序号使用 {'key':'applicant' 'value':'申请人'} {'key':'sql' 'value':'SQL' 'ellipsis':<true> 'width':'50%'} {'key':'progress' 'value':'进度'} {'key':'result' 'value':'查看结果'} # 自定义result ]<line_sep>columns=render_dynamic_columns(render_columns)<line_sep>data={'columns':columns 'data':{'data':serializer.data 'total':total 'progress_0':progress_0 'progress_1':progress_1 'progress_3':progress_3}}<line_sep><return>self.get_paginated_response(data)<block_end><block_end><class_stmt>GetTasksListView(ListAPIView)<block_start>permission_classes=(permissions.CanViewOrdersPermission )<line_sep>queryset=models.DbOrdersExecuteTasks.objects.all()<line_sep>serializer_class=serializers.SqlOrdersTasksListSerializer<line_sep>pagination_class=Pagination<line_sep>filter_backends=[DjangoFilterBackend filters.SearchFilter filters.OrderingFilter]<line_sep>filter_class=GetTasksListFilter<line_sep>search_fields=['sql']<line_sep>ordering=['created_time']<def_stmt>get self request *args **kwargs<block_start>task_id=kwargs.get('task_id')<line_sep>queryset=self.filter_queryset(self.get_queryset().filter(task_id=task_id))<line_sep># 数据隐藏按钮打开了 # 仅允许申请人、审核人、复核人和超权用户查看数据 
obj=models.DbOrders.objects.get(pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id)<if_stmt>obj.is_hide<eq>'ON'<and><not>request.user.is_superuser<block_start>allowed_view_users=[obj.applicant]<line_sep>allowed_view_users.extend([x['user']<for>x json.loads(obj.auditor)])<line_sep>allowed_view_users.extend([x['user']<for>x json.loads(obj.reviewer)])<if_stmt>request.user.username<not><in>allowed_view_users<block_start><raise>PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')<block_end><block_end>page=self.paginate_queryset(queryset)<line_sep>serializer=self.get_serializer(page context={'request':request} many=<true>)<line_sep>render_columns=[{'key':'num' 'value':'序号'} # 自定义num,前台显示序号使用 {'key':'applicant' 'value':'申请人'} {'key':'sql' 'value':'SQL' 'ellipsis':<true> 'width':'50%'} {'key':'progress' 'value':'进度'} {'key':'execute' 'value':'执行'} # 自定义execute {'key':'result' 'value':'查看结果'} # 自定义result ]<if_stmt>queryset.exists()<block_start><if_stmt>queryset.first().sql_type<eq>'DDL'<block_start>render_columns.insert(-1 {'key':'ghost_pause' 'value':'暂停(gh-ost)'})<line_sep>render_columns.insert(-1 {'key':'ghost_recovery' 'value':'恢复(gh-ost)'})<block_end><block_end>columns=render_dynamic_columns(render_columns)<line_sep>data={'columns':columns 'data':serializer.data}<line_sep><return>self.get_paginated_response(data)<block_end><block_end><class_stmt>ExecuteSingleTaskView(APIView)<block_start>permission_classes=(permissions.CanExecuteOrdersPermission )<def_stmt>post self request *args **kwargs<block_start>serializer=serializers.ExecuteSingleTaskSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>serializer.execute(request)<line_sep><return>JsonResponseV1(message="任务提交成功,请查看输出")<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>ExecuteMultiTasksView(APIView)<block_start>permission_classes=(permissions.CanExecuteOrdersPermission )<def_stmt>post self request *args **kwargs<block_start>serializer=serializers.ExecuteMultiTasksSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>serializer.execute(request)<line_sep><return>JsonResponseV1(message="任务提交成功,请查看输出")<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>ThrottleTaskView(APIView)<block_start>permission_classes=(permissions.CanExecuteOrdersPermission )<def_stmt>post self request *args **kwargs<block_start>serializer=serializers.ThrottleTaskSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>message=serializer.execute(request)<line_sep><return>JsonResponseV1(message=message)<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>GetTasksResultView(ListAPIView)<block_start>"""SQL工单详情"""<line_sep>permission_classes=(permissions.CanViewOrdersPermission )<line_sep>queryset=models.DbOrdersExecuteTasks.objects.all()<line_sep>serializer_class=serializers.GetTasksResultSerializer<line_sep>lookup_field='id'<def_stmt>get self request *args **kwargs<block_start>queryset=self.get_object()<line_sep>serializer=self.get_serializer(queryset context={"request":request})<line_sep><return>JsonResponseV1(data=serializer.data)<block_end><block_end><class_stmt>HookSqlOrdersView(APIView)<block_start>permission_classes=(permissions.anyof(permissions.CanCommitOrdersPermission permissions.CanViewOrdersPermission permissions.CanExecuteOrdersPermission permissions.CanAuditOrdersPermission) 
)<def_stmt>post self request *args **kwargs<block_start>serializer=serializers.HookSqlOrdersSerializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(message="任务提交成功,请查看输出")<block_end><return>JsonResponseV1(message=serializer.errors code='0001' flat=<true>)<block_end><block_end><class_stmt>DownloadExportFilesView(APIView)<block_start>"""下载导出文件"""<line_sep>permission_classes=(permissions.CanViewOrdersPermission )<def_stmt>get self request base64_filename<block_start>file_name=base64.b64decode(base64_filename).decode()<if_stmt><not>models.DbExportFiles.objects.filter(file_name=file_name).exists()<block_start><raise>Http404<block_end>obj=models.DbExportFiles.objects.get(file_name=file_name)<if_stmt><not>models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant<eq>request.user.username<block_start><raise>PermissionDenied(detail='您没有权限')<block_end>fsock=open(f"media/{obj.files}" 'rb')<line_sep>response=HttpResponse(fsock content_type="application/zip")<line_sep>response['Content-Disposition']=f'attachment; filename={file_name}'<line_sep><return>response<block_end><block_end><class_stmt>ReleaseVersionsGet(APIView)<block_start>"""获取上线版本号,提交工单使用"""<def_stmt>get self request<block_start>before_30_days=(timezone.now()-datetime.timedelta(days=30))<line_sep>queryset=models.ReleaseVersions.objects.filter(expire_time__gte=before_30_days).values('id' 'version' 'expire_time').order_by('-created_at')<for_stmt>row queryset<block_start>row['disabled']=0<if_stmt>row['expire_time']<l>datetime.datetime.date(timezone.now())<block_start>row['disabled']=1<block_end><block_end><return>JsonResponseV1(data=queryset)<block_end><block_end><class_stmt>ReleaseVersionsList(ListAPIView)<block_start>"""获取上线版本号列表,管理上线版本号使用"""<line_sep>permission_classes=(permissions.CanViewVersionPermission )<line_sep>queryset=models.ReleaseVersions.objects.all()<line_sep>serializer_class=serializers.ReleaseVersionsListSerializer<line_sep>pagination_class=Pagination<line_sep>filter_backends=[filters.SearchFilter filters.OrderingFilter]<line_sep>search_fields=['username' 'version' 'expire_time']<line_sep>ordering=['-created_at']<def_stmt>get self request *args **kwargs<block_start>queryset=self.filter_queryset(self.get_queryset())<line_sep>page=self.paginate_queryset(queryset)<line_sep>serializer=self.get_serializer(page many=<true>)<line_sep>render_columns=[{'key':'version' 'value':'版本'} {'key':'username' 'value':'创建人'} {'key':'expire_time' 'value':'截止日期'} {'key':'created_at' 'value':'创建时间'} {'key':'key' 'value':'操作'} {'key':'id' 'value':'详情'} ]<line_sep>columns=render_dynamic_columns(render_columns)<line_sep>data={'columns':columns 'data':serializer.data}<line_sep><return>self.get_paginated_response(data)<block_end><block_end><class_stmt>ReleaseVersionsCreate(CreateAPIView)<block_start>"""创建版本"""<line_sep>permission_classes=(permissions.CanCreateVersionsPermission )<line_sep>serializer_class=serializers.ReleaseVersionsCreateSerializer<def_stmt>create self request *args **kwargs<block_start>serializer=self.get_serializer(data=request.data)<if_stmt>serializer.is_valid()<block_start>self.perform_create(serializer)<line_sep><return>JsonResponseV1(message="创建成功")<block_end><return>JsonResponseV1(code='0001' message=serializer.errors flat=<true>)<block_end><block_end><class_stmt>ReleaseVersionsUpdate(UpdateAPIView)<block_start>"""更新版本号,该类只更新单条记录"""<line_sep>permission_classes=(permissions.CanUpdateVersionsPermission )<def_stmt>put self request *args 
**kwargs<block_start>serializer=serializers.ReleaseVersionsSerializer(instance=models.ReleaseVersions.objects.get(pk=kwargs['key']) # 返回单条记录 data=request.data)<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep><return>JsonResponseV1(message="更新成功")<block_end><return>JsonResponseV1(code='0001' message=serializer.errors flat=<true>)<block_end><block_end><class_stmt>ReleaseVersionsDelete(DestroyAPIView)<block_start>"""删除版本"""<line_sep>permission_classes=(permissions.CanDeleteVersionsPermission )<line_sep>queryset=models.ReleaseVersions.objects.all()<line_sep>lookup_field='id'# 默认为主键,可不写 <def_stmt>destroy self request *args **kwargs<block_start>instance=self.get_object()<line_sep>self.perform_destroy(instance)<line_sep><return>JsonResponseV1(message="删除成功")<block_end><block_end><class_stmt>ReleaseVersionsView(APIView)<block_start>"""获取指定版本内工单在所有环境的进度"""<def_stmt>get self request *args **kwargs# 获取版本对应的主键 <block_start>version=kwargs.get('version')<line_sep>version_id=models.ReleaseVersions.objects.get(version=version).pk<line_sep># 获取环境,行转为动态列 obj=models.DbEnvironment.objects.values('id' 'name')<line_sep>row2columns=''<for_stmt>row obj<block_start>row2columns<augadd>f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"<block_end># 获取任务下所有工单分别在各个环境中的状态,此处的环境为动态环境 # id没有实际意义 query=f"select "+row2columns+f"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant "<concat>f"from yasql_dborders where version_id='{version_id}' group by escape_title,order_id,applicant"<line_sep>rawquery=models.DbOrders.objects.raw(query)<line_sep># 获取环境列名 dynamic_columns=list(rawquery.columns)[:-4]<line_sep>data=[]<for_stmt>row rawquery<block_start>columns={'id':row.id 'escape_title':row.escape_title 'order_id':row.order_id 'applicant':row.applicant }<for_stmt>col dynamic_columns<block_start>columns[col]=getattr(row col)<block_end>data.append(columns)<block_end>render_columns=[{'key':'escape_title' 'ellipsis':<true> 'value':'标题'} {'key':'applicant' 'value':'申请人'} ]<line_sep>render_columns.extend([{'key':x 'value':x}<for>x dynamic_columns])<line_sep>columns=render_dynamic_columns(render_columns)<line_sep>data={'columns':columns 'data':data}<line_sep><return>JsonResponseV1(data=data)<block_end><block_end>
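# A hypothetical URL wiring sketch for a few of the views above; it is not part of the original module and would normally live in a separate urls.py. The route strings are placeholders; only the `as_view()` usage is implied by the DRF view classes. <import_from_stmt>django.urls path<line_sep>urlpatterns=[path('orders/commit' SqlOrdersCommit.as_view()) path('orders/list' SqlOrdersList.as_view()) path('versions/create' ReleaseVersionsCreate.as_view())]<line_sep>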
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>logging<import_stmt>traceback<import_from_stmt>yapsy.IPlugin IPlugin<import_from_stmt>activitystreams.models.activity Activity<import_from_stmt>dino utils<import_from_stmt>dino.config ErrorCodes<import_from_stmt>dino.config ConfigKeys<import_from_stmt>dino.environ GNEnvironment<line_sep>logger=logging.getLogger(__name__)<line_sep>__author__='<NAME> <<EMAIL>>'<class_stmt>OnMessageCheckContentLength(IPlugin)<block_start><def_stmt>__init__ self<block_start>super(OnMessageCheckContentLength self).__init__()<line_sep>self.env=<none><line_sep>self.enabled=<false><line_sep>self.max_length=1000<block_end><def_stmt>setup self env:GNEnvironment<block_start>self.env=env<line_sep>validation_config=self.env.config.get(ConfigKeys.VALIDATION)<if_stmt>'on_message'<not><in>validation_config<or>'limit_msg_length'<not><in>validation_config.get('on_message')<block_start>logger.info('no config enabled for plugin not_full, ignoring plugin')<line_sep><return><block_end>on_create_config=validation_config.get('on_message').get('limit_msg_length')<line_sep>self.enabled=<true><line_sep>self.max_length=on_create_config.get(ConfigKeys.MAX_MSG_LENGTH 1000)<block_end><def_stmt>_process self data:dict activity:Activity<block_start>message=activity.object.content<if_stmt>message<is><none><or>len(message.strip())<eq>0<block_start><return><true> <none> <none><block_end><if_stmt><not>utils.is_base64(message)<block_start><return><false> ErrorCodes.NOT_BASE64 'invalid message content, not base64 encoded'<block_end>message=utils.b64d(message)<if_stmt>len(message)<g>self.max_length<block_start><return><false> ErrorCodes.MSG_TOO_LONG 'message content needs to be shorter than %s characters'%self.max_length<block_end><return><true> <none> <none><block_end><def_stmt>__call__ self *args **kwargs<arrow>(bool str)<block_start><if_stmt><not>self.enabled<block_start><return><block_end>data,activity=args[0] args[1]<try_stmt><block_start><return>self._process(data activity)<block_end><except_stmt>Exception<as>e<block_start>logger.error('could not execute plugin not_full: %s'%str(e))<line_sep>logger.exception(traceback.format_exc())<line_sep><return><false> ErrorCodes.VALIDATION_ERROR 'could not execute validation plugin not_full'<block_end><block_end><block_end>
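# A small, hypothetical illustration of the rule enforced by the plugin above; it is not part of the original file. The standard-library base64 module stands in for the project's `utils.b64d`, and the message text and limit are placeholders. <import_stmt>base64<line_sep>max_length=1000<line_sep>message=base64.b64encode(b"hello room").decode()<line_sep>decoded=base64.b64decode(message).decode()<if_stmt>len(decoded)<g>max_length<block_start>print("rejected: message content too long")<block_end><else_stmt><block_start>print("accepted")<block_end>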
# encoding: utf-8 <import_stmt>datetime<import_from_stmt>south.db db<import_from_stmt>south.v2 SchemaMigration<import_from_stmt>django.db models<class_stmt>Migration(SchemaMigration)<block_start><def_stmt>forwards self orm# Adding model 'ProductType' <block_start>db.create_table('inventory_producttype' (('id' self.gf('django.db.models.fields.AutoField')(primary_key=<true>)) ))<line_sep>db.send_create_signal('inventory' ['ProductType'])<line_sep># Adding model 'Product' db.create_table('inventory_product' (('id' self.gf('django.db.models.fields.AutoField')(primary_key=<true>)) ('shop' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])) ('title' self.gf('django.db.models.fields.CharField')(max_length=200)) ('description' self.gf('django.db.models.fields.TextField')()) ('category' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'])) ('subcategory' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'])) ('date_time' self.gf('django.db.models.fields.DateTimeField')(auto_now_add=<true> blank=<true>)) ('weight' self.gf('django.db.models.fields.DecimalField')(default='0' max_digits=11 decimal_places=2)) ('type' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['inventory.ProductType'] null=<true> blank=<true>)) ))<line_sep>db.send_create_signal('inventory' ['Product'])<line_sep># Adding model 'Coin' db.create_table('inventory_coin' (('producttype_ptr' self.gf('django.db.models.fields.related.OneToOneField')(to=orm['inventory.ProductType'] unique=<true> primary_key=<true>)) ('category' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'] null=<true> blank=<true>)) ('subcategory' self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'] null=<true> blank=<true>)) ('country_code' self.gf('django.db.models.fields.CharField')(default='us' max_length=2)) ('pcgs_number' self.gf('django.db.models.fields.IntegerField')(null=<true> blank=<true>)) ('description' self.gf('django.db.models.fields.TextField')(default='' blank='')) ('year_issued' self.gf('django.db.models.fields.CharField')(default='' max_length=24 blank='')) ('actual_year' self.gf('django.db.models.fields.CharField')(default='' max_length=24 blank='')) ('denomination' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('major_variety' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('die_variety' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('prefix' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('suffix' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('sort_order' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('heading' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('holder_variety' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('holder_variety_2' self.gf('django.db.models.fields.CharField')(default='' max_length=60 blank='')) ('additional_data' self.gf('django.db.models.fields.TextField')(default='' blank='')) ('last_update' self.gf('django.db.models.fields.DateTimeField')(auto_now=<true> blank=<true>)) ))<line_sep>db.send_create_signal('inventory' ['Coin'])<block_end><def_stmt>backwards self orm# Deleting model 'ProductType' <block_start>db.delete_table('inventory_producttype')<line_sep># Deleting model 
'Product' db.delete_table('inventory_product')<line_sep># Deleting model 'Coin' db.delete_table('inventory_coin')<block_end>models={'auth.group':{'Meta':{'object_name':'Group'} 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'name':('django.db.models.fields.CharField' [] {'unique':'True' 'max_length':'80'}) 'permissions':('django.db.models.fields.related.ManyToManyField' [] {'to':"orm['auth.Permission']" 'symmetrical':'False' 'blank':'True'})} 'auth.permission':{'Meta':{'unique_together':"(('content_type', 'codename'),)" 'object_name':'Permission'} 'codename':('django.db.models.fields.CharField' [] {'max_length':'100'}) 'content_type':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['contenttypes.ContentType']"}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'name':('django.db.models.fields.CharField' [] {'max_length':'50'})} 'auth.user':{'Meta':{'object_name':'User'} 'date_joined':('django.db.models.fields.DateTimeField' [] {'default':'datetime.datetime.now'}) 'email':('django.db.models.fields.EmailField' [] {'max_length':'75'}) 'first_name':('django.db.models.fields.CharField' [] {'max_length':'30' 'blank':'True'}) 'groups':('django.db.models.fields.related.ManyToManyField' [] {'to':"orm['auth.Group']" 'symmetrical':'False' 'blank':'True'}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'is_active':('django.db.models.fields.BooleanField' [] {'default':'True' 'blank':'True'}) 'is_staff':('django.db.models.fields.BooleanField' [] {'default':'False' 'blank':'True'}) 'is_superuser':('django.db.models.fields.BooleanField' [] {'default':'False' 'blank':'True'}) 'last_login':('django.db.models.fields.DateTimeField' [] {'default':'datetime.datetime.now'}) 'last_name':('django.db.models.fields.CharField' [] {'max_length':'30' 'blank':'True'}) 'password':('<PASSWORD>' [] {'max_length':'128'}) 'user_permissions':('django.db.models.fields.related.ManyToManyField' [] {'to':"orm['auth.Permission']" 'symmetrical':'False' 'blank':'True'}) 'username':('django.db.models.fields.CharField' [] {'unique':'True' 'max_length':'32'})} 'contenttypes.contenttype':{'Meta':{'unique_together':"(('app_label', 'model'),)" 'object_name':'ContentType' 'db_table':"'django_content_type'"} 'app_label':('django.db.models.fields.CharField' [] {'max_length':'100'}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'model':('django.db.models.fields.CharField' [] {'max_length':'100'}) 'name':('django.db.models.fields.CharField' [] {'max_length':'100'})} 'inventory.coin':{'Meta':{'object_name':'Coin' '_ormbases':['inventory.ProductType']} 'actual_year':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'24' 'blank':"''"}) 'additional_data':('django.db.models.fields.TextField' [] {'default':"''" 'blank':"''"}) 'category':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketCategory']" 'null':'True' 'blank':'True'}) 'country_code':('django.db.models.fields.CharField' [] {'default':"'us'" 'max_length':'2'}) 'denomination':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'description':('django.db.models.fields.TextField' [] {'default':"''" 'blank':"''"}) 'die_variety':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'heading':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'holder_variety':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 
'holder_variety_2':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'last_update':('django.db.models.fields.DateTimeField' [] {'auto_now':'True' 'blank':'True'}) 'major_variety':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'pcgs_number':('django.db.models.fields.IntegerField' [] {'null':'True' 'blank':'True'}) 'prefix':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'producttype_ptr':('django.db.models.fields.related.OneToOneField' [] {'to':"orm['inventory.ProductType']" 'unique':'True' 'primary_key':'True'}) 'sort_order':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'subcategory':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketSubCategory']" 'null':'True' 'blank':'True'}) 'suffix':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'60' 'blank':"''"}) 'year_issued':('django.db.models.fields.CharField' [] {'default':"''" 'max_length':'24' 'blank':"''"})} 'inventory.product':{'Meta':{'object_name':'Product'} 'category':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketCategory']"}) 'date_time':('django.db.models.fields.DateTimeField' [] {'auto_now_add':'True' 'blank':'True'}) 'description':('django.db.models.fields.TextField' [] {}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'shop':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['shops.Shop']"}) 'subcategory':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketSubCategory']"}) 'title':('django.db.models.fields.CharField' [] {'max_length':'200'}) 'type':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['inventory.ProductType']" 'null':'True' 'blank':'True'}) 'weight':('django.db.models.fields.DecimalField' [] {'default':"'0'" 'max_digits':'11' 'decimal_places':'2'})} 'inventory.producttype':{'Meta':{'object_name':'ProductType'} 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'})} 'market.marketcategory':{'Meta':{'object_name':'MarketCategory'} 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'marketplace':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketPlace']"}) 'name':('django.db.models.fields.CharField' [] {'max_length':'60'}) 'order':('django.db.models.fields.IntegerField' [] {'default':'255'}) 'slug':('django.db.models.fields.SlugField' [] {'unique':'True' 'max_length':'60' 'db_index':'True'})} 'market.marketplace':{'Meta':{'object_name':'MarketPlace'} 'base_domain':('django.db.models.fields.CharField' [] {'unique':'True' 'max_length':'128'}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'name':('django.db.models.fields.CharField' [] {'max_length':'92'}) 'slug':('django.db.models.fields.SlugField' [] {'unique':'True' 'max_length':'92' 'db_index':'True'}) 'template_prefix':('django.db.models.fields.SlugField' [] {'unique':'True' 'max_length':'92' 'db_index':'True'}) 'title':('django.db.models.fields.CharField' [] {'max_length':'92'})} 'market.marketsubcategory':{'Meta':{'unique_together':"(('parent', 'slug'),)" 'object_name':'MarketSubCategory'} 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'marketplace':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketPlace']"}) 'name':('django.db.models.fields.CharField' [] {'max_length':'60'}) 'order':('django.db.models.fields.IntegerField' [] {'default':'255'}) 
'parent':('django.db.models.fields.related.ForeignKey' [] {'blank':'True' 'related_name':"'subcategories'" 'null':'True' 'to':"orm['market.MarketCategory']"}) 'slug':('django.db.models.fields.SlugField' [] {'max_length':'60' 'db_index':'True'})} 'shops.shop':{'Meta':{'object_name':'Shop'} 'admin':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['auth.User']"}) 'bids':('django.db.models.fields.IntegerField' [] {'default':'0'}) 'date_time':('django.db.models.fields.DateTimeField' [] {'auto_now_add':'True' 'blank':'True'}) 'id':('django.db.models.fields.AutoField' [] {'primary_key':'True'}) 'location':('django.db.models.fields.CharField' [] {'default':"'39.29038,-76.61219'" 'max_length':'255'}) 'marketplace':('django.db.models.fields.related.ForeignKey' [] {'to':"orm['market.MarketPlace']"}) 'name':('django.db.models.fields.CharField' [] {'max_length':'60'}) 'views':('django.db.models.fields.IntegerField' [] {'default':'0'})}}<line_sep>complete_apps=['inventory']<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>ManagementAgentAggregationDimensions(object)<block_start>""" The Aggregation of Management Agent Dimensions """<line_sep>#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "ACTIVE" AVAILABILITY_STATUS_ACTIVE="ACTIVE"<line_sep>#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "SILENT" AVAILABILITY_STATUS_SILENT="SILENT"<line_sep>#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions. #: This constant has a value of "NOT_AVAILABLE" AVAILABILITY_STATUS_NOT_AVAILABLE="NOT_AVAILABLE"<line_sep>#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "LINUX" PLATFORM_TYPE_LINUX="LINUX"<line_sep>#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "WINDOWS" PLATFORM_TYPE_WINDOWS="WINDOWS"<line_sep>#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "AGENT" INSTALL_TYPE_AGENT="AGENT"<line_sep>#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions. #: This constant has a value of "GATEWAY" INSTALL_TYPE_GATEWAY="GATEWAY"<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param availability_status: The value to assign to the availability_status property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type availability_status: str :param platform_type: The value to assign to the platform_type property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type platform_type: str :param version: The value to assign to the version property of this ManagementAgentAggregationDimensions. :type version: str :param has_plugins: The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions. :type has_plugins: bool :param install_type: The value to assign to the install_type property of this ManagementAgentAggregationDimensions. Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. 
:type install_type: str """<line_sep>self.swagger_types={'availability_status':'str' 'platform_type':'str' 'version':'str' 'has_plugins':'bool' 'install_type':'str'}<line_sep>self.attribute_map={'availability_status':'availabilityStatus' 'platform_type':'platformType' 'version':'version' 'has_plugins':'hasPlugins' 'install_type':'installType'}<line_sep>self._availability_status=<none><line_sep>self._platform_type=<none><line_sep>self._version=<none><line_sep>self._has_plugins=<none><line_sep>self._install_type=<none><block_end>@property<def_stmt>availability_status self<block_start>""" Gets the availability_status of this ManagementAgentAggregationDimensions. The availability status of managementAgent Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The availability_status of this ManagementAgentAggregationDimensions. :rtype: str """<line_sep><return>self._availability_status<block_end>@availability_status.setter<def_stmt>availability_status self availability_status<block_start>""" Sets the availability_status of this ManagementAgentAggregationDimensions. The availability status of managementAgent :param availability_status: The availability_status of this ManagementAgentAggregationDimensions. :type: str """<line_sep>allowed_values=["ACTIVE" "SILENT" "NOT_AVAILABLE"]<if_stmt><not>value_allowed_none_or_none_sentinel(availability_status allowed_values)<block_start>availability_status='UNKNOWN_ENUM_VALUE'<block_end>self._availability_status=availability_status<block_end>@property<def_stmt>platform_type self<block_start>""" Gets the platform_type of this ManagementAgentAggregationDimensions. Platform Type Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The platform_type of this ManagementAgentAggregationDimensions. :rtype: str """<line_sep><return>self._platform_type<block_end>@platform_type.setter<def_stmt>platform_type self platform_type<block_start>""" Sets the platform_type of this ManagementAgentAggregationDimensions. Platform Type :param platform_type: The platform_type of this ManagementAgentAggregationDimensions. :type: str """<line_sep>allowed_values=["LINUX" "WINDOWS"]<if_stmt><not>value_allowed_none_or_none_sentinel(platform_type allowed_values)<block_start>platform_type='UNKNOWN_ENUM_VALUE'<block_end>self._platform_type=platform_type<block_end>@property<def_stmt>version self<block_start>""" Gets the version of this ManagementAgentAggregationDimensions. Agent image version :return: The version of this ManagementAgentAggregationDimensions. :rtype: str """<line_sep><return>self._version<block_end>@version.setter<def_stmt>version self version<block_start>""" Sets the version of this ManagementAgentAggregationDimensions. Agent image version :param version: The version of this ManagementAgentAggregationDimensions. :type: str """<line_sep>self._version=version<block_end>@property<def_stmt>has_plugins self<block_start>""" Gets the has_plugins of this ManagementAgentAggregationDimensions. Whether or not a managementAgent has at least one plugin :return: The has_plugins of this ManagementAgentAggregationDimensions. :rtype: bool """<line_sep><return>self._has_plugins<block_end>@has_plugins.setter<def_stmt>has_plugins self has_plugins<block_start>""" Sets the has_plugins of this ManagementAgentAggregationDimensions. 
Whether or not a managementAgent has at least one plugin :param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions. :type: bool """<line_sep>self._has_plugins=has_plugins<block_end>@property<def_stmt>install_type self<block_start>""" Gets the install_type of this ManagementAgentAggregationDimensions. The install type, either AGENT or GATEWAY Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The install_type of this ManagementAgentAggregationDimensions. :rtype: str """<line_sep><return>self._install_type<block_end>@install_type.setter<def_stmt>install_type self install_type<block_start>""" Sets the install_type of this ManagementAgentAggregationDimensions. The install type, either AGENT or GATEWAY :param install_type: The install_type of this ManagementAgentAggregationDimensions. :type: str """<line_sep>allowed_values=["AGENT" "GATEWAY"]<if_stmt><not>value_allowed_none_or_none_sentinel(install_type allowed_values)<block_start>install_type='UNKNOWN_ENUM_VALUE'<block_end>self._install_type=install_type<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
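# --- Illustrative usage sketch (added; not part of the generated OCI SDK module above).
# The @init_model_state_from_kwargs decorator lets the model be constructed directly from
# the keyword arguments documented in __init__; the concrete values below are assumptions
# picked from the class constants.
dims = ManagementAgentAggregationDimensions(
    availability_status='ACTIVE',   # ACTIVE / SILENT / NOT_AVAILABLE
    platform_type='LINUX',          # LINUX / WINDOWS
    version='210915.0101',          # hypothetical agent image version
    has_plugins=True,
    install_type='AGENT',           # AGENT / GATEWAY
)
print(dims)  # __repr__ delegates to formatted_flat_dict and prints the populated fields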
""" Cement generate extension module. """<import_stmt>re<import_stmt>os<import_stmt>inspect<import_stmt>yaml<import_stmt>shutil<import_from_stmt>.. Controller minimal_logger shell<import_from_stmt>..utils.version VERSION get_version<line_sep>LOG=minimal_logger(__name__)<class_stmt>GenerateTemplateAbstractBase(Controller)<block_start><class_stmt>Meta<block_start><pass><block_end><def_stmt>_generate self source dest<block_start>msg='Generating %s %s in %s'%(self.app._meta.label self._meta.label dest)<line_sep>self.app.log.info(msg)<line_sep>data={}<line_sep># builtin vars maj_min=float('%s.%s'%(VERSION[0] VERSION[1]))<line_sep>data['cement']={}<line_sep>data['cement']['version']=get_version()<line_sep>data['cement']['major_version']=VERSION[0]<line_sep>data['cement']['minor_version']=VERSION[1]<line_sep>data['cement']['major_minor_version']=maj_min<line_sep>f=open(os.path.join(source '.generate.yml'))<line_sep>yaml_load=yaml.full_load<if>hasattr(yaml 'full_load')<else>yaml.load<line_sep>g_config=yaml_load(f)<line_sep>f.close()<line_sep>vars=g_config.get('variables' {})<line_sep>exclude_list=g_config.get('exclude' [])<line_sep>ignore_list=g_config.get('ignore' [])<line_sep># default ignore the .generate.yml config g_config_yml=r'^(.*)[\/\\\\]%s[\/\\\\]\.generate\.yml$'%self._meta.label<line_sep>ignore_list.append(g_config_yml)<line_sep>var_defaults={'name':<none> 'prompt':<none> 'validate':<none> 'case':<none> 'default':<none> }<for_stmt>defined_var vars<block_start>var=var_defaults.copy()<line_sep>var.update(defined_var)<for_stmt>key ['name' 'prompt']<block_start><assert_stmt>var[key]<is><not><none> "Required generate config key missing: %s"%key<block_end>val=<none><if_stmt>var['default']<is><not><none><and>self.app.pargs.defaults<block_start>val=var['default']<block_end><elif_stmt>var['default']<is><not><none><block_start>default_text=' [%s]'%var['default']<block_end><else_stmt><block_start>default_text=''<block_end># pragma: nocover <if_stmt>val<is><none><block_start><class_stmt>MyPrompt(shell.Prompt)<block_start><class_stmt>Meta<block_start>text="%s%s:"%(var['prompt'] default_text)<line_sep>default=var.get('default' <none>)<block_end><block_end>p=MyPrompt()<line_sep>val=p.prompt()<block_end># pragma: nocover <if_stmt>var['case']<in>['lower' 'upper' 'title']<block_start>val=getattr(val var['case'])()<block_end><elif_stmt>var['case']<is><not><none><block_start>self.app.log.warning("Invalid configuration for variable "+"'%s': "%var['name']+"case must be one of lower, upper, or title.")<block_end><if_stmt>var['validate']<is><not><none><block_start><assert_stmt>re.match(var['validate'] val) "Invalid Response (must match: '%s')"%var['validate']<block_end>data[var['name']]=val<block_end><try_stmt><block_start>self.app.template.copy(source dest data force=self.app.pargs.force ignore=ignore_list exclude=exclude_list)<block_end><except_stmt>AssertionError<as>e<block_start><if_stmt>re.match('(.*)already exists(.*)' e.args[0])<block_start><raise>AssertionError(e.args[0]+' (try: --force)')<block_end><else_stmt><block_start><raise><block_end><block_end><block_end># pragma: nocover <def_stmt>_clone self source dest<block_start>msg='Cloning %s %s template to %s'%(self.app._meta.label self._meta.label dest)<line_sep>self.app.log.info(msg)<if_stmt>os.path.exists(dest)<and>self.app.pargs.force<is><true><block_start>shutil.rmtree(dest)<block_end><elif_stmt>os.path.exists(dest)<block_start>msg="Destination path already exists: %s (try: 
--force)"%dest<line_sep><raise>AssertionError(msg)<block_end>shutil.copytree(source dest)<block_end><def_stmt>_default self<block_start>source=self._meta.source_path<line_sep>dest=self.app.pargs.dest<if_stmt>self.app.pargs.clone<is><true><block_start>self._clone(source dest)<block_end><else_stmt><block_start>self._generate(source dest)<block_end><block_end><block_end><def_stmt>setup_template_items app<block_start>template_dirs=[]<line_sep>template_items=[]<line_sep># look in app template dirs <for_stmt>path app._meta.template_dirs<block_start>subpath=os.path.join(path 'generate')<if_stmt>os.path.exists(subpath)<and>subpath<not><in>template_dirs<block_start>template_dirs.append(subpath)<block_end><block_end># use app template module, find it's path on filesystem <if_stmt>app._meta.template_module<is><not><none><block_start>mod_parts=app._meta.template_module.split('.')<line_sep>mod=mod_parts.pop()<try_stmt><block_start>mod=app.__import__(mod from_module='.'.join(mod_parts))<line_sep>mod_path=os.path.dirname(inspect.getfile(mod))<line_sep>subpath=os.path.join(mod_path 'generate')<if_stmt>os.path.exists(subpath)<and>subpath<not><in>template_dirs<block_start>template_dirs.append(subpath)<block_end><block_end># FIXME: not exactly sure how to test for this so not covering <except_stmt>AttributeError# pragma: nocover <block_start>msg='unable to load template module'+'%s from %s'%(mod '.'.join(mod_parts))<line_sep># pragma: nocover app.log.debug(msg)# pragma: nocover <block_end><block_end><for_stmt>path template_dirs<block_start><for_stmt>item os.listdir(path)<block_start><if_stmt>item<not><in>template_items<block_start>template_items.append(item)<block_end><class_stmt>GenerateTemplate(GenerateTemplateAbstractBase)<block_start><class_stmt>Meta<block_start>label=item<line_sep>stacked_on='generate'<line_sep>stacked_type='nested'<line_sep>help='generate %s from template'%item<line_sep>arguments=[# ------------------------------------------------------ (['dest'] {'help':'destination directory path'}) # ------------------------------------------------------ (['-f' '--force'] {'help':'force operation if destination exists' 'dest':'force' 'action':'store_true'}) # ------------------------------------------------------ (['-D' '--defaults'] {'help':'use all default variable values' 'dest':'defaults' 'action':'store_true'}) # ------------------------------------------------------ (['--clone'] {'help':'clone this template to destination path' 'dest':'clone' 'action':'store_true'}) ]<line_sep>source_path=os.path.join(path item)<block_end><block_end>app.handler.register(GenerateTemplate)<block_end><block_end><block_end><class_stmt>Generate(Controller)<block_start><class_stmt>Meta<block_start>label='generate'<line_sep>stacked_on='base'<line_sep>stacked_type='nested'<line_sep>config_section='generate'<block_end><def_stmt>_setup self app<block_start>super(Generate self)._setup(app)<block_end><def_stmt>_default self<block_start>self._parser.print_help()<block_end><block_end><def_stmt>load app<block_start>app.handler.register(Generate)<line_sep>app.hook.register('pre_run' setup_template_items)<block_end>
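# --- Illustrative sketch (added): the shape of the parsed `.generate.yml` that
# _generate() above expects after YAML loading. The keys come from the code
# (variables / exclude / ignore, and name / prompt / validate / case / default per
# variable); the concrete values are assumptions for demonstration only.
example_generate_config = {
    'variables': [
        {
            'name': 'app_label',               # required
            'prompt': 'Application Label',     # required; shown in the input prompt
            'validate': r'^[a-z][a-z0-9_]+$',  # user response must match this regex
            'case': 'lower',                   # one of: lower / upper / title
            'default': 'myapp',                # used directly when --defaults is passed
        },
    ],
    'exclude': [r'^(.*)\.jpg$'],  # regex patterns handed through to app.template.copy()
    'ignore': [],                 # additional patterns; .generate.yml itself is always ignored
}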
# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>functools<import_from_stmt>oslo_config cfg<import_from_stmt>neutron.conf.agent ovs_conf<as>agent_ovs_conf<import_from_stmt>neutron.conf.plugins.ml2.drivers ovs_conf<as>ml2_ovs_conf<import_from_stmt>neutron.privileged.agent.ovsdb.native helpers<as>priv_helpers<line_sep>agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)<line_sep>ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)<line_sep>enable_connection_uri=functools.partial(priv_helpers.enable_connection_uri log_fail_as_error=<false> check_exit_code=<false> timeout=cfg.CONF.OVS.ovsdb_timeout inactivity_probe=cfg.CONF.OVS.of_inactivity_probe<times>1000)<line_sep>
""" insert default serverInfo """<import_from_stmt>yoyo step<line_sep>__depends__={'20220114_02_lHBKM-new-table-serverinfo'}<line_sep>steps=[step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")]<line_sep>
# Copyright 2017 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # <import_from_stmt>osc_lib.cli format_columns<import_from_stmt>osc_lib.command command<import_from_stmt>osc_lib exceptions<import_from_stmt>osc_lib utils<import_from_stmt>osc_lib.utils columns<as>column_util<import_from_stmt>oslo_log log<as>logging<import_from_stmt>neutronclient._i18n _<import_from_stmt>neutronclient.common utils<as>nc_utils<import_from_stmt>neutronclient.osc utils<as>osc_utils<import_from_stmt>neutronclient.osc.v2.vpnaas utils<as>vpn_utils<line_sep>LOG=logging.getLogger(__name__)<line_sep>_formatters={'peer_cidrs':format_columns.ListColumn}<line_sep>_attr_map=(('id' 'ID' column_util.LIST_BOTH) ('name' 'Name' column_util.LIST_BOTH) ('peer_address' 'Peer Address' column_util.LIST_BOTH) ('auth_mode' 'Authentication Algorithm' column_util.LIST_BOTH) ('status' 'Status' column_util.LIST_BOTH) ('tenant_id' 'Project' column_util.LIST_LONG_ONLY) ('peer_cidrs' 'Peer CIDRs' column_util.LIST_LONG_ONLY) ('vpnservice_id' 'VPN Service' column_util.LIST_LONG_ONLY) ('ipsecpolicy_id' 'IPSec Policy' column_util.LIST_LONG_ONLY) ('ikepolicy_id' 'IKE Policy' column_util.LIST_LONG_ONLY) ('mtu' 'MTU' column_util.LIST_LONG_ONLY) ('initiator' 'Initiator' column_util.LIST_LONG_ONLY) ('admin_state_up' 'State' column_util.LIST_LONG_ONLY) ('description' 'Description' column_util.LIST_LONG_ONLY) ('psk' 'Pre-shared Key' column_util.LIST_LONG_ONLY) ('route_mode' 'Route Mode' column_util.LIST_LONG_ONLY) ('local_id' 'Local ID' column_util.LIST_LONG_ONLY) ('peer_id' 'Peer ID' column_util.LIST_LONG_ONLY) ('local_ep_group_id' 'Local Endpoint Group ID' column_util.LIST_LONG_ONLY) ('peer_ep_group_id' 'Peer Endpoint Group ID' column_util.LIST_LONG_ONLY) )<def_stmt>_convert_to_lowercase string<block_start><return>string.lower()<block_end><def_stmt>_get_common_parser parser is_create=<true><block_start>parser.add_argument('--description' metavar='<description>' help=_('Description for the connection'))<line_sep>parser.add_argument('--dpd' metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT" type=nc_utils.str2dict_type(optional_keys=['action' 'interval' 'timeout']) help=vpn_utils.dpd_help("IPsec connection"))<line_sep>parser.add_argument('--mtu' help=_('MTU size for the connection'))<line_sep>parser.add_argument('--initiator' choices=['bi-directional' 'response-only'] type=_convert_to_lowercase help=_('Initiator state'))<line_sep>peer_group=parser.add_mutually_exclusive_group()<line_sep>peer_group.add_argument('--peer-cidr' dest='peer_cidrs' help=_('Remote subnet(s) in CIDR format. '<concat>'Cannot be specified when using endpoint groups. 
Only '<concat>'applicable, if subnet provided for VPN service.'))<line_sep>peer_group.add_argument('--local-endpoint-group' help=_('Local endpoint group (name or ID) with subnet(s) '<concat>'for IPsec connection'))<line_sep>parser.add_argument('--peer-endpoint-group' help=_('Peer endpoint group (name or ID) with CIDR(s) for '<concat>'IPSec connection'))<line_sep>admin_group=parser.add_mutually_exclusive_group()<line_sep>admin_group.add_argument('--enable' action='store_true' help=_("Enable IPSec site connection"))<line_sep>admin_group.add_argument('--disable' action='store_true' help=_("Disable IPSec site connection"))<line_sep>parser.add_argument('--local-id' help=_('An ID to be used instead of the external IP '<concat>'address for a virtual router'))<line_sep><return>parser<block_end><def_stmt>_get_common_attrs client_manager parsed_args is_create=<true><block_start>attrs={}<if_stmt>is_create<block_start><if_stmt>'project'<in>parsed_args<and>parsed_args.project<is><not><none><block_start>attrs['tenant_id']=osc_utils.find_project(client_manager.identity parsed_args.project parsed_args.project_domain ).id<block_end><block_end><if_stmt>parsed_args.description<block_start>attrs['description']=str(parsed_args.description)<block_end><if_stmt>parsed_args.mtu<block_start>attrs['mtu']=parsed_args.mtu<block_end><if_stmt>parsed_args.enable<block_start>attrs['admin_state_up']=<true><block_end><if_stmt>parsed_args.disable<block_start>attrs['admin_state_up']=<false><block_end><if_stmt>parsed_args.initiator<block_start>attrs['initiator']=parsed_args.initiator<block_end><if_stmt>parsed_args.dpd<block_start>vpn_utils.validate_dpd_dict(parsed_args.dpd)<line_sep>attrs['dpd']=parsed_args.dpd<block_end><if_stmt>parsed_args.local_endpoint_group<block_start>_local_epg=client_manager.neutronclient.find_resource('endpoint_group' parsed_args.local_endpoint_group cmd_resource='endpoint_group')['id']<line_sep>attrs['local_ep_group_id']=_local_epg<block_end><if_stmt>parsed_args.peer_endpoint_group<block_start>_peer_epg=client_manager.neutronclient.find_resource('endpoint_group' parsed_args.peer_endpoint_group cmd_resource='endpoint_group')['id']<line_sep>attrs['peer_ep_group_id']=_peer_epg<block_end><if_stmt>parsed_args.peer_cidrs<block_start>attrs['peer_cidrs']=parsed_args.peer_cidrs<block_end><if_stmt>parsed_args.local_id<block_start>attrs['local_id']=parsed_args.local_id<block_end><return>attrs<block_end><class_stmt>CreateIPsecSiteConnection(command.ShowOne)<block_start>_description=_("Create an IPsec site connection")<def_stmt>get_parser self prog_name<block_start>parser=super(CreateIPsecSiteConnection self).get_parser(prog_name)<line_sep>_get_common_parser(parser)<line_sep>parser.add_argument('--peer-id' required=<true> help=_('Peer router identity for authentication. 
Can be '<concat>'IPv4/IPv6 address, e-mail address, key id, or FQDN'))<line_sep>parser.add_argument('--peer-address' required=<true> help=_('Peer gateway public IPv4/IPv6 address or FQDN'))<line_sep>parser.add_argument('--psk' required=<true> help=_('Pre-shared key string.'))<line_sep>parser.add_argument('--vpnservice' metavar='VPNSERVICE' required=<true> help=_('VPN service instance associated with this '<concat>'connection (name or ID)'))<line_sep>parser.add_argument('--ikepolicy' metavar='IKEPOLICY' required=<true> help=_('IKE policy associated with this connection (name or ID)'))<line_sep>parser.add_argument('--ipsecpolicy' metavar='IPSECPOLICY' required=<true> help=_('IPsec policy associated with this connection '<concat>'(name or ID)'))<line_sep>parser.add_argument('name' metavar='<name>' help=_('Set friendly name for the connection'))<line_sep>osc_utils.add_project_owner_option_to_parser(parser)<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start>client=self.app.client_manager.neutronclient<line_sep>attrs=_get_common_attrs(self.app.client_manager parsed_args)<if_stmt>parsed_args.vpnservice<block_start>_vpnservice_id=client.find_resource('vpnservice' parsed_args.vpnservice cmd_resource='vpnservice')['id']<line_sep>attrs['vpnservice_id']=_vpnservice_id<block_end><if_stmt>parsed_args.ikepolicy<block_start>_ikepolicy_id=client.find_resource('ikepolicy' parsed_args.ikepolicy cmd_resource='ikepolicy')['id']<line_sep>attrs['ikepolicy_id']=_ikepolicy_id<block_end><if_stmt>parsed_args.ipsecpolicy<block_start>_ipsecpolicy_id=client.find_resource('ipsecpolicy' parsed_args.ipsecpolicy cmd_resource='ipsecpolicy')['id']<line_sep>attrs['ipsecpolicy_id']=_ipsecpolicy_id<block_end><if_stmt>parsed_args.peer_id<block_start>attrs['peer_id']=parsed_args.peer_id<block_end><if_stmt>parsed_args.peer_address<block_start>attrs['peer_address']=parsed_args.peer_address<block_end><if_stmt>parsed_args.psk<block_start>attrs['psk']=parsed_args.psk<block_end><if_stmt>parsed_args.name<block_start>attrs['name']=parsed_args.name<block_end><if_stmt>(bool(parsed_args.local_endpoint_group)<ne>bool(parsed_args.peer_endpoint_group))<block_start>message=_("You must specify both local and peer endpoint "<concat>"groups")<line_sep><raise>exceptions.CommandError(message)<block_end><if_stmt><not>parsed_args.peer_cidrs<and><not>parsed_args.local_endpoint_group<block_start>message=_("You must specify endpoint groups or peer CIDR(s)")<line_sep><raise>exceptions.CommandError(message)<block_end>obj=client.create_ipsec_site_connection({'ipsec_site_connection':attrs})['ipsec_site_connection']<line_sep>columns,display_columns=column_util.get_columns(obj _attr_map)<line_sep>data=utils.get_dict_properties(obj columns formatters=_formatters)<line_sep><return>display_columns data<block_end><block_end><class_stmt>DeleteIPsecSiteConnection(command.Command)<block_start>_description=_("Delete IPsec site connection(s)")<def_stmt>get_parser self prog_name<block_start>parser=super(DeleteIPsecSiteConnection self).get_parser(prog_name)<line_sep>parser.add_argument('ipsec_site_connection' metavar='<ipsec-site-connection>' nargs='+' help=_('IPsec site connection to delete (name or ID)'))<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start>client=self.app.client_manager.neutronclient<line_sep>result=0<for_stmt>ipsec_conn parsed_args.ipsec_site_connection<block_start><try_stmt><block_start>ipsec_con_id=client.find_resource('ipsec_site_connection' ipsec_conn 
cmd_resource='ipsec_site_connection')['id']<line_sep>client.delete_ipsec_site_connection(ipsec_con_id)<block_end><except_stmt>Exception<as>e<block_start>result<augadd>1<line_sep>LOG.error(_("Failed to delete IPsec site connection with "<concat>"name or ID '%(ipsec_site_conn)s': %(e)s") {'ipsec_site_conn':ipsec_conn 'e':e})<block_end><block_end><if_stmt>result<g>0<block_start>total=len(parsed_args.ipsec_site_connection)<line_sep>msg=(_("%(result)s of %(total)s IPsec site connection failed "<concat>"to delete.")%{'result':result 'total':total})<line_sep><raise>exceptions.CommandError(msg)<block_end><block_end><block_end><class_stmt>ListIPsecSiteConnection(command.Lister)<block_start>_description=_("List IPsec site connections "<concat>"that belong to a given project")<def_stmt>get_parser self prog_name<block_start>parser=super(ListIPsecSiteConnection self).get_parser(prog_name)<line_sep>parser.add_argument('--long' action='store_true' default=<false> help=_("List additional fields in output"))<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start>client=self.app.client_manager.neutronclient<line_sep>obj=client.list_ipsec_site_connections()['ipsec_site_connections']<line_sep>headers,columns=column_util.get_column_definitions(_attr_map long_listing=parsed_args.long)<line_sep><return>(headers (utils.get_dict_properties(s columns formatters=_formatters)<for>s obj))<block_end><block_end><class_stmt>SetIPsecSiteConnection(command.Command)<block_start>_description=_("Set IPsec site connection properties")<def_stmt>get_parser self prog_name<block_start>parser=super(SetIPsecSiteConnection self).get_parser(prog_name)<line_sep>_get_common_parser(parser)<line_sep>parser.add_argument('--peer-id' help=_('Peer router identity for authentication. 
Can be '<concat>'IPv4/IPv6 address, e-mail address, key id, or FQDN'))<line_sep>parser.add_argument('--peer-address' help=_('Peer gateway public IPv4/IPv6 address or FQDN'))<line_sep>parser.add_argument('--name' metavar='<name>' help=_('Set friendly name for the connection'))<line_sep>parser.add_argument('ipsec_site_connection' metavar='<ipsec-site-connection>' help=_('IPsec site connection to set (name or ID)'))<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start>client=self.app.client_manager.neutronclient<line_sep>attrs=_get_common_attrs(self.app.client_manager parsed_args is_create=<false>)<if_stmt>parsed_args.peer_id<block_start>attrs['peer_id']=parsed_args.peer_id<block_end><if_stmt>parsed_args.peer_address<block_start>attrs['peer_address']=parsed_args.peer_address<block_end><if_stmt>parsed_args.name<block_start>attrs['name']=parsed_args.name<block_end>ipsec_conn_id=client.find_resource('ipsec_site_connection' parsed_args.ipsec_site_connection cmd_resource='ipsec_site_connection')['id']<try_stmt><block_start>client.update_ipsec_site_connection(ipsec_conn_id {'ipsec_site_connection':attrs})<block_end><except_stmt>Exception<as>e<block_start>msg=(_("Failed to set IPsec site "<concat>"connection '%(ipsec_conn)s': %(e)s")%{'ipsec_conn':parsed_args.ipsec_site_connection 'e':e})<line_sep><raise>exceptions.CommandError(msg)<block_end><block_end><block_end><class_stmt>ShowIPsecSiteConnection(command.ShowOne)<block_start>_description=_("Show information of a given IPsec site connection")<def_stmt>get_parser self prog_name<block_start>parser=super(ShowIPsecSiteConnection self).get_parser(prog_name)<line_sep>parser.add_argument('ipsec_site_connection' metavar='<ipsec-site-connection>' help=_('IPsec site connection to display (name or ID)'))<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start>client=self.app.client_manager.neutronclient<line_sep>ipsec_site_id=client.find_resource('ipsec_site_connection' parsed_args.ipsec_site_connection cmd_resource='ipsec_site_connection')['id']<line_sep>obj=client.show_ipsec_site_connection(ipsec_site_id)['ipsec_site_connection']<line_sep>columns,display_columns=column_util.get_columns(obj _attr_map)<line_sep>data=utils.get_dict_properties(obj columns formatters=_formatters)<line_sep><return>(display_columns data)<block_end><block_end>
<import_stmt>sys<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>collections namedtuple<import_from_stmt>six add_metaclass<class_stmt>SaveException(Exception)<block_start><def_stmt>__init__ self other<block_start>self.traceback=sys.exc_info()<line_sep>super(SaveException self).__init__(str(other))<block_end><block_end><class_stmt>DeleteException(Exception)<block_start><def_stmt>__init__ self other<block_start>self.traceback=sys.exc_info()<line_sep>super(DeleteException self).__init__(str(other))<block_end><block_end><class_stmt>Role(namedtuple("Role" ["role_name"]))<block_start><def_stmt>to_dict self<block_start><return>{"role":self.role_name }<block_end><block_end><class_stmt>UserPermission(namedtuple("UserPermission" ["role_name" "username" "is_robot" "avatar" "is_org_member" "has_org" ] ))<block_start><def_stmt>to_dict self<block_start>perm_dict={"role":self.role_name "name":self.username "is_robot":self.is_robot "avatar":self.avatar }<if_stmt>self.has_org<block_start>perm_dict["is_org_member"]=self.is_org_member<block_end><return>perm_dict<block_end><block_end><class_stmt>RobotPermission(namedtuple("RobotPermission" ["role_name" "username" "is_robot" "is_org_member" ] ))<block_start><def_stmt>to_dict self user=<none> team=<none> org_members=<none><block_start><return>{"role":self.role_name "name":self.username "is_robot":<true> "is_org_member":self.is_org_member }<block_end><block_end><class_stmt>TeamPermission(namedtuple("TeamPermission" ["role_name" "team_name" "avatar" ] ))<block_start><def_stmt>to_dict self<block_start><return>{"role":self.role_name "name":self.team_name "avatar":self.avatar }<block_end><block_end>@add_metaclass(ABCMeta)<class_stmt>PermissionDataInterface(object)<block_start>""" Data interface used by permissions API. 
"""<line_sep>@abstractmethod<def_stmt>get_repo_permissions_by_user self namespace_name repository_name<block_start>""" Args: namespace_name: string repository_name: string Returns: list(UserPermission) """<block_end>@abstractmethod<def_stmt>get_repo_roles self username namespace_name repository_name<block_start>""" Args: username: string namespace_name: string repository_name: string Returns: list(Role) or None """<block_end>@abstractmethod<def_stmt>get_repo_permission_for_user self username namespace_name repository_name<block_start>""" Args: username: string namespace_name: string repository_name: string Returns: UserPermission """<block_end>@abstractmethod<def_stmt>set_repo_permission_for_user self username namespace_name repository_name role_name<block_start>""" Args: username: string namespace_name: string repository_name: string role_name: string Returns: UserPermission Raises: SaveException """<block_end>@abstractmethod<def_stmt>delete_repo_permission_for_user self username namespace_name repository_name<block_start>""" Args: username: string namespace_name: string repository_name: string Returns: void Raises: DeleteException """<block_end>@abstractmethod<def_stmt>get_repo_permissions_by_team self namespace_name repository_name<block_start>""" Args: namespace_name: string repository_name: string Returns: list(TeamPermission) """<block_end>@abstractmethod<def_stmt>get_repo_role_for_team self team_name namespace_name repository_name<block_start>""" Args: team_name: string namespace_name: string repository_name: string Returns: Role """<block_end>@abstractmethod<def_stmt>set_repo_permission_for_team self team_name namespace_name repository_name permission<block_start>""" Args: team_name: string namespace_name: string repository_name: string permission: string Returns: TeamPermission Raises: SaveException """<block_end>@abstractmethod<def_stmt>delete_repo_permission_for_team self team_name namespace_name repository_name<block_start>""" Args: team_name: string namespace_name: string repository_name: string Returns: TeamPermission Raises: DeleteException """<block_end><block_end>
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class to offload the end to end flow of U2F signing."""<import_stmt>base64<import_stmt>hashlib<import_stmt>json<import_stmt>os<import_stmt>struct<import_stmt>subprocess<import_stmt>sys<import_from_stmt>pyu2f errors<import_from_stmt>pyu2f model<import_from_stmt>pyu2f.convenience baseauthenticator<line_sep>SK_SIGNING_PLUGIN_ENV_VAR='SK_SIGNING_PLUGIN'<line_sep>U2F_SIGNATURE_TIMEOUT_SECONDS=5<line_sep>SK_SIGNING_PLUGIN_NO_ERROR=0<line_sep>SK_SIGNING_PLUGIN_TOUCH_REQUIRED=0x6985<line_sep>SK_SIGNING_PLUGIN_WRONG_DATA=0x6A80<class_stmt>CustomAuthenticator(baseauthenticator.BaseAuthenticator)<block_start>"""Offloads U2F signing to a pluggable command-line tool. Offloads U2F signing to a signing plugin which takes the form of a command-line tool. The command-line tool is configurable via the SK_SIGNING_PLUGIN environment variable. The signing plugin should implement the following interface: Communication occurs over stdin/stdout, and messages are both sent and received in the form: [4 bytes - payload size (little-endian)][variable bytes - json payload] Signing Request JSON { "type": "sign_helper_request", "signData": [{ "keyHandle": <url-safe base64-encoded key handle>, "appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>, "challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>, "version": U2F protocol version (usually "U2F_V2") },...], "timeoutSeconds": <security key touch timeout> } Signing Response JSON { "type": "sign_helper_reply", "code": <result code>. 
"errorDetail": <text description of error>, "responseData": { "appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>, "challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>, "keyHandle": <url-safe base64-encoded key handle>, "version": <U2F protocol version>, "signatureData": <url-safe base64-encoded signature> } } Possible response error codes are: NoError = 0 UnknownError = -127 TouchRequired = 0x6985 WrongData = 0x6a80 """<def_stmt>__init__ self origin<block_start>self.origin=origin<block_end><def_stmt>Authenticate self app_id challenge_data print_callback=sys.stderr.write<block_start>"""See base class."""<line_sep># Ensure environment variable is present plugin_cmd=os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)<if_stmt>plugin_cmd<is><none><block_start><raise>errors.PluginError('{} env var is not set'.format(SK_SIGNING_PLUGIN_ENV_VAR))<block_end># Prepare input to signer client_data_map,signing_input=self._BuildPluginRequest(app_id challenge_data self.origin)<line_sep># Call plugin print_callback('Please insert and touch your security key\n')<line_sep>response=self._CallPlugin([plugin_cmd] signing_input)<line_sep># Handle response key_challenge_pair=(response['keyHandle'] response['challengeHash'])<line_sep>client_data_json=client_data_map[key_challenge_pair]<line_sep>client_data=client_data_json.encode()<line_sep><return>self._BuildAuthenticatorResponse(app_id client_data response)<block_end><def_stmt>IsAvailable self<block_start>"""See base class."""<line_sep><return>os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)<is><not><none><block_end><def_stmt>_BuildPluginRequest self app_id challenge_data origin<block_start>"""Builds a JSON request in the form that the plugin expects."""<line_sep>client_data_map={}<line_sep>encoded_challenges=[]<line_sep>app_id_hash_encoded=self._Base64Encode(self._SHA256(app_id))<for_stmt>challenge_item challenge_data<block_start>key=challenge_item['key']<line_sep>key_handle_encoded=self._Base64Encode(key.key_handle)<line_sep>raw_challenge=challenge_item['challenge']<line_sep>client_data_json=model.ClientData(model.ClientData.TYP_AUTHENTICATION raw_challenge origin).GetJson()<line_sep>challenge_hash_encoded=self._Base64Encode(self._SHA256(client_data_json))<line_sep># Populate challenges list encoded_challenges.append({'appIdHash':app_id_hash_encoded 'challengeHash':challenge_hash_encoded 'keyHandle':key_handle_encoded 'version':key.version })<line_sep># Populate ClientData map key_challenge_pair=(key_handle_encoded challenge_hash_encoded)<line_sep>client_data_map[key_challenge_pair]=client_data_json<block_end>signing_request={'type':'sign_helper_request' 'signData':encoded_challenges 'timeoutSeconds':U2F_SIGNATURE_TIMEOUT_SECONDS 'localAlways':<true>}<line_sep><return>client_data_map json.dumps(signing_request)<block_end><def_stmt>_BuildAuthenticatorResponse self app_id client_data plugin_response<block_start>"""Builds the response to return to the caller."""<line_sep>encoded_client_data=self._Base64Encode(client_data)<line_sep>signature_data=str(plugin_response['signatureData'])<line_sep>key_handle=str(plugin_response['keyHandle'])<line_sep>response={'clientData':encoded_client_data 'signatureData':signature_data 'applicationId':app_id 'keyHandle':key_handle }<line_sep><return>response<block_end><def_stmt>_CallPlugin self cmd input_json<block_start>"""Calls the plugin and validates the response."""<line_sep># Calculate length of input input_length=len(input_json)<line_sep>length_bytes_le=struct.pack('<I' 
input_length)<line_sep>request=length_bytes_le+input_json.encode()<line_sep># Call plugin sign_process=subprocess.Popen(cmd stdin=subprocess.PIPE stdout=subprocess.PIPE)<line_sep>stdout=sign_process.communicate(request)[0]<line_sep>exit_status=sign_process.wait()<line_sep># Parse and validate response size response_len_le=stdout[:4]<line_sep>response_len=struct.unpack('<I' response_len_le)[0]<line_sep>response=stdout[4:]<if_stmt>response_len<ne>len(response)<block_start><raise>errors.PluginError('Plugin response length {} does not match data {} (exit_status={})'.format(response_len len(response) exit_status))<block_end># Ensure valid json <try_stmt><block_start>json_response=json.loads(response.decode())<block_end><except_stmt>ValueError<block_start><raise>errors.PluginError('Plugin returned invalid output (exit_status={})'.format(exit_status))<block_end># Ensure response type <if_stmt>json_response.get('type')<ne>'sign_helper_reply'<block_start><raise>errors.PluginError('Plugin returned invalid response type '<concat>'(exit_status={})'.format(exit_status))<block_end># Parse response codes result_code=json_response.get('code')<if_stmt>result_code<is><none><block_start><raise>errors.PluginError('Plugin missing result code (exit_status={})'.format(exit_status))<block_end># Handle errors <if_stmt>result_code<eq>SK_SIGNING_PLUGIN_TOUCH_REQUIRED<block_start><raise>errors.U2FError(errors.U2FError.TIMEOUT)<block_end><elif_stmt>result_code<eq>SK_SIGNING_PLUGIN_WRONG_DATA<block_start><raise>errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)<block_end><elif_stmt>result_code<ne>SK_SIGNING_PLUGIN_NO_ERROR<block_start><raise>errors.PluginError('Plugin failed with error {} - {} (exit_status={})'.format(result_code json_response.get('errorDetail') exit_status))<block_end># Ensure response data is present response_data=json_response.get('responseData')<if_stmt>response_data<is><none><block_start><raise>errors.PluginErrors('Plugin returned output with missing responseData (exit_status={})'.format(exit_status))<block_end><return>response_data<block_end><def_stmt>_SHA256 self string<block_start>"""Helper method to perform SHA256."""<line_sep>md=hashlib.sha256()<line_sep>md.update(string.encode())<line_sep><return>md.digest()<block_end><def_stmt>_Base64Encode self bytes_data<block_start>"""Helper method to base64 encode, strip padding, and return str result."""<line_sep><return>base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')<block_end><block_end>
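# --- Illustrative sketch (added): a minimal stand-alone signing plugin skeleton that
# speaks the stdin/stdout protocol documented in the CustomAuthenticator docstring
# (4-byte little-endian length prefix followed by a JSON payload). It performs no real
# signing; it answers every request with the "touch required" status code, which the
# caller above surfaces as errors.U2FError(TIMEOUT). Pointing SK_SIGNING_PLUGIN at a
# script like this would exercise that error path.
import json
import struct
import sys


def read_request(stream):
    # Read the 4-byte length prefix, then exactly that many bytes of JSON.
    raw_len = stream.read(4)
    (length,) = struct.unpack('<I', raw_len)
    return json.loads(stream.read(length).decode())


def write_reply(stream, reply):
    payload = json.dumps(reply).encode()
    stream.write(struct.pack('<I', len(payload)) + payload)
    stream.flush()


if __name__ == '__main__':
    request = read_request(sys.stdin.buffer)
    assert request.get('type') == 'sign_helper_request'
    write_reply(sys.stdout.buffer, {
        'type': 'sign_helper_reply',
        'code': 0x6985,  # SK_SIGNING_PLUGIN_TOUCH_REQUIRED
        'errorDetail': 'touch required (stub plugin)',
        'responseData': None,
    })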
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2019, <NAME> <<EMAIL>> # License: http://snmplabs.com/pyasn1/license.html # <import_stmt>sys<try_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><except_stmt>ImportError<block_start><import_stmt>unittest<block_end><import_from_stmt>tests.base BaseTestCase<import_from_stmt>pyasn1.type namedval<class_stmt>NamedValuesCaseBase(BaseTestCase)<block_start><def_stmt>setUp self<block_start>BaseTestCase.setUp(self)<line_sep>self.e=namedval.NamedValues(('off' 0) ('on' 1))<block_end><def_stmt>testDict self<block_start><assert_stmt>set(self.e.items())<eq>set([('off' 0) ('on' 1)])<assert_stmt>set(self.e.keys())<eq>set(['off' 'on'])<assert_stmt>set(self.e)<eq>set(['off' 'on'])<assert_stmt>set(self.e.values())<eq>set([0 1])<assert_stmt>'on'<in>self.e<and>'off'<in>self.e<and>'xxx'<not><in>self.e<assert_stmt>0<in>self.e<and>1<in>self.e<and>2<not><in>self.e<block_end><def_stmt>testInit self<block_start><assert_stmt>namedval.NamedValues(off=0 on=1)<eq>{'off':0 'on':1}<assert_stmt>namedval.NamedValues('off' 'on')<eq>{'off':0 'on':1}<assert_stmt>namedval.NamedValues(('c' 0))<eq>{'c':0}<assert_stmt>namedval.NamedValues('a' 'b' ('c' 0) d=1)<eq>{'c':0 'd':1 'a':2 'b':3}<block_end><def_stmt>testLen self<block_start><assert_stmt>len(self.e)<eq>2<assert_stmt>len(namedval.NamedValues())<eq>0<block_end><def_stmt>testAdd self<block_start><assert_stmt>namedval.NamedValues(off=0)+namedval.NamedValues(on=1)<eq>{'off':0 'on':1}<block_end><def_stmt>testClone self<block_start><assert_stmt>namedval.NamedValues(off=0).clone(('on' 1))<eq>{'off':0 'on':1}<assert_stmt>namedval.NamedValues(off=0).clone(on=1)<eq>{'off':0 'on':1}<block_end><def_stmt>testStrRepr self<block_start><assert_stmt>str(self.e)<assert_stmt>repr(self.e)<block_end><block_end>suite=unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])<if_stmt>__name__<eq>'__main__'<block_start>unittest.TextTestRunner(verbosity=2).run(suite)<block_end>
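# --- Illustrative sketch (added; not part of the pyasn1 test suite): NamedValues is
# normally attached to an Integer/Enumerated type so instances can be built from, and
# rendered as, symbolic names. The Toggle type below is an assumed example.
from pyasn1.type import namedval, univ


class Toggle(univ.Integer):
    namedValues = namedval.NamedValues(('off', 0), ('on', 1))


assert int(Toggle('on')) == 1   # construct from the symbolic name
print(Toggle(0).prettyPrint())  # expected to render the name, i.e. 'off'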
<import_stmt>dash_core_components<as>dcc<import_stmt>dash_html_components<as>html<import_from_stmt>config strings<def_stmt>make_tab_port_map_controls port_arr:list port_val:str vessel_types_arr:list vessel_type_val:str year_arr:list year_val:int month_arr:list month_val:int <arrow>html.Div<block_start>""" Returns a HTML div of user controls found on top of the map tab. :param port_arr: list, all possible ports :param port_val: str, current port value :param vessel_types_arr: list, all possible vessel types :param vessel_type_val: str, current vessel type value :param year_arr: list, all possible years :param year_val: str, current year value :param month_arr: list, all possible months :param month_val: str, current month value :return: HTML div """<line_sep><return>html.Div(className="tab-port-map-controls" children=[html.Div(className="tab-port-map-single-control-container area-a" children=[html.Label(className="control-label" children=[strings.LABEL_PORT]) dcc.Dropdown(id="port-map-dropdown-port" clearable=<false> options=[{"label":port "value":port}<for>port port_arr] value=port_val ) ] ) html.Div(className="tab-port-map-single-control-separator area-b") html.Div(className="tab-port-map-single-control-container area-c" children=[html.Label(className="control-label" children=[strings.LABEL_VESSEL]) dcc.Dropdown(id="port-map-dropdown-vessel-type" clearable=<false> options=[{"label":vessel_type "value":vessel_type}<for>vessel_type vessel_types_arr] value=vessel_type_val ) ] ) html.Div(className="tab-port-map-single-control-separator area-d") html.Div(className="tab-port-map-single-control-container date-grid area-e" children=[html.Div(className="tab-port-map-single-control-container-date" children=[html.Label(className="control-label" children=[strings.LABEL_YEAR]) dcc.Dropdown(id="port-map-dropdown-year" clearable=<false> options=[{"label":year "value":year}<for>year year_arr] value=year_val ) ] ) html.Div(className="tab-port-map-single-control-separator smaller-line") html.Div(className="tab-port-map-single-control-container-date" children=[html.Label(className="control-label" children=[strings.LABEL_MONTH] ) dcc.Dropdown(id="port-map-dropdown-month" clearable=<false> options=[{"label":month "value":month}<for>month month_arr] value=month_val ) ] ) ] ) ] )<block_end>
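# --- Illustrative sketch (added): wiring the dropdowns built above into a Dash callback.
# Only the four Input component ids come from make_tab_port_map_controls(); the Output id
# ("port-map-graph"), the app object, and the update function body are assumptions.
import dash
from dash.dependencies import Input, Output

app = dash.Dash(__name__)


@app.callback(
    Output("port-map-graph", "figure"),  # hypothetical figure shown on the map tab
    [
        Input("port-map-dropdown-port", "value"),
        Input("port-map-dropdown-vessel-type", "value"),
        Input("port-map-dropdown-year", "value"),
        Input("port-map-dropdown-month", "value"),
    ],
)
def update_port_map(port, vessel_type, year, month):
    # A real implementation would filter the data and rebuild the map figure; an empty
    # figure dict keeps this sketch self-contained.
    title = "{} / {} / {}-{}".format(port, vessel_type, year, month)
    return {"data": [], "layout": {"title": title}}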
# -*- coding: utf-8 -*- """Actions and snippets."""<line_sep># ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- <import_stmt>inspect<import_from_stmt>functools partial wraps<import_stmt>logging<import_stmt>re<import_stmt>sys<import_stmt>traceback<import_from_stmt>.qt QKeySequence QAction require_qt input_dialog busy_cursor _get_icon<import_from_stmt>phylib.utils Bunch<line_sep>logger=logging.getLogger(__name__)<line_sep># ----------------------------------------------------------------------------- # Snippet parsing utilities # ----------------------------------------------------------------------------- <def_stmt>_parse_arg s<block_start>"""Parse a number or string."""<try_stmt><block_start><return>int(s)<block_end><except_stmt>ValueError<block_start><pass><block_end><try_stmt><block_start><return>float(s)<block_end><except_stmt>ValueError<block_start><pass><block_end><return>s<block_end><def_stmt>_parse_list s<block_start>"""Parse a comma-separated list of values (strings or numbers)."""<line_sep># Range: 'x-y' <if_stmt>'-'<in>s<block_start>m,M=map(_parse_arg s.split('-'))<line_sep><return>list(range(m M+1))<block_end># List of ids: 'x,y,z' <elif_stmt>','<in>s<block_start><return>list(map(_parse_arg s.split(',')))<block_end><else_stmt><block_start><return>_parse_arg(s)<block_end><block_end><def_stmt>_parse_snippet s<block_start>"""Parse an entire snippet command."""<line_sep><return>tuple(map(_parse_list s.split(' ')))<block_end><def_stmt>_prompt_args title docstring default=<none><block_start>"""Display a prompt dialog requesting function arguments. 'default' is a function returning the default value for the proposed input dialog. """<line_sep># There are args, need to display the dialog. # Extract Example: `...` in the docstring to put a predefined text # in the input dialog. logger.debug("Prompting arguments for %s" title)<line_sep>r=re.search('Example: `([^`]+)`' docstring)<line_sep>docstring_=docstring[:r.start()].strip()<if>r<else>docstring<try_stmt><block_start>text=str(default())<if>default<else>(r.group(1)<if>r<else><none>)<block_end><except_stmt>Exception<as>e# pragma: no cover <block_start>logger.error("Error while handling user input: %s" str(e))<line_sep><return><block_end>s,ok=input_dialog(title docstring_ text)<if_stmt><not>ok<or><not>s<block_start><return><block_end># Parse user-supplied arguments and call the function. 
args=_parse_snippet(s)<line_sep><return>args<block_end># ----------------------------------------------------------------------------- # Show shortcut utility functions # ----------------------------------------------------------------------------- <def_stmt>_get_shortcut_string shortcut<block_start>"""Return a string representation of a shortcut."""<if_stmt><not>shortcut<block_start><return>''<block_end><if_stmt>isinstance(shortcut (tuple list))<block_start><return>', '.join([_get_shortcut_string(s)<for>s shortcut])<block_end><if_stmt>isinstance(shortcut str)<block_start><if_stmt>hasattr(QKeySequence shortcut)<block_start>shortcut=QKeySequence(getattr(QKeySequence shortcut))<block_end><else_stmt><block_start><return>shortcut.lower()<block_end><block_end><assert_stmt>isinstance(shortcut QKeySequence)<line_sep>s=shortcut.toString()<or>''<line_sep><return>str(s).lower()<block_end><def_stmt>_get_qkeysequence shortcut<block_start>"""Return a QKeySequence or list of QKeySequence from a shortcut string."""<if_stmt>shortcut<is><none><block_start><return>[]<block_end><if_stmt>isinstance(shortcut (tuple list))<block_start><return>[_get_qkeysequence(s)<for>s shortcut]<block_end><assert_stmt>isinstance(shortcut str)<if_stmt>hasattr(QKeySequence shortcut)<block_start><return>QKeySequence(getattr(QKeySequence shortcut))<block_end>sequence=QKeySequence.fromString(shortcut)<assert_stmt><not>sequence.isEmpty()<line_sep><return>sequence<block_end><def_stmt>_show_shortcuts shortcuts<block_start>"""Display shortcuts."""<line_sep>out=[]<for_stmt>n sorted(shortcuts)<block_start>shortcut=_get_shortcut_string(shortcuts[n])<if_stmt><not>n.startswith('_')<and><not>shortcut.startswith('-')<block_start>out.append('- {0:<40} {1:s}'.format(n shortcut))<block_end><block_end><if_stmt>out<block_start>print('Keyboard shortcuts')<line_sep>print('\n'.join(out))<line_sep>print('')<block_end><block_end><def_stmt>_show_snippets snippets<block_start>"""Display snippets."""<line_sep>out=[]<for_stmt>n sorted(snippets)<block_start>snippet=snippets[n]<if_stmt><not>n.startswith('_')<block_start>out.append('- {0:<40} :{1:s}'.format(n snippet))<block_end><block_end><if_stmt>out<block_start>print('Snippets')<line_sep>print('\n'.join(out))<line_sep>print('')<block_end><block_end><def_stmt>show_shortcuts_snippets actions<block_start>"""Show the shortcuts and snippets of an Actions instance."""<line_sep>print(actions.name)<line_sep>print('-'<times>len(actions.name))<line_sep>print()<line_sep>_show_shortcuts(actions.shortcuts)<line_sep>_show_snippets(actions._default_snippets)<block_end># ----------------------------------------------------------------------------- # Actions # ----------------------------------------------------------------------------- <def_stmt>_alias name# Get the alias from the character after & if it exists. <block_start>alias=name[name.index('&')+1]<if>'&'<in>name<else>name<line_sep>alias=alias.replace(' ' '_').lower()<line_sep><return>alias<block_end><def_stmt>_expected_args f<block_start><if_stmt>isinstance(f partial)<block_start>argspec=inspect.getfullargspec(f.func)<block_end><else_stmt><block_start>argspec=inspect.getfullargspec(f)<block_end>f_args=argspec.args<if_stmt>'self'<in>f_args<block_start>f_args.remove('self')<block_end># Remove arguments with defaults from the list. <if_stmt>len(argspec.defaults<or>())<block_start>f_args=f_args[:-len(argspec.defaults)]<block_end># Remove arguments supplied in a partial. 
<if_stmt>isinstance(f partial)<block_start>f_args=f_args[len(f.args):]<line_sep>f_args=[arg<for>arg f_args<if>arg<not><in>f.keywords]<block_end><return>tuple(f_args)<block_end>@require_qt<def_stmt>_create_qaction gui **kwargs# Create the QAction instance. <block_start>name=kwargs.get('name' '')<line_sep>name=name[0].upper()+name[1:].replace('_' ' ')<line_sep>action=QAction(name gui)<line_sep># Show an input dialog if there are args. callback=kwargs.get('callback' <none>)<line_sep>title=getattr(callback '__name__' 'action')<line_sep># Number of expected arguments. n_args=kwargs.get('n_args' <none>)<or>len(_expected_args(callback))<line_sep>@wraps(callback)<def_stmt>wrapped is_checked *args<block_start><if_stmt>kwargs.get('checkable' <none>)<block_start>args=(is_checked )+args<block_end><if_stmt>kwargs.get('prompt' <none>)<block_start>args<augadd>_prompt_args(title docstring default=kwargs.get('prompt_default' <none>))<or>()<if_stmt><not>args# pragma: no cover <block_start>logger.debug("User cancelled input prompt, aborting.")<line_sep><return><block_end><block_end><if_stmt>len(args)<l>n_args<block_start>logger.warning("Invalid function arguments: expecting %d but got %d" n_args len(args))<line_sep><return><block_end><try_stmt># Set a busy cursor if set_busy is True. <block_start><with_stmt>busy_cursor(kwargs.get('set_busy' <none>))<block_start><return>callback(*args)<block_end><block_end><except_stmt>Exception# pragma: no cover <block_start>logger.warning("Error when executing action %s." name)<line_sep>logger.debug(''.join(traceback.format_exception(*sys.exc_info())))<block_end><block_end>action.triggered.connect(wrapped)<line_sep>sequence=_get_qkeysequence(kwargs.get('shortcut' <none>))<if_stmt><not>isinstance(sequence (tuple list))<block_start>sequence=[sequence]<block_end>action.setShortcuts(sequence)<assert_stmt>kwargs.get('docstring' <none>)<line_sep>docstring=re.sub(r'\s+' ' ' kwargs.get('docstring' <none>))<line_sep>docstring<augadd>' (alias: {})'.format(kwargs.get('alias' <none>))<line_sep>action.setStatusTip(docstring)<line_sep>action.setWhatsThis(docstring)<line_sep>action.setCheckable(kwargs.get('checkable' <none>))<line_sep>action.setChecked(kwargs.get('checked' <none>))<if_stmt>kwargs.get('icon' <none>)<block_start>action.setIcon(_get_icon(kwargs['icon']))<block_end><return>action<block_end><class_stmt>Actions(object)<block_start>"""Group of actions bound to a GUI. This class attaches to a GUI and implements the following features: * Add and remove actions * Keyboard shortcuts for the actions * Display all shortcuts Constructor ----------- gui : GUI instance name : str Name of this group of actions. menu : str Name of the GUI menu that will contain the actions. submenu : str Name of the GUI submenu that will contain the actions. default_shortcuts : dict Map action names to keyboard shortcuts (regular strings). default_snippets : dict Map action names to snippets (regular strings). 
"""<def_stmt>__init__ self gui name=<none> menu=<none> submenu=<none> view=<none> insert_menu_before=<none> default_shortcuts=<none> default_snippets=<none><block_start>self._actions_dict={}<line_sep>self._aliases={}<line_sep>self._default_shortcuts=default_shortcuts<or>{}<line_sep>self._default_snippets=default_snippets<or>{}<assert_stmt>name<line_sep>self.name=name<line_sep>self.menu=menu<line_sep>self.submenu=submenu<line_sep>self.view=view<line_sep>self.view_submenu=<none><line_sep>self.insert_menu_before=insert_menu_before<line_sep>self._view_submenus={}<line_sep>self.gui=gui<line_sep>gui.actions.append(self)<line_sep># Create the menu when creating the Actions instance. <if_stmt>menu<block_start>gui.get_menu(menu insert_menu_before)<block_end><block_end><def_stmt>_get_menu self menu=<none> submenu=<none> view=<none> view_submenu=<none><block_start>"""Return the QMenu depending on a combination of keyword arguments."""<line_sep># Defaults. menu=menu<or>self.menu<line_sep>submenu=submenu<or>self.submenu<line_sep>view=view<or>self.view<line_sep>view_submenu=view_submenu<or>self.view_submenu<line_sep># If the action is a view action, it should be added to the view's menu in the dock widget. <if_stmt>view<block_start><if_stmt>view_submenu<and>view_submenu<not><in>self._view_submenus<block_start>self._view_submenus[view_submenu]=view.dock._menu.addMenu(view_submenu)<block_end><if_stmt>view_submenu<block_start><return>self._view_submenus[view_submenu]<block_end><else_stmt><block_start><return>view.dock._menu<block_end><block_end># Create the submenu if there is one. <if_stmt>submenu# Create the submenu. <block_start>self.gui.get_submenu(menu submenu)<line_sep># Make sure the action gets added to the submenu. menu=submenu<block_end><if_stmt>menu<block_start><return>self.gui.get_menu(menu)<block_end><block_end><def_stmt>add self callback=<none> name=<none> shortcut=<none> alias=<none> prompt=<false> n_args=<none> docstring=<none> menu=<none> submenu=<none> view=<none> view_submenu=<none> verbose=<true> checkable=<false> checked=<false> set_busy=<false> prompt_default=<none> show_shortcut=<true> icon=<none> toolbar=<false><block_start>"""Add an action with a keyboard shortcut. Parameters ---------- callback : function Take no argument if checkable is False, or a boolean (checked) if it is True name : str Action name, the callback's name by default. shortcut : str The keyboard shortcut for this action. alias : str Snippet, the name by default. prompt : boolean Whether this action should display a dialog with an input box where the user can write arguments to the callback function. n_args : int If prompt is True, specify the number of expected arguments. set_busy : boolean Whether to use a busy cursor while performing the action. prompt_default : str The default text in the input text box, if prompt is True. docstring : str The action docstring, to be displayed in the status bar when hovering over the action item in the menu. By default, the function's docstring. menu : str The name of the menu where the action should be added. It is automatically created if it doesn't exist. submenu : str The name of the submenu where the action should be added. It is automatically created if it doesn't exist. view : QWidget A view that belongs to the GUI, if the actions are to be added to the view's menu bar. view_submenu : str The name of a submenu in the view menu. checkable : boolean Whether the action is checkable (toggle on/off). checked : boolean Whether the checkable action is initially checked or not. 
show_shortcut : boolean Whether to show the shortcut in the Help action that displays all GUI shortcuts. icon : str Hexadecimal code of the font-awesome icon. toolbar : boolean Whether to add the action to the toolbar. """<line_sep>param_names=sorted(inspect.signature(Actions.add).parameters)<line_sep>l=locals()<line_sep>kwargs={param_name:l[param_name]<for>param_name param_names<if>param_name<ne>'self'}<if_stmt>callback<is><none># Allow to use either add(func) or @add or @add(...). <block_start>kwargs.pop('callback' <none>)<line_sep><return>partial(self.add **kwargs)<block_end><assert_stmt>callback<line_sep># Get the name from the callback function if needed. name=name<or>callback.__name__<line_sep>alias=alias<or>self._default_snippets.get(name _alias(name)).split(' ')[0]<line_sep>name=name.replace('&' '')<line_sep>shortcut=shortcut<or>self._default_shortcuts.get(name <none>)<line_sep># Skip existing action. <if_stmt>name<in>self._actions_dict<block_start><return><block_end># Set the status tip from the function's docstring. docstring=docstring<or>callback.__doc__<or>name<line_sep>docstring=re.sub(r'[ \t\r\f\v]{2,}' ' ' docstring.strip())<line_sep># Create and register the action. kwargs.update(name=name alias=alias shortcut=shortcut docstring=docstring)<line_sep>action=_create_qaction(self.gui **kwargs)<line_sep>action_obj=Bunch(qaction=action **kwargs)<if_stmt>verbose<and><not>name.startswith('_')<block_start>logger.log(5 "Add action `%s` (%s)." name _get_shortcut_string(action.shortcut()))<block_end>self.gui.addAction(action)<line_sep># Do not show private actions in the menu. <if_stmt><not>name.startswith('_')# Find the menu in which the action should be added. <block_start>qmenu=self._get_menu(menu=menu submenu=submenu view=view view_submenu=view_submenu)<if_stmt>qmenu<block_start>qmenu.addAction(action)<block_end><block_end># Add the action to the toolbar. <if_stmt>toolbar<block_start>self.gui._toolbar.show()<line_sep>self.gui._toolbar.addAction(action)<block_end>self._actions_dict[name]=action_obj<line_sep># Register the alias -> name mapping. self._aliases[alias]=name<line_sep># Set the callback method. <if_stmt>callback<block_start>setattr(self name.lower().replace(' ' '_').replace(':' '') callback)<block_end><block_end><def_stmt>separator self **kwargs<block_start>"""Add a separator. Parameters ---------- menu : str The name of the menu where the separator should be added. It is automatically created if it doesn't exist. submenu : str The name of the submenu where the separator should be added. It is automatically created if it doesn't exist. view : QWidget A view that belongs to the GUI, if the separator is to be added to the view's menu bar. view_submenu : str The name of a submenu in the view menu. 
"""<line_sep>self._get_menu(**kwargs).addSeparator()<block_end><def_stmt>disable self name=<none><block_start>"""Disable all actions, or only one if a name is passed."""<if_stmt>name<is><none><block_start><for_stmt>name self._actions_dict<block_start>self.disable(name)<block_end><return><block_end>self._actions_dict[name].qaction.setEnabled(<false>)<block_end><def_stmt>enable self name=<none><block_start>"""Enable all actions, or only one if a name is passed.."""<if_stmt>name<is><none><block_start><for_stmt>name self._actions_dict<block_start>self.enable(name)<block_end><return><block_end>self._actions_dict[name].qaction.setEnabled(<true>)<block_end><def_stmt>get self name<block_start>"""Get a QAction instance from its name."""<line_sep><return>self._actions_dict[name].qaction<if>name<in>self._actions_dict<else><none><block_end><def_stmt>run self name *args<block_start>"""Run an action as specified by its name."""<assert_stmt>isinstance(name str)<line_sep># Resolve the alias if it is an alias. name=self._aliases.get(name name)<line_sep># Get the action. action=self._actions_dict.get(name <none>)<if_stmt><not>action<block_start><raise>ValueError("Action `{}` doesn't exist.".format(name))<block_end><if_stmt><not>name.startswith('_')<block_start>logger.debug("Execute action `%s`." name)<block_end><try_stmt><block_start><return>action.callback(*args)<block_end><except_stmt>TypeError<as>e<block_start>logger.warning("Invalid action arguments: "+str(e))<line_sep><return><block_end><block_end><def_stmt>remove self name<block_start>"""Remove an action."""<line_sep>self.gui.removeAction(self._actions_dict[name].qaction)<del_stmt>self._actions_dict[name]<line_sep>delattr(self name)<block_end><def_stmt>remove_all self<block_start>"""Remove all actions."""<line_sep>names=sorted(self._actions_dict.keys())<for_stmt>name names<block_start>self.remove(name)<block_end><block_end>@property<def_stmt>shortcuts self<block_start>"""A dictionary mapping action names to keyboard shortcuts."""<line_sep>out={}<for_stmt>name sorted(self._actions_dict)<block_start>action=self._actions_dict[name]<if_stmt><not>action.show_shortcut<block_start><continue><block_end># Discard actions without shortcut and without an alias. <if_stmt><not>action.shortcut<and><not>action.alias<block_start><continue><block_end># Only show alias for actions with no shortcut. alias_str=' (:%s)'%action.alias<if>action.alias<ne>name<else>''<line_sep>shortcut=action.shortcut<or>'-'<line_sep>shortcut=shortcut<if>isinstance(action.shortcut str)<else>', '.join(shortcut)<line_sep>out[name]='%s%s'%(shortcut alias_str)<block_end><return>out<block_end><def_stmt>show_shortcuts self<block_start>"""Display all shortcuts in the console."""<line_sep>show_shortcuts_snippets(self)<block_end><def_stmt>__contains__ self name<block_start>"""Whether the Actions group contains a specified action."""<line_sep><return>name<in>self._actions_dict<block_end><def_stmt>__repr__ self<block_start><return>'<Actions {}>'.format(sorted(self._actions_dict))<block_end><block_end># ----------------------------------------------------------------------------- # Snippets # ----------------------------------------------------------------------------- <class_stmt>Snippets(object)<block_start>"""Provide keyboard snippets to quickly execute actions from a GUI. This class attaches to a GUI and an `Actions` instance. To every command is associated a snippet with the same name, or with an alias as indicated in the action. 
The arguments of the action's callback functions can be provided in the snippet's command with a simple syntax. For example, the following command: ``` :my_action string 3-6 ``` corresponds to: ```python my_action('string', (3, 4, 5, 6)) ``` The snippet mode is activated with the `:` keyboard shortcut. A snippet command is activated with `Enter`, and one can leave the snippet mode with `Escape`. When the snippet mode is enabled (with `:`), this object adds a hidden Qt action for every keystroke. These actions are removed when the snippet mode is disabled. Constructor ----------- gui : GUI instance """<line_sep># HACK: Unicode characters do not seem to work on Python 2 cursor='\u200A\u258C'<line_sep># Allowed characters in snippet mode. # A Qt shortcut will be created for every character. _snippet_chars=r"abcdefghijklmnopqrstuvwxyz0123456789 ,.;?!_-+~=*/\(){}[]<>&|"<def_stmt>__init__ self gui<block_start>self.gui=gui<line_sep>self._status_message=gui.status_message<line_sep>self.actions=Actions(gui name='Snippets' menu='&File')<line_sep># Register snippet mode shortcut. @self.actions.add(shortcut=':')<def_stmt>enable_snippet_mode <block_start>"""Enable the snippet mode (type action alias in the status bar)."""<line_sep>self.mode_on()<block_end>self._create_snippet_actions()<line_sep>self.mode_off()<block_end>@property<def_stmt>command self<block_start>"""This is used to write a snippet message in the status bar. A cursor is appended at the end."""<line_sep>msg=self.gui.status_message<line_sep>n=len(msg)<line_sep>n_cur=len(self.cursor)<line_sep><return>msg[:n-n_cur]<block_end>@command.setter<def_stmt>command self value<block_start>value<augadd>self.cursor<line_sep>self.gui.unlock_status()<line_sep>self.gui.status_message=value<line_sep>self.gui.lock_status()<block_end><def_stmt>_backspace self<block_start>"""Erase the last character in the snippet command."""<if_stmt>self.command<eq>':'<block_start><return><block_end>logger.log(5 "Snippet keystroke `Backspace`.")<line_sep>self.command=self.command[:-1]<block_end><def_stmt>_enter self<block_start>"""Disable the snippet mode and execute the command."""<line_sep>command=self.command<line_sep>logger.log(5 "Snippet keystroke `Enter`.")<line_sep># NOTE: we need to set back the actions (mode_off) before running # the command. self.mode_off()<line_sep>self.run(command)<block_end><def_stmt>_create_snippet_actions self<block_start>"""Add mock Qt actions for snippet keystrokes. Used to enable snippet mode. """<line_sep># One action per allowed character. <for_stmt>i,char enumerate(self._snippet_chars)<block_start><def_stmt>_make_func char<block_start><def_stmt>callback <block_start>logger.log(5 "Snippet keystroke `%s`." char)<line_sep>self.command<augadd>char<block_end><return>callback<block_end># Lowercase letters. self.actions.add(name='_snippet_{}'.format(i) shortcut=char callback=_make_func(char))<line_sep># Uppercase letters. <if_stmt>char<in>self._snippet_chars[:26]<block_start>self.actions.add(name='_snippet_{}_upper'.format(i) shortcut='shift+'+char callback=_make_func(char.upper()))<block_end><block_end>self.actions.add(name='_snippet_backspace' shortcut='backspace' callback=self._backspace)<line_sep>self.actions.add(name='_snippet_activate' shortcut=('enter' 'return') callback=self._enter)<line_sep>self.actions.add(name='_snippet_disable' shortcut='escape' callback=self.mode_off)<block_end><def_stmt>run self snippet<block_start>"""Execute a snippet command. May be overridden. 
"""<assert_stmt>snippet[0]<eq>':'<line_sep>snippet=snippet[1:]<line_sep>snippet_args=_parse_snippet(snippet)<line_sep>name=snippet_args[0]<line_sep>logger.debug("Processing snippet `%s`." snippet)<try_stmt># Try to run the snippet on all attached Actions instances. <block_start><for_stmt>actions self.gui.actions<block_start><try_stmt><block_start>actions.run(name *snippet_args[1:])<line_sep><return><block_end><except_stmt>ValueError# This Actions instance doesn't contain the requested # snippet, trying the next attached Actions instance. <block_start><pass><block_end><block_end>logger.warning("Couldn't find action `%s`." name)<block_end><except_stmt>Exception<as>e<block_start>logger.warning("Error when executing snippet: \"%s\"." str(e))<line_sep>logger.debug(''.join(traceback.format_exception(*sys.exc_info())))<block_end><block_end><def_stmt>is_mode_on self<block_start>"""Whether the snippet mode is enabled."""<line_sep><return>self.command.startswith(':')<block_end><def_stmt>mode_on self<block_start>"""Enable the snippet mode."""<line_sep>logger.debug("Snippet mode enabled, press `escape` to leave this mode.")<line_sep># Save the current status message. self._status_message=self.gui.status_message<line_sep>self.gui.lock_status()<line_sep># Silent all actions except the Snippets actions. <for_stmt>actions self.gui.actions<block_start><if_stmt>actions<ne>self.actions<block_start>actions.disable()<block_end><block_end>self.actions.enable()<line_sep>self.command=':'<block_end><def_stmt>mode_off self<block_start>"""Disable the snippet mode."""<line_sep>self.gui.unlock_status()<line_sep># Reset the GUI status message that was set before the mode was # activated. self.gui.status_message=self._status_message<line_sep># Re-enable all actions except the Snippets actions. self.actions.disable()<for_stmt>actions self.gui.actions<block_start><if_stmt>actions<ne>self.actions<block_start>actions.enable()<block_end><block_end># The `:` shortcut should always be enabled. self.actions.enable('enable_snippet_mode')<block_end><block_end>
<import_stmt>logging<import_stmt>time<import_from_stmt>sqlalchemy func desc<import_from_stmt>src.models Play<import_from_stmt>src.utils db_session<line_sep>logger=logging.getLogger(__name__)<def_stmt>get_plays_metrics args<block_start>""" Returns metrics for play counts Args: args: dict The parsed args from the request args.start_time: date The start of the query args.limit: number The max number of responses to return args.bucket_size: string A date_trunc operation to aggregate timestamps by Returns: Array of dictionaries with the play counts and timestamp """<line_sep>db=db_session.get_db_read_replica()<with_stmt>db.scoped_session()<as>session<block_start><return>_get_plays_metrics(session args)<block_end><block_end><def_stmt>_get_plays_metrics session args<block_start>metrics_query=(session.query(func.date_trunc(args.get("bucket_size") Play.created_at).label("timestamp") func.count(Play.id).label("count") ).filter(Play.created_at<g>args.get("start_time")).group_by(func.date_trunc(args.get("bucket_size") Play.created_at)).order_by(desc("timestamp")).limit(args.get("limit")))<line_sep>metrics=metrics_query.all()<line_sep>metrics=[{"timestamp":int(time.mktime(m[0].timetuple())) "count":m[1]}<for>m metrics]<line_sep><return>metrics<block_end>
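A hedged sketch of how get_plays_metrics might be invoked; the dictionary keys mirror the args.get(...) lookups above, the datetime value is arbitrary, and a configured read-replica session is assumed.

from datetime import datetime, timedelta

args = {
    "start_time": datetime.utcnow() - timedelta(days=7),  # lower bound on Play.created_at
    "bucket_size": "day",                                  # passed straight to date_trunc
    "limit": 7,
}
metrics = get_plays_metrics(args)
# e.g. [{"timestamp": 1699920000, "count": 42}, ...], newest bucket first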
<import_stmt>pytz<import_from_stmt>rest_auth.serializers TokenSerializer<import_from_stmt>rest_framework.authtoken.models Token<import_from_stmt>rest_framework.exceptions ValidationError<import_from_stmt>rest_framework.fields CharField CurrentUserDefault HiddenField UUIDField ChoiceField <import_from_stmt>rest_framework.serializers ModelSerializer Serializer<import_from_stmt>rest_framework.validators UniqueValidator<import_from_stmt>django.contrib.auth.hashers check_password<import_from_stmt>open.users.models User<class_stmt>SimpleUserReadSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=User<line_sep>fields=("name" "uuid" )<block_end><block_end><class_stmt>UserReadSerializer(ModelSerializer)<block_start><class_stmt>Meta<block_start>model=User<line_sep>fields=("name" "uuid" "signed_up_from" "date_joined" "username" "email" "created" "modified" )<block_end><block_end><class_stmt>UserTokenSerializer(TokenSerializer)<block_start>user=UserReadSerializer()<class_stmt>Meta<block_start>model=Token<line_sep>fields=["key" "user"]<block_end><block_end># TODO - this view and serializer is on hold as you figure out registration (later) <class_stmt>UserCreateSerializer(ModelSerializer)<block_start>username=CharField(validators=[UniqueValidator(queryset=User.objects.all())])<line_sep># need to make email optional ... prob should think through signup form a little email=CharField(validators=[UniqueValidator(queryset=User.objects.all())] required=<false>)<line_sep>password=CharField(write_only=<true> min_length=8)<line_sep>signed_up_from=CharField(write_only=<true> min_length=8 required=<false> default="" trim_whitespace=<true>)<line_sep>timezone_string=ChoiceField(choices=pytz.all_timezones required=<false> default="US/Eastern")<class_stmt>Meta<block_start>model=User<line_sep>fields=["username" "email" "password" "signed_up_from" "timezone_string"]<block_end># TODO test - does this work with just username / no email, etc. <def_stmt>create self validated_data<block_start>username=validated_data.pop("username")<line_sep>password=validated_data.pop("password")<line_sep>is_betterself_user=<false><if_stmt>validated_data["signed_up_from"]<eq>"betterself"<block_start>is_betterself_user=<true><block_end>validated_data["is_betterself_user"]=is_betterself_user<line_sep>user=User.objects.create(username=username **validated_data)<line_sep>user.set_password(password)<line_sep>user.save()<line_sep><return>user<block_end><block_end><class_stmt>UserDeleteSerializer(Serializer)# most of this is actually redundant, i don't need to have a validation step, but i do this # out of paranoia reasons that someone may delete their account by mistake <block_start>password=CharField()<line_sep>user=HiddenField(default=CurrentUserDefault())<line_sep>uuid=UUIDField()<def_stmt>validate self data<block_start>user=data["user"]<line_sep>validated_password=check_password(data["password"] user.password)<if_stmt><not>validated_password<block_start><raise>ValidationError("Invalid Password Entered")<block_end>validated_uuid=str(user.uuid)<eq>str(data["uuid"])<if_stmt><not>validated_uuid<block_start><raise>ValidationError("Invalid UUID" str(user.uuid))<block_end>validate_user=user.username<ne>"<EMAIL>"<if_stmt><not>validate_user<block_start><raise>ValidationError(f"This is a protected user and cannot be deleted. {user.username}")<block_end><return>data<block_end><block_end>
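A sketch of driving UserCreateSerializer the way a DRF view would; the field values are invented and a configured Django settings module is assumed.

serializer = UserCreateSerializer(data={
    "username": "demo_user",
    "email": "demo@example.com",
    "password": "long-enough-password",
    "signed_up_from": "betterself",
})
if serializer.is_valid():
    user = serializer.save()  # runs create(): hashes the password and sets is_betterself_user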
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>.discriminator Discriminator<import_from_stmt>.identity Identity<class_stmt>MultiScaleDiscriminator(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MultiScaleDiscriminator self).__init__()<line_sep>self.discriminators=nn.ModuleList([Discriminator()<for>_ range(3)])<line_sep>self.pooling=nn.ModuleList([Identity()]+[nn.AvgPool1d(kernel_size=4 stride=2 padding=2)<for>_ range(1 3)])<block_end><def_stmt>forward self x<block_start>ret=list()<for_stmt>pool,disc zip(self.pooling self.discriminators)<block_start>x=pool(x)<line_sep>ret.append(disc(x))<block_end><return>ret<block_end><block_end># [(feat, score), (feat, score), (feat, score)]
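A small smoke-test sketch for the module above; the waveform shape is made up, and the contents of each per-scale result are whatever the imported Discriminator returns (a (features, score) pair, per the trailing comment).

model = MultiScaleDiscriminator()
audio = torch.randn(2, 1, 16000)  # (batch, channels, samples), fake waveform
outputs = model(audio)
print(len(outputs))               # 3 entries, one per pooling scale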
<import_from_stmt>typing Optional Dict Any List Union<import_from_stmt>allennlp.common.checks ConfigurationError<class_stmt>MetricTracker<block_start>""" This class tracks a metric during training for the dual purposes of early stopping and for knowing whether the current value is the best so far. It mimics the PyTorch `state_dict` / `load_state_dict` interface, so that it can be checkpointed along with your model and optimizer. Some metrics improve by increasing; others by decreasing. You can provide a `metric_name` that starts with "+" to indicate an increasing metric, or "-" to indicate a decreasing metric. # Parameters metric_name : `Union[str, List[str]]` Specifies the metric or metrics to track. Metric names have to start with "+" for increasing metrics or "-" for decreasing ones. If you specify more than one, it tracks the sum of the increasing metrics metrics minus the sum of the decreasing metrics. patience : `int`, optional (default = `None`) If provided, then `should_stop_early()` returns True if we go this many epochs without seeing a new best value. """<def_stmt>__init__ self metric_name:Union[str List[str]] patience:Optional[int]=<none> <arrow><none><block_start>self._patience=patience<line_sep>self._best_so_far:Optional[float]=<none><line_sep>self._epochs_with_no_improvement=0<line_sep>self._is_best_so_far=<true><line_sep>self._epoch_number=0<line_sep>self.best_epoch:Optional[int]=<none><line_sep>self.best_epoch_metrics:Dict[str float]={}<if_stmt>isinstance(metric_name str)<block_start>metric_name=[metric_name]<block_end>self.tracked_metrics=[]<for_stmt>name metric_name<block_start><if_stmt>name.startswith("+")<block_start>self.tracked_metrics.append((1.0 name[1:]))<block_end><elif_stmt>name.startswith("-")<block_start>self.tracked_metrics.append((-1.0 name[1:]))<block_end><else_stmt><block_start><raise>ConfigurationError("metric_name must start with + or -")<block_end><block_end><block_end><def_stmt>clear self<arrow><none><block_start>""" Clears out the tracked metrics, but keeps the patience """<line_sep>self._best_so_far=<none><line_sep>self._epochs_with_no_improvement=0<line_sep>self._is_best_so_far=<true><line_sep>self._epoch_number=0<line_sep>self.best_epoch=<none><line_sep>self.best_epoch_metrics.clear()<block_end><def_stmt>state_dict self<arrow>Dict[str Any]<block_start>""" A `Trainer` can use this to serialize the state of the metric tracker. """<line_sep><return>{"best_so_far":self._best_so_far "epochs_with_no_improvement":self._epochs_with_no_improvement "is_best_so_far":self._is_best_so_far "epoch_number":self._epoch_number "best_epoch":self.best_epoch "best_epoch_metrics":self.best_epoch_metrics }<block_end><def_stmt>load_state_dict self state_dict:Dict[str Any]<arrow><none><block_start>""" A `Trainer` can use this to hydrate a metric tracker from a serialized state. """<line_sep>self._best_so_far=state_dict["best_so_far"]<line_sep>self._epochs_with_no_improvement=state_dict["epochs_with_no_improvement"]<line_sep>self._is_best_so_far=state_dict["is_best_so_far"]<line_sep>self._epoch_number=state_dict["epoch_number"]<line_sep>self.best_epoch=state_dict["best_epoch"]<line_sep># Even though we don't promise backwards compatibility for the --recover flag, # it's particularly easy and harmless to provide it here, so we do it. self.best_epoch_metrics=state_dict.get("best_epoch_metrics" {})<block_end><def_stmt>add_metrics self metrics:Dict[str float]<arrow><none><block_start>""" Record a new value of the metric and update the various things that depend on it. 
"""<line_sep>combined_score=self.combined_score(metrics)<line_sep>new_best=(self._best_so_far<is><none>)<or>(combined_score<g>self._best_so_far)<if_stmt>new_best<block_start>self._best_so_far=combined_score<line_sep>self._epochs_with_no_improvement=0<line_sep>self._is_best_so_far=<true><line_sep>self.best_epoch=self._epoch_number<block_end><else_stmt><block_start>self._epochs_with_no_improvement<augadd>1<line_sep>self._is_best_so_far=<false><block_end>self._epoch_number<augadd>1<block_end><def_stmt>is_best_so_far self<arrow>bool<block_start>""" Returns true if the most recent value of the metric is the best so far. """<line_sep><return>self._is_best_so_far<block_end><def_stmt>should_stop_early self<arrow>bool<block_start>""" Returns true if improvement has stopped for long enough. """<if_stmt>self._patience<is><none><block_start><return><false><block_end><else_stmt><block_start><return>self._epochs_with_no_improvement<ge>self._patience<block_end><block_end><def_stmt>combined_score self metrics:Dict[str float]<arrow>float<block_start><try_stmt><block_start><return>sum(factor<times>metrics[metric_name]<for>factor,metric_name self.tracked_metrics)<block_end><except_stmt>KeyError<as>e<block_start><raise>ConfigurationError(f"You configured the trainer to use the {e.args[0]} "<concat>"metric for early stopping, but the model did not produce that metric.")<block_end><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMServices.Core.DQMEDAnalyzer DQMEDAnalyzer<line_sep>l1tGct=DQMEDAnalyzer('L1TGCT' gctCentralJetsSource=cms.InputTag("gctDigis" "cenJets") gctForwardJetsSource=cms.InputTag("gctDigis" "forJets") gctTauJetsSource=cms.InputTag("gctDigis" "tauJets") gctIsoTauJetsSource=cms.InputTag("gctDigis" "fake") gctEnergySumsSource=cms.InputTag("gctDigis") gctIsoEmSource=cms.InputTag("gctDigis" "isoEm") gctNonIsoEmSource=cms.InputTag("gctDigis" "nonIsoEm") monitorDir=cms.untracked.string("L1T/L1TGCT") verbose=cms.untracked.bool(<false>) stage1_layer2_=cms.bool(<false>) DQMStore=cms.untracked.bool(<true>) disableROOToutput=cms.untracked.bool(<true>) filterTriggerType=cms.int32(1))<line_sep>
# simple test function that uses python 3 features (e.g., f-strings) # see https://github.com/localstack/localstack/issues/264 <def_stmt>handler event context# the following line is Python 3.6+ specific <block_start>msg=f"Successfully processed {event}"# noqa This code is Python 3.6+ only <return>event<block_end>
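For completeness, a local invocation sketch of the handler above; the event payload is arbitrary and no real Lambda context is required here.

print(handler({"hello": "world"}, None))  # echoes the event back unchanged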
<import_stmt>copy<import_stmt>inspect<import_stmt>json<import_stmt>logging<import_stmt>pytest<import_stmt>re<import_stmt>os<import_stmt>shutil<import_stmt>subprocess<import_stmt>time<import_from_stmt>datetime datetime timedelta<import_from_stmt>configparser ConfigParser ExtendedInterpolation<import_from_stmt>typing Dict List Optional<import_from_stmt>pyhttpd.certs CertificateSpec<import_from_stmt>.md_cert_util MDCertUtil<import_from_stmt>pyhttpd.env HttpdTestSetup HttpdTestEnv<import_from_stmt>pyhttpd.result ExecResult<line_sep>log=logging.getLogger(__name__)<class_stmt>MDTestSetup(HttpdTestSetup)<block_start><def_stmt>__init__ self env:'HttpdTestEnv'<block_start>super().__init__(env=env)<block_end><def_stmt>make self<block_start>super().make(add_modules=["proxy_connect" "md"])<if_stmt>"pebble"<eq>self.env.acme_server<block_start>self._make_pebble_conf()<block_end><block_end><def_stmt>_make_pebble_conf self<block_start>our_dir=os.path.dirname(inspect.getfile(MDTestSetup))<line_sep>conf_src_dir=os.path.join(our_dir 'pebble')<line_sep>conf_dest_dir=os.path.join(self.env.gen_dir 'pebble')<if_stmt><not>os.path.exists(conf_dest_dir)<block_start>os.makedirs(conf_dest_dir)<block_end><for_stmt>name os.listdir(conf_src_dir)<block_start>src_path=os.path.join(conf_src_dir name)<line_sep>m=re.match(r'(.+).template' name)<if_stmt>m<block_start>self._make_template(src_path os.path.join(conf_dest_dir m.group(1)))<block_end><elif_stmt>os.path.isfile(src_path)<block_start>shutil.copy(src_path os.path.join(conf_dest_dir name))<block_end><block_end><block_end><block_end><class_stmt>MDTestEnv(HttpdTestEnv)<block_start>MD_S_UNKNOWN=0<line_sep>MD_S_INCOMPLETE=1<line_sep>MD_S_COMPLETE=2<line_sep>MD_S_EXPIRED=3<line_sep>MD_S_ERROR=4<line_sep>EMPTY_JOUT={'status':0 'output':[]}<line_sep>DOMAIN_SUFFIX="%d.org"%time.time()<line_sep>LOG_FMT_TIGHT='%(levelname)s: %(message)s'<line_sep>@classmethod<def_stmt>get_acme_server cls<block_start><return>os.environ['ACME']<if>'ACME'<in>os.environ<else>"pebble"<block_end>@classmethod<def_stmt>has_acme_server cls<block_start><return>cls.get_acme_server()<ne>'none'<block_end>@classmethod<def_stmt>has_acme_eab cls<block_start><return>cls.get_acme_server()<eq>'pebble'<block_end>@classmethod<def_stmt>is_pebble cls<arrow>bool<block_start><return>cls.get_acme_server()<eq>'pebble'<block_end>@classmethod<def_stmt>lacks_ocsp cls<block_start><return>cls.is_pebble()<block_end><def_stmt>__init__ self pytestconfig=<none> setup_dirs=<true><block_start>super().__init__(pytestconfig=pytestconfig local_dir=os.path.dirname(inspect.getfile(MDTestEnv)) interesting_modules=["md"])<line_sep>self._acme_server=self.get_acme_server()<line_sep>self._acme_tos="accepted"<line_sep>self._acme_ca_pemfile=os.path.join(self.gen_dir "apache/acme-ca.pem")<if_stmt>"pebble"<eq>self._acme_server<block_start>self._acme_url="https://localhost:14000/dir"<line_sep>self._acme_eab_url="https://localhost:14001/dir"<block_end><elif_stmt>"boulder"<eq>self._acme_server<block_start>self._acme_url="http://localhost:4001/directory"<line_sep>self._acme_eab_url=<none><block_end><else_stmt><block_start><raise>Exception(f"unknown ACME server type: {self._acme_server}")<block_end>self._acme_server_down=<false><line_sep>self._acme_server_ok=<false><line_sep>self._a2md_bin=os.path.join(self.bin_dir 'a2md')<line_sep>self._default_domain=f"test1.{self.http_tld}"<line_sep>self._store_dir="./md"<line_sep>self.set_store_dir_default()<line_sep>self.add_cert_specs([CertificateSpec(domains=[f"expired.{self._http_tld}"] 
valid_from=timedelta(days=-100) valid_to=timedelta(days=-10)) CertificateSpec(domains=["localhost"] key_type='rsa2048') ])<line_sep>self.httpd_error_log.set_ignored_lognos([#"AH10045", # mod_md complains that there is no vhost for an MDomain "AH10105" # mod_md does not find a vhost with SSL enabled for an MDomain "AH10085"# mod_ssl complains about fallback certificates ])<if_stmt>self.lacks_ocsp()<block_start>self.httpd_error_log.set_ignored_patterns([re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*') ])<block_end><if_stmt>setup_dirs<block_start>self._setup=MDTestSetup(env=self)<line_sep>self._setup.make()<line_sep>self.issue_certs()<line_sep>self.clear_store()<block_end><block_end><def_stmt>set_store_dir_default self<block_start>dirpath="md"<if_stmt>self.httpd_is_at_least("2.5.0")<block_start>dirpath=os.path.join("state" dirpath)<block_end>self.set_store_dir(dirpath)<block_end><def_stmt>set_store_dir self dirpath<block_start>self._store_dir=os.path.join(self.server_dir dirpath)<if_stmt>self.acme_url<block_start>self.a2md_stdargs([self.a2md_bin "-a" self.acme_url "-d" self._store_dir "-C" self.acme_ca_pemfile "-j"])<line_sep>self.a2md_rawargs([self.a2md_bin "-a" self.acme_url "-d" self._store_dir "-C" self.acme_ca_pemfile])<block_end><block_end><def_stmt>get_apxs_var self name:str<arrow>str<block_start>p=subprocess.run([self._apxs "-q" name] capture_output=<true> text=<true>)<if_stmt>p.returncode<ne>0<block_start><return>""<block_end><return>p.stdout.strip()<block_end>@property<def_stmt>acme_server self<block_start><return>self._acme_server<block_end>@property<def_stmt>acme_url self<block_start><return>self._acme_url<block_end>@property<def_stmt>acme_tos self<block_start><return>self._acme_tos<block_end>@property<def_stmt>a2md_bin self<block_start><return>self._a2md_bin<block_end>@property<def_stmt>acme_ca_pemfile self<block_start><return>self._acme_ca_pemfile<block_end>@property<def_stmt>store_dir self<block_start><return>self._store_dir<block_end><def_stmt>get_request_domain self request<block_start><return>"%s-%s"%(re.sub(r'[_]' '-' request.node.originalname) MDTestEnv.DOMAIN_SUFFIX)<block_end><def_stmt>get_method_domain self method<block_start><return>"%s-%s"%(re.sub(r'[_]' '-' method.__name__.lower()) MDTestEnv.DOMAIN_SUFFIX)<block_end><def_stmt>get_module_domain self module<block_start><return>"%s-%s"%(re.sub(r'[_]' '-' module.__name__.lower()) MDTestEnv.DOMAIN_SUFFIX)<block_end><def_stmt>get_class_domain self c<block_start><return>"%s-%s"%(re.sub(r'[_]' '-' c.__name__.lower()) MDTestEnv.DOMAIN_SUFFIX)<block_end># --------- cmd execution --------- _a2md_args=[]<line_sep>_a2md_args_raw=[]<def_stmt>a2md_stdargs self args<block_start>self._a2md_args=[]+args<block_end><def_stmt>a2md_rawargs self args<block_start>self._a2md_args_raw=[]+args<block_end><def_stmt>a2md self args raw=<false><arrow>ExecResult<block_start>preargs=self._a2md_args<if_stmt>raw<block_start>preargs=self._a2md_args_raw<block_end>log.debug("running: {0} {1}".format(preargs args))<line_sep><return>self.run(preargs+args)<block_end><def_stmt>check_acme self<block_start><if_stmt>self._acme_server_ok<block_start><return><true><block_end><if_stmt>self._acme_server_down<block_start>pytest.skip(msg="ACME server not running")<line_sep><return><false><block_end><if_stmt>self.is_live(self.acme_url timeout=timedelta(seconds=0.5))<block_start>self._acme_server_ok=<true><line_sep><return><true><block_end><else_stmt><block_start>self._acme_server_down=<true><line_sep>pytest.fail(msg="ACME server not running" 
pytrace=<false>)<line_sep><return><false><block_end><block_end><def_stmt>get_ca_pem_file self hostname:str<arrow>Optional[str]<block_start>pem_file=super().get_ca_pem_file(hostname)<if_stmt>pem_file<is><none><block_start>pem_file=self.acme_ca_pemfile<block_end><return>pem_file<block_end># --------- access local store --------- <def_stmt>purge_store self<block_start>log.debug("purge store dir: %s"%self._store_dir)<assert_stmt>len(self._store_dir)<g>1<if_stmt>os.path.exists(self._store_dir)<block_start>shutil.rmtree(self._store_dir ignore_errors=<false>)<block_end>os.makedirs(self._store_dir)<block_end><def_stmt>clear_store self<block_start>log.debug("clear store dir: %s"%self._store_dir)<assert_stmt>len(self._store_dir)<g>1<if_stmt><not>os.path.exists(self._store_dir)<block_start>os.makedirs(self._store_dir)<block_end><for_stmt>dirpath ["challenges" "tmp" "archive" "domains" "accounts" "staging" "ocsp"]<block_start>shutil.rmtree(os.path.join(self._store_dir dirpath) ignore_errors=<true>)<block_end><block_end><def_stmt>clear_ocsp_store self<block_start><assert_stmt>len(self._store_dir)<g>1<line_sep>dirpath=os.path.join(self._store_dir "ocsp")<line_sep>log.debug("clear ocsp store dir: %s"%dir)<if_stmt>os.path.exists(dirpath)<block_start>shutil.rmtree(dirpath ignore_errors=<true>)<block_end><block_end><def_stmt>authz_save self name content<block_start>dirpath=os.path.join(self._store_dir 'staging' name)<line_sep>os.makedirs(dirpath)<line_sep>open(os.path.join(dirpath 'authz.json') "w").write(content)<block_end><def_stmt>path_store_json self<block_start><return>os.path.join(self._store_dir 'md_store.json')<block_end><def_stmt>path_account self acct<block_start><return>os.path.join(self._store_dir 'accounts' acct 'account.json')<block_end><def_stmt>path_account_key self acct<block_start><return>os.path.join(self._store_dir 'accounts' acct 'account.pem')<block_end><def_stmt>store_domains self<block_start><return>os.path.join(self._store_dir 'domains')<block_end><def_stmt>store_archives self<block_start><return>os.path.join(self._store_dir 'archive')<block_end><def_stmt>store_stagings self<block_start><return>os.path.join(self._store_dir 'staging')<block_end><def_stmt>store_challenges self<block_start><return>os.path.join(self._store_dir 'challenges')<block_end><def_stmt>store_domain_file self domain filename<block_start><return>os.path.join(self.store_domains() domain filename)<block_end><def_stmt>store_archived_file self domain version filename<block_start><return>os.path.join(self.store_archives() "%s.%d"%(domain version) filename)<block_end><def_stmt>store_staged_file self domain filename<block_start><return>os.path.join(self.store_stagings() domain filename)<block_end><def_stmt>path_fallback_cert self domain<block_start><return>os.path.join(self._store_dir 'domains' domain 'fallback-pubcert.pem')<block_end><def_stmt>path_job self domain<block_start><return>os.path.join(self._store_dir 'staging' domain 'job.json')<block_end><def_stmt>replace_store self src<block_start>shutil.rmtree(self._store_dir ignore_errors=<false>)<line_sep>shutil.copytree(src self._store_dir)<block_end><def_stmt>list_accounts self<block_start><return>os.listdir(os.path.join(self._store_dir 'accounts'))<block_end><def_stmt>check_md self domain md=<none> state=-1 ca=<none> protocol=<none> agreement=<none> contacts=<none><block_start>domains=<none><if_stmt>isinstance(domain list)<block_start>domains=domain<line_sep>domain=domains[0]<block_end><if_stmt>md<block_start>domain=md<block_end>path=self.store_domain_file(domain 
'md.json')<with_stmt>open(path)<as>f<block_start>md=json.load(f)<block_end><assert_stmt>md<if_stmt>domains<block_start><assert_stmt>md['domains']<eq>domains<block_end><if_stmt>state<ge>0<block_start><assert_stmt>md['state']<eq>state<block_end><if_stmt>ca<block_start><assert_stmt>md['ca']['url']<eq>ca<block_end><if_stmt>protocol<block_start><assert_stmt>md['ca']['proto']<eq>protocol<block_end><if_stmt>agreement<block_start><assert_stmt>md['ca']['agreement']<eq>agreement<block_end><if_stmt>contacts<block_start><assert_stmt>md['contacts']<eq>contacts<block_end><block_end><def_stmt>pkey_fname self pkeyspec=<none><block_start><if_stmt>pkeyspec<and><not>re.match(r'^rsa( ?\d+)?$' pkeyspec.lower())<block_start><return>"privkey.{0}.pem".format(pkeyspec)<block_end><return>'privkey.pem'<block_end><def_stmt>cert_fname self pkeyspec=<none><block_start><if_stmt>pkeyspec<and><not>re.match(r'^rsa( ?\d+)?$' pkeyspec.lower())<block_start><return>"pubcert.{0}.pem".format(pkeyspec)<block_end><return>'pubcert.pem'<block_end><def_stmt>check_md_complete self domain pkey=<none><block_start>md=self.get_md_status(domain)<assert_stmt>md<assert_stmt>'state'<in>md "md is unexpected: {0}".format(md)<assert_stmt>md['state']<is>MDTestEnv.MD_S_COMPLETE "unexpected state: {0}".format(md['state'])<assert_stmt>os.path.isfile(self.store_domain_file(domain self.pkey_fname(pkey)))<assert_stmt>os.path.isfile(self.store_domain_file(domain self.cert_fname(pkey)))<block_end><def_stmt>check_md_credentials self domain<block_start><if_stmt>isinstance(domain list)<block_start>domains=domain<line_sep>domain=domains[0]<block_end><else_stmt><block_start>domains=[domain]<block_end># check private key, validate certificate, etc MDCertUtil.validate_privkey(self.store_domain_file(domain 'privkey.pem'))<line_sep>cert=MDCertUtil(self.store_domain_file(domain 'pubcert.pem'))<line_sep>cert.validate_cert_matches_priv_key(self.store_domain_file(domain 'privkey.pem'))<line_sep># check SANs and CN <assert_stmt>cert.get_cn()<eq>domain<line_sep># compare lists twice in opposite directions: SAN may not respect ordering san_list=list(cert.get_san_list())<assert_stmt>len(san_list)<eq>len(domains)<assert_stmt>set(san_list).issubset(domains)<assert_stmt>set(domains).issubset(san_list)<line_sep># check valid dates interval not_before=cert.get_not_before()<line_sep>not_after=cert.get_not_after()<assert_stmt>not_before<l>datetime.now(not_before.tzinfo)<assert_stmt>not_after<g>datetime.now(not_after.tzinfo)<block_end># --------- check utilities --------- <def_stmt>check_json_contains self actual expected# write all expected key:value bindings to a copy of the actual data ... # ... 
assert it stays unchanged <block_start>test_json=copy.deepcopy(actual)<line_sep>test_json.update(expected)<assert_stmt>actual<eq>test_json<block_end><def_stmt>check_file_access self path exp_mask<block_start>actual_mask=os.lstat(path).st_mode&0o777<assert_stmt>oct(actual_mask)<eq>oct(exp_mask)<block_end><def_stmt>check_dir_empty self path<block_start><assert_stmt>os.listdir(path)<eq>[]<block_end><def_stmt>get_http_status self domain path use_https=<true><block_start>r=self.get_meta(domain path use_https insecure=<true>)<line_sep><return>r.response['status']<block_end><def_stmt>get_cert self domain tls=<none> ciphers=<none><block_start><return>MDCertUtil.load_server_cert(self._httpd_addr self.https_port domain tls=tls ciphers=ciphers)<block_end><def_stmt>get_server_cert self domain proto=<none> ciphers=<none><block_start>args=["openssl" "s_client" "-status" "-connect" "%s:%s"%(self._httpd_addr self.https_port) "-CAfile" self.acme_ca_pemfile "-servername" domain "-showcerts"]<if_stmt>proto<is><not><none><block_start>args.extend(["-{0}".format(proto)])<block_end><if_stmt>ciphers<is><not><none><block_start>args.extend(["-cipher" ciphers])<block_end>r=self.run(args)<line_sep># noinspection PyBroadException <try_stmt><block_start><return>MDCertUtil.parse_pem_cert(r.stdout)<block_end><except_stmt><block_start><return><none><block_end><block_end><def_stmt>verify_cert_key_lenghts self domain pkeys<block_start><for_stmt>p pkeys<block_start>cert=self.get_server_cert(domain proto="tls1_2" ciphers=p['ciphers'])<if_stmt>0<eq>p['keylen']<block_start><assert_stmt>cert<is><none><block_end><else_stmt><block_start><assert_stmt>cert "no cert returned for cipher: {0}".format(p['ciphers'])<assert_stmt>cert.get_key_length()<eq>p['keylen'] "key length, expected {0}, got {1}".format(p['keylen'] cert.get_key_length())<block_end><block_end><block_end><def_stmt>get_meta self domain path use_https=<true> insecure=<false><block_start>schema="https"<if>use_https<else>"http"<line_sep>port=self.https_port<if>use_https<else>self.http_port<line_sep>r=self.curl_get(f"{schema}://{domain}:{port}{path}" insecure=insecure)<assert_stmt>r.exit_code<eq>0<assert_stmt>r.response<assert_stmt>r.response['header']<line_sep><return>r<block_end><def_stmt>get_content self domain path use_https=<true><block_start>schema="https"<if>use_https<else>"http"<line_sep>port=self.https_port<if>use_https<else>self.http_port<line_sep>r=self.curl_get(f"{schema}://{domain}:{port}{path}")<assert_stmt>r.exit_code<eq>0<line_sep><return>r.stdout<block_end><def_stmt>get_json_content self domain path use_https=<true> insecure=<false> debug_log=<true><block_start>schema="https"<if>use_https<else>"http"<line_sep>port=self.https_port<if>use_https<else>self.http_port<line_sep>url=f"{schema}://{domain}:{port}{path}"<line_sep>r=self.curl_get(url insecure=insecure debug_log=debug_log)<if_stmt>r.exit_code<ne>0<block_start>log.error(f"curl get on {url} returned {r.exit_code}"<concat>f"\nstdout: {r.stdout}"<concat>f"\nstderr: {r.stderr}")<block_end><assert_stmt>r.exit_code<eq>0 r.stderr<line_sep><return>r.json<block_end><def_stmt>get_certificate_status self domain<arrow>Dict<block_start><return>self.get_json_content(domain "/.httpd/certificate-status" insecure=<true>)<block_end><def_stmt>get_md_status self domain via_domain=<none> use_https=<true> debug_log=<false><arrow>Dict<block_start><if_stmt>via_domain<is><none><block_start>via_domain=self._default_domain<block_end><return>self.get_json_content(via_domain f"/md-status/{domain}" use_https=use_https 
debug_log=debug_log)<block_end><def_stmt>get_server_status self query="/" via_domain=<none> use_https=<true><block_start><if_stmt>via_domain<is><none><block_start>via_domain=self._default_domain<block_end><return>self.get_content(via_domain "/server-status%s"%query use_https=use_https)<block_end><def_stmt>await_completion self names must_renew=<false> restart=<true> timeout=60 via_domain=<none> use_https=<true><block_start>try_until=time.time()+timeout<line_sep>renewals={}<line_sep>names=names.copy()<while_stmt>len(names)<g>0<block_start><if_stmt>time.time()<ge>try_until<block_start><return><false><block_end><for_stmt>name names<block_start>mds=self.get_md_status(name via_domain=via_domain use_https=use_https)<if_stmt>mds<is><none><block_start>log.debug("not managed by md: %s"%name)<line_sep><return><false><block_end><if_stmt>'renewal'<in>mds<block_start>renewal=mds['renewal']<line_sep>renewals[name]=<true><if_stmt>'finished'<in>renewal<and>renewal['finished']<is><true><block_start><if_stmt>(<not>must_renew)<or>(name<in>renewals)<block_start>log.debug(f"domain cert was renewed: {name}")<line_sep>names.remove(name)<block_end><block_end><block_end><block_end><if_stmt>len(names)<ne>0<block_start>time.sleep(0.1)<block_end><block_end><if_stmt>restart<block_start>time.sleep(0.1)<line_sep><return>self.apache_restart()<eq>0<block_end><return><true><block_end><def_stmt>is_renewing self name<block_start>stat=self.get_certificate_status(name)<line_sep><return>'renewal'<in>stat<block_end><def_stmt>await_renewal self names timeout=60<block_start>try_until=time.time()+timeout<while_stmt>len(names)<g>0<block_start><if_stmt>time.time()<ge>try_until<block_start><return><false><block_end><for_stmt>name names<block_start>md=self.get_md_status(name)<if_stmt>md<is><none><block_start>log.debug("not managed by md: %s"%name)<line_sep><return><false><block_end><if_stmt>'renewal'<in>md<block_start>names.remove(name)<block_end><block_end><if_stmt>len(names)<ne>0<block_start>time.sleep(0.1)<block_end><block_end><return><true><block_end><def_stmt>await_error self domain timeout=60 via_domain=<none> use_https=<true> errors=1<block_start>try_until=time.time()+timeout<while_stmt><true><block_start><if_stmt>time.time()<ge>try_until<block_start><return><false><block_end>md=self.get_md_status(domain via_domain=via_domain use_https=use_https)<if_stmt>md<block_start><if_stmt>'state'<in>md<and>md['state']<eq>MDTestEnv.MD_S_ERROR<block_start><return>md<block_end><if_stmt>'renewal'<in>md<and>'errors'<in>md['renewal']<and>md['renewal']['errors']<ge>errors<block_start><return>md<block_end><block_end>time.sleep(0.1)<block_end><return><none><block_end><def_stmt>await_file self fpath timeout=60<block_start>try_until=time.time()+timeout<while_stmt><true><block_start><if_stmt>time.time()<ge>try_until<block_start><return><false><block_end><if_stmt>os.path.isfile(fpath)<block_start><return><true><block_end>time.sleep(0.1)<block_end><block_end><def_stmt>check_file_permissions self domain<block_start>md=self.a2md(["list" domain]).json['output'][0]<assert_stmt>md<line_sep>acct=md['ca']['account']<assert_stmt>acct<line_sep>self.check_file_access(self.path_store_json() 0o600)<line_sep># domains self.check_file_access(self.store_domains() 0o700)<line_sep>self.check_file_access(os.path.join(self.store_domains() domain) 0o700)<line_sep>self.check_file_access(self.store_domain_file(domain 'privkey.pem') 0o600)<line_sep>self.check_file_access(self.store_domain_file(domain 'pubcert.pem') 
0o600)<line_sep>self.check_file_access(self.store_domain_file(domain 'md.json') 0o600)<line_sep># archive self.check_file_access(self.store_archived_file(domain 1 'md.json') 0o600)<line_sep># accounts self.check_file_access(os.path.join(self._store_dir 'accounts') 0o755)<line_sep>self.check_file_access(os.path.join(self._store_dir 'accounts' acct) 0o755)<line_sep>self.check_file_access(self.path_account(acct) 0o644)<line_sep>self.check_file_access(self.path_account_key(acct) 0o644)<line_sep># staging self.check_file_access(self.store_stagings() 0o755)<block_end><def_stmt>get_ocsp_status self domain proto=<none> cipher=<none> ca_file=<none><block_start>stat={}<line_sep>args=["openssl" "s_client" "-status" "-connect" "%s:%s"%(self._httpd_addr self.https_port) "-CAfile" ca_file<if>ca_file<else>self.acme_ca_pemfile "-servername" domain "-showcerts"]<if_stmt>proto<is><not><none><block_start>args.extend(["-{0}".format(proto)])<block_end><if_stmt>cipher<is><not><none><block_start>args.extend(["-cipher" cipher])<block_end>r=self.run(args debug_log=<false>)<line_sep>ocsp_regex=re.compile(r'OCSP response: +([^=\n]+)\n')<line_sep>matches=ocsp_regex.finditer(r.stdout)<for_stmt>m matches<block_start><if_stmt>m.group(1)<ne>""<block_start>stat['ocsp']=m.group(1)<block_end><block_end><if_stmt>'ocsp'<not><in>stat<block_start>ocsp_regex=re.compile(r'OCSP Response Status:\s*(.+)')<line_sep>matches=ocsp_regex.finditer(r.stdout)<for_stmt>m matches<block_start><if_stmt>m.group(1)<ne>""<block_start>stat['ocsp']=m.group(1)<block_end><block_end><block_end>verify_regex=re.compile(r'Verify return code:\s*(.+)')<line_sep>matches=verify_regex.finditer(r.stdout)<for_stmt>m matches<block_start><if_stmt>m.group(1)<ne>""<block_start>stat['verify']=m.group(1)<block_end><block_end><return>stat<block_end><def_stmt>await_ocsp_status self domain timeout=10 ca_file=<none><block_start>try_until=time.time()+timeout<while_stmt><true><block_start><if_stmt>time.time()<ge>try_until<block_start><break><block_end>stat=self.get_ocsp_status(domain ca_file=ca_file)<if_stmt>'ocsp'<in>stat<and>stat['ocsp']<ne>"no response sent"<block_start><return>stat<block_end>time.sleep(0.1)<block_end><raise>TimeoutError(f"ocsp response not available: {domain}")<block_end><def_stmt>create_self_signed_cert self name_list valid_days serial=1000 path=<none><block_start>dirpath=path<if_stmt><not>path<block_start>dirpath=os.path.join(self.store_domains() name_list[0])<block_end><return>MDCertUtil.create_self_signed_cert(dirpath name_list valid_days serial)<block_end><block_end>
# Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app <import_stmt>bottle<import_stmt>ujson<import_from_stmt>bottle route run<line_sep>@route("/")<def_stmt>index <block_start><return>ujson.dumps({"test":<true>})<block_end>app=bottle.default_app()<line_sep>
# Copyright 2016-2018, Pulumi Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>asyncio<import_from_stmt>pulumi CustomResource Output Input<async_keyword><def_stmt>read_a_file_or_something <block_start><await>asyncio.sleep(0)<line_sep><return>"here's a file"<block_end><def_stmt>assert_eq l r<block_start><assert_stmt>l<eq>r<block_end><class_stmt>FileResource(CustomResource)<block_start>contents:Output[str]<def_stmt>__init__ self name:str file_contents:Input[str]<arrow><none><block_start>CustomResource.__init__(self "test:index:FileResource" name {"contents":file_contents})<block_end><block_end># read_a_file_or_something returns a coroutine when called, which needs to be scheduled # and awaited in order to yield a value. file_res=FileResource("file" read_a_file_or_something())<line_sep>file_res.contents.apply(<lambda>c:assert_eq(c "here's a file"))<line_sep>
"""Fix bound method attributes (method.im_? -> method.__?__). """<line_sep># Author: <NAME> # Local imports <import_from_stmt>.. fixer_base<import_from_stmt>..fixer_util Name<line_sep>MAP={"im_func":"__func__" "im_self":"__self__" "im_class":"__self__.__class__"}<class_stmt>FixMethodattrs(fixer_base.BaseFix)<block_start>BM_compatible=<true><line_sep>PATTERN=""" power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > """<def_stmt>transform self node results<block_start>attr=results["attr"][0]<line_sep>new=unicode(MAP[attr.value])<line_sep>attr.replace(Name(new prefix=attr.prefix))<block_end><block_end>
"""Tests for miscellaneous properties, such as debuggability."""<import_stmt>time<import_from_stmt>chopsticks.tunnel Docker<import_from_stmt>chopsticks.group Group<def_stmt>test_tunnel_repr <block_start>"""Tunnels have a usable repr."""<line_sep>tun=Docker('py36' image='python:3.6')<assert_stmt>repr(tun)<eq>"Docker('py36')"<block_end><def_stmt>test_group_repr <block_start>"""Groups have a usable repr."""<line_sep>grp=Group([Docker('py35' image='python:3.5') Docker('py36' image='python:3.6')])<assert_stmt>repr(grp)<eq>"Group([Docker('py35'), Docker('py36')])"<block_end><def_stmt>test_group_reuse <block_start>"""We can re-use a group."""<line_sep>grp=Group([Docker('py35' image='python:3.5') Docker('py36' image='python:3.6')])<with_stmt>grp<block_start>grp.call(time.time)<line_sep>grp.call(time.time)<block_end><block_end>
""" A simple, good-looking plot =========================== Demoing some simple features of matplotlib """<import_stmt>numpy<as>np<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_stmt>matplotlib.pyplot<as>plt<line_sep>fig=plt.figure(figsize=(5 4) dpi=72)<line_sep>axes=fig.add_axes([0.01 0.01 .98 0.98])<line_sep>X=np.linspace(0 2 200)<line_sep>Y=np.sin(2<times>np.pi<times>X)<line_sep>plt.plot(X Y lw=2)<line_sep>plt.ylim(-1.1 1.1)<line_sep>plt.grid()<line_sep>plt.show()<line_sep>
<import_from_stmt>backend.common.models.mytba MyTBAModel<class_stmt>Favorite(MyTBAModel)<block_start>""" In order to make strongly consistent DB requests, instances of this class should be created with a parent that is the associated Account key. """<def_stmt>__init__ self *args **kwargs<block_start>super(Favorite self).__init__(*args **kwargs)<block_end><block_end>
<class_stmt>Foo<block_start><def_stmt>bar self<block_start><return>"a"<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>f=Foo()<line_sep>b=f.bar()<line_sep>print(b)<block_end>
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ Air separation phase equilibrium package using Peng-Robinson EoS. Example property package using the Generic Property Package Framework. This example shows how to set up a property package to do air separation phase equilibrium in the generic framework using Peng-Robinson equation along with methods drawn from the pre-built IDAES property libraries. The example includes two dictionaries. 1. The dictionary named configuration contains parameters obtained from The Properties of Gases and Liquids (1987) 4th edition and NIST. 2. The dictionary named configuration_Dowling_2015 contains parameters used in A framework for efficient large scale equation-oriented flowsheet optimization (2015) Dowling. The parameters are extracted from Properties of Gases and Liquids (1977) 3rd edition for Antoine's vapor equation and acentric factors and converted values from the Properties of Gases and Liquids (1977) 3rd edition to j. """<line_sep># Import Python libraries <import_stmt>logging<line_sep># Import Pyomo units <import_from_stmt>pyomo.environ units<as>pyunits<line_sep># Import IDAES cores <import_from_stmt>idaes.core LiquidPhase VaporPhase Component<import_from_stmt>idaes.generic_models.properties.core.state_definitions FTPx<import_from_stmt>idaes.generic_models.properties.core.eos.ceos Cubic CubicType<import_from_stmt>idaes.generic_models.properties.core.phase_equil SmoothVLE<import_from_stmt>idaes.generic_models.properties.core.phase_equil.bubble_dew LogBubbleDew<import_from_stmt>idaes.generic_models.properties.core.phase_equil.forms log_fugacity<import_from_stmt>idaes.generic_models.properties.core.pure RPP4<import_from_stmt>idaes.generic_models.properties.core.pure NIST<import_from_stmt>idaes.generic_models.properties.core.pure RPP3<line_sep># Set up logger _log=logging.getLogger(__name__)<line_sep># --------------------------------------------------------------------- # Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system # Data Sources: # [1] The Properties of Gases and Liquids (1987) # 4th edition, Chemical Engineering Series - <NAME> # [2] NIST, https://webbook.nist.gov/ # Retrieved 16th August, 2020 # [3] The Properties of Gases and Liquids (1987) # 3rd edition, Chemical Engineering Series - <NAME> # Cp parameters where converted to j in Dowling 2015 # [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015) # Computers and Chemical Engineering - <NAME> configuration={# Specifying components "components":{"nitrogen":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":NIST "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(28.0135E-3 pyunits.kg/pyunits.mol) # [1] "pressure_crit":(34e5 pyunits.Pa) # [1] 
"temperature_crit":(126.2 pyunits.K) # [1] "omega":0.037 # [1] "cp_mol_ig_comp_coeff":{"A":(3.115E1 pyunits.J/pyunits.mol/pyunits.K) # [1] "B":(-1.357E-2 pyunits.J/pyunits.mol/pyunits.K<power>2) "C":(2.680E-5 pyunits.J/pyunits.mol/pyunits.K<power>3) "D":(-1.168E-8 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [2] "entr_mol_form_vap_comp_ref":(191.61 pyunits.J/pyunits.mol/pyunits.K) # [2] "pressure_sat_comp_coeff":{"A":(3.7362 <none>) # [2] "B":(264.651 pyunits.K) "C":(-6.788 pyunits.K)}}} "argon":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":NIST "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(39.948E-3 pyunits.kg/pyunits.mol) # [1] "pressure_crit":(48.98e5 pyunits.Pa) # [1] "temperature_crit":(150.86 pyunits.K) # [1] "omega":0.001 # [1] "cp_mol_ig_comp_coeff":{"A":(2.050E1 pyunits.J/pyunits.mol/pyunits.K) # [1] "B":(0.0 pyunits.J/pyunits.mol/pyunits.K<power>2) "C":(0.0 pyunits.J/pyunits.mol/pyunits.K<power>3) "D":(0.0 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [2] "entr_mol_form_vap_comp_ref":(154.8 pyunits.J/pyunits.mol/pyunits.K) # [2] "pressure_sat_comp_coeff":{"A":(3.29555 <none>) # [2] "B":(215.24 pyunits.K) "C":(-22.233 pyunits.K)}}} "oxygen":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":NIST "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(31.999E-3 pyunits.kg/pyunits.mol) # [1] "pressure_crit":(50.43e5 pyunits.Pa) # [1] "temperature_crit":(154.58 pyunits.K) # [1] "omega":0.025 # [1] "cp_mol_ig_comp_coeff":{"A":(2.811E1 pyunits.J/pyunits.mol/pyunits.K) "B":(-3.680E-6 pyunits.J/pyunits.mol/pyunits.K<power>2) "C":(1.746E-5 pyunits.J/pyunits.mol/pyunits.K<power>3) "D":(-1.065E-8 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [2] "entr_mol_form_vap_comp_ref":(205.152 pyunits.J/pyunits.mol/pyunits.K) # [2] "pressure_sat_comp_coeff":{"A":(3.85845 <none>) # [2] "B":(325.675 pyunits.K) "C":(-5.667 pyunits.K)}}}} # Specifying phases "phases":{"Liq":{"type":LiquidPhase "equation_of_state":Cubic "equation_of_state_options":{"type":CubicType.PR}} "Vap":{"type":VaporPhase "equation_of_state":Cubic "equation_of_state_options":{"type":CubicType.PR}}} # Set base units of measurement "base_units":{"time":pyunits.s "length":pyunits.m "mass":pyunits.kg "amount":pyunits.mol "temperature":pyunits.K} # Specifying state definition "state_definition":FTPx "state_bounds":{"flow_mol":(0 100 1000 pyunits.mol/pyunits.s) "temperature":(10 300 350 pyunits.K) "pressure":(5e4 1e5 1e7 pyunits.Pa)} "pressure_ref":(101325 pyunits.Pa) "temperature_ref":(298.15 pyunits.K) # Defining phase equilibria "phases_in_equilibrium":[("Vap" "Liq")] "phase_equilibrium_state":{("Vap" "Liq"):SmoothVLE} "bubble_dew_method":LogBubbleDew "parameter_data":{"PR_kappa":{("nitrogen" "nitrogen"):0.000 ("nitrogen" "argon"):-0.26e-2 ("nitrogen" "oxygen"):-0.119e-1 ("argon" "nitrogen"):-0.26e-2 ("argon" "argon"):0.000 ("argon" "oxygen"):0.104e-1 ("oxygen" "nitrogen"):-0.119e-1 ("oxygen" "argon"):0.104e-1 ("oxygen" "oxygen"):0.000}}}<line_sep>configuration_Dowling_2015={# Specifying components "components":{"nitrogen":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":RPP3 "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(28.0135E-3 pyunits.kg/pyunits.mol) # [3] 
"pressure_crit":(33.943875e5 pyunits.Pa) # [4] "temperature_crit":(126.2 pyunits.K) # [4] "omega":0.04 # [3] "cp_mol_ig_comp_coeff":{'A':(3.112896E1 pyunits.J/pyunits.mol/pyunits.K) # [3] 'B':(-1.356E-2 pyunits.J/pyunits.mol/pyunits.K<power>2) 'C':(2.6878E-5 pyunits.J/pyunits.mol/pyunits.K<power>3) 'D':(-1.167E-8 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [2] "entr_mol_form_vap_comp_ref":(191.61 pyunits.J/pyunits.mol/pyunits.K) # [2] "pressure_sat_comp_coeff":{'A':(14.9342 <none>) # [3] 'B':(588.72 pyunits.K) 'C':(-6.60 pyunits.K)}}} "argon":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":RPP3 "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(39.948E-3 pyunits.kg/pyunits.mol) # [3] "pressure_crit":(48.737325e5 pyunits.Pa) # [4] "temperature_crit":(150.86 pyunits.K) # [4] "omega":-0.004 # [1] "cp_mol_ig_comp_coeff":{'A':(2.0790296E1 pyunits.J/pyunits.mol/pyunits.K) # [3] 'B':(-3.209E-05 pyunits.J/pyunits.mol/pyunits.K<power>2) 'C':(5.163E-08 pyunits.J/pyunits.mol/pyunits.K<power>3) 'D':(0.0 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [3] "entr_mol_form_vap_comp_ref":(154.8 pyunits.J/pyunits.mol/pyunits.K) # [3] "pressure_sat_comp_coeff":{'A':(15.2330 <none>) # [3] 'B':(700.51 pyunits.K) 'C':(-5.84 pyunits.K)}}} "oxygen":{"type":Component "enth_mol_ig_comp":RPP4 "entr_mol_ig_comp":RPP4 "pressure_sat_comp":RPP3 "phase_equilibrium_form":{("Vap" "Liq"):log_fugacity} "parameter_data":{"mw":(31.999E-3 pyunits.kg/pyunits.mol) # [3] "pressure_crit":(50.45985e5 pyunits.Pa) # [4] "temperature_crit":(154.58 pyunits.K) # [4] "omega":0.021 # [1] "cp_mol_ig_comp_coeff":{'A':(2.8087192E1 pyunits.J/pyunits.mol/pyunits.K) # [3] 'B':(-3.678E-6 pyunits.J/pyunits.mol/pyunits.K<power>2) 'C':(1.745E-5 pyunits.J/pyunits.mol/pyunits.K<power>3) 'D':(-1.064E-8 pyunits.J/pyunits.mol/pyunits.K<power>4)} "enth_mol_form_vap_comp_ref":(0.0 pyunits.J/pyunits.mol) # [2] "entr_mol_form_vap_comp_ref":(205.152 pyunits.J/pyunits.mol/pyunits.K) # [2] "pressure_sat_comp_coeff":{'A':(15.4075 <none>) # [3] 'B':(734.55 pyunits.K) 'C':(-6.45 pyunits.K)}}}} # Specifying phases "phases":{"Liq":{"type":LiquidPhase "equation_of_state":Cubic "equation_of_state_options":{"type":CubicType.PR}} "Vap":{"type":VaporPhase "equation_of_state":Cubic "equation_of_state_options":{"type":CubicType.PR}}} # Set base units of measurement "base_units":{"time":pyunits.s "length":pyunits.m "mass":pyunits.kg "amount":pyunits.mol "temperature":pyunits.K} # Specifying state definition "state_definition":FTPx "state_bounds":{"flow_mol":(0 100 1000 pyunits.mol/pyunits.s) "temperature":(10 300 350 pyunits.K) "pressure":(5e4 1e5 1e7 pyunits.Pa)} "pressure_ref":(101325 pyunits.Pa) "temperature_ref":(298.15 pyunits.K) # Defining phase equilibria "phases_in_equilibrium":[("Vap" "Liq")] "phase_equilibrium_state":{("Vap" "Liq"):SmoothVLE} "bubble_dew_method":LogBubbleDew "parameter_data":{"PR_kappa":{("nitrogen" "nitrogen"):0.000 ("nitrogen" "argon"):-0.26e-2 ("nitrogen" "oxygen"):-0.119e-1 ("argon" "nitrogen"):-0.26e-2 ("argon" "argon"):0.000 ("argon" "oxygen"):0.104e-1 ("oxygen" "nitrogen"):-0.119e-1 ("oxygen" "argon"):0.104e-1 ("oxygen" "oxygen"):0.000}}}<line_sep>
''' Speed: 95.97% Memory: 24.96% Time complexity: O(n) Space complexity: O(n) '''<class_stmt>Solution(object)<block_start><def_stmt>longestValidParentheses self s<block_start>ans=0<line_sep>stack=[-1]<for_stmt>i range(len(s))<block_start><if_stmt>(s[i]<eq>'(')<block_start>stack.append(i)<block_end><else_stmt><block_start>stack.pop()<if_stmt>(len(stack)<eq>0)<block_start>stack.append(i)<block_end><else_stmt><block_start>ans=max(ans i-stack[-1])<block_end><block_end><block_end><return>ans<block_end><block_end>
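A brief usage sketch for the stack-based scan above; the sample strings are illustrative and not taken from the source.

if __name__ == '__main__':
    sol = Solution()
    print(sol.longestValidParentheses("(()"))     # -> 2, the trailing "()"
    print(sol.longestValidParentheses(")()())"))  # -> 4, the substring "()()"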
<import_from_stmt>distdeepq models# noqa <import_from_stmt>distdeepq.build_graph build_act build_train# noqa <import_from_stmt>distdeepq.simple learn load make_session# noqa <import_from_stmt>distdeepq.replay_buffer ReplayBuffer PrioritizedReplayBuffer# noqa <import_from_stmt>distdeepq.static *<import_from_stmt>distdeepq.plots PlotMachine<line_sep>
# Finds the repeated value in nums (n + 1 integers drawn from 1..n) with Floyd's tortoise-and-hare cycle detection: O(n) time, O(1) extra space. <import_from_stmt>typing List<class_stmt>Solution<block_start><def_stmt>findDuplicate self nums:List[int]<arrow>int<block_start>p1,p2=nums[0] nums[nums[0]]<while_stmt>nums[p1]<ne>nums[p2]<block_start>p1=nums[p1]<line_sep>p2=nums[nums[p2]]<block_end>p2=0<while_stmt>nums[p1]<ne>nums[p2]<block_start>p1=nums[p1]<line_sep>p2=nums[p2]<block_end><return>nums[p1]<block_end><block_end>
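A minimal usage sketch for the two-pointer solution above (the input list is illustrative only).

if __name__ == '__main__':
    # Five values drawn from 1..4, so exactly one value repeats; here it is 2
    print(Solution().findDuplicate([1, 3, 4, 2, 2]))  # -> 2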
<class_stmt>A<block_start><def_stmt>a self<block_start><return>'a'<block_end><block_end><class_stmt>B(A object)<block_start><def_stmt>b self<block_start><return>'b'<block_end><block_end><class_stmt>Inherit(A)<block_start><def_stmt>a self<block_start><return>'c'<block_end><block_end>
<import_stmt>json<import_from_stmt>threading Semaphore<import_stmt>ee<import_from_stmt>flask request<import_from_stmt>google.auth crypt<import_from_stmt>google.oauth2 service_account<import_from_stmt>google.oauth2.credentials Credentials<line_sep>service_account_credentials=<none><import_stmt>logging<line_sep>export_semaphore=Semaphore(5)<line_sep>get_info_semaphore=Semaphore(2)<def_stmt>init_service_account_credentials args<block_start><global>service_account_credentials<with_stmt>open(args['gee_key_path'] 'r')<as>file_<block_start>key_data=file_.read()<block_end>signer=crypt.RSASigner.from_string(key_data)<line_sep>service_account_credentials=service_account.Credentials(signer=signer service_account_email=args['gee_email'] token_uri=ee.oauth.TOKEN_URI scopes=ee.oauth.SCOPES+['https://www.googleapis.com/auth/drive'])<block_end><def_stmt>init_ee <block_start>credentials=service_account_credentials<if_stmt>'sepal-user'<in>request.headers<block_start>user=json.loads(request.headers['sepal-user'])<line_sep>googleTokens=user.get('googleTokens' <none>)<if_stmt>googleTokens<block_start>credentials=Credentials(googleTokens['accessToken'])<block_end><block_end>ee.InitializeThread(credentials)<block_end><def_stmt>to_asset_id asset_path<block_start>asset_roots=ee.data.getAssetRoots()<if_stmt><not>asset_roots<block_start><raise>Exception('User has no GEE asset roots')<block_end><return>asset_roots[0]['id']+'/'+asset_path<block_end><def_stmt>delete_asset_collection asset_id<block_start>logging.info('Recursively deleting '+asset_id)<if_stmt>ee.data.getInfo(asset_id)<block_start>images=ee.data.getList({'id':asset_id 'fields':'id'})<for_stmt>image images<block_start>ee.data.deleteAsset(image['id'])<line_sep>logging.info('Deleted '+image['id'])<block_end>ee.data.deleteAsset(asset_id)<line_sep>logging.info('Deleted '+asset_id)<block_end><block_end><def_stmt>create_asset_image_collection asset_id<block_start>delete_asset_collection(asset_id)<line_sep>ee.data.create_assets(asset_ids=[asset_id] asset_type=ee.data.ASSET_TYPE_IMAGE_COLL mk_parents=<true>)<block_end><def_stmt>create_asset_folder asset_id<block_start>ee.data.create_assets(asset_ids=[asset_id] asset_type=ee.data.ASSET_TYPE_FOLDER mk_parents=<true>)<block_end><def_stmt>get_info ee_object<block_start><try_stmt><block_start>get_info_semaphore.acquire()<line_sep><return>ee_object.getInfo()<block_end><finally_stmt><block_start>get_info_semaphore.release()<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>struct<import_stmt>solvers<import_stmt>pid<import_from_stmt>util *<line_sep>MOTORSPEED=0.9<line_sep>MOTORMARGIN=1<line_sep>MOTORSLOPE=30<line_sep>ERRORLIM=5.0<class_stmt>ArmConfig<block_start>"""Holds an arm's proportions, limits and other configuration data"""<def_stmt>__init__ self main_length=148.4 forearm_length=160 linkage_length=155 lower_actuator_length=65 upper_actuator_length=54.4 wrist_length=90.52 shoulder_offset=[-9.7 18.71]<block_start>self.main_length=main_length<line_sep>self.forearm_length=forearm_length<line_sep>self.linkage_length=linkage_length<line_sep>self.lower_actuator_length=lower_actuator_length<line_sep>self.upper_actuator_length=upper_actuator_length<line_sep>self.wrist_length=wrist_length<line_sep>self.shoulder_offset=shoulder_offset<block_end><block_end><class_stmt>ArmPose<block_start>""" Defines a physical configuration of a LiteArm robot arm. Internal angles are relative to vertical (elevator/actuator) or straight forward (swing), and are stored in radians. Extracted servo angles range 0-300 and are measured in degrees. Provides methods for: - finding the required servo angles to reach the pose - checking the validity of the pose """<line_sep>structFormat='fffff'<line_sep>@staticmethod<def_stmt>calcElevatorAngle servoAngle<block_start><return>radians(178.21-servoAngle)<block_end>@staticmethod<def_stmt>calcSwingAngle servoAngle<block_start><return>radians(150.0-servoAngle)<block_end>@staticmethod<def_stmt>calcActuatorAngle servoAngle<block_start><return>radians(servoAngle-204.78)<block_end>@staticmethod<def_stmt>calcWristXAngle servoAngle<block_start><return>radians(150.0-servoAngle)<block_end>@staticmethod<def_stmt>calcWristYAngle servoAngle<block_start><return>radians(servoAngle-147.0)<block_end><def_stmt>__init__ self arm_config swing_angle shoulder_angle actuator_angle elbow_angle elbow2D wrist2D effector2D effector wrist_x wrist_y<block_start>self.cfg=arm_config<line_sep>self.swing_angle=swing_angle<line_sep>self.shoulder_angle=shoulder_angle<line_sep>self.actuator_angle=actuator_angle<line_sep>self.elbow_angle=elbow_angle<line_sep># Joints in the arm shoulder=rotate(self.cfg.shoulder_offset swing_angle)<line_sep>self.shoulder2D=[self.cfg.shoulder_offset[1] 0]<line_sep>self.shoulder=[shoulder[0] 0 shoulder[1]]<line_sep>self.wrist2D=wrist2D<line_sep>self.effector2D=effector2D<line_sep>self.effector=effector<line_sep># Construct the 3D elbow & wrist positions from the 2D (planar) IK # solution arm_vec=effector-self.shoulder<line_sep>arm_vec[1]=0<line_sep>self.elbow2D=elbow2D<line_sep>self.elbow=self.shoulder+normalize(arm_vec)<times>elbow2D[0]<line_sep>self.elbow[1]=elbow2D[1]<line_sep>self.wrist=self.effector-normalize(arm_vec)<times>arm_config.wrist_length<line_sep># Wrist pose self.wristXAngle=wrist_x<line_sep>self.wristYAngle=wrist_y<block_end><def_stmt>getServoElevator self<block_start><return>178.21-degrees(self.shoulder_angle)<block_end><def_stmt>getServoActuator self<block_start><return>degrees(self.actuator_angle)+204.78<block_end><def_stmt>getServoSwing self<block_start><return>150-degrees(self.swing_angle)<block_end><def_stmt>getServoWristX self<block_start><return>150-degrees(self.wristXAngle)<block_end><def_stmt>getServoWristY self<block_start><return>147+degrees(self.wristYAngle)<block_end><def_stmt>armDiffAngle self<block_start><return>degrees(self.shoulder_angle-self.actuator_angle)<block_end><def_stmt>checkActuator 
self<block_start>angle=self.getServoActuator()<line_sep><return>angle<ge>95<and>angle<le>250<block_end><def_stmt>checkDiff self<block_start>angle=self.armDiffAngle()<line_sep><return>angle<ge>44<and>angle<le>175<block_end><def_stmt>checkElevator self<block_start>angle=self.getServoElevator()<line_sep><return>angle<ge>60<and>angle<le>210<block_end><def_stmt>checkForearm self<block_start>angle=degrees(self.elbow_angle+self.shoulder_angle)<line_sep><return>angle<l>200<and>angle<g>80<block_end><def_stmt>checkSwing self<block_start>angle=self.getServoSwing()<line_sep><return>angle<ge>60<and>angle<le>240<block_end><def_stmt>checkWristX self<block_start>angle=self.getServoWristX()<line_sep><return>angle<ge>60<and>angle<le>240<block_end><def_stmt>checkWristY self<block_start>angle=self.getServoWristY()<line_sep><return>angle<ge>60<and>angle<le>160<block_end><def_stmt>checkPositioning self# When Y>0 Forearm always faces outwards <block_start><if_stmt>self.wrist2D[1]<g>0<and>self.wrist2D[0]<l>self.elbow2D[0]<block_start><return><false><block_end># No valid positions X<=0 <if_stmt>self.wrist2D[0]<le>0<block_start><return><false><block_end># Effector height range <if_stmt>self.effector[1]<g>180<or>self.effector[1]<l>-200<block_start><return><false><block_end><return><true><block_end><def_stmt>checkClearance self<block_start><return>(self.checkDiff()<and>self.checkActuator()<and>self.checkElevator()<and>self.checkSwing()<and>self.checkWristX()<and>self.checkWristY()<and>self.checkPositioning()<and>self.checkForearm())<block_end><def_stmt>serialize self<block_start>"""Returns a packed struct holding the pose information"""<line_sep><return>struct.pack(ArmPose.structFormat self.swing_angle self.shoulder_angle self.elbow_angle self.wristXAngle self.wristYAngle)<block_end><block_end><class_stmt>ArmController<block_start><def_stmt>__init__ self servo_swing servo_shoulder servo_elbow servo_wrist_x servo_wrist_y arm_config motion_enable=<false># Solvers are responsible for calculating the target servo positions to # reach a given goal position <block_start>self.ik=solvers.IKSolver(arm_config.main_length arm_config.forearm_length arm_config.wrist_length arm_config.shoulder_offset)<line_sep>self.physsolver=solvers.PhysicalSolver(arm_config.main_length arm_config.linkage_length arm_config.lower_actuator_length arm_config.upper_actuator_length)<line_sep># Servos self.servos={}<line_sep>self.servos["swing"]=servo_swing<line_sep>self.servos["shoulder"]=servo_shoulder<line_sep>self.servos["elbow"]=servo_elbow<line_sep>self.servos["wrist_x"]=servo_wrist_x<line_sep>self.servos["wrist_y"]=servo_wrist_y<for_stmt>key,servo self.servos.iteritems()<block_start><if_stmt>servo<is><none><block_start>print("Warning: {0} servo not connected".format(key))<block_end><else_stmt># Initialise a PID controller for the servo <block_start><if_stmt>servo.protocol<eq>1<block_start>servo.setGoalSpeed(-MOTORSPEED)<line_sep>servo.data['pid']=pid.PIDControl(2.4 0 0.4)<block_end><else_stmt><block_start>servo.setGoalSpeed(0)<block_end>servo.data['error']=0.0<line_sep># Make sure the goal speed is set servo.setTorqueEnable(1)<if_stmt>servo.protocol<eq>1<block_start>print("Setting slope")<line_sep>servo.setCWMargin(MOTORMARGIN)<line_sep>servo.setCCWMargin(MOTORMARGIN)<line_sep>servo.setCWSlope(MOTORSLOPE)<line_sep>servo.setCCWSlope(MOTORSLOPE)<block_end><block_end><block_end># Store parameters self.motion_enable=<true><line_sep>self.enableMovement(<false>)<line_sep>self.cfg=arm_config<line_sep># Dirty flags for stored poses 
self.ik_pose=<none><line_sep>self.ik_dirty=<true><line_sep>self.real_pose=<none><line_sep>self.real_dirty=<true><line_sep># Current target pose self.target_pose=<none><block_end><def_stmt>enableMovement self enable<block_start>changed=<false><if_stmt>enable<and><not>self.motion_enable<block_start>print("Warning: Arm enabled")<line_sep>self.motion_enable=<true><line_sep>changed=<true><block_end><elif_stmt><not>enable<block_start>self.motion_enable=<false><line_sep>changed=<true><block_end><if_stmt>changed# Set servos on/off <block_start><if_stmt>self.servos['swing']<is><not><none><block_start>self.servos['swing'].setTorqueEnable(self.motion_enable)<block_end><if_stmt>self.servos['shoulder']<is><not><none><block_start>self.servos['shoulder'].setTorqueEnable(self.motion_enable)<block_end><if_stmt>self.servos['elbow']<is><not><none><block_start>self.servos['elbow'].setTorqueEnable(self.motion_enable)<block_end><if_stmt>self.servos['wrist_x']<is><not><none><block_start>self.servos['wrist_x'].setTorqueEnable(self.motion_enable)<block_end><if_stmt>self.servos['wrist_y']<is><not><none><block_start>self.servos['wrist_y'].setTorqueEnable(self.motion_enable)<block_end><block_end><block_end><def_stmt>setWristGoalPosition self pos<block_start>self.ik.setGoal(pos)<line_sep>self.ik_dirty=<true><block_end><def_stmt>setWristGoalDirection self normal<block_start>self.ik.setWristDir(normal)<line_sep>self.ik_dirty=<true><block_end><def_stmt>getIKPose self<block_start><if_stmt>self.ik_dirty<and>self.ik.valid# Construct geometry of arm from IK state <block_start>main_arm=self.ik.elbow-self.ik.originpl<line_sep>arm_vert_angle=sigangle(main_arm vertical)<line_sep>forearm=self.ik.wristpl-self.ik.elbow<line_sep>elbow_angle=angle_between(main_arm forearm)<line_sep># Solve actuator angle for given elbow angle # Base angle is between the main arm and actuator base_angle=self.physsolver.inverse_forearm(elbow_angle)<line_sep>actuator_angle=arm_vert_angle-base_angle<line_sep>self.ik_pose=ArmPose(self.cfg swing_angle=self.ik.swing # angles from vertical shoulder_angle=arm_vert_angle actuator_angle=actuator_angle # angle between the main arm and forearm elbow_angle=elbow_angle elbow2D=self.ik.elbow wrist2D=self.ik.wristpl effector2D=self.ik.goalpl effector=self.ik.goal wrist_x=self.ik.wrist_x wrist_y=self.ik.wrist_y)<block_end><return>self.ik_pose<block_end><def_stmt>pollServos self<block_start>"""Poll the real-world servo positions"""<for_stmt>servo self.servos.itervalues()<block_start><if_stmt>servo<is><not><none><block_start>newPos=servo.getPosition()<if_stmt>type(newPos)<is>float<block_start>servo.data['pos']=newPos<block_end><block_end><block_end><block_end><def_stmt>clearPositionError self<block_start>"""Clears the servo's position-error accumulators"""<for_stmt>servo self.servos.itervalues()<block_start><if_stmt>servo<is><not><none><and>servo.protocol<eq>1<block_start>servo.data['error']=0.0<block_end><block_end><block_end><def_stmt>getRealPose self<block_start>"""Retrieve the real-world arm pose, or None if not all servos are connected. 
"""<if_stmt>any([servo<is><none><for>servo self.servos.itervalues()])<block_start><return><none><block_end># This whole function is essentially just FK based on the known servo # angles swing_servo=self.servos['swing'].data['pos']<line_sep>elevator_servo=self.servos['shoulder'].data['pos']<line_sep>actuator_servo=self.servos['elbow'].data['pos']<line_sep>wrist_x_servo=self.servos['wrist_x'].data['pos']<line_sep>wrist_y_servo=self.servos['wrist_y'].data['pos']<line_sep># Find the internal arm-pose angles for the given servo positions swing_angle=ArmPose.calcSwingAngle(swing_servo)<line_sep>elevator_angle=ArmPose.calcElevatorAngle(elevator_servo)<line_sep>actuator_angle=ArmPose.calcActuatorAngle(actuator_servo)<line_sep>wrist_x_angle=ArmPose.calcWristXAngle(wrist_x_servo)<line_sep>wrist_y_angle=ArmPose.calcWristYAngle(wrist_y_servo)<line_sep># Solve elbow angle for given actuator and elevator angles # (this is the angle from the elevator arm's direction to the forearm's) elbow_angle=self.physsolver.solve_forearm(elevator_angle actuator_angle)<line_sep># FK positions from config and angles offset=self.cfg.shoulder_offset<line_sep>shoulder2D=np.array([offset[1] 0])<line_sep>elbow2D=shoulder2D+rotate(vertical elevator_angle)<times>self.cfg.main_length<line_sep>wrist2D=elbow2D+rotate(vertical elevator_angle+elbow_angle)<times>self.cfg.forearm_length<line_sep>effector2D=wrist2D+[self.cfg.wrist_length 0]<line_sep># 3D Effector calculation is a little more involved td=rotate([offset[0] effector2D[0]] swing_angle)<line_sep>effector=np.array([td[0] effector2D[1] td[1]])<line_sep>pose=ArmPose(self.cfg swing_angle elevator_angle actuator_angle elbow_angle elbow2D wrist2D effector2D effector wrist_x_angle wrist_y_angle)<line_sep><return>pose<block_end><def_stmt>setTargetPose self new_pose<block_start>self.target_pose=new_pose<block_end><def_stmt>tick self<block_start><if_stmt>self.target_pose<is><not><none><block_start><if_stmt>self.motion_enable# Drive servos <block_start>gain=0.1<if_stmt>self.servos['swing']<is><not><none><block_start>s=self.servos['swing']<line_sep>pos=s.data['pos']<line_sep>target=self.target_pose.getServoSwing()<line_sep># err = min(10, pos-target) # s.data['error'] += err*gain s.setGoalPosition(target)<block_end><if_stmt>self.servos['shoulder']<is><not><none><block_start>s=self.servos['shoulder']<line_sep># cumulative error pos=s.data['pos']<line_sep>target=self.target_pose.getServoElevator()<line_sep>err=min(10 pos-target)<line_sep>s.data['error']<augadd>err<times>gain<line_sep>s.data['error']=np.clip(s.data['error'] -ERRORLIM ERRORLIM)<line_sep>s.setGoalPosition(target-s.data['error'])<block_end><if_stmt>self.servos['elbow']<is><not><none><block_start>s=self.servos['elbow']<line_sep>pos=s.data['pos']<line_sep>target=self.target_pose.getServoActuator()<line_sep>err=min(10 pos-target)<line_sep>s.data['error']<augadd>err<times>gain<line_sep>s.data['error']=np.clip(s.data['error'] -ERRORLIM ERRORLIM)<line_sep>s.setGoalPosition(target-s.data['error'])<block_end><if_stmt>self.servos['wrist_x']<is><not><none><block_start>self.servos['wrist_x'].setGoalPosition(self.target_pose.getServoWristX())<block_end><if_stmt>self.servos['wrist_y']<is><not><none><block_start>self.servos['wrist_y'].setGoalPosition(self.target_pose.getServoWristY())<block_end><block_end><block_end><block_end><block_end>
<import_stmt>torch.nn<as>nn<import_from_stmt>.efficientnet EfficientNet_B4 EfficientNet_B0<import_from_stmt>.mobilenetv3 MobileNetV3_Large MobileNetV3_Small<def_stmt>get_trunk trunk_name<block_start>"""Retrieve the pretrained network trunk and channel counts"""<if_stmt>trunk_name<eq>'efficientnet_b4'<block_start>backbone=EfficientNet_B4(pretrained=<true>)<line_sep>s2_ch=24<line_sep>s4_ch=32<line_sep>high_level_ch=1792<block_end><elif_stmt>trunk_name<eq>'efficientnet_b0'<block_start>backbone=EfficientNet_B0(pretrained=<true>)<line_sep>s2_ch=16<line_sep>s4_ch=24<line_sep>high_level_ch=1280<block_end><elif_stmt>trunk_name<eq>'mobilenetv3_large'<block_start>backbone=MobileNetV3_Large(pretrained=<true>)<line_sep>s2_ch=16<line_sep>s4_ch=24<line_sep>high_level_ch=960<block_end><elif_stmt>trunk_name<eq>'mobilenetv3_small'<block_start>backbone=MobileNetV3_Small(pretrained=<true>)<line_sep>s2_ch=16<line_sep>s4_ch=16<line_sep>high_level_ch=576<block_end><else_stmt><block_start><raise>ValueError('unknown backbone {}'.format(trunk_name))<block_end><return>backbone s2_ch s4_ch high_level_ch<block_end><class_stmt>ConvBnRelu(nn.Module)<block_start>"""Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation. Original source of this code comes from https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py """<def_stmt>__init__ self in_planes out_planes kernel_size stride=1 padding=0 norm_layer=nn.BatchNorm2d<block_start>super(ConvBnRelu self).__init__()<line_sep>self.conv=nn.Conv2d(in_planes out_planes kernel_size=kernel_size stride=stride padding=padding bias=<false>)<line_sep>self.bn=norm_layer(out_planes eps=1e-5)<line_sep>self.relu=nn.ReLU(inplace=<true>)<block_end><def_stmt>forward self x<block_start>x=self.conv(x)<line_sep>x=self.bn(x)<line_sep>x=self.relu(x)<line_sep><return>x<block_end><block_end>
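A minimal sketch of the ConvBnRelu block defined above; the input size is illustrative.

import torch

block = ConvBnRelu(in_planes=3, out_planes=16, kernel_size=3, stride=2, padding=1)
out = block(torch.randn(1, 3, 64, 64))
print(out.shape)  # torch.Size([1, 16, 32, 32]): stride 2 halves the spatial size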
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('cmsplugin_cascade' '0006_bootstrapgallerypluginmodel') ]<line_sep>operations=[]<block_end>
Credits=[('Bootstrap' 'https://getbootstrap.com' 'The Bootstrap team' 'MIT') ('Bottle' 'http://bottlepy.org' '<NAME>' 'MIT') ('Cheroot' 'https://github.com/cherrypy/cheroot' 'CherryPy Team' 'BSD 3-Clause "New" or "Revised" License') ('Click' 'https://github.com/pallets/click' 'Pallets' 'BSD 3-Clause "New" or "Revised" License') ('ConfigUpdater' 'https://github.com/pyscaffold/configupdater' '<NAME>' 'MIT') ('Glide' 'https://github.com/glidejs/glide' '@jedrzejchalubek' 'MIT') ('JQuery' 'https://jquery.com' 'The jQuery Foundation' 'MIT') ('jquery.pep.js' 'http://pep.briangonzalez.org' '@briangonzalez' 'MIT') ('js-md5' 'https://github.com/emn178/js-md5' '@emn178' 'MIT') ('PySocks' 'https://github.com/Anorov/PySocks' '@Anorov' 'Custom DAN HAIM') ('RapydScript-NG' 'https://github.com/kovidgoyal/rapydscript-ng' '@kovidgoyal' 'BSD 2-Clause "Simplified" License') ('Requests' 'https://requests.kennethreitz.org' '<NAME>' 'Apache License, Version 2.0') ('scrollMonitor' 'https://github.com/stutrek/scrollmonitor' '@stutrek' 'MIT') ('Smoothie Charts' 'https://github.com/joewalnes/smoothie' '@drewnoakes' 'MIT') ('stem' 'https://stem.torproject.org' '<NAME> and The Tor Project' 'GNU LESSER GENERAL PUBLIC LICENSE')]<line_sep>
<import_stmt>typing<as>tp<import_from_stmt>unittest TestCase<import_stmt>hypothesis<as>hp<import_from_stmt>hypothesis strategies<as>st<import_stmt>pypeln<as>pl<import_stmt>cytoolz<as>cz<line_sep>MAX_EXAMPLES=10<line_sep>T=tp.TypeVar("T")<line_sep>@hp.given(nums=st.lists(st.integers()))@hp.settings(max_examples=MAX_EXAMPLES)<def_stmt>test_from_to_iterable nums:tp.List[int]<block_start>nums_pl=nums<line_sep>nums_pl=pl.thread.from_iterable(nums_pl)<line_sep>nums_pl=cz.partition_all(10 nums_pl)<line_sep>nums_pl=pl.thread.map(sum nums_pl)<line_sep>nums_pl=pl.thread.to_iterable(nums_pl)<line_sep>nums_pl=list(nums_pl)<line_sep>nums_py=nums<line_sep>nums_py=cz.partition_all(10 nums_py)<line_sep>nums_py=map(sum nums_py)<line_sep>nums_py=list(nums_py)<assert_stmt>nums_py<eq>nums_pl<block_end>
<import_from_stmt>datadog_checks.base ConfigurationError OpenMetricsBaseCheck<line_sep>EVENT_TYPE=SOURCE_TYPE_NAME='pulsar'<class_stmt>PulsarCheck(OpenMetricsBaseCheck)<block_start>""" PulsarCheck derives from AgentCheck that provides the required check method """<def_stmt>__init__ self name init_config instances=<none><block_start>instance=instances[0]<line_sep>url=instance.get('prometheus_url')<if_stmt>url<is><none><block_start><raise>ConfigurationError("Unable to find prometheus_url in config file.")<block_end>self.NAMESPACE='kesque.pulsar'<line_sep>self.metrics_mapper={'pulsar_consumer_available_permits':'consumer.available_permits' 'pulsar_consumer_blocked_on_unacked_messages':'consumer.blocked_on_unacked_messages' 'pulsar_consumer_msg_rate_out':'consumer.msg_rate_out' 'pulsar_consumer_msg_rate_redeliver':'consumer.msg_rate_redeliver' 'pulsar_consumer_msg_throughput_out':'consumer.msg_throughput_out' 'pulsar_consumer_unacked_messages':'consumer.unacked_messages' 'pulsar_consumers_count':'consumers_count' 'pulsar_entry_size_count':'entry_size_count' 'pulsar_entry_size_le_100_kb':'entry_size_le_100_kb' 'pulsar_entry_size_le_128':'entry_size_le_128' 'pulsar_entry_size_le_16_kb':'entry_size_le_16_kb' 'pulsar_entry_size_le_1_kb':'entry_size_le_1_kb' 'pulsar_entry_size_le_1_mb':'entry_size_le_1_mb' 'pulsar_entry_size_le_2_kb':'entry_size_le_2_kb' 'pulsar_entry_size_le_4_kb':'entry_size_le_4_kb' 'pulsar_entry_size_le_512':'entry_size_le_512' 'pulsar_entry_size_le_overflow':'entry_size_le_overflow' 'pulsar_entry_size_sum':'entry_size_sum' 'pulsar_in_bytes_total':'in_bytes_total' 'pulsar_in_messages_total':'in_messages_total' 'pulsar_msg_backlog':'msg_backlog' 'pulsar_out_bytes_total':'out_bytes_total' 'pulsar_out_messages_total':'out_messages_total' 'pulsar_producers_count':'producers_count' 'pulsar_rate_in':'rate_in' 'pulsar_rate_out':'rate_out' 'pulsar_replication_backlog':'replication.backlog' 'pulsar_replication_rate_in':'replication.rate_in' 'pulsar_replication_rate_out':'replication.rate_out' 'pulsar_replication_throughput_in':'replication.throughput_in' 'pulsar_replication_throughput_out':'replication.throughput_out' 'pulsar_storage_backlog_quota_limit':'storage.backlog_quota_limit' 'pulsar_storage_backlog_size':'storage.backlog_size' 'pulsar_storage_read_rate':'storage.read_rate' 'pulsar_storage_offloaded_size':'storage.offloaded_size' 'pulsar_storage_size':'storage.size' 'pulsar_storage_write_latency_count':'storage.write_latency_count' 'pulsar_storage_write_latency_le_0_5':'storage.write_latency_le_0_5' 'pulsar_storage_write_latency_le_1':'storage.write_latency_le_1' 'pulsar_storage_write_latency_le_10':'storage.write_latency_le_10' 'pulsar_storage_write_latency_le_100':'storage.write_latency_le_100' 'pulsar_storage_write_latency_le_1000':'storage.write_latency_le_1000' 'pulsar_storage_write_latency_le_20':'storage.write_latency_le_20' 'pulsar_storage_write_latency_le_200':'storage.write_latency_le_200' 'pulsar_storage_write_latency_le_5':'storage.write_latency_le_5' 'pulsar_storage_write_latency_le_50':'storage.write_latency_le_50' 'pulsar_storage_write_latency_overflow':'storage.write_latency_overflow' 'pulsar_storage_write_latency_sum':'storage.write_latency_sum' 'pulsar_storage_write_rate':'storage.write_rate' 'pulsar_subscription_back_log':'subscription.back_log' 'pulsar_subscription_back_log_no_delayed':'subscription.back_log_no_delayed' 'pulsar_subscription_blocked_on_unacked_messages':'subscription.blocked_on_unacked_messages' 
'pulsar_subscription_delayed':'subscription.delayed' 'pulsar_subscription_msg_rate_out':'subscription.msg_rate_out' 'pulsar_subscription_msg_rate_redeliver':'subscription.msg_rate_redeliver' 'pulsar_subscription_msg_throughput_out':'subscription.msg_throughput_out' 'pulsar_subscription_unacked_messages':'subscription.unacked_messages' 'pulsar_subscriptions_count':'subscriptions.count' 'pulsar_throughput_in':'throughput_in' 'pulsar_throughput_out':'throughput_out' 'pulsar_topics_count':'topics_count' 'scrape_duration_seconds':'scrape_duration_seconds' 'scrape_samples_post_metric_relabeling':'scrape_samples_post_metric_relabeling' 'scrape_samples_scraped':'scrape_samples_scraped' 'topic_load_times':'topic_load_times' 'topic_load_times_count':'topic_load_times_count' 'topic_load_times_sum':'topic_load_times_sum' 'up':'broker.up' }<line_sep>instance.update({'prometheus_url':url 'namespace':self.NAMESPACE 'metrics':[self.metrics_mapper] 'send_distribution_counts_as_monotonic':instance.get('send_distribution_counts_as_monotonic' <true>) 'send_distribution_sums_as_monotonic':instance.get('send_distribution_sums_as_monotonic' <true>) })<line_sep>super(PulsarCheck self).__init__(name init_config instances)<block_end><block_end>
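A minimal sketch of how the check above might be instantiated; the endpoint URL is a placeholder, not taken from the source.

# Only prometheus_url is strictly required by __init__
instance = {"prometheus_url": "http://localhost:8080/metrics"}
check = PulsarCheck("pulsar", {}, [instance])  # name, init_config, instances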
<import_stmt>argparse<import_from_stmt>os listdir mkdir<import_from_stmt>os.path join isdir<import_from_stmt>subprocess call<import_stmt>sys<import_stmt>datasets<import_from_stmt>shutil which<line_sep>""" Script to use pdftoppm to turn the pdfs into single images per page """<def_stmt>get_images pdf_dir output_dir dpi mono=<true><block_start><if_stmt>which("pdftoppm")<is><none><block_start><raise>ValueError("Requires executable pdftopmm to be on the PATH")<block_end><if_stmt><not>isdir(output_dir)<block_start>print("Making %s to store rasterized PDF pages"%output_dir)<line_sep>mkdir(output_dir)<block_end><if_stmt><not>isdir(pdf_dir)<block_start><raise>ValueError(pdf_dir+" is not a directory")<block_end>pdf_doc_ids=[x.split(".pdf")[0]<for>x listdir(pdf_dir)]<line_sep>already_have=set()<for_stmt>filename listdir(output_dir)<block_start><if_stmt>"-page"<not><in>filename<block_start><raise>ValueError()<block_end>doc_id=filename.split("-page")[0]<if_stmt>doc_id<not><in>pdf_doc_ids<block_start><raise>ValueError("doc id %s in output dir not found in pdfs"%doc_id)<block_end>already_have.add(doc_id)<block_end><if_stmt>len(already_have)<ne>0<block_start>print("Already have %d docs"%len(already_have))<block_end>num_pdfs=len(listdir(pdf_dir))<for_stmt>(i pdfname) enumerate(listdir(pdf_dir))<block_start><if_stmt><not>pdfname.endswith(".pdf")<block_start><raise>ValueError()<block_end>doc_id=pdfname[:-4]<if_stmt>doc_id<in>already_have<block_start><continue><block_end>print("Creating images for pdf %s (%d / %d)"%(pdfname i+1 num_pdfs))<if_stmt>(mono)<block_start>args=["pdftoppm" "-gray" "-r" str(dpi) "-aa" "no" "-aaVector" "no" "-cropbox" join(pdf_dir pdfname) join(output_dir doc_id+"-page")]<block_end><else_stmt><block_start>args=["pdftoppm" "-jpeg" "-r" str(dpi) "-cropbox" join(pdf_dir pdfname) join(output_dir doc_id+"-page")]<block_end>retcode=call(args)<if_stmt>retcode<ne>0<block_start><raise>ValueError("Bad return code for <%s> (%d)" " ".join(args) retcode)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description='Cache rasterized page images for a dataset')<line_sep>parser.add_argument("dataset" choices=datasets.DATASETS.keys() help="target dataset")<line_sep>parser.add_argument("color" choices=["gray" "color"] help="kind of images to render")<line_sep>args=parser.parse_args()<line_sep>dataset=datasets.get_dataset(args.dataset)<line_sep>print("Running on dataset: "+dataset.name)<if_stmt>args.color<eq>"gray"<block_start>get_images(dataset.pdf_dir dataset.page_images_gray_dir dataset.IMAGE_DPI <true>)<block_end><elif_stmt>args.color<eq>"color"<block_start>get_images(dataset.pdf_dir dataset.page_images_color_dir dataset.COLOR_IMAGE_DPI <false>)<block_end><else_stmt><block_start>exit(1)<block_end><block_end>
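A sketch of calling get_images directly, assuming hypothetical directories and pdftoppm available on the PATH (the dataset-driven CLI above is the intended entry point).

# Render grayscale page images at 150 DPI for every PDF under data/pdfs/
get_images(pdf_dir="data/pdfs", output_dir="data/pages-gray", dpi=150, mono=True)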
# imports <import_from_stmt>. coordinates<import_from_stmt>. data<import_from_stmt>.modflow *<import_from_stmt>. utils<import_from_stmt>.data mfdatascalar mfdatalist mfdataarray<import_from_stmt>.mfmodel MFModel<import_from_stmt>.mfbase ExtFileAction<line_sep>
<import_stmt>torch<line_sep>DEVICE=torch.device("cuda")<line_sep>SAVED_CHECKPOINTS=[32<times>1000 100<times>1000 150<times>1000 200<times>1000 300<times>1000 400<times>1000]<line_sep>SAVED_CHECKPOINTS<augadd>[10<times>1000 20<times>1000 30<times>1000 40<times>1000 50<times>1000 60<times>1000 70<times>1000 80<times>1000 90<times>1000]<line_sep>SAVED_CHECKPOINTS<augadd>[25<times>1000 50<times>1000 75<times>1000]<line_sep>SAVED_CHECKPOINTS=set(SAVED_CHECKPOINTS)<line_sep>
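A small sketch of how the checkpoint schedule above is typically consulted; the surrounding training-loop code is an assumption, not part of the source.

def should_save(step: int) -> bool:
    # Save only at the whitelisted step counts (set membership is O(1))
    return step in SAVED_CHECKPOINTS

print(should_save(50 * 1000))  # True
print(should_save(51 * 1000))  # False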
# RiveScript-Python # # This code is released under the MIT License. # See the "LICENSE" file for more information. # # https://www.rivescript.com/ <def_stmt>get_topic_triggers rs topic thats depth=0 inheritance=0 inherited=<false><block_start>"""Recursively scan a topic and return a list of all triggers. Arguments: rs (RiveScript): A reference to the parent RiveScript instance. topic (str): The original topic name. thats (bool): Are we getting triggers for 'previous' replies? depth (int): Recursion step counter. inheritance (int): The inheritance level counter, for topics that inherit other topics. inherited (bool): Whether the current topic is inherited by others. Returns: []str: List of all triggers found. """<line_sep># Break if we're in too deep. <if_stmt>depth<g>rs._depth<block_start>rs._warn("Deep recursion while scanning topic inheritance")<block_end># Keep in mind here that there is a difference between 'includes' and # 'inherits' -- topics that inherit other topics are able to OVERRIDE # triggers that appear in the inherited topic. This means that if the top # topic has a trigger of simply '*', then NO triggers are capable of # matching in ANY inherited topic, because even though * has the lowest # priority, it has an automatic priority over all inherited topics. # # The getTopicTriggers method takes this into account. All topics that # inherit other topics will have their triggers prefixed with a fictional # {inherits} tag, which would start at {inherits=0} and increment if this # topic has other inheriting topics. So we can use this tag to make sure # topics that inherit things will have their triggers always be on top of # the stack, from inherits=0 to inherits=n. # Important info about the depth vs inheritance params to this function: # depth increments by 1 each time this function recursively calls itrs. # inheritance increments by 1 only when this topic inherits another # topic. # # This way, '> topic alpha includes beta inherits gamma' will have this # effect: # alpha and beta's triggers are combined together into one matching # pool, and then those triggers have higher matching priority than # gamma's. # # The inherited option is True if this is a recursive call, from a topic # that inherits other topics. This forces the {inherits} tag to be added # to the triggers. This only applies when the top topic 'includes' # another topic. rs._say("\tCollecting trigger list for topic "+topic+"(depth="+str(depth)+"; inheritance="+str(inheritance)+"; "+"inherited="+str(inherited)+")")<line_sep># topic: the name of the topic # depth: starts at 0 and ++'s with each recursion # Topic doesn't exist? <if_stmt><not>topic<in>rs._topics<block_start>rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(topic))<line_sep><return>[]<block_end># Collect an array of triggers to return. triggers=[]<line_sep># Get those that exist in this topic directly. 
inThisTopic=[]<if_stmt><not>thats# The non-that structure is {topic}->[array of triggers] <block_start><if_stmt>topic<in>rs._topics<block_start><for_stmt>trigger rs._topics[topic]<block_start>inThisTopic.append([trigger["trigger"] trigger])<block_end><block_end><block_end><else_stmt># The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info} <block_start><if_stmt>topic<in>rs._thats.keys()<block_start><for_stmt>curtrig rs._thats[topic].keys()<block_start><for_stmt>previous,pointer rs._thats[topic][curtrig].items()<block_start>inThisTopic.append([pointer["trigger"] pointer])<block_end><block_end><block_end><block_end># Does this topic include others? <if_stmt>topic<in>rs._includes# Check every included topic. <block_start><for_stmt>includes rs._includes[topic]<block_start>rs._say("\t\tTopic "+topic+" includes "+includes)<line_sep>triggers.extend(get_topic_triggers(rs includes thats (depth+1) inheritance <true>))<block_end><block_end># Does this topic inherit others? <if_stmt>topic<in>rs._lineage# Check every inherited topic. <block_start><for_stmt>inherits rs._lineage[topic]<block_start>rs._say("\t\tTopic "+topic+" inherits "+inherits)<line_sep>triggers.extend(get_topic_triggers(rs inherits thats (depth+1) (inheritance+1) <false>))<block_end><block_end># Collect the triggers for *this* topic. If this topic inherits any # other topics, it means that this topic's triggers have higher # priority than those in any inherited topics. Enforce this with an # {inherits} tag. <if_stmt>topic<in>rs._lineage<or>inherited<block_start><for_stmt>trigger inThisTopic<block_start>rs._say("\t\tPrefixing trigger with {inherits="+str(inheritance)+"}"+trigger[0])<line_sep>triggers.append(["{inherits="+str(inheritance)+"}"+trigger[0] trigger[1]])<block_end><block_end><else_stmt><block_start>triggers.extend(inThisTopic)<block_end><return>triggers<block_end><def_stmt>get_topic_tree rs topic depth=0<block_start>"""Given one topic, get the list of all included/inherited topics. :param str topic: The topic to start the search at. :param int depth: The recursion depth counter. :return []str: Array of topics. """<line_sep># Break if we're in too deep. <if_stmt>depth<g>rs._depth<block_start>rs._warn("Deep recursion while scanning topic trees!")<line_sep><return>[]<block_end># Collect an array of all topics. topics=[topic]<line_sep># Does this topic include others? <if_stmt>topic<in>rs._includes# Try each of these. <block_start><for_stmt>includes sorted(rs._includes[topic])<block_start>topics.extend(get_topic_tree(rs includes depth+1))<block_end><block_end># Does this topic inherit others? <if_stmt>topic<in>rs._lineage# Try each of these. <block_start><for_stmt>inherits sorted(rs._lineage[topic])<block_start>topics.extend(get_topic_tree(rs inherits depth+1))<block_end><block_end><return>topics<block_end>
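A small sketch of the inheritance walk performed by get_topic_tree, using a hypothetical stand-in object that carries only the attributes the function reads:

class FakeBot:
    _depth = 50                        # recursion limit
    _includes = {"alpha": ["beta"]}    # '> topic alpha includes beta'
    _lineage = {"alpha": ["gamma"]}    # '> topic alpha inherits gamma'
    def _warn(self, *args): pass

print(get_topic_tree(FakeBot(), "alpha"))  # ['alpha', 'beta', 'gamma']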
<import_stmt>collections<import_stmt>datetime<import_stmt>json<import_stmt>multiprocessing<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>time<line_sep>_SSHD_BINARY_PATH="/usr/sbin/sshd"<line_sep>EnvironmentConfig=collections.namedtuple("EnvironmentConfig" ["hosts" "port" "is_chief" "pools" "job_id"])<class_stmt>DeadlineExceededError(Exception)<block_start>"""Indicates an action took too long."""<line_sep><pass><block_end><def_stmt>_sub_process_num_gpus unused<block_start><del_stmt>unused<line_sep># This is imported here so that we don't load tensorflow in the parent # process. Once the sub-process exits, it releases its allocated GPU memory. <import_from_stmt>tensorflow.python.client device_lib<line_sep>local_device_protos=device_lib.list_local_devices()<line_sep>gpus=[x.name<for>x local_device_protos<if>x.device_type<eq>"GPU"]<line_sep><return>len(gpus)<block_end><def_stmt>_get_available_gpus <block_start>"""Returns the number of GPUs on the machine."""<line_sep>pool=multiprocessing.Pool(1)<line_sep>result=pool.map(_sub_process_num_gpus [<none>])[0]<line_sep>pool.close()<line_sep>pool.join()<line_sep><return>result<block_end><def_stmt>parse_environment_config env_config_str job_id<block_start>"""Parses environment config and returns a list of hosts as well as the role. Returns: An EnvironmentConfig. """<if_stmt>env_config_str<block_start>ssh_port=-1<line_sep>env_config_json=json.loads(env_config_str)<line_sep>cluster=env_config_json.get("cluster")<if_stmt><not>cluster<block_start><return><none> <true><block_end>hosts=[]<line_sep>pools=collections.defaultdict(list)<for_stmt>pool_type,tasks_per_type cluster.items()<block_start><if_stmt>pool_type<eq>"master"<block_start>pool_type="chief"<block_end><for_stmt>host_and_port tasks_per_type<block_start>host,port=host_and_port.split(":")<if_stmt>host<eq>"127.0.0.1"<block_start>host="localhost"<block_end>port=int(port)<if_stmt>ssh_port<eq>-1<block_start>ssh_port=port<block_end><elif_stmt>ssh_port<ne>port<block_start><raise>ValueError("Inconsistent ssh ports across tasks %d != %d."%(ssh_port port))<block_end>hosts.append(host)<line_sep>pools[pool_type].append(host)<block_end><block_end>is_chief=<false><line_sep>has_chief="chief"<in>pools<if_stmt>(env_config_json["task"]["type"]<eq>"master"<or>env_config_json["task"]["type"]<eq>"chief")<block_start>is_chief=<true><if_stmt>int(env_config_json["task"]["index"])<ne>0<block_start><raise>ValueError("Only one master node is expected.")<block_end><block_end><elif_stmt>((<not>has_chief)<and>(env_config_json["task"]["type"]<eq>"worker")<and>int(env_config_json["task"]["index"])<eq>0)<block_start>is_chief=<true><line_sep>pools["chief"].append(pools["worker"].pop(0))<block_end><elif_stmt>env_config_json["task"]["type"]<ne>"worker"<block_start><raise>ValueError("Unexpected task type for Horovod training: %s."%env_config_json["task"]["type"])<block_end><return>EnvironmentConfig(hosts=hosts port=port is_chief=is_chief pools=pools job_id=job_id)<block_end><else_stmt><block_start><return>EnvironmentConfig(hosts=["localhost"] port=2222 is_chief=<true> pools={"chief":["localhost"]} job_id=job_id)<block_end><block_end><def_stmt>start_ssh_server port is_chief<block_start>ssh_server_command=[_SSHD_BINARY_PATH "-p" str(port)]<if_stmt><not>is_chief<block_start>ssh_server_command.append("-D")<block_end>completed=subprocess.call(ssh_server_command)<if_stmt>completed<ne>0<block_start><raise>OSError("SSH server did not start successfully.")<block_end><block_end><def_stmt>wait_for_ssh_servers hosts port 
timeout_seconds<block_start>deadline_datetime=datetime.datetime.utcnow()+datetime.timedelta(seconds=timeout_seconds)<line_sep>unavailable_hosts=[]<while_stmt>datetime.datetime.utcnow()<l>deadline_datetime<block_start>unavailable_hosts=[]<for_stmt>host hosts<block_start>ssh_command=["ssh" "-q" host "-p" str(port) "true"]<line_sep>result=subprocess.call(ssh_command)<if_stmt>result<ne>0<block_start>unavailable_hosts.append(host)<block_end><block_end><if_stmt><not>unavailable_hosts<block_start><return><block_end># Retry in 1 second. time.sleep(1)<block_end><raise>DeadlineExceededError("Timed out while waiting for all hosts to start. "<concat>"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d"%(unavailable_hosts timeout_seconds))<block_end><def_stmt>run_horovod env_config jobs_per_host args<block_start>env=dict(os.environ)<del_stmt>env["TF_CONFIG"]<line_sep>num_jobs=len(env_config.hosts)<times>jobs_per_host<line_sep>hosts=",".join("%s:%d"%(h jobs_per_host)<for>h env_config.hosts)<line_sep>horovod_command=["horovodrun" "--ssh-port" str(env_config.port) "-H" hosts "--num-proc" str(num_jobs)]<line_sep>horovod_command.extend(args)<line_sep>exit_code=subprocess.call(horovod_command env=env)<line_sep><return>exit_code<block_end><def_stmt>benchmark_network env_config<block_start><if_stmt><not>env_config.pools["worker"]<block_start><raise>ValueError("No workers in the pool to do network benchmarking.")<block_end>iperf_server=["iperf" "-s" "-p" "6000"]<line_sep>server=subprocess.Popen(iperf_server)<line_sep># Wait 10 seconds for the local server to start. time.sleep(10)<line_sep>iperf_command=["ssh" "-q" env_config.pools["worker"][0] "-p" str(env_config.port) "iperf" "-p" "6000" "-c" env_config.pools["chief"][0]]<line_sep>subprocess.call(iperf_command)<line_sep>server.kill()<block_end><def_stmt>copy_files_recursively src dest<block_start><if_stmt><not>dest.startswith("gs://")<block_start><try_stmt><block_start>os.makedirs(dest)<block_end><except_stmt>OSError<block_start><pass><block_end><block_end>copy_cmd=["gsutil" "-m" "rsync" "-r" src dest]<line_sep>exit_code=subprocess.call(copy_cmd)<if_stmt>exit_code<ne>0<block_start><raise>RuntimeError("Error while copying %s to %s"%(src dest))<block_end><return>exit_code<block_end><def_stmt>main <block_start>env_config_str=os.environ.get("TF_CONFIG")<line_sep>job_id=os.environ.get("CLOUD_ML_JOB_ID" "localrun")<line_sep>env_config=parse_environment_config(env_config_str job_id)<line_sep>print(env_config env_config.pools env_config.hosts os.environ)<if_stmt>os.environ.get("STAGE_GCS_PATH" <false>)<block_start>copy_files_recursively(os.environ.get("STAGE_GCS_PATH") os.environ.get("STAGING_DIR" "/input"))<block_end>start_ssh_server(env_config.port env_config.is_chief)<line_sep>max_num_retries=os.environ.get("NUM_HOROVOD_RETRIES" 1)<if_stmt>env_config.is_chief<block_start>exit_code=0<for_stmt>retry range(max_num_retries)<block_start>staging_timeout_seconds=int(os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS" 600))<line_sep>wait_for_ssh_servers(env_config.hosts env_config.port staging_timeout_seconds)<if_stmt>os.environ.get("BENCHMARK_NETWORK" <false>)<block_start>benchmark_network(env_config)<block_end>num_gpus=_get_available_gpus()<line_sep># If there are no GPUs, we can just run single process per machine. jobs_per_host=max(1 num_gpus)<line_sep>args=sys.argv[1:]<line_sep>exit_code=run_horovod(env_config=env_config jobs_per_host=jobs_per_host args=args)<if_stmt>exit_code<eq>0<block_start><break><block_end><else_stmt><block_start>print("Retrying..." 
retry "out of" max_num_retries)<block_end><block_end><if_stmt>os.environ.get("GCS_OUTPUT_PATH" <false>)<block_start>copy_files_recursively(os.environ.get("OUTPUT_DIR" "/output") os.path.join(os.environ.get("GCS_OUTPUT_PATH") job_id))<block_end>sys.exit(exit_code)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>random<import_from_stmt>typing Optional Tuple Union<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch Tensor<import_from_stmt>torch_geometric.utils coalesce degree remove_self_loops<import_from_stmt>.num_nodes maybe_num_nodes<def_stmt>negative_sampling edge_index:Tensor num_nodes:Optional[Union[int Tuple[int int]]]=<none> num_neg_samples:Optional[int]=<none> method:str="sparse" force_undirected:bool=<false><arrow>Tensor<block_start>r"""Samples random negative edges of a graph given by :attr:`edge_index`. Args: edge_index (LongTensor): The edge indices. num_nodes (int or Tuple[int, int], optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`. (default: :obj:`None`) num_neg_samples (int, optional): The (approximate) number of negative samples to return. If set to :obj:`None`, will try to return a negative edge for every positive edge. (default: :obj:`None`) method (string, optional): The method to use for negative sampling, *i.e.*, :obj:`"sparse"` or :obj:`"dense"`. This is a memory/runtime trade-off. :obj:`"sparse"` will work on any graph of any size, while :obj:`"dense"` can perform faster true-negative checks. (default: :obj:`"sparse"`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: LongTensor """<assert_stmt>method<in>['sparse' 'dense']<line_sep>size=num_nodes<line_sep>bipartite=isinstance(size (tuple list))<line_sep>size=maybe_num_nodes(edge_index)<if>size<is><none><else>size<line_sep>size=(size size)<if><not>bipartite<else>size<line_sep>force_undirected=<false><if>bipartite<else>force_undirected<line_sep>idx,population=edge_index_to_vector(edge_index size bipartite force_undirected)<if_stmt>idx.numel()<ge>population<block_start><return>edge_index.new_empty((2 0))<block_end><if_stmt>num_neg_samples<is><none><block_start>num_neg_samples=edge_index.size(1)<block_end><if_stmt>force_undirected<block_start>num_neg_samples=num_neg_samples<floordiv>2<block_end>prob=1.-idx.numel()/population# Probability to sample a negative. sample_size=int(1.1<times>num_neg_samples/prob)# (Over)-sample size. neg_idx=<none><if_stmt>method<eq>'dense'# The dense version creates a mask of shape `population` to check for # invalid samples. <block_start>mask=idx.new_ones(population dtype=torch.bool)<line_sep>mask[idx]=<false><for_stmt>_ range(3)# Number of tries to sample negative indices. <block_start>rnd=sample(population sample_size idx.device)<line_sep>rnd=rnd[mask[rnd]]# Filter true negatives. neg_idx=rnd<if>neg_idx<is><none><else>torch.cat([neg_idx rnd])<if_stmt>neg_idx.numel()<ge>num_neg_samples<block_start>neg_idx=neg_idx[:num_neg_samples]<line_sep><break><block_end>mask[neg_idx]=<false><block_end><block_end><else_stmt># 'sparse' # The sparse version checks for invalid samples via `np.isin`. <block_start>idx=idx.to('cpu')<for_stmt>_ range(3)# Number of tries to sample negative indices. 
<block_start>rnd=sample(population sample_size device='cpu')<line_sep>mask=np.isin(rnd idx)<if_stmt>neg_idx<is><not><none><block_start>mask<augor>np.isin(rnd neg_idx.to('cpu'))<block_end>mask=torch.from_numpy(mask).to(torch.bool)<line_sep>rnd=rnd[~mask].to(edge_index.device)<line_sep>neg_idx=rnd<if>neg_idx<is><none><else>torch.cat([neg_idx rnd])<if_stmt>neg_idx.numel()<ge>num_neg_samples<block_start>neg_idx=neg_idx[:num_neg_samples]<line_sep><break><block_end><block_end><block_end><return>vector_to_edge_index(neg_idx size bipartite force_undirected)<block_end><def_stmt>batched_negative_sampling edge_index:Tensor batch:Union[Tensor Tuple[Tensor Tensor]] num_neg_samples:Optional[int]=<none> method:str="sparse" force_undirected:bool=<false> <arrow>Tensor<block_start>r"""Samples random negative edges of multiple graphs given by :attr:`edge_index` and :attr:`batch`. Args: edge_index (LongTensor): The edge indices. batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph connecting two different node types. num_neg_samples (int, optional): The number of negative samples to return. If set to :obj:`None`, will try to return a negative edge for every positive edge. (default: :obj:`None`) method (string, optional): The method to use for negative sampling, *i.e.*, :obj:`"sparse"` or :obj:`"dense"`. This is a memory/runtime trade-off. :obj:`"sparse"` will work on any graph of any size, while :obj:`"dense"` can perform faster true-negative checks. (default: :obj:`"sparse"`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: LongTensor """<if_stmt>isinstance(batch Tensor)<block_start>src_batch,dst_batch=batch batch<block_end><else_stmt><block_start>src_batch,dst_batch=batch[0] batch[1]<block_end>split=degree(src_batch[edge_index[0]] dtype=torch.long).tolist()<line_sep>edge_indices=torch.split(edge_index split dim=1)<line_sep>num_src=degree(src_batch dtype=torch.long)<line_sep>cum_src=torch.cat([src_batch.new_zeros(1) num_src.cumsum(0)[:-1]])<if_stmt>isinstance(batch Tensor)<block_start>num_nodes=num_src.tolist()<line_sep>cumsum=cum_src<block_end><else_stmt><block_start>num_dst=degree(dst_batch dtype=torch.long)<line_sep>cum_dst=torch.cat([dst_batch.new_zeros(1) num_dst.cumsum(0)[:-1]])<line_sep>num_nodes=torch.stack([num_src num_dst] dim=1).tolist()<line_sep>cumsum=torch.stack([cum_src cum_dst] dim=1).unsqueeze(-1)<block_end>neg_edge_indices=[]<for_stmt>i,edge_index enumerate(edge_indices)<block_start>edge_index=edge_index-cumsum[i]<line_sep>neg_edge_index=negative_sampling(edge_index num_nodes[i] num_neg_samples method force_undirected)<line_sep>neg_edge_index<augadd>cumsum[i]<line_sep>neg_edge_indices.append(neg_edge_index)<block_end><return>torch.cat(neg_edge_indices dim=1)<block_end><def_stmt>structured_negative_sampling edge_index num_nodes:Optional[int]=<none> contains_neg_self_loops:bool=<true><block_start>r"""Samples a negative edge :obj:`(i,k)` for every positive edge :obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a tuple of the form :obj:`(i,j,k)`. Args: edge_index (LongTensor): The edge indices. num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. 
(default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. (default: :obj:`True`) :rtype: (LongTensor, LongTensor, LongTensor) """<line_sep>num_nodes=maybe_num_nodes(edge_index num_nodes)<line_sep>row,col=edge_index.cpu()<line_sep>pos_idx=row<times>num_nodes+col<if_stmt><not>contains_neg_self_loops<block_start>loop_idx=torch.arange(num_nodes)<times>(num_nodes+1)<line_sep>pos_idx=torch.cat([pos_idx loop_idx] dim=0)<block_end>rand=torch.randint(num_nodes (row.size(0) ) dtype=torch.long)<line_sep>neg_idx=row<times>num_nodes+rand<line_sep>mask=torch.from_numpy(np.isin(neg_idx pos_idx)).to(torch.bool)<line_sep>rest=mask.nonzero(as_tuple=<false>).view(-1)<while_stmt>rest.numel()<g>0# pragma: no cover <block_start>tmp=torch.randint(num_nodes (rest.size(0) ) dtype=torch.long)<line_sep>rand[rest]=tmp<line_sep>neg_idx=row[rest]<times>num_nodes+tmp<line_sep>mask=torch.from_numpy(np.isin(neg_idx pos_idx)).to(torch.bool)<line_sep>rest=rest[mask]<block_end><return>edge_index[0] edge_index[1] rand.to(edge_index.device)<block_end><def_stmt>structured_negative_sampling_feasible edge_index:Tensor num_nodes:Optional[int]=<none> contains_neg_self_loops:bool=<true><arrow>bool<block_start>r"""Returns :obj:`True` if :meth:`~torch_geometric.utils.structured_negative_sampling` is feasible on the graph given by :obj:`edge_index`. :obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible if atleast one node is connected to all other nodes. Args: edge_index (LongTensor): The edge indices. num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. (default: :obj:`True`) :rtype: bool """<line_sep>num_nodes=maybe_num_nodes(edge_index num_nodes)<line_sep>max_num_neighbors=num_nodes<line_sep>edge_index=coalesce(edge_index num_nodes=num_nodes)<if_stmt><not>contains_neg_self_loops<block_start>edge_index,_=remove_self_loops(edge_index)<line_sep>max_num_neighbors<augsub>1<block_end># Reduce number of valid neighbors deg=degree(edge_index[0] num_nodes)<line_sep># True if there exists no node that is connected to all other nodes. <return>bool(torch.all(deg<l>max_num_neighbors))<block_end>############################################################################### <def_stmt>sample population:int k:int device=<none><arrow>Tensor<block_start><if_stmt>population<le>k<block_start><return>torch.arange(population device=device)<block_end><else_stmt><block_start><return>torch.tensor(random.sample(range(population) k) device=device)<block_end><block_end><def_stmt>edge_index_to_vector edge_index:Tensor size:Tuple[int int] bipartite:bool force_undirected:bool=<false> <arrow>Tuple[Tensor int]<block_start>row,col=edge_index<if_stmt>bipartite# No need to account for self-loops. 
<block_start>idx=(row<times>size[1]).add_(col)<line_sep>population=size[0]<times>size[1]<line_sep><return>idx population<block_end><elif_stmt>force_undirected<block_start><assert_stmt>size[0]<eq>size[1]<line_sep>num_nodes=size[0]<line_sep># We only operate on the upper triangular matrix: mask=row<l>col<line_sep>row,col=row[mask] col[mask]<line_sep>offset=torch.arange(1 num_nodes device=row.device).cumsum(0)[row]<line_sep>idx=row.mul_(num_nodes).add_(col).sub_(offset)<line_sep>population=(num_nodes<times>(num_nodes+1))<floordiv>2-num_nodes<line_sep><return>idx population<block_end><else_stmt><block_start><assert_stmt>size[0]<eq>size[1]<line_sep>num_nodes=size[0]<line_sep># We remove self-loops as we do not want to take them into account # when sampling negative values. mask=row<ne>col<line_sep>row,col=row[mask] col[mask]<line_sep>col[row<l>col]<augsub>1<line_sep>idx=row.mul_(num_nodes-1).add_(col)<line_sep>population=num_nodes<times>num_nodes-num_nodes<line_sep><return>idx population<block_end><block_end><def_stmt>vector_to_edge_index idx:Tensor size:Tuple[int int] bipartite:bool force_undirected:bool=<false><arrow>Tensor<block_start><if_stmt>bipartite# No need to account for self-loops. <block_start>row=idx.div(size[1] rounding_mode='floor')<line_sep>col=idx%size[1]<line_sep><return>torch.stack([row col] dim=0)<block_end><elif_stmt>force_undirected<block_start><assert_stmt>size[0]<eq>size[1]<line_sep>num_nodes=size[0]<line_sep>offset=torch.arange(1 num_nodes device=idx.device).cumsum(0)<line_sep>end=torch.arange(num_nodes num_nodes<times>num_nodes num_nodes device=idx.device)<line_sep>row=torch.bucketize(idx end.sub_(offset) right=<true>)<line_sep>col=offset[row].add_(idx)%num_nodes<line_sep><return>torch.stack([torch.cat([row col]) torch.cat([col row])] 0)<block_end><else_stmt><block_start><assert_stmt>size[0]<eq>size[1]<line_sep>num_nodes=size[0]<line_sep>row=idx.div(num_nodes-1 rounding_mode='floor')<line_sep>col=idx%(num_nodes-1)<line_sep>col[row<le>col]<augadd>1<line_sep><return>torch.stack([row col] dim=0)<block_end><block_end>
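A short usage sketch for negative_sampling on a toy graph, using the functions defined above (the edge list is illustrative).

import torch

edge_index = torch.tensor([[0, 0, 1, 2],
                           [1, 2, 2, 3]])
neg_edge_index = negative_sampling(edge_index, num_nodes=4, num_neg_samples=4)
# Each sampled column is a (src, dst) pair absent from edge_index; self-loops
# are excluded as well, so at most 8 distinct negatives exist for this graph.
print(neg_edge_index.shape)  # torch.Size([2, k]) with k <= 4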
# Copyright (C) 2015, 2016 GoSecure Inc. """ Telnet Transport and Authentication for the Honeypot @author: <NAME> <<EMAIL>> """<import_from_future_stmt> annotations<import_stmt>struct<import_from_stmt>twisted.conch.telnet ECHO LINEMODE NAWS SGA AuthenticatingTelnetProtocol ITelnetProtocol <import_from_stmt>twisted.python log<import_from_stmt>cowrie.core.config CowrieConfig<import_from_stmt>cowrie.core.credentials UsernamePasswordIP<class_stmt>HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol)<block_start>""" TelnetAuthProtocol that takes care of Authentication. Once authenticated this protocol is replaced with HoneyPotTelnetSession. """<line_sep>loginPrompt=b"login: "<line_sep>passwordPrompt=b"Password: "<line_sep>windowSize=[40 80]<def_stmt>connectionMade self# self.transport.negotiationMap[NAWS] = self.telnet_NAWS # Initial option negotation. Want something at least for Mirai # for opt in (NAWS,): # self.transport.doChain(opt).addErrback(log.err) # I need to doubly escape here since my underlying # CowrieTelnetTransport hack would remove it and leave just \n <block_start>self.transport.write(self.factory.banner.replace(b"\n" b"\r\r\n"))<line_sep>self.transport.write(self.loginPrompt)<block_end><def_stmt>connectionLost self reason<block_start>""" Fires on pre-authentication disconnects """<line_sep>AuthenticatingTelnetProtocol.connectionLost(self reason)<block_end><def_stmt>telnet_User self line<block_start>""" Overridden to conditionally kill 'WILL ECHO' which confuses clients that don't implement a proper Telnet protocol (most malware) """<line_sep>self.username=line# .decode() # only send ECHO option if we are chatting with a real Telnet client self.transport.willChain(ECHO)<line_sep># FIXME: this should be configurable or provided via filesystem self.transport.write(self.passwordPrompt)<line_sep><return>"Password"<block_end><def_stmt>telnet_Password self line<block_start>username,password=self.username line# .decode() <del_stmt>self.username<def_stmt>login ignored<block_start>self.src_ip=self.transport.getPeer().host<line_sep>creds=UsernamePasswordIP(username password self.src_ip)<line_sep>d=self.portal.login(creds self.src_ip ITelnetProtocol)<line_sep>d.addCallback(self._cbLogin)<line_sep>d.addErrback(self._ebLogin)<block_end># are we dealing with a real Telnet client? <if_stmt>self.transport.options# stop ECHO # even if ECHO negotiation fails we still want to attempt a login # this allows us to support dumb clients which is common in malware # thus the addBoth: on success and on exception (AlreadyNegotiating) <block_start>self.transport.wontChain(ECHO).addBoth(login)<block_end><else_stmt># process login <block_start>login("")<block_end><return>"Discard"<block_end><def_stmt>telnet_Command self command<block_start>self.transport.protocol.dataReceived(command+b"\r")<line_sep><return>"Command"<block_end><def_stmt>_cbLogin self ial<block_start>""" Fired on a successful login """<line_sep>interface,protocol,logout=ial<line_sep>protocol.windowSize=self.windowSize<line_sep>self.protocol=protocol<line_sep>self.logout=logout<line_sep>self.state="Command"<line_sep>self.transport.write(b"\n")<line_sep># Remove the short timeout of the login prompt. 
self.transport.setTimeout(CowrieConfig.getint("honeypot" "interactive_timeout" fallback=300))<line_sep># replace myself with avatar protocol protocol.makeConnection(self.transport)<line_sep>self.transport.protocol=protocol<block_end><def_stmt>_ebLogin self failure# TODO: provide a way to have user configurable strings for wrong password <block_start>self.transport.wontChain(ECHO)<line_sep>self.transport.write(b"\nLogin incorrect\n")<line_sep>self.transport.write(self.loginPrompt)<line_sep>self.state="User"<block_end><def_stmt>telnet_NAWS self data<block_start>""" From TelnetBootstrapProtocol in twisted/conch/telnet.py """<if_stmt>len(data)<eq>4<block_start>width,height=struct.unpack("!HH" b"".join(data))<line_sep>self.windowSize=[height width]<block_end><else_stmt><block_start>log.msg("Wrong number of NAWS bytes")<block_end><block_end><def_stmt>enableLocal self opt<block_start><if_stmt>opt<eq>ECHO<block_start><return><true><block_end># TODO: check if twisted now supports SGA (see git commit c58056b0) <elif_stmt>opt<eq>SGA<block_start><return><false><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>enableRemote self opt# TODO: check if twisted now supports LINEMODE (see git commit c58056b0) <block_start><if_stmt>opt<eq>LINEMODE<block_start><return><false><block_end><elif_stmt>opt<eq>NAWS<block_start><return><true><block_end><elif_stmt>opt<eq>SGA<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end>
<class_stmt>WebException(Exception)<block_start><pass><block_end><class_stmt>ParserException(Exception)<block_start>""" Parsing exception """<line_sep><pass><block_end><class_stmt>ApiException(Exception)<block_start>""" API exception """<line_sep><pass><block_end><class_stmt>WsException(Exception)<block_start>""" Polling exception """<line_sep><pass><block_end><class_stmt>SsoException(Exception)<block_start>""" SSO exception """<line_sep><pass><block_end><class_stmt>LibException(Exception)<block_start>""" Lib exception """<line_sep><pass><block_end><class_stmt>AccountException(Exception)<block_start>""" Account exception (account expired or invalid) """<line_sep><pass><block_end><class_stmt>FlowException(Exception)<block_start>""" Authentication traffic exception """<line_sep><pass><block_end>
<import_stmt>re<import_stmt>base64<import_stmt>hmac<import_stmt>hashlib<import_stmt>logging<import_stmt>requests<import_from_stmt>datetime datetime<class_stmt>AzureSentinel<block_start><def_stmt>__init__ self workspace_id workspace_key log_type log_analytics_url=''<block_start>self._workspace_id=workspace_id<line_sep>self._workspace_key=workspace_key<line_sep>self._log_type=log_type<if_stmt>((log_analytics_url<in>(<none> '')<or>str(log_analytics_url).isspace()))<block_start>log_analytics_url='https://'+self._workspace_id+'.ods.opinsights.azure.com'<block_end>pattern=r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"<if_stmt><not>re.match(pattern str(log_analytics_url))<block_start><raise>Exception("Invalid Log Analytics Uri.")<block_end>self._log_analytics_url=log_analytics_url<block_end><def_stmt>build_signature self date content_length method content_type resource<block_start>x_headers='x-ms-date:'+date<line_sep>string_to_hash=method+"\n"+str(content_length)+"\n"+content_type+"\n"+x_headers+"\n"+resource<line_sep>bytes_to_hash=bytes(string_to_hash encoding="utf-8")<line_sep>decoded_key=base64.b64decode(self._workspace_key)<line_sep>encoded_hash=base64.b64encode(hmac.new(decoded_key bytes_to_hash digestmod=hashlib.sha256).digest()).decode()<line_sep>authorization="SharedKey {}:{}".format(self._workspace_id encoded_hash)<line_sep><return>authorization<block_end><def_stmt>post_data self body<block_start>logging.info('constructing post to send to Azure Sentinel.')<line_sep>method='POST'<line_sep>content_type='application/json'<line_sep>resource='/api/logs'<line_sep>rfc1123date=datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')<line_sep>content_length=len(body)<line_sep>logging.info('build signature.')<line_sep>signature=self.build_signature(rfc1123date content_length method content_type resource)<line_sep>logging.info('signature built.')<line_sep>uri=self._log_analytics_url+resource+'?api-version=2016-04-01'<line_sep>headers={'content-type':content_type 'Authorization':signature 'Log-Type':self._log_type 'x-ms-date':rfc1123date}<line_sep>logging.info('sending post to Azure Sentinel.')<line_sep>response=requests.post(uri data=body headers=headers)<line_sep>logging.info(response.status_code)<if_stmt>(response.status_code<ge>200<and>response.status_code<le>299)<block_start><return>response.status_code<block_end><else_stmt><block_start>logging.warn("Events are not processed into Azure. Response code: {}".format(response.status_code))<line_sep><raise>Exception(f'Sending to Azure Sentinel failed with status code {response.status_code}')<block_end><block_end><block_end>
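A minimal usage sketch for the sender above; the workspace id, shared key, and log type are placeholders (the key only needs to be valid base64 for build_signature to run):

import json

sentinel = AzureSentinel(
    workspace_id="00000000-0000-0000-0000-000000000000",  # placeholder
    workspace_key="c2hhcmVkLWtleQ==",                     # placeholder, base64-encoded
    log_type="MyCustomLog",
)
events = [{"user": "alice", "action": "login"}]
status_code = sentinel.post_data(json.dumps(events))  # 2xx on success, raises otherwise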
<import_stmt>pytest<import_from_stmt>onnx TensorProto<import_from_stmt>onnx helper<as>oh<import_stmt>finn.core.onnx_exec<as>oxe<import_from_stmt>finn.core.modelwrapper ModelWrapper<import_from_stmt>finn.transformation.streamline.reorder MoveTransposePastJoinAdd<import_from_stmt>finn.util.basic gen_finn_dt_tensor<def_stmt>create_model perm<block_start><if_stmt>perm<eq>[0 3 1 2]<block_start>in_shape=[1 128 1 256]<line_sep>out_shape=[1 256 128 1]<block_end><if_stmt>perm<eq>[0 2 3 1]<block_start>in_shape=[1 256 128 1]<line_sep>out_shape=[1 128 1 256]<block_end>Transpose1_node=oh.make_node("Transpose" inputs=["in_transpose1"] outputs=["out_transpose1"] perm=perm)<line_sep>Transpose2_node=oh.make_node("Transpose" inputs=["in_transpose2"] outputs=["out_transpose2"] perm=perm)<line_sep>Join1_node=oh.make_node("Add" inputs=["out_transpose1" "out_transpose2"] outputs=["out_join1"])<line_sep>in_transpose1=oh.make_tensor_value_info("in_transpose1" TensorProto.FLOAT in_shape)<line_sep>in_transpose2=oh.make_tensor_value_info("in_transpose2" TensorProto.FLOAT in_shape)<line_sep>out_transpose1=oh.make_tensor_value_info("out_transpose1" TensorProto.FLOAT out_shape)<line_sep>out_transpose2=oh.make_tensor_value_info("out_transpose2" TensorProto.FLOAT out_shape)<line_sep>out_join1=oh.make_tensor_value_info("out_join1" TensorProto.FLOAT out_shape)<line_sep>graph=oh.make_graph(nodes=[Transpose1_node Transpose2_node Join1_node] name="test_graph" inputs=[in_transpose1 in_transpose2] outputs=[out_join1] value_info=[out_transpose1 out_transpose2 ] )<line_sep>onnx_model=oh.make_model(graph producer_name="test_model")<line_sep>model=ModelWrapper(onnx_model)<line_sep><return>model<block_end># Permutation of transpose node @pytest.mark.parametrize("perm" [[0 3 1 2] [0 2 3 1]])<def_stmt>test_move_identical_op_past_join_op perm<block_start>model=create_model(perm)<line_sep># Create input data input0_tensor_name=model.graph.input[0].name<line_sep>input1_tensor_name=model.graph.input[1].name<line_sep># Note: it is assumed that both tensors have the same shape and data type input_shape=model.get_tensor_shape(input0_tensor_name)<line_sep>input_dtype=model.get_tensor_datatype(input0_tensor_name)<line_sep>input_val=gen_finn_dt_tensor(input_dtype input_shape)<line_sep>input_dict={}<line_sep>input_dict[input0_tensor_name]=input_val<line_sep>input_dict[input1_tensor_name]=input_val<line_sep>model_transformed=model.transform(MoveTransposePastJoinAdd())<assert_stmt>oxe.compare_execution(model model_transformed input_dict)<line_sep># Check if order changed node0_input0_model=model.find_consumers(model.graph.input[0].name)[0].op_type<line_sep>node1_input1_model=model.find_consumers(model.graph.input[1].name)[0].op_type<line_sep>node0_input0_model_transformed=model_transformed.find_consumers(model_transformed.graph.input[0].name)[0].op_type<line_sep>node1_input1_model_transformed=model_transformed.find_consumers(model_transformed.graph.input[1].name)[0].op_type<assert_stmt>node0_input0_model<ne>node0_input0_model_transformed<assert_stmt>node1_input1_model<ne>node1_input1_model_transformed<block_end>
<import_stmt>unittest<class_stmt>LexerTestCase(unittest.TestCase)<block_start><def_stmt>makeLexer self text<block_start><import_from_stmt>spi Lexer<line_sep>lexer=Lexer(text)<line_sep><return>lexer<block_end><def_stmt>test_tokens self<block_start><import_from_stmt>spi TokenType<line_sep>records=(('234' TokenType.INTEGER_CONST 234) ('3.14' TokenType.REAL_CONST 3.14) ('*' TokenType.MUL '*') ('DIV' TokenType.INTEGER_DIV 'DIV') ('/' TokenType.FLOAT_DIV '/') ('+' TokenType.PLUS '+') ('-' TokenType.MINUS '-') ('(' TokenType.LPAREN '(') (')' TokenType.RPAREN ')') (':=' TokenType.ASSIGN ':=') ('.' TokenType.DOT '.') ('number' TokenType.ID 'number') (';' TokenType.SEMI ';') ('BEGIN' TokenType.BEGIN 'BEGIN') ('END' TokenType.END 'END') ('PROCEDURE' TokenType.PROCEDURE 'PROCEDURE') )<for_stmt>text,tok_type,tok_val records<block_start>lexer=self.makeLexer(text)<line_sep>token=lexer.get_next_token()<line_sep>self.assertEqual(token.type tok_type)<line_sep>self.assertEqual(token.value tok_val)<block_end><block_end><def_stmt>test_lexer_exception self<block_start><import_from_stmt>spi LexerError<line_sep>lexer=self.makeLexer('<')<with_stmt>self.assertRaises(LexerError)<block_start>lexer.get_next_token()<block_end><block_end><block_end><class_stmt>ParserTestCase(unittest.TestCase)<block_start><def_stmt>makeParser self text<block_start><import_from_stmt>spi Lexer Parser<line_sep>lexer=Lexer(text)<line_sep>parser=Parser(lexer)<line_sep><return>parser<block_end><def_stmt>test_expression_invalid_syntax_01 self<block_start><import_from_stmt>spi ParserError ErrorCode<line_sep>parser=self.makeParser(""" PROGRAM Test; VAR a : INTEGER; BEGIN a := 10 * ; {Invalid syntax} END. """)<with_stmt>self.assertRaises(ParserError)<as>cm<block_start>parser.parse()<block_end>the_exception=cm.exception<line_sep>self.assertEqual(the_exception.error_code ErrorCode.UNEXPECTED_TOKEN)<line_sep>self.assertEqual(the_exception.token.value ';')<line_sep>self.assertEqual(the_exception.token.lineno 6)<block_end><def_stmt>test_expression_invalid_syntax_02 self<block_start><import_from_stmt>spi ParserError ErrorCode<line_sep>parser=self.makeParser(""" PROGRAM Test; VAR a : INTEGER; BEGIN a := 1 (1 + 2); {Invalid syntax} END. """)<with_stmt>self.assertRaises(ParserError)<as>cm<block_start>parser.parse()<block_end>the_exception=cm.exception<line_sep>self.assertEqual(the_exception.error_code ErrorCode.UNEXPECTED_TOKEN)<line_sep>self.assertEqual(the_exception.token.value '(')<line_sep>self.assertEqual(the_exception.token.lineno 6)<block_end><def_stmt>test_maximum_one_VAR_block_is_allowed self<block_start><import_from_stmt>spi ParserError ErrorCode<line_sep># zero VARs parser=self.makeParser(""" PROGRAM Test; BEGIN END. """)<line_sep>parser.parse()<line_sep># one VAR parser=self.makeParser(""" PROGRAM Test; VAR a : INTEGER; BEGIN END. """)<line_sep>parser.parse()<line_sep>parser=self.makeParser(""" PROGRAM Test; VAR a : INTEGER; VAR b : INTEGER; BEGIN a := 5; b := a + 10; END. 
""")<with_stmt>self.assertRaises(ParserError)<as>cm<block_start>parser.parse()<block_end>the_exception=cm.exception<line_sep>self.assertEqual(the_exception.error_code ErrorCode.UNEXPECTED_TOKEN)<line_sep>self.assertEqual(the_exception.token.value 'VAR')<line_sep>self.assertEqual(the_exception.token.lineno 5)<block_end><block_end># second VAR <class_stmt>SemanticAnalyzerTestCase(unittest.TestCase)<block_start><def_stmt>runSemanticAnalyzer self text<block_start><import_from_stmt>spi Lexer Parser SemanticAnalyzer<line_sep>lexer=Lexer(text)<line_sep>parser=Parser(lexer)<line_sep>tree=parser.parse()<line_sep>semantic_analyzer=SemanticAnalyzer()<line_sep>semantic_analyzer.visit(tree)<line_sep><return>semantic_analyzer<block_end><def_stmt>test_semantic_duplicate_id_error self<block_start><import_from_stmt>spi SemanticError ErrorCode<with_stmt>self.assertRaises(SemanticError)<as>cm<block_start>self.runSemanticAnalyzer(""" PROGRAM Test; VAR a : INTEGER; a : REAL; {Duplicate identifier} BEGIN a := 5; END. """)<block_end>the_exception=cm.exception<line_sep>self.assertEqual(the_exception.error_code ErrorCode.DUPLICATE_ID)<line_sep>self.assertEqual(the_exception.token.value 'a')<line_sep>self.assertEqual(the_exception.token.lineno 5)<block_end><def_stmt>test_semantic_id_not_found_error self<block_start><import_from_stmt>spi SemanticError ErrorCode<with_stmt>self.assertRaises(SemanticError)<as>cm<block_start>self.runSemanticAnalyzer(""" PROGRAM Test; VAR a : INTEGER; BEGIN a := 5 + b; END. """)<block_end>the_exception=cm.exception<line_sep>self.assertEqual(the_exception.error_code ErrorCode.ID_NOT_FOUND)<line_sep>self.assertEqual(the_exception.token.value 'b')<block_end><block_end><class_stmt>TestCallStack<block_start><def_stmt>__init__ self<block_start>self._records=[]<block_end><def_stmt>push self ar<block_start>self._records.append(ar)<block_end><def_stmt>pop self# do nothing <block_start><pass><block_end><def_stmt>peek self<block_start><return>self._records[-1]<block_end><block_end><class_stmt>InterpreterTestCase(unittest.TestCase)<block_start><def_stmt>makeInterpreter self text<block_start><import_from_stmt>spi Lexer Parser SemanticAnalyzer Interpreter<line_sep>lexer=Lexer(text)<line_sep>parser=Parser(lexer)<line_sep>tree=parser.parse()<line_sep>semantic_analyzer=SemanticAnalyzer()<line_sep>semantic_analyzer.visit(tree)<line_sep>interpreter=Interpreter(tree)<line_sep>interpreter.call_stack=TestCallStack()<line_sep><return>interpreter<block_end><def_stmt>test_integer_arithmetic_expressions self<block_start><for_stmt>expr,result (('3' 3) ('2 + 7 * 4' 30) ('7 - 8 DIV 4' 5) ('14 + 2 * 3 - 6 DIV 2' 17) ('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))' 22) ('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)' 10) ('7 + (((3 + 2)))' 12) ('- 3' -3) ('+ 3' 3) ('5 - - - + - 3' 8) ('5 - - - + - (3 + 4) - +2' 10) )<block_start>interpreter=self.makeInterpreter("""PROGRAM Test; VAR a : INTEGER; BEGIN a := %s END. """%expr)<line_sep>interpreter.interpret()<line_sep>ar=interpreter.call_stack.peek()<line_sep>self.assertEqual(ar['a'] result)<block_end><block_end><def_stmt>test_float_arithmetic_expressions self<block_start><for_stmt>expr,result (('3.14' 3.14) ('2.14 + 7 * 4' 30.14) ('7.14 - 8 / 4' 5.14) )<block_start>interpreter=self.makeInterpreter("""PROGRAM Test; VAR a : REAL; BEGIN a := %s END. 
"""%expr)<line_sep>interpreter.interpret()<line_sep>ar=interpreter.call_stack.peek()<line_sep>self.assertEqual(ar['a'] result)<block_end><block_end><def_stmt>test_procedure_call self<block_start>text="""\ program Main; procedure Alpha(a : integer; b : integer); var x : integer; begin x := (a + b ) * 2; end; begin { Main } Alpha(3 + 5, 7); end. { Main } """<line_sep>interpreter=self.makeInterpreter(text)<line_sep>interpreter.interpret()<line_sep>ar=interpreter.call_stack.peek()<line_sep>self.assertEqual(ar['a'] 8)<line_sep>self.assertEqual(ar['b'] 7)<line_sep>self.assertEqual(ar['x'] 30)<line_sep>self.assertEqual(ar.nesting_level 2)<block_end><def_stmt>test_program self<block_start>text="""\ PROGRAM Part12; VAR number : INTEGER; a, b : INTEGER; y : REAL; PROCEDURE P1; VAR a : REAL; k : INTEGER; PROCEDURE P2; VAR a, z : INTEGER; BEGIN {P2} z := 777; END; {P2} BEGIN {P1} END; {P1} BEGIN {Part12} number := 2; a := number ; b := 10 * a + 10 * number DIV 4; y := 20 / 7 + 3.14 END. {Part12} """<line_sep>interpreter=self.makeInterpreter(text)<line_sep>interpreter.interpret()<line_sep>ar=interpreter.call_stack.peek()<line_sep>self.assertEqual(len(ar.members.keys()) 4)<line_sep>self.assertEqual(ar['number'] 2)<line_sep>self.assertEqual(ar['a'] 2)<line_sep>self.assertEqual(ar['b'] 25)<line_sep>self.assertAlmostEqual(ar['y'] float(20)/7+3.14)<block_end><block_end># 5.9971... <if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
""" Simple pre-processing for PeerRead papers. Takes in JSON formatted data from ScienceParse and outputs a tfrecord Reference example: https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py """<import_stmt>argparse<import_stmt>glob<import_stmt>os<import_stmt>random<import_stmt>io<import_stmt>json<import_from_stmt>dateutil.parser parse<as>parse_date<import_stmt>tensorflow<as>tf<import_stmt>bert.tokenization<as>tokenization<import_from_stmt>PeerRead.ScienceParse.Paper Paper<import_from_stmt>PeerRead.ScienceParse.ScienceParseReader ScienceParseReader<import_from_stmt>PeerRead.data_cleaning.PeerRead_hand_features get_PeerRead_hand_features<line_sep>rng=random.Random(0)<def_stmt>process_json_paper paper_json_filename scienceparse_dir tokenizer<block_start>paper=Paper.from_json(paper_json_filename)<line_sep>paper.SCIENCEPARSE=ScienceParseReader.read_science_parse(paper.ID paper.TITLE paper.ABSTRACT scienceparse_dir)<line_sep># tokenize PeerRead features <try_stmt><block_start>title_tokens=tokenizer.tokenize(paper.TITLE)<block_end><except_stmt>ValueError# missing titles are quite common sciparse <block_start>print("Missing title for "+paper_json_filename)<line_sep>title_tokens=<none><block_end>abstract_tokens=tokenizer.tokenize(paper.ABSTRACT)<line_sep>text_features={'title':title_tokens 'abstract':abstract_tokens}<line_sep>context_features={'authors':paper.AUTHORS 'accepted':paper.ACCEPTED 'name':paper.ID}<line_sep># add hand crafted features from PeerRead pr_hand_features=get_PeerRead_hand_features(paper)<line_sep>context_features.update(pr_hand_features)<line_sep><return>text_features context_features<block_end><def_stmt>bert_process_sentence example_tokens max_seq_length tokenizer<block_start>""" Tokenization and pre-processing of text as expected by Bert Parameters ---------- example_tokens max_seq_length tokenizer Returns ------- """<line_sep># Account for [CLS] and [SEP] with "- 2" <if_stmt>len(example_tokens)<g>max_seq_length-2<block_start>example_tokens=example_tokens[0:(max_seq_length-2)]<block_end># The convention in BERT for single sequences is: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. (vv: Not relevant for us) # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. # vv: segment_ids seem to be the same as type_ids tokens=[]<line_sep>segment_ids=[]<line_sep>tokens.append("[CLS]")<line_sep>segment_ids.append(0)<for_stmt>token example_tokens<block_start>tokens.append(token)<line_sep>segment_ids.append(0)<block_end>tokens.append("[SEP]")<line_sep>segment_ids.append(0)<line_sep>input_ids=tokenizer.convert_tokens_to_ids(tokens)<line_sep># The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask=[1]<times>len(input_ids)<line_sep># Zero-pad up to the sequence length. 
<while_stmt>len(input_ids)<l>max_seq_length<block_start>input_ids.append(0)<line_sep>input_mask.append(0)<line_sep>segment_ids.append(0)<block_end><assert_stmt>len(input_ids)<eq>max_seq_length<assert_stmt>len(input_mask)<eq>max_seq_length<assert_stmt>len(segment_ids)<eq>max_seq_length<line_sep><return>input_ids input_mask segment_ids<block_end><def_stmt>paper_to_bert_Example text_features context_features max_seq_length tokenizer<block_start>""" Parses the input paper into a tf.Example as expected by Bert Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯ """<line_sep>abstract_features={}<line_sep>abstract_tokens,abstract_padding_mask,_=bert_process_sentence(text_features['abstract'] max_seq_length tokenizer)<line_sep>abstract_features["token_ids"]=_int64_feature(abstract_tokens)<line_sep>abstract_features["token_mask"]=_int64_feature(abstract_padding_mask)<line_sep># abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: omission may cause bugs # abstract_features["label_ids"] = _int64_feature([feature.label_id]) # non-sequential features tf_context_features,tf_context_features_types=_dict_of_nonlist_numerical_to_tf_features(context_features)<line_sep>features={**tf_context_features **abstract_features}<line_sep>tf_example=tf.train.Example(features=tf.train.Features(feature=features))<line_sep><return>tf_example<block_end><def_stmt>_int64_feature value<block_start>"""Wrapper for inserting an int64 Feature into a SequenceExample proto, e.g., an integer label. """<if_stmt>isinstance(value list)<block_start><return>tf.train.Feature(int64_list=tf.train.Int64List(value=value))<block_end><else_stmt><block_start><return>tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))<block_end><block_end><def_stmt>_float_feature value<block_start>"""Wrapper for inserting a float Feature into a SequenceExample proto, e.g., a float value. 
"""<if_stmt>isinstance(value list)<block_start><return>tf.train.Feature(float_list=tf.train.FloatList(value=value))<block_end><else_stmt><block_start><return>tf.train.Feature(float_list=tf.train.FloatList(value=[value]))<block_end><block_end><def_stmt>_bytes_feature value<block_start>"""Wrapper for inserting a bytes Feature into a SequenceExample proto, e.g., an image as bytes """<line_sep># return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) <return>tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))<block_end><def_stmt>_dict_of_nonlist_numerical_to_tf_features my_dict<block_start>""" Strip out non-numerical features Returns tf_features_dict: a dictionary suitable for passing to tf.train.Example tf_types_dict: a dictionary of the tf types of the previous dict """<line_sep>tf_types_dict={}<line_sep>tf_features_dict={}<for_stmt>k,v my_dict.items()<block_start><if_stmt>isinstance(v int)<or>isinstance(v bool)<block_start>tf_features_dict[k]=_int64_feature(v)<line_sep>tf_types_dict[k]=tf.int64<block_end><elif_stmt>isinstance(v float)<block_start>tf_features_dict[k]=_float_feature(v)<line_sep>tf_types_dict[k]=tf.float32<block_end><else_stmt><block_start><pass><block_end><block_end><return>tf_features_dict tf_types_dict<block_end>venues={'acl':1 'conll':2 'iclr':3 'nips':4 'icml':5 'emnlp':6 'aaai':7 'hlt-naacl':8 'arxiv':0}<def_stmt>_venues venue_name<block_start><if_stmt>venue_name.lower()<in>venues<block_start><return>venues[venue_name.lower()]<block_end><else_stmt><block_start><return>-1<block_end><block_end><def_stmt>_arxiv_subject subjects<block_start>subject=subjects[0]<if_stmt>'lg'<in>subject.lower()<block_start><return>0<block_end><elif_stmt>'cl'<in>subject.lower()<block_start><return>1<block_end><elif_stmt>'ai'<in>subject.lower()<block_start><return>2<block_end><else_stmt><block_start><raise>Exception("arxiv subject not recognized")<block_end><block_end><def_stmt>clean_PeerRead_dataset review_json_dir parsedpdf_json_dir venue year out_dir out_file max_abs_len tokenizer default_accept=1 is_arxiv=<false><block_start><if_stmt><not>os.path.exists(out_dir)<block_start>os.makedirs(out_dir)<block_end>print('Reading reviews from...' 
review_json_dir)<line_sep>paper_json_filenames=sorted(glob.glob('{}/*.json'.format(review_json_dir)))<with_stmt>tf.python_io.TFRecordWriter(out_dir+"/"+out_file)<as>writer<block_start><for_stmt>idx,paper_json_filename enumerate(paper_json_filenames)<block_start>text_features,context_features=process_json_paper(paper_json_filename parsedpdf_json_dir tokenizer)<if_stmt>context_features['accepted']<is><none># missing for conferences other than ICLR (we only see accepts) <block_start>context_features['accepted']=default_accept<block_end>many_split=rng.randint(0 100)# useful for easy data splitting later # other context features arxiv=-1<if_stmt>is_arxiv<block_start><with_stmt>io.open(paper_json_filename)<as>json_file<block_start>loaded=json.load(json_file)<block_end>year=parse_date(loaded['DATE_OF_SUBMISSION']).year<line_sep>venue=_venues(loaded['conference'])<line_sep>arxiv=_arxiv_subject([loaded['SUBJECTS']])<block_end>extra_context={'id':idx 'venue':venue 'year':year 'many_split':many_split 'arxiv':arxiv}<line_sep>context_features.update(extra_context)<line_sep># turn it into a tf.data example paper_ex=paper_to_bert_Example(text_features context_features max_seq_length=max_abs_len tokenizer=tokenizer)<line_sep>writer.write(paper_ex.SerializeToString())<block_end><block_end><block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--review-json-dir' type=str default='../dat/PeerRead/arxiv.all/all/reviews')<line_sep>parser.add_argument('--parsedpdf-json-dir' type=str default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')<line_sep>parser.add_argument('--out-dir' type=str default='../dat/PeerRead/proc')<line_sep>parser.add_argument('--out-file' type=str default='arxiv-all.tf_record')<line_sep>parser.add_argument('--vocab-file' type=str default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')<line_sep>parser.add_argument('--max-abs-len' type=int default=250)<line_sep>parser.add_argument('--venue' type=int default=0)<line_sep>parser.add_argument('--year' type=int default=2017)<line_sep>args=parser.parse_args()<line_sep>tokenizer=tokenization.FullTokenizer(vocab_file=args.vocab_file do_lower_case=<true>)<line_sep>clean_PeerRead_dataset(args.review_json_dir args.parsedpdf_json_dir args.venue args.year args.out_dir args.out_file args.max_abs_len tokenizer is_arxiv=<true>)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
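A sketch of how bert_process_sentence above is meant to be driven, assuming a BERT vocabulary file is available locally (the path is a placeholder):

import bert.tokenization as tokenization

tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
tokens = tokenizer.tokenize("Causal effects of peer review scores.")
input_ids, input_mask, segment_ids = bert_process_sentence(tokens, 16, tokenizer)
# input_ids:  [CLS] ... [SEP] token ids, zero-padded up to length 16
# input_mask: 1 for real tokens, 0 for the padded positions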
# -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. <import_stmt>numpy<as>np<import_from_stmt>os path<as>op<import_from_stmt>..util load_data_file<line_sep># This is the package data dir, not the dir for config, etc. DATA_DIR=op.join(op.dirname(__file__) '_data')<def_stmt>load_iris <block_start>"""Load the iris dataset Returns ------- iris : NpzFile data['data'] : a (150, 4) NumPy array with the iris' features data['group'] : a (150,) NumPy array with the iris' group """<line_sep><return>np.load(load_data_file('iris/iris.npz' force_download='2014-09-04'))<block_end><def_stmt>load_crate <block_start>"""Load an image of a crate Returns ------- crate : array 256x256x3 crate image. """<line_sep><return>np.load(load_data_file('orig/crate.npz'))['crate']<block_end><def_stmt>pack_unit value<block_start>"""Packs float values between [0,1] into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel """<line_sep>pack=np.zeros(value.shape+(4 ) dtype=np.ubyte)<for_stmt>i range(4)<block_start>value,pack[<ellipsis> i]=np.modf(value<times>256.)<block_end><return>pack<block_end><def_stmt>pack_ieee value<block_start>"""Packs float ieee binary representation into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel """<line_sep><return>np.fromstring(value.tobytes() np.ubyte).reshape((value.shape+(4 )))<block_end><def_stmt>load_spatial_filters packed=<true><block_start>"""Load spatial-filters kernel Parameters ---------- packed : bool Whether or not the data should be in "packed" representation for use in GLSL code. Returns ------- kernel : array 16x1024x4 (packed float in rgba) or 16x1024 (unpacked float) 16 interpolation kernel with length 1024 each. names : tuple of strings Respective interpolation names, plus "Nearest" which does not require a filter but can still be used """<line_sep>names=("Bilinear" "Hanning" "Hamming" "Hermite" "Kaiser" "Quadric" "Bicubic" "CatRom" "Mitchell" "Spline16" "Spline36" "Gaussian" "Bessel" "Sinc" "Lanczos" "Blackman" "Nearest")<line_sep>kernel=np.load(op.join(DATA_DIR 'spatial-filters.npy'))<if_stmt>packed# convert the kernel to a packed representation <block_start>kernel=pack_unit(kernel)<block_end><return>kernel names<block_end>
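A small worked example for the packing helper above: pack_unit splits each float in [0, 1] into four radix-256 digits stored as unsigned bytes.

import numpy as np

kernel = np.array([0.5, 0.25])
packed = pack_unit(kernel)          # shape (2, 4), dtype uint8
# packed[0] == [128, 0, 0, 0] and packed[1] == [64, 0, 0, 0]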
<import_stmt>logging<import_stmt>os<import_from_stmt>typing List Tuple Optional<import_from_stmt>amlb.utils config_load Namespace<line_sep>log=logging.getLogger(__name__)<def_stmt>_find_local_benchmark_definition name:str benchmark_definition_dirs:List[str]<arrow>str# 'name' should be either a full path to the benchmark, # or a filename (without extension) in the benchmark directory. <block_start><if_stmt>os.path.exists(name)<block_start><return>name<block_end><for_stmt>bd benchmark_definition_dirs<block_start>bf=os.path.join(bd f"{name}.yaml")<if_stmt>os.path.exists(bf)# We don't account for duplicate definitions (yet). <block_start><return>bf<block_end><block_end># should we support s3 and check for s3 path before raising error? <raise>ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")<block_end><def_stmt>load_file_benchmark name:str benchmark_definition_dirs:List[str]<arrow>Tuple[str Optional[str] List[Namespace]]<block_start>""" Loads benchmark from a local file. """<line_sep>benchmark_file=_find_local_benchmark_definition(name benchmark_definition_dirs)<line_sep>log.info("Loading benchmark definitions from %s." benchmark_file)<line_sep>tasks=config_load(benchmark_file)<line_sep>benchmark_name,_=os.path.splitext(os.path.basename(benchmark_file))<line_sep><return>benchmark_name benchmark_file tasks<block_end>
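A hypothetical call to the loader above; the benchmark name and definitions directory are made-up values for illustration:

benchmark_name, benchmark_file, tasks = load_file_benchmark(
    "small", ["resources/benchmarks"]   # resolves to resources/benchmarks/small.yaml
)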
<import_from_stmt>itertools islice<import_from_stmt>test get_user_session cassette<import_from_stmt>test.resources.documents delete_all_documents create_document<def_stmt>test_should_iterate_through_documents <block_start>session=get_user_session()<line_sep>delete_all_documents()<with_stmt>cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml')<block_start>create_document(session 'title 1')<line_sep>create_document(session 'title 2')<line_sep>create_document(session 'title 3')<line_sep>docs=list(islice(session.documents.iter(page_size=2) 3))<assert_stmt>len(docs)<eq>3<assert_stmt>docs[0].title<eq>'title 1'<assert_stmt>docs[1].title<eq>'title 2'<assert_stmt>docs[2].title<eq>'title 3'<block_end><block_end>
# encoding: utf-8 """ mplsmask.py Created by <NAME> on 2016-12-01. Copyright (c) 2014-2017 Exa Networks. All rights reserved. """<import_from_stmt>exabgp.bgp.message.notification Notify<import_from_stmt>exabgp.bgp.message.update.attribute.bgpls.linkstate LinkState<import_from_stmt>exabgp.bgp.message.update.attribute.bgpls.linkstate FlagLS<line_sep># 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Type | Length | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |L|R| Reserved | # +-+-+-+-+-+-+-+-+ # https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask # # +------------+------------------------------------------+-----------+ # | Bit | Description | Reference | # +------------+------------------------------------------+-----------+ # | 'L' | Label Distribution Protocol (LDP) | [RFC5036] | # | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] | # | | (RSVP-TE) | | # | 'Reserved' | Reserved for future use | | # +------------+------------------------------------------+-----------+ # RFC 7752 3.3.2.2. MPLS Protocol Mask TLV @LinkState.register()<class_stmt>MplsMask(FlagLS)<block_start>REPR='MPLS Protocol mask'<line_sep>JSON='mpls-mask'<line_sep>TLV=1094<line_sep>FLAGS=['LDP' 'RSVP-TE' 'RSV' 'RSV' 'RSV' 'RSV' 'RSV' 'RSV']<line_sep>LEN=1<block_end>
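Per the RFC 7752 diagram quoted above, the 'L' and 'R' flags occupy the two most significant bits of the one-octet value. A plain-Python sketch of composing and reading that octet, independent of exabgp's own FlagLS machinery:

LDP = 1 << 7      # 'L' bit
RSVP_TE = 1 << 6  # 'R' bit

mask = LDP | RSVP_TE              # 0xC0: both label protocols advertised
ldp_enabled = bool(mask & LDP)
rsvp_te_enabled = bool(mask & RSVP_TE)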
<import_stmt>numpy<as>np<import_stmt>scipy.interpolate<import_stmt>scipy.ndimage<import_from_stmt>sklearn.feature_extraction.image extract_patches_2d reconstruct_from_patches_2d<def_stmt>_calc_patch_grid_dims shape patch_size patch_stride<block_start>x_w,x_h,x_c=shape<line_sep>num_rows=1+(x_h-patch_size)<floordiv>patch_stride<line_sep>num_cols=1+(x_w-patch_size)<floordiv>patch_stride<line_sep><return>num_rows num_cols<block_end><def_stmt>make_patch_grid x patch_size patch_stride=1<block_start>'''x shape: (num_channels, rows, cols)'''<line_sep>x=x.transpose(2 1 0)<line_sep>patches=extract_patches_2d(x (patch_size patch_size))<line_sep>x_w,x_h,x_c=x.shape<line_sep>num_rows,num_cols=_calc_patch_grid_dims(x.shape patch_size patch_stride)<line_sep>patches=patches.reshape((num_rows num_cols patch_size patch_size x_c))<line_sep>patches=patches.transpose((0 1 4 2 3))<line_sep>#patches = np.rollaxis(patches, -1, 2) <return>patches<block_end><def_stmt>combine_patches_grid in_patches out_shape<block_start>'''Reconstruct an image from these `patches` input shape: (rows, cols, channels, patch_row, patch_col) '''<line_sep>num_rows,num_cols=in_patches.shape[:2]<line_sep>num_channels=in_patches.shape[-3]<line_sep>patch_size=in_patches.shape[-1]<line_sep>num_patches=num_rows<times>num_cols<line_sep>in_patches=np.reshape(in_patches (num_patches num_channels patch_size patch_size))# (patches, channels, pr, pc) in_patches=np.transpose(in_patches (0 2 3 1))# (patches, p, p, channels) recon=reconstruct_from_patches_2d(in_patches out_shape)<line_sep><return>recon.transpose(2 1 0).astype(np.float32)<block_end><class_stmt>PatchMatcher(object)<block_start>'''A matcher of image patches inspired by the PatchMatch algorithm. image shape: (width, height, channels) '''<def_stmt>__init__ self input_shape target_img patch_size=1 patch_stride=1 jump_size=0.5 num_propagation_steps=5 num_random_steps=5 random_max_radius=1.0 random_scale=0.5<block_start>self.input_shape=input_shape<line_sep>self.patch_size=patch_size<line_sep>self.patch_stride=patch_stride<line_sep>self.jump_size=jump_size<line_sep>self.num_propagation_steps=num_propagation_steps<line_sep>self.num_random_steps=num_random_steps<line_sep>self.random_max_radius=random_max_radius<line_sep>self.random_scale=random_scale<line_sep>self.num_input_rows,self.num_input_cols=_calc_patch_grid_dims(input_shape patch_size patch_stride)<line_sep>self.target_patches=make_patch_grid(target_img patch_size)<line_sep>self.target_patches_normed=self.normalize_patches(self.target_patches)<line_sep>self.coords=np.random.uniform(0.0 1.0 # TODO: switch to pixels (2 self.num_input_rows self.num_input_cols))<line_sep># * [[[self.num_input_rows]],[[self.num_input_cols]]] self.similarity=np.zeros(input_shape[:2:-1] dtype=np.float32)<line_sep>self.min_propagration_row=1.0/self.num_input_rows<line_sep>self.min_propagration_col=1.0/self.num_input_cols<line_sep>self.delta_row=np.array([[[self.min_propagration_row]] [[0.0]]])<line_sep>self.delta_col=np.array([[[0.0]] [[self.min_propagration_col]]])<block_end><def_stmt>update self input_img reverse_propagation=<false><block_start>input_patches=self.get_patches_for(input_img)<line_sep>self.update_with_patches(self.normalize_patches(input_patches) reverse_propagation=reverse_propagation)<block_end><def_stmt>update_with_patches self input_patches reverse_propagation=<false><block_start>self._propagate(input_patches reverse_propagation=reverse_propagation)<line_sep>self._random_update(input_patches)<block_end><def_stmt>get_patches_for self 
img<block_start><return>make_patch_grid(img self.patch_size)<line_sep><block_end><def_stmt>normalize_patches self patches<block_start>norm=np.sqrt(np.sum(np.square(patches) axis=(2 3 4) keepdims=<true>))<line_sep><return>patches/norm<block_end><def_stmt>_propagate self input_patches reverse_propagation=<false><block_start><if_stmt>reverse_propagation<block_start>roll_direction=1<block_end><else_stmt><block_start>roll_direction=-1<block_end>sign=float(roll_direction)<for_stmt>step_i range(self.num_propagation_steps)<block_start>new_coords=self.clip_coords(np.roll(self.coords roll_direction 1)+self.delta_row<times>sign)<line_sep>coords_row,similarity_row=self.eval_state(new_coords input_patches)<line_sep>new_coords=self.clip_coords(np.roll(self.coords roll_direction 2)+self.delta_col<times>sign)<line_sep>coords_col,similarity_col=self.eval_state(new_coords input_patches)<line_sep>self.coords,self.similarity=self.take_best(coords_row similarity_row coords_col similarity_col)<block_end><block_end><def_stmt>_random_update self input_patches<block_start><for_stmt>alpha range(1 self.num_random_steps+1)# NOTE this should actually stop when the move is < 1 <block_start>new_coords=self.clip_coords(self.coords+np.random.uniform(-self.random_max_radius self.random_max_radius self.coords.shape)<times>self.random_scale<power>alpha)<line_sep>self.coords,self.similarity=self.eval_state(new_coords input_patches)<block_end><block_end><def_stmt>eval_state self new_coords input_patches<block_start>new_similarity=self.patch_similarity(input_patches new_coords)<line_sep>delta_similarity=new_similarity-self.similarity<line_sep>coords=np.where(delta_similarity<g>0 new_coords self.coords)<line_sep>best_similarity=np.where(delta_similarity<g>0 new_similarity self.similarity)<line_sep><return>coords best_similarity<block_end><def_stmt>take_best self coords_a similarity_a coords_b similarity_b<block_start>delta_similarity=similarity_a-similarity_b<line_sep>best_coords=np.where(delta_similarity<g>0 coords_a coords_b)<line_sep>best_similarity=np.where(delta_similarity<g>0 similarity_a similarity_b)<line_sep><return>best_coords best_similarity<block_end><def_stmt>patch_similarity self source coords<block_start>'''Check the similarity of the patches specified in coords.'''<line_sep>target_vals=self.lookup_coords(self.target_patches_normed coords)<line_sep>err=source<times>target_vals<line_sep><return>np.sum(err axis=(2 3 4))<block_end><def_stmt>clip_coords self coords# TODO: should this all be in pixel space? <block_start>coords=np.clip(coords 0.0 1.0)<line_sep><return>coords<block_end><def_stmt>lookup_coords self x coords<block_start>x_shape=np.expand_dims(np.expand_dims(x.shape -1) -1)<line_sep>i_coords=np.round(coords<times>(x_shape[:2]-1)).astype('int32')<line_sep><return>x[i_coords[0] i_coords[1]]<block_end><def_stmt>get_reconstruction self patches=<none> combined=<none><block_start><if_stmt>combined<is><not><none><block_start>patches=make_patch_grid(combined self.patch_size)<block_end><if_stmt>patches<is><none><block_start>patches=self.target_patches<block_end>patches=self.lookup_coords(patches self.coords)<line_sep>recon=combine_patches_grid(patches self.input_shape)<line_sep><return>recon<block_end><def_stmt>scale self new_shape new_target_img<block_start>'''Create a new matcher of the given shape and replace its state with a scaled up version of the current matcher's state. 
'''<line_sep>new_matcher=PatchMatcher(new_shape new_target_img patch_size=self.patch_size patch_stride=self.patch_stride jump_size=self.jump_size num_propagation_steps=self.num_propagation_steps num_random_steps=self.num_random_steps random_max_radius=self.random_max_radius random_scale=self.random_scale)<line_sep>new_matcher.coords=congrid(self.coords new_matcher.coords.shape method='neighbour')<line_sep>new_matcher.similarity=congrid(self.similarity new_matcher.coords.shape method='neighbour')<line_sep><return>new_matcher<block_end><block_end><def_stmt>congrid a newdims method='linear' centre=<false> minusone=<false><block_start>'''Arbitrary resampling of source array to new dimension sizes. Currently only supports maintaining the same number of dimensions. To use 1-D arrays, first promote them to shape (x,1). Uses the same parameters and creates the same co-ordinate lookup points as IDL''s congrid routine, which apparently originally came from a VAX/VMS routine of the same name. method: neighbour - closest value from original data nearest and linear - uses n x 1-D interpolations using scipy.interpolate.interp1d (see Numerical Recipes for validity of use of n 1-D interpolations) spline - uses ndimage.map_coordinates centre: True - interpolation points are at the centres of the bins False - points are at the front edge of the bin minusone: For example- inarray.shape = (i,j) & new dimensions = (x,y) False - inarray is resampled by factors of (i/x) * (j/y) True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1) This prevents extrapolation one element beyond bounds of input array. '''<if_stmt><not>a.dtype<in>[np.float64 np.float32]<block_start>a=np.cast[float](a)<block_end>m1=np.cast[int](minusone)<line_sep>ofs=np.cast[int](centre)<times>0.5<line_sep>old=np.array(a.shape)<line_sep>ndims=len(a.shape)<if_stmt>len(newdims)<ne>ndims<block_start>print("[congrid] dimensions error. 
"<concat>"This routine currently only support "<concat>"rebinning to the same number of dimensions.")<line_sep><return><none><block_end>newdims=np.asarray(newdims dtype=float)<line_sep>dimlist=[]<if_stmt>method<eq>'neighbour'<block_start><for_stmt>i range(ndims)<block_start>base=np.indices(newdims)[i]<line_sep>dimlist.append((old[i]-m1)/(newdims[i]-m1)<times>(base+ofs)-ofs)<block_end>cd=np.array(dimlist).round().astype(int)<line_sep>newa=a[list(cd)]<line_sep><return>newa<block_end><elif_stmt>method<in>['nearest' 'linear']# calculate new dims <block_start><for_stmt>i range(ndims)<block_start>base=np.arange(newdims[i])<line_sep>dimlist.append((old[i]-m1)/(newdims[i]-m1)<times>(base+ofs)-ofs)<block_end># specify old dims olddims=[np.arange(i dtype=np.float)<for>i list(a.shape)]<line_sep># first interpolation - for ndims = any mint=scipy.interpolate.interp1d(olddims[-1] a kind=method)<line_sep>newa=mint(dimlist[-1])<line_sep>trorder=[ndims-1]+range(ndims-1)<for_stmt>i range(ndims-2 -1 -1)<block_start>newa=newa.transpose(trorder)<line_sep>mint=scipy.interpolate.interp1d(olddims[i] newa kind=method)<line_sep>newa=mint(dimlist[i])<block_end><if_stmt>ndims<g>1# need one more transpose to return to original dimensions <block_start>newa=newa.transpose(trorder)<block_end><return>newa<block_end><elif_stmt>method<in>['spline']<block_start>oslices=[slice(0 j)<for>j old]<line_sep>oldcoords=np.ogrid[oslices]<line_sep>nslices=[slice(0 j)<for>j list(newdims)]<line_sep>newcoords=np.mgrid[nslices]<line_sep>newcoords_dims=range(np.rank(newcoords))<line_sep>#make first index last newcoords_dims.append(newcoords_dims.pop(0))<line_sep>newcoords_tr=newcoords.transpose(newcoords_dims)<line_sep># makes a view that affects newcoords newcoords_tr<augadd>ofs<line_sep>deltas=(np.asarray(old)-m1)/(newdims-m1)<line_sep>newcoords_tr<augmul>deltas<line_sep>newcoords_tr<augsub>ofs<line_sep>newa=scipy.ndimage.map_coordinates(a newcoords)<line_sep><return>newa<block_end><else_stmt><block_start>print("Congrid error: Unrecognized interpolation type.\n" "Currently only \'neighbour\', \'nearest\',\'linear\'," "and \'spline\' are supported.")<line_sep><return><none><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<import_stmt>time<import_from_stmt>scipy.misc imsave<import_from_stmt>image_analogy.img_utils load_image preprocess_image deprocess_image<line_sep>content_image_path,style_image_path,output_prefix=sys.argv[1:]<line_sep>jump_size=1.0<line_sep>num_steps=7<line_sep>patch_size=1<line_sep>patch_stride=1<line_sep>feat_chans=512<line_sep>feat_style_shape=(feat_chans 12 18)<line_sep>feat_style=np.random.uniform(0.0 1.0 feat_style_shape)<line_sep>feat_in_shape=(feat_chans 17 10)<line_sep>feat_in=np.random.uniform(0.0 1.0 feat_in_shape)<line_sep>matcher=PatchMatcher(feat_in_shape[::-1] feat_style patch_size=patch_size)<line_sep>feat_in_normed=matcher.normalize_patches(matcher.get_patches_for(feat_in))<for_stmt>i range(num_steps)<block_start>matcher.update_with_patches(feat_in_normed)<block_end>r=matcher.get_reconstruction()<line_sep>content_img_img=load_image(content_image_path)<line_sep>content_n_channels,content_n_rows,content_n_cols=content_img_img.shape[::-1]<line_sep>content_img=preprocess_image(content_img_img content_n_cols content_n_rows)[0]#.transpose((2,1,0)) style_img=load_image(style_image_path)<line_sep>style_n_channels,style_n_rows,style_n_cols=content_img_img.shape[::-1]<line_sep>style_img=preprocess_image(load_image(style_image_path) style_n_cols 
style_n_rows)[0]<line_sep>#.transpose((2,1,0)) pg=make_patch_grid(content_img patch_size)<line_sep>result=combine_patches_grid(pg content_img.shape[::-1])<line_sep>outimg=deprocess_image(result contrast_percent=0)<line_sep>imsave(output_prefix+'_bestre.png' outimg)<line_sep># # # matcher=PatchMatcher((content_n_cols content_n_rows content_n_channels) style_img patch_size=patch_size)<for_stmt>i range(num_steps)<block_start>start=time.time()<line_sep>matcher.update(content_img reverse_propagation=bool(i%2))<line_sep>print(matcher.similarity.min() matcher.similarity.max() matcher.similarity.mean())<line_sep>end=time.time()<line_sep>#print end-start <block_end>start=time.time()<line_sep>result=matcher.get_reconstruction(patches=matcher.target_patches)<line_sep>print(result.shape)<line_sep>end=time.time()<line_sep>print(end-start)<line_sep>outimg=deprocess_image(result contrast_percent=0)<line_sep># # imsave takes (rows, cols, channels) imsave(output_prefix+'_best.png' outimg)<block_end>
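A condensed version of the self-test in __main__ above, showing the intended driving loop on small random feature maps:

import numpy as np

feat_style = np.random.uniform(0.0, 1.0, (64, 12, 18))
feat_in = np.random.uniform(0.0, 1.0, (64, 17, 10))

matcher = PatchMatcher(feat_in.shape[::-1], feat_style, patch_size=1)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for _ in range(5):
    matcher.update_with_patches(feat_in_normed)
reconstruction = matcher.get_reconstruction()   # same shape as feat_in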
<import_from_stmt>.rohon_gateway RohonGateway<line_sep>
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_from_future_stmt> absolute_import<import_from_stmt>mock Mock patch<import_from_stmt>packaging version<import_stmt>pytest<import_from_stmt>sagemaker.tensorflow TensorFlow<line_sep>REGION="us-west-2"<line_sep>ENV_INPUT={"env_key1":"env_val1" "env_key2":"env_val2" "env_key3":"env_val3"}<line_sep>@pytest.fixture()<def_stmt>sagemaker_session <block_start><return>Mock(name="sagemaker_session" boto_region_name=REGION)<block_end><def_stmt>_build_tf sagemaker_session **kwargs<block_start><return>TensorFlow(sagemaker_session=sagemaker_session entry_point="dummy.py" role="dummy-role" instance_count=1 instance_type="ml.c4.xlarge" **kwargs )<block_end>@patch("sagemaker.fw_utils.python_deprecation_warning")<def_stmt>test_estimator_py2_deprecation_warning warning sagemaker_session<block_start>estimator=_build_tf(sagemaker_session framework_version="2.1.1" py_version="py2")<assert_stmt>estimator.py_version<eq>"py2"<line_sep>warning.assert_called_with("tensorflow" "2.1.1")<block_end><def_stmt>test_py2_version_deprecated sagemaker_session<block_start><with_stmt>pytest.raises(AttributeError)<as>e<block_start>_build_tf(sagemaker_session framework_version="2.1.2" py_version="py2")<block_end>msg=("Python 2 containers are only available with 2.1.1 and lower versions. 
"<concat>"Please use a Python 3 container.")<assert_stmt>msg<in>str(e.value)<block_end><def_stmt>test_py2_version_is_not_deprecated sagemaker_session<block_start>estimator=_build_tf(sagemaker_session framework_version="1.15.0" py_version="py2")<assert_stmt>estimator.py_version<eq>"py2"<line_sep>estimator=_build_tf(sagemaker_session framework_version="2.0.0" py_version="py2")<assert_stmt>estimator.py_version<eq>"py2"<block_end><def_stmt>test_framework_name sagemaker_session<block_start>tf=_build_tf(sagemaker_session framework_version="1.15.2" py_version="py3")<assert_stmt>tf._framework_name<eq>"tensorflow"<block_end><def_stmt>test_tf_add_environment_variables sagemaker_session<block_start>tf=_build_tf(sagemaker_session framework_version="1.15.2" py_version="py3" environment=ENV_INPUT )<assert_stmt>tf.environment<eq>ENV_INPUT<block_end><def_stmt>test_tf_miss_environment_variables sagemaker_session<block_start>tf=_build_tf(sagemaker_session framework_version="1.15.2" py_version="py3" environment=<none> )<assert_stmt><not>tf.environment<block_end><def_stmt>test_enable_sm_metrics sagemaker_session<block_start>tf=_build_tf(sagemaker_session framework_version="1.15.2" py_version="py3" enable_sagemaker_metrics=<true> )<assert_stmt>tf.enable_sagemaker_metrics<block_end><def_stmt>test_disable_sm_metrics sagemaker_session<block_start>tf=_build_tf(sagemaker_session framework_version="1.15.2" py_version="py3" enable_sagemaker_metrics=<false> )<assert_stmt><not>tf.enable_sagemaker_metrics<block_end><def_stmt>test_disable_sm_metrics_if_fw_ver_is_less_than_1_15 sagemaker_session tensorflow_training_version tensorflow_training_py_version<block_start><if_stmt>version.Version(tensorflow_training_version)<g>version.Version("1.14")<block_start>pytest.skip("This test is for TF 1.14 and lower.")<block_end>tf=_build_tf(sagemaker_session framework_version=tensorflow_training_version py_version=tensorflow_training_py_version image_uri="old-image" )<assert_stmt>tf.enable_sagemaker_metrics<is><none><block_end><def_stmt>test_enable_sm_metrics_if_fw_ver_is_at_least_1_15 sagemaker_session tensorflow_training_version tensorflow_training_py_version<block_start><if_stmt>version.Version(tensorflow_training_version)<l>version.Version("1.15")<block_start>pytest.skip("This test is for TF 1.15 and higher.")<block_end>tf=_build_tf(sagemaker_session framework_version=tensorflow_training_version py_version=tensorflow_training_py_version )<assert_stmt>tf.enable_sagemaker_metrics<block_end><def_stmt>test_require_image_uri_if_fw_ver_is_less_than_1_11 sagemaker_session tensorflow_training_version tensorflow_training_py_version<block_start><if_stmt>version.Version(tensorflow_training_version)<g>version.Version("1.10")<block_start>pytest.skip("This test is for TF 1.10 and lower.")<block_end><with_stmt>pytest.raises(ValueError)<as>e<block_start>_build_tf(sagemaker_session framework_version=tensorflow_training_version py_version=tensorflow_training_py_version )<block_end>expected_msg=("TF {version} supports only legacy mode. Please supply the image URI directly with "<concat>"'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"<concat>"sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "<concat>"legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "<concat>"make sure to pass them directly as hyperparameters instead.").format(version=tensorflow_training_version region=REGION)<assert_stmt>expected_msg<in>str(e.value)<block_end>