content (string, lengths 0 to 1.55M)
import struct

import numpy as np
import pandas as pd

df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')

with open('result.dat', 'rb') as f:
    N, = struct.unpack('i', f.read(4))
    no_dims, = struct.unpack('i', f.read(4))
    print(N, no_dims)

    mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
    mappedX = np.array(mappedX).reshape((N, no_dims))
    print(mappedX)

    tsne_train = mappedX[:len(df_train)]
    tsne_valid = mappedX[len(df_train):len(df_train) + len(df_valid)]
    tsne_test = mappedX[len(df_train) + len(df_valid):]

    assert len(tsne_train) == len(df_train)
    assert len(tsne_valid) == len(df_valid)
    assert len(tsne_test) == len(df_test)

    save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
    np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
    print('Saved: {}'.format(save_path))

    # landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
    # costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
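A hedged sketch of how a downstream script might read the archive written above; the key names mirror the np.savez call, and the concrete file name assumes the run used no_dims == 2.

import numpy as np

# Hypothetical downstream loader for the .npz produced by the script above.
data = np.load('../data/tsne_2d_30p.npz')
tsne_train, tsne_valid, tsne_test = data['train'], data['valid'], data['test']
print(tsne_train.shape, tsne_valid.shape, tsne_test.shape)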
from flask import Blueprint

from apps.auth.business.wxlogin import WxLoginBusiness
from apps.auth.extentions import validation, parse_json_form
from library.api.render import json_detail_render

wxlogin = Blueprint("wxlogin", __name__)


@wxlogin.route('/', methods=['POST'])
@validation('POST:wx_user_code')
def wxuser_index_handler():
    """
    @api {post} /v1/wxlogin/ WeChat login
    @apiName WxLogin
    @apiGroup User
    @apiDescription Log in via WeChat
    @apiParam {string} user_code user code
    @apiParamExample {json} Request-Example:
    {
        "user_code": "j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g"
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": {
            "token": "<PASSWORD>"
        },
        "message": ""
    }
    """
    user_code = parse_json_form('wx_user_code')
    ret, data, msg = WxLoginBusiness.get_user(user_code[0])
    return json_detail_render(ret, data, msg)
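A usage sketch of calling this endpoint. The route and payload field come from the apidoc block above; the host/port and the use of the requests library are assumptions, not part of the original service.

import requests  # third-party HTTP client, not part of the app itself

# Hypothetical host; the /v1/wxlogin/ route and the user_code field follow the apidoc block.
resp = requests.post(
    'http://localhost:5000/v1/wxlogin/',
    json={'user_code': 'j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g'},
)
print(resp.json())  # expected shape: {"code": 0, "data": {"token": "..."}, "message": ""}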
""" Test PEP 0448 -- Additional Unpacking Generalizations https://www.python.org/dev/peps/pep-0448/ """<line_sep># pylint: disable=superfluous-parens, unnecessary-comprehension UNPACK_TUPLE=(*range(4) 4)<line_sep>UNPACK_LIST=[*range(4) 4]<line_sep>UNPACK_SET={*range(4) 4}<line_sep>UNPACK_DICT={'a':1 **{'b':'2'}}<line_sep>UNPACK_DICT2={**UNPACK_DICT "x":1 "y":2}<line_sep>UNPACK_DICT3={**{'a':1} 'a':2 **{'a':3}}<line_sep>UNPACK_IN_COMP={elem<for>elem (*range(10))}# [star-needs-assignment-target]
''' This file implements various optimization methods, including -- SGD with gradient norm clipping -- AdaGrad -- AdaDelta -- Adam Transparent to switch between CPU / GPU. @author: <NAME> (<EMAIL>) '''<import_stmt>random<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_stmt>theano<import_stmt>theano.tensor<as>T<import_from_stmt>theano.sandbox.cuda.basic_ops HostFromGpu<import_from_stmt>theano.sandbox.cuda.var CudaNdarraySharedVariable<import_from_stmt>theano.printing debugprint<import_from_stmt>.initialization default_mrng<def_stmt>create_optimization_updates cost params method="sgd" max_norm=5 updates=<none> gradients=<none> lr=0.01 eps=<none> rho=0.99 gamma=0.999 beta1=0.9 beta2=0.999 momentum=0.0<block_start>_momentum=momentum<line_sep>lr=theano.shared(np.float64(lr).astype(theano.config.floatX))<line_sep>rho=theano.shared(np.float64(rho).astype(theano.config.floatX))<line_sep>beta1=theano.shared(np.float64(beta1).astype(theano.config.floatX))<line_sep>beta2=theano.shared(np.float64(beta2).astype(theano.config.floatX))<line_sep>momentum=theano.shared(np.float64(momentum).astype(theano.config.floatX))<line_sep>gamma=theano.shared(np.float64(gamma).astype(theano.config.floatX))<if_stmt>eps<is><none><block_start>eps=1e-8<if>method.lower()<ne>"esgd"<else>1e-4<block_end>eps=np.float64(eps).astype(theano.config.floatX)<line_sep>gparams=T.grad(cost params)<if>gradients<is><none><else>gradients<line_sep>g_norm=0<for_stmt>g gparams<block_start>g_norm=g_norm+g.norm(2)<power>2<block_end>g_norm=T.sqrt(g_norm)<line_sep># max_norm is useful for sgd <if_stmt>method<ne>"sgd"<block_start>max_norm=<none><block_end><if_stmt>max_norm<is><not><none><and>max_norm<is><not><false><block_start>max_norm=theano.shared(np.float64(max_norm).astype(theano.config.floatX))<line_sep>shrink_factor=T.minimum(max_norm g_norm+eps)/(g_norm+eps)<line_sep>gparams_clipped=[]<for_stmt>g gparams<block_start>g=shrink_factor<times>g<line_sep>gparams_clipped.append(g)<block_end>gparams=gparams_clipped<block_end><if_stmt>updates<is><none><block_start>updates=OrderedDict()<block_end>gsums=create_accumulators(params)<if>method<ne>"sgd"<or>_momentum<g>0.0<else>[<none><for>p params]<line_sep>xsums=create_accumulators(params)<if>method<ne>"sgd"<and>method<ne>"adagrad"<else><none><if_stmt>method<eq>"sgd"<block_start>create_sgd_updates(updates params gparams gsums lr momentum)<block_end><elif_stmt>method<eq>"adagrad"<block_start>create_adagrad_updates(updates params gparams gsums lr eps)<block_end><elif_stmt>method<eq>"adadelta"<block_start>create_adadelta_updates(updates params gparams gsums xsums lr eps rho)<block_end><elif_stmt>method<eq>"adam"<block_start>create_adam_updates(updates params gparams gsums xsums lr eps beta1 beta2)<block_end><elif_stmt>method<eq>"esgd"<block_start>create_esgd_updates(updates params gparams gsums xsums lr eps gamma momentum)<block_end><else_stmt><block_start><raise>Exception("Unknown optim method: {}\n".format(method))<block_end><if_stmt>method<eq>"adadelta"<block_start>lr=rho<block_end><return>updates lr g_norm gsums xsums max_norm<block_end><def_stmt>is_subtensor_op p<block_start><if_stmt>hasattr(p 'owner')<and>hasattr(p.owner 'op')<block_start><return>isinstance(p.owner.op T.AdvancedSubtensor1)<or>isinstance(p.owner.op T.Subtensor)<block_end><return><false><block_end><def_stmt>get_subtensor_op_inputs p<block_start>origin,indexes=p.owner.inputs<if_stmt>hasattr(origin 'owner')<and>hasattr(origin.owner 'op')<and>isinstance(origin.owner.op 
HostFromGpu)<block_start>origin=origin.owner.inputs[0]<assert_stmt>isinstance(origin CudaNdarraySharedVariable)<block_end><return>origin indexes<block_end><def_stmt>get_similar_subtensor matrix indexes param_op<block_start>''' So far there is only two possible subtensor operation used. '''<if_stmt>isinstance(param_op.owner.op T.AdvancedSubtensor1)<block_start><return>matrix[indexes]<block_end><else_stmt># indexes is start index in this case <block_start><return>matrix[indexes:]<block_end><block_end><def_stmt>create_accumulators params<block_start>accums=[]<for_stmt>p params<block_start><if_stmt>is_subtensor_op(p)<block_start>origin,_=get_subtensor_op_inputs(p)<line_sep>acc=theano.shared(np.zeros_like(origin.get_value(borrow=<true>) dtype=theano.config.floatX))<block_end><else_stmt><block_start>acc=theano.shared(np.zeros_like(p.get_value(borrow=<true>) dtype=theano.config.floatX))<block_end>accums.append(acc)<block_end><return>accums<block_end><def_stmt>create_sgd_updates updates params gparams gsums lr momentum<block_start>has_momentum=momentum.get_value()<g>0.0<for_stmt>p,g,acc zip(params gparams gsums)<block_start><if_stmt>is_subtensor_op(p)<block_start>origin,indexes=get_subtensor_op_inputs(p)<if_stmt>has_momentum<block_start>acc_slices=get_similar_subtensor(acc indexes p)<line_sep>new_acc=acc_slices<times>momentum+g<line_sep>updates[acc]=T.set_subtensor(acc_slices new_acc)<block_end><else_stmt><block_start>new_acc=g<block_end>updates[origin]=T.inc_subtensor(p -lr<times>new_acc)<block_end><else_stmt><block_start><if_stmt>has_momentum<block_start>new_acc=acc<times>momentum+g<line_sep>updates[acc]=new_acc<block_end><else_stmt><block_start>new_acc=g<block_end>updates[p]=p-lr<times>new_acc<block_end><block_end><block_end><def_stmt>create_adagrad_updates updates params gparams gsums lr eps<block_start><for_stmt>p,g,acc zip(params gparams gsums)<block_start><if_stmt>is_subtensor_op(p)<block_start>origin,indexes=get_subtensor_op_inputs(p)<line_sep>#acc_slices = acc[indexes] acc_slices=get_similar_subtensor(acc indexes p)<line_sep>new_acc=acc_slices+g<power>2<line_sep>updates[acc]=T.set_subtensor(acc_slices new_acc)<line_sep>updates[origin]=T.inc_subtensor(p -lr<times>(g/T.sqrt(new_acc+eps)))<block_end><else_stmt><block_start>new_acc=acc+g<power>2<line_sep>updates[acc]=new_acc<line_sep>updates[p]=p-lr<times>(g/T.sqrt(new_acc+eps))<line_sep>#updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps)) # which one to use? 
<block_end><block_end><block_end><def_stmt>create_adadelta_updates updates params gparams gsums xsums lr eps rho<block_start><for_stmt>p,g,gacc,xacc zip(params gparams gsums xsums)<block_start><if_stmt>is_subtensor_op(p)<block_start>origin,indexes=get_subtensor_op_inputs(p)<line_sep>gacc_slices=gacc[indexes]<line_sep>xacc_slices=xacc[indexes]<line_sep>new_gacc=rho<times>gacc_slices+(1.0-rho)<times>g<power>2<line_sep>d=-T.sqrt((xacc_slices+eps)/(new_gacc+eps))<times>g<line_sep>new_xacc=rho<times>xacc_slices+(1.0-rho)<times>d<power>2<line_sep>updates[gacc]=T.set_subtensor(gacc_slices new_gacc)<line_sep>updates[xacc]=T.set_subtensor(xacc_slices new_xacc)<line_sep>updates[origin]=T.inc_subtensor(p d)<block_end><else_stmt><block_start>new_gacc=rho<times>gacc+(1.0-rho)<times>g<power>2<line_sep>d=-T.sqrt((xacc+eps)/(new_gacc+eps))<times>g<line_sep>new_xacc=rho<times>xacc+(1.0-rho)<times>d<power>2<line_sep>updates[gacc]=new_gacc<line_sep>updates[xacc]=new_xacc<line_sep>updates[p]=p+d<block_end><block_end><block_end><def_stmt>create_adam_updates updates params gparams gsums xsums lr eps beta1 beta2<block_start>i=theano.shared(np.float64(0.0).astype(theano.config.floatX))<line_sep>i_t=i+1.0<line_sep>omb1_t=1.0-beta1<power>i_t<line_sep>omb2_t=1.0-beta2<power>i_t<line_sep>lr_t=lr<times>(T.sqrt(omb2_t)/omb1_t)<for_stmt>p,g,m,v zip(params gparams gsums xsums)<block_start><if_stmt>is_subtensor_op(p)<block_start>origin,indexes=get_subtensor_op_inputs(p)<line_sep>m_sub=m[indexes]<line_sep>v_sub=v[indexes]<line_sep>m_t=beta1<times>m_sub+(1.0-beta1)<times>g<line_sep>v_t=beta2<times>v_sub+(1.0-beta2)<times>T.sqr(g)<line_sep>g_t=m_t/(T.sqrt(v_t)+eps)<line_sep>updates[m]=T.set_subtensor(m_sub m_t)<line_sep>updates[v]=T.set_subtensor(v_sub v_t)<line_sep>updates[origin]=T.inc_subtensor(p -lr_t<times>g_t)<block_end><else_stmt><block_start>m_t=beta1<times>m+(1.0-beta1)<times>g<line_sep>v_t=beta2<times>v+(1.0-beta2)<times>T.sqr(g)<line_sep>g_t=m_t/(T.sqrt(v_t)+eps)<line_sep>updates[m]=m_t<line_sep>updates[v]=v_t<line_sep>updates[p]=p-lr_t<times>g_t<block_end><block_end>updates[i]=i_t<block_end><def_stmt>create_esgd_updates updates params gparams gsums xsums lr eps gamma momentum<block_start>has_momentum=momentum.get_value()<g>0.0<line_sep>samples=[default_mrng.normal(size=p.shape avg=0 std=1 dtype=theano.config.floatX)<for>p params]<line_sep>HVs=T.Lop(gparams params samples)<line_sep>i=theano.shared(np.float64(0.0).astype(theano.config.floatX))<line_sep>i_t=i+1.0<line_sep>omg_t=1.0-gamma<power>i_t<for_stmt>p,g,m,D,Hv zip(params gparams gsums xsums HVs)<block_start><if_stmt>is_subtensor_op(p)<block_start><raise>Exception("ESGD subtensor update not implemented!")<block_end><else_stmt><block_start>D_t=D<times>gamma+T.sqr(Hv)<times>(1.0-gamma)<if_stmt>has_momentum<block_start>m_t=m<times>momentum+g<line_sep>updates[m]=m_t<block_end><else_stmt><block_start>m_t=g<block_end>g_t=m_t/(T.sqrt(D_t/omg_t+eps))<line_sep>#g_t = m_t / ( T.sqrt(D_t + eps) ) updates[D]=D_t<line_sep>updates[p]=p-lr<times>g_t<block_end><block_end>updates[i]=i_t<block_end>
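The flattened Theano module above is hard to follow in this form. For orientation, a minimal NumPy sketch of the bias-corrected Adam step that create_adam_updates builds symbolically (standard Adam with the correction folded into the learning rate); the toy quadratic objective is an illustration, not part of the original module.

import numpy as np

def adam_step(param, grad, m, v, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    """One bias-corrected Adam update (plain NumPy sketch)."""
    t = t + 1
    m = beta1 * m + (1.0 - beta1) * grad          # first-moment estimate
    v = beta2 * v + (1.0 - beta2) * grad ** 2     # second-moment estimate
    lr_t = lr * np.sqrt(1.0 - beta2 ** t) / (1.0 - beta1 ** t)  # bias correction in the step size
    param = param - lr_t * m / (np.sqrt(v) + eps)
    return param, m, v, t

# Toy usage: minimize f(x) = x^2 starting from x = 5.
x, m, v, t = np.array(5.0), 0.0, 0.0, 0
for _ in range(200):
    g = 2.0 * x
    x, m, v, t = adam_step(x, g, m, v, t, lr=0.1)
print(float(x))  # close to 0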
from OpenGL.arrays import vbo
from OpenGL.GLES2.VERSION import GLES2_2_0
from OpenGL.GLES2.OES import mapbuffer


class Implementation(vbo.Implementation):
    """OpenGL-based implementation of VBO interfaces"""

    def __init__(self):
        for name in self.EXPORTED_NAMES:
            found = False  # initialise so a missing name fails the assert instead of raising NameError
            for source in [GLES2_2_0, mapbuffer]:
                for possible in (name, name + 'OES'):
                    try:
                        setattr(self, name, getattr(source, possible))
                    except AttributeError:
                        pass
                    else:
                        found = True
            assert found, name
        if GLES2_2_0.glBufferData:
            self.available = True


Implementation.register()
<import_from_future_stmt> print_function<import_stmt>os sys<import_stmt>pickle<import_stmt>time<import_stmt>glob<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>model PVSE<import_from_stmt>loss cosine_sim order_sim<import_from_stmt>vocab Vocabulary<import_from_stmt>data get_test_loader<import_from_stmt>logger AverageMeter<import_from_stmt>option parser verify_input_args<line_sep>ORDER_BATCH_SIZE=100<def_stmt>encode_data model data_loader use_gpu=<false><block_start>"""Encode all images and sentences loadable by data_loader"""<line_sep># switch to evaluate mode model.eval()<line_sep>use_mil=model.module.mil<if>hasattr(model 'module')<else>model.mil<line_sep># numpy array to keep all the embeddings img_embs,txt_embs=<none> <none><for_stmt>i,data enumerate(data_loader)<block_start>img,txt,txt_len,ids=data<if_stmt>torch.cuda.is_available()<block_start>img,txt,txt_len=img.cuda() txt.cuda() txt_len.cuda()<block_end># compute the embeddings img_emb,txt_emb,_,_,_,_=model.forward(img txt txt_len)<del_stmt>img txt txt_len<line_sep># initialize the output embeddings <if_stmt>img_embs<is><none><block_start><if_stmt>use_gpu<block_start>emb_sz=[len(data_loader.dataset) img_emb.size(1) img_emb.size(2)]<if>use_mil<else>[len(data_loader.dataset) img_emb.size(1)]<line_sep>img_embs=torch.zeros(emb_sz dtype=img_emb.dtype requires_grad=<false>).cuda()<line_sep>txt_embs=torch.zeros(emb_sz dtype=txt_emb.dtype requires_grad=<false>).cuda()<block_end><else_stmt><block_start>emb_sz=(len(data_loader.dataset) img_emb.size(1) img_emb.size(2))<if>use_mil<else>(len(data_loader.dataset) img_emb.size(1))<line_sep>img_embs=np.zeros(emb_sz)<line_sep>txt_embs=np.zeros(emb_sz)<block_end><block_end># preserve the embeddings by copying from gpu and converting to numpy img_embs[ids]=img_emb<if>use_gpu<else>img_emb.data.cpu().numpy().copy()<line_sep>txt_embs[ids]=txt_emb<if>use_gpu<else>txt_emb.data.cpu().numpy().copy()<block_end><return>img_embs txt_embs<block_end><def_stmt>i2t images sentences nreps=1 npts=<none> return_ranks=<false> order=<false> use_gpu=<false><block_start>""" Images->Text (Image Annotation) Images: (nreps*N, K) matrix of images Captions: (nreps*N, K) matrix of sentences """<if_stmt>use_gpu<block_start><assert_stmt><not>order 'Order embedding not supported in GPU mode'<block_end><if_stmt>npts<is><none><block_start>npts=int(images.shape[0]/nreps)<block_end>index_list=[]<line_sep>ranks,top1=np.zeros(npts) np.zeros(npts)<for_stmt>index range(npts)# Get query image <block_start>im=images[nreps<times>index]<line_sep>im=im.reshape((1 )+im.shape)<line_sep># Compute scores <if_stmt>use_gpu<block_start><if_stmt>len(sentences.shape)<eq>2<block_start>sim=im.mm(sentences.t()).view(-1)<block_end><else_stmt><block_start>_,K,D=im.shape<line_sep>sim_kk=im.view(-1 D).mm(sentences.view(-1 D).t())<line_sep>sim_kk=sim_kk.view(im.size(0) K sentences.size(0) K)<line_sep>sim_kk=sim_kk.permute(0 1 3 2).contiguous()<line_sep>sim_kk=sim_kk.view(im.size(0) -1 sentences.size(0))<line_sep>sim,_=sim_kk.max(dim=1)<line_sep>sim=sim.flatten()<block_end><block_end><else_stmt><block_start><if_stmt>order<block_start><if_stmt>index%ORDER_BATCH_SIZE<eq>0<block_start>mx=min(images.shape[0] nreps<times>(index+ORDER_BATCH_SIZE))<line_sep>im2=images[nreps<times>index:mx:nreps]<line_sep>sim_batch=order_sim(torch.Tensor(im2).cuda() torch.Tensor(sentences).cuda())<line_sep>sim_batch=sim_batch.cpu().numpy()<block_end>sim=sim_batch[index%ORDER_BATCH_SIZE]<block_end><else_stmt><block_start>sim=np.tensordot(im sentences axes=[2 2]).max(axis=(0 1 
3)).flatten()<if>len(sentences.shape)<eq>3<else>np.dot(im sentences.T).flatten()<block_end><block_end><if_stmt>use_gpu<block_start>_,inds_gpu=sim.sort()<line_sep>inds=inds_gpu.cpu().numpy().copy()[::-1]<block_end><else_stmt><block_start>inds=np.argsort(sim)[::-1]<block_end>index_list.append(inds[0])<line_sep># Score rank=1e20<for_stmt>i range(nreps<times>index nreps<times>(index+1) 1)<block_start>tmp=np.where(inds<eq>i)[0][0]<if_stmt>tmp<l>rank<block_start>rank=tmp<block_end><block_end>ranks[index]=rank<line_sep>top1[index]=inds[0]<block_end># Compute metrics r1=100.0<times>len(np.where(ranks<l>1)[0])/len(ranks)<line_sep>r5=100.0<times>len(np.where(ranks<l>5)[0])/len(ranks)<line_sep>r10=100.0<times>len(np.where(ranks<l>10)[0])/len(ranks)<line_sep>medr=np.floor(np.median(ranks))+1<line_sep>meanr=ranks.mean()+1<if_stmt>return_ranks<block_start><return>(r1 r5 r10 medr meanr) (ranks top1)<block_end><else_stmt><block_start><return>(r1 r5 r10 medr meanr)<block_end><block_end><def_stmt>t2i images sentences nreps=1 npts=<none> return_ranks=<false> order=<false> use_gpu=<false><block_start>""" Text->Images (Image Search) Images: (nreps*N, K) matrix of images Captions: (nreps*N, K) matrix of sentences """<if_stmt>use_gpu<block_start><assert_stmt><not>order 'Order embedding not supported in GPU mode'<block_end><if_stmt>npts<is><none><block_start>npts=int(images.shape[0]/nreps)<block_end><if_stmt>use_gpu<block_start>ims=torch.stack([images[i]<for>i range(0 len(images) nreps)])<block_end><else_stmt><block_start>ims=np.array([images[i]<for>i range(0 len(images) nreps)])<block_end>ranks,top1=np.zeros(nreps<times>npts) np.zeros(nreps<times>npts)<for_stmt>index range(npts)# Get query sentences <block_start>queries=sentences[nreps<times>index:nreps<times>(index+1)]<line_sep># Compute scores <if_stmt>use_gpu<block_start><if_stmt>len(sentences.shape)<eq>2<block_start>sim=queries.mm(ims.t())<block_end><else_stmt><block_start>sim_kk=queries.view(-1 queries.size(-1)).mm(ims.view(-1 ims.size(-1)).t())<line_sep>sim_kk=sim_kk.view(queries.size(0) queries.size(1) ims.size(0) ims.size(1))<line_sep>sim_kk=sim_kk.permute(0 1 3 2).contiguous()<line_sep>sim_kk=sim_kk.view(queries.size(0) -1 ims.size(0))<line_sep>sim,_=sim_kk.max(dim=1)<block_end><block_end><else_stmt><block_start><if_stmt>order<block_start><if_stmt>nreps<times>index%ORDER_BATCH_SIZE<eq>0<block_start>mx=min(sentences.shape[0] nreps<times>index+ORDER_BATCH_SIZE)<line_sep>sentences_batch=sentences[nreps<times>index:mx]<line_sep>sim_batch=order_sim(torch.Tensor(images).cuda() torch.Tensor(sentences_batch).cuda())<line_sep>sim_batch=sim_batch.cpu().numpy()<block_end>sim=sim_batch[: (nreps<times>index)%ORDER_BATCH_SIZE:(nreps<times>index)%ORDER_BATCH_SIZE+nreps].T<block_end><else_stmt><block_start>sim=np.tensordot(queries ims axes=[2 2]).max(axis=(1 3))<if>len(sentences.shape)<eq>3<else>np.dot(queries ims.T)<block_end><block_end>inds=np.zeros(sim.shape)<for_stmt>i range(len(inds))<block_start><if_stmt>use_gpu<block_start>_,inds_gpu=sim[i].sort()<line_sep>inds[i]=inds_gpu.cpu().numpy().copy()[::-1]<block_end><else_stmt><block_start>inds[i]=np.argsort(sim[i])[::-1]<block_end>ranks[nreps<times>index+i]=np.where(inds[i]<eq>index)[0][0]<line_sep>top1[nreps<times>index+i]=inds[i][0]<block_end><block_end># Compute metrics 
r1=100.0<times>len(np.where(ranks<l>1)[0])/len(ranks)<line_sep>r5=100.0<times>len(np.where(ranks<l>5)[0])/len(ranks)<line_sep>r10=100.0<times>len(np.where(ranks<l>10)[0])/len(ranks)<line_sep>medr=np.floor(np.median(ranks))+1<line_sep>meanr=ranks.mean()+1<if_stmt>return_ranks<block_start><return>(r1 r5 r10 medr meanr) (ranks top1)<block_end><else_stmt><block_start><return>(r1 r5 r10 medr meanr)<block_end><block_end><def_stmt>convert_old_state_dict x model multi_gpu=<false><block_start>params=model.state_dict()<line_sep>prefix=['module.img_enc.' 'module.txt_enc.']<if>multi_gpu<else>['img_enc.' 'txt_enc.']<for_stmt>i,old_params enumerate(x)<block_start><for_stmt>key,val old_params.items()<block_start>key=prefix[i]+key.replace('module.' '').replace('our_model' 'pie_net')<assert_stmt>key<in>params '{} not found in model state_dict'.format(key)<line_sep>params[key]=val<block_end><block_end><return>params<block_end><def_stmt>evalrank model args split='test'<block_start>print('Loading dataset')<line_sep>data_loader=get_test_loader(args vocab)<line_sep>print('Computing results... (eval_on_gpu={})'.format(args.eval_on_gpu))<line_sep>img_embs,txt_embs=encode_data(model data_loader args.eval_on_gpu)<line_sep>n_samples=img_embs.shape[0]<line_sep>nreps=5<if>args.data_name<eq>'coco'<else>1<line_sep>print('Images: %d, Sentences: %d'%(img_embs.shape[0]/nreps txt_embs.shape[0]))<line_sep># 5fold cross-validation, only for MSCOCO mean_metrics=<none><if_stmt>args.data_name<eq>'coco'<block_start>results=[]<for_stmt>i range(5)<block_start>r,rt0=i2t(img_embs[i<times>5000:(i+1)<times>5000] txt_embs[i<times>5000:(i+1)<times>5000] nreps=nreps return_ranks=<true> order=args.order use_gpu=args.eval_on_gpu)<line_sep>r=(r[0] r[1] r[2] r[3] r[3]/n_samples r[4] r[4]/n_samples)<line_sep>print("Image to text: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)"%r)<line_sep>ri,rti0=t2i(img_embs[i<times>5000:(i+1)<times>5000] txt_embs[i<times>5000:(i+1)<times>5000] nreps=nreps return_ranks=<true> order=args.order use_gpu=args.eval_on_gpu)<if_stmt>i<eq>0<block_start>rt,rti=rt0 rti0<block_end>ri=(ri[0] ri[1] ri[2] ri[3] ri[3]/n_samples ri[4] ri[4]/n_samples)<line_sep>print("Text to image: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)"%ri)<line_sep>ar=(r[0]+r[1]+r[2])/3<line_sep>ari=(ri[0]+ri[1]+ri[2])/3<line_sep>rsum=r[0]+r[1]+r[2]+ri[0]+ri[1]+ri[2]<line_sep>print("rsum: %.2f ar: %.2f ari: %.2f"%(rsum ar ari))<line_sep>results<augadd>[list(r)+list(ri)+[ar ari rsum]]<block_end>mean_metrics=tuple(np.array(results).mean(axis=0).flatten())<line_sep>print("-----------------------------------")<line_sep>print("Mean metrics from 5-fold evaluation: ")<line_sep>print("rsum: %.2f"%(mean_metrics[-1]<times>6))<line_sep>print("Average i2t Recall: %.2f"%mean_metrics[-3])<line_sep>print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)"%mean_metrics[:7])<line_sep>print("Average t2i Recall: %.2f"%mean_metrics[-2])<line_sep>print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)"%mean_metrics[7:14])<block_end># no cross-validation, full evaluation r,rt=i2t(img_embs txt_embs nreps=nreps return_ranks=<true> use_gpu=args.eval_on_gpu)<line_sep>ri,rti=t2i(img_embs txt_embs nreps=nreps return_ranks=<true> use_gpu=args.eval_on_gpu)<line_sep>ar=(r[0]+r[1]+r[2])/3<line_sep>ari=(ri[0]+ri[1]+ri[2])/3<line_sep>rsum=r[0]+r[1]+r[2]+ri[0]+ri[1]+ri[2]<line_sep>r=(r[0] r[1] r[2] r[3] r[3]/n_samples r[4] r[4]/n_samples)<line_sep>ri=(ri[0] ri[1] ri[2] ri[3] ri[3]/n_samples ri[4] ri[4]/n_samples)<line_sep>print("rsum: %.2f"%rsum)<line_sep>print("Average i2t Recall: 
%.2f"%ar)<line_sep>print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)"%r)<line_sep>print("Average t2i Recall: %.2f"%ari)<line_sep>print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)"%ri)<line_sep><return>mean_metrics<block_end><if_stmt>__name__<eq>'__main__'<block_start>multi_gpu=torch.cuda.device_count()<g>1<line_sep>args=verify_input_args(parser.parse_args())<line_sep>opt=verify_input_args(parser.parse_args())<line_sep># load vocabulary used by the model <with_stmt>open('./vocab/%s_vocab.pkl'%args.data_name 'rb')<as>f<block_start>vocab=pickle.load(f)<block_end>args.vocab_size=len(vocab)<line_sep># load model and options <assert_stmt>os.path.isfile(args.ckpt)<line_sep>model=PVSE(vocab.word2idx args)<if_stmt>torch.cuda.is_available()<block_start>model=torch.nn.DataParallel(model).cuda()<if>multi_gpu<else>model<line_sep>torch.backends.cudnn.benchmark=<true><block_end>model.load_state_dict(torch.load(args.ckpt))<line_sep># evaluate metrics=evalrank(model args split='test')<block_end>
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>paddle<import_stmt>paddle.nn<as>nn<import_stmt>paddle.nn.functional<as>F<import_from_stmt>paddlex.ppdet.core.workspace register<import_stmt>pycocotools.mask<as>mask_util<import_from_stmt>..initializer linear_init_ constant_<import_from_stmt>..transformers.utils inverse_sigmoid<line_sep>__all__=['DETRHead' 'DeformableDETRHead']<class_stmt>MLP(nn.Layer)<block_start><def_stmt>__init__ self input_dim hidden_dim output_dim num_layers<block_start>super().__init__()<line_sep>self.num_layers=num_layers<line_sep>h=[hidden_dim]<times>(num_layers-1)<line_sep>self.layers=nn.LayerList(nn.Linear(n k)<for>n,k zip([input_dim]+h h+[output_dim]))<line_sep>self._reset_parameters()<block_end><def_stmt>_reset_parameters self<block_start><for_stmt>l self.layers<block_start>linear_init_(l)<block_end><block_end><def_stmt>forward self x<block_start><for_stmt>i,layer enumerate(self.layers)<block_start>x=F.relu(layer(x))<if>i<l>self.num_layers-1<else>layer(x)<block_end><return>x<block_end><block_end><class_stmt>MultiHeadAttentionMap(nn.Layer)<block_start>"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""<def_stmt>__init__ self query_dim hidden_dim num_heads dropout=0.0 bias=<true><block_start>super().__init__()<line_sep>self.num_heads=num_heads<line_sep>self.hidden_dim=hidden_dim<line_sep>self.dropout=nn.Dropout(dropout)<line_sep>weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.XavierUniform())<line_sep>bias_attr=paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Constant())<if>bias<else><false><line_sep>self.q_proj=nn.Linear(query_dim hidden_dim weight_attr bias_attr)<line_sep>self.k_proj=nn.Conv2D(query_dim hidden_dim 1 weight_attr=weight_attr bias_attr=bias_attr)<line_sep>self.normalize_fact=float(hidden_dim/self.num_heads)<power>-0.5<block_end><def_stmt>forward self q k mask=<none><block_start>q=self.q_proj(q)<line_sep>k=self.k_proj(k)<line_sep>bs,num_queries,n,c,h,w=q.shape[0] q.shape[1] self.num_heads self.hidden_dim<floordiv>self.num_heads k.shape[-2] k.shape[-1]<line_sep>qh=q.reshape([bs num_queries n c])<line_sep>kh=k.reshape([bs n c h w])<line_sep># weights = paddle.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) qh=qh.transpose([0 2 1 3]).reshape([-1 num_queries c])<line_sep>kh=kh.reshape([-1 c h<times>w])<line_sep>weights=paddle.bmm(qh<times>self.normalize_fact kh).reshape([bs n num_queries h w]).transpose([0 2 1 3 4])<if_stmt>mask<is><not><none><block_start>weights<augadd>mask<block_end># fix a potenial bug: https://github.com/facebookresearch/detr/issues/247 weights=F.softmax(weights.flatten(3) axis=-1).reshape(weights.shape)<line_sep>weights=self.dropout(weights)<line_sep><return>weights<block_end><block_end><class_stmt>MaskHeadFPNConv(nn.Layer)<block_start>""" Simple convolutional head, using 
group norm. Upsampling is done using a FPN approach """<def_stmt>__init__ self input_dim fpn_dims context_dim num_groups=8<block_start>super().__init__()<line_sep>inter_dims=[input_dim ]+[context_dim<floordiv>(2<power>i)<for>i range(1 5)]<line_sep>weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.KaimingUniform())<line_sep>bias_attr=paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Constant())<line_sep>self.conv0=self._make_layers(input_dim input_dim 3 num_groups weight_attr bias_attr)<line_sep>self.conv_inter=nn.LayerList()<for_stmt>in_dims,out_dims zip(inter_dims[:-1] inter_dims[1:])<block_start>self.conv_inter.append(self._make_layers(in_dims out_dims 3 num_groups weight_attr bias_attr))<block_end>self.conv_out=nn.Conv2D(inter_dims[-1] 1 3 padding=1 weight_attr=weight_attr bias_attr=bias_attr)<line_sep>self.adapter=nn.LayerList()<for_stmt>i range(len(fpn_dims))<block_start>self.adapter.append(nn.Conv2D(fpn_dims[i] inter_dims[i+1] 1 weight_attr=weight_attr bias_attr=bias_attr))<block_end><block_end><def_stmt>_make_layers self in_dims out_dims kernel_size num_groups weight_attr=<none> bias_attr=<none><block_start><return>nn.Sequential(nn.Conv2D(in_dims out_dims kernel_size padding=kernel_size<floordiv>2 weight_attr=weight_attr bias_attr=bias_attr) nn.GroupNorm(num_groups out_dims) nn.ReLU())<block_end><def_stmt>forward self x bbox_attention_map fpns<block_start>x=paddle.concat([x.tile([bbox_attention_map.shape[1] 1 1 1]) bbox_attention_map.flatten(0 1)] 1)<line_sep>x=self.conv0(x)<for_stmt>inter_layer,adapter_layer,feat zip(self.conv_inter[:-1] self.adapter fpns)<block_start>feat=adapter_layer(feat).tile([bbox_attention_map.shape[1] 1 1 1])<line_sep>x=inter_layer(x)<line_sep>x=feat+F.interpolate(x size=feat.shape[-2:])<block_end>x=self.conv_inter[-1](x)<line_sep>x=self.conv_out(x)<line_sep><return>x<block_end><block_end>@register<class_stmt>DETRHead(nn.Layer)<block_start>__shared__=['num_classes' 'hidden_dim' 'use_focal_loss']<line_sep>__inject__=['loss']<def_stmt>__init__ self num_classes=80 hidden_dim=256 nhead=8 num_mlp_layers=3 loss='DETRLoss' fpn_dims=[1024 512 256] with_mask_head=<false> use_focal_loss=<false><block_start>super(DETRHead self).__init__()<line_sep># add background class self.num_classes=num_classes<if>use_focal_loss<else>num_classes+1<line_sep>self.hidden_dim=hidden_dim<line_sep>self.loss=loss<line_sep>self.with_mask_head=with_mask_head<line_sep>self.use_focal_loss=use_focal_loss<line_sep>self.score_head=nn.Linear(hidden_dim self.num_classes)<line_sep>self.bbox_head=MLP(hidden_dim hidden_dim output_dim=4 num_layers=num_mlp_layers)<if_stmt>self.with_mask_head<block_start>self.bbox_attention=MultiHeadAttentionMap(hidden_dim hidden_dim nhead)<line_sep>self.mask_head=MaskHeadFPNConv(hidden_dim+nhead fpn_dims hidden_dim)<block_end>self._reset_parameters()<block_end><def_stmt>_reset_parameters self<block_start>linear_init_(self.score_head)<block_end>@classmethod<def_stmt>from_config cls cfg hidden_dim nhead input_shape<block_start><return>{'hidden_dim':hidden_dim 'nhead':nhead 'fpn_dims':[i.channels<for>i input_shape[::-1]][1:]}<block_end>@staticmethod<def_stmt>get_gt_mask_from_polygons gt_poly pad_mask<block_start>out_gt_mask=[]<for_stmt>polygons,padding zip(gt_poly pad_mask)<block_start>height,width=int(padding[: 0].sum()) int(padding[0 :].sum())<line_sep>masks=[]<for_stmt>obj_poly polygons<block_start>rles=mask_util.frPyObjects(obj_poly height 
width)<line_sep>rle=mask_util.merge(rles)<line_sep>masks.append(paddle.to_tensor(mask_util.decode(rle)).astype('float32'))<block_end>masks=paddle.stack(masks)<line_sep>masks_pad=paddle.zeros([masks.shape[0] pad_mask.shape[1] pad_mask.shape[2]])<line_sep>masks_pad[: :height :width]=masks<line_sep>out_gt_mask.append(masks_pad)<block_end><return>out_gt_mask<block_end><def_stmt>forward self out_transformer body_feats inputs=<none><block_start>r""" Args: out_transformer (Tuple): (feats: [num_levels, batch_size, num_queries, hidden_dim], memory: [batch_size, hidden_dim, h, w], src_proj: [batch_size, h*w, hidden_dim], src_mask: [batch_size, 1, 1, h, w]) body_feats (List(Tensor)): list[[B, C, H, W]] inputs (dict): dict(inputs) """<line_sep>feats,memory,src_proj,src_mask=out_transformer<line_sep>outputs_logit=self.score_head(feats)<line_sep>outputs_bbox=F.sigmoid(self.bbox_head(feats))<line_sep>outputs_seg=<none><if_stmt>self.with_mask_head<block_start>bbox_attention_map=self.bbox_attention(feats[-1] memory src_mask)<line_sep>fpn_feats=[a<for>a body_feats[::-1]][1:]<line_sep>outputs_seg=self.mask_head(src_proj bbox_attention_map fpn_feats)<line_sep>outputs_seg=outputs_seg.reshape([feats.shape[1] feats.shape[2] outputs_seg.shape[-2] outputs_seg.shape[-1]])<block_end><if_stmt>self.training<block_start><assert_stmt>inputs<is><not><none><assert_stmt>'gt_bbox'<in>inputs<and>'gt_class'<in>inputs<line_sep>gt_mask=self.get_gt_mask_from_polygons(inputs['gt_poly'] inputs['pad_mask'])<if>'gt_poly'<in>inputs<else><none><line_sep><return>self.loss(outputs_bbox outputs_logit inputs['gt_bbox'] inputs['gt_class'] masks=outputs_seg gt_mask=gt_mask)<block_end><else_stmt><block_start><return>(outputs_bbox[-1] outputs_logit[-1] outputs_seg)<block_end><block_end><block_end>@register<class_stmt>DeformableDETRHead(nn.Layer)<block_start>__shared__=['num_classes' 'hidden_dim']<line_sep>__inject__=['loss']<def_stmt>__init__ self num_classes=80 hidden_dim=512 nhead=8 num_mlp_layers=3 loss='DETRLoss'<block_start>super(DeformableDETRHead self).__init__()<line_sep>self.num_classes=num_classes<line_sep>self.hidden_dim=hidden_dim<line_sep>self.nhead=nhead<line_sep>self.loss=loss<line_sep>self.score_head=nn.Linear(hidden_dim self.num_classes)<line_sep>self.bbox_head=MLP(hidden_dim hidden_dim output_dim=4 num_layers=num_mlp_layers)<line_sep>self._reset_parameters()<block_end><def_stmt>_reset_parameters self<block_start>linear_init_(self.score_head)<line_sep>constant_(self.score_head.bias -4.595)<line_sep>constant_(self.bbox_head.layers[-1].weight)<line_sep>bias=paddle.zeros_like(self.bbox_head.layers[-1].bias)<line_sep>bias[2:]=-2.0<line_sep>self.bbox_head.layers[-1].bias.set_value(bias)<block_end>@classmethod<def_stmt>from_config cls cfg hidden_dim nhead input_shape<block_start><return>{'hidden_dim':hidden_dim 'nhead':nhead}<block_end><def_stmt>forward self out_transformer body_feats inputs=<none><block_start>r""" Args: out_transformer (Tuple): (feats: [num_levels, batch_size, num_queries, hidden_dim], memory: [batch_size, \sum_{l=0}^{L-1} H_l \cdot W_l, hidden_dim], reference_points: [batch_size, num_queries, 2]) body_feats (List(Tensor)): list[[B, C, H, W]] inputs (dict): dict(inputs) """<line_sep>feats,memory,reference_points=out_transformer<line_sep>reference_points=inverse_sigmoid(reference_points.unsqueeze(0))<line_sep>outputs_bbox=self.bbox_head(feats)<line_sep># It's equivalent to "outputs_bbox[:, :, :, :2] += reference_points", # but the gradient is wrong in paddle. 
outputs_bbox=paddle.concat([outputs_bbox[: : : :2]+reference_points outputs_bbox[: : : 2:]] axis=-1)<line_sep>outputs_bbox=F.sigmoid(outputs_bbox)<line_sep>outputs_logit=self.score_head(feats)<if_stmt>self.training<block_start><assert_stmt>inputs<is><not><none><assert_stmt>'gt_bbox'<in>inputs<and>'gt_class'<in>inputs<line_sep><return>self.loss(outputs_bbox outputs_logit inputs['gt_bbox'] inputs['gt_class'])<block_end><else_stmt><block_start><return>(outputs_bbox[-1] outputs_logit[-1] <none>)<block_end><block_end><block_end>
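A small NumPy sketch of the reference-point refinement done in DeformableDETRHead.forward: the reference point is mapped through inverse_sigmoid, added to the first two predicted box coordinates, and the result is passed through a sigmoid. This mirrors the concat workaround above but is plain NumPy, not PaddlePaddle, and the clipping constant is an assumption.

import numpy as np

def inverse_sigmoid(x, eps=1e-5):
    x = np.clip(x, eps, 1.0 - eps)
    return np.log(x / (1.0 - x))

def refine_boxes(pred, reference_points):
    """pred: [..., 4] raw head output; reference_points: [..., 2] in (0, 1)."""
    ref = inverse_sigmoid(reference_points)
    cxcy = pred[..., :2] + ref          # shift the predicted center by the reference point
    wh = pred[..., 2:]                  # width/height logits are left untouched
    return 1.0 / (1.0 + np.exp(-np.concatenate([cxcy, wh], axis=-1)))  # sigmoid back to (0, 1)

# Toy usage: one query.
pred = np.array([[0.2, -0.1, 0.0, 0.5]])
ref = np.array([[0.3, 0.7]])
print(refine_boxes(pred, ref))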
<import_stmt>subprocess<import_stmt>aiostream<import_stmt>pytest<import_from_stmt>vdirsyncer.storage.filesystem FilesystemStorage<import_from_stmt>vdirsyncer.vobject Item<import_from_stmt>. StorageTests<class_stmt>TestFilesystemStorage(StorageTests)<block_start>storage_class=FilesystemStorage<line_sep>@pytest.fixture<def_stmt>get_storage_args self tmpdir<block_start><async_keyword><def_stmt>inner collection="test"<block_start>rv={"path":str(tmpdir) "fileext":".txt" "collection":collection}<if_stmt>collection<is><not><none><block_start>rv=<await>self.storage_class.create_collection(**rv)<block_end><return>rv<block_end><return>inner<block_end><def_stmt>test_is_not_directory self tmpdir<block_start><with_stmt>pytest.raises(OSError)<block_start>f=tmpdir.join("hue")<line_sep>f.write("stub")<line_sep>self.storage_class(str(tmpdir)+"/hue" ".txt")<block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_broken_data self tmpdir<block_start>s=self.storage_class(str(tmpdir) ".txt")<class_stmt>BrokenItem<block_start>raw="Ц, Ш, Л, ж, Д, З, Ю".encode()<line_sep>uid="jeezus"<line_sep>ident=uid<block_end><with_stmt>pytest.raises(TypeError)<block_start><await>s.upload(BrokenItem)<block_end><assert_stmt><not>tmpdir.listdir()<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_ident_with_slash self tmpdir<block_start>s=self.storage_class(str(tmpdir) ".txt")<line_sep><await>s.upload(Item("UID:a/b/c"))<line_sep>(item_file )=tmpdir.listdir()<assert_stmt>"/"<not><in>item_file.basename<and>item_file.isfile()<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_ignore_tmp_files self tmpdir<block_start>"""Test that files with .tmp suffix beside .ics files are ignored."""<line_sep>s=self.storage_class(str(tmpdir) ".ics")<line_sep><await>s.upload(Item("UID:xyzxyz"))<line_sep>(item_file )=tmpdir.listdir()<line_sep>item_file.copy(item_file.new(ext="tmp"))<assert_stmt>len(tmpdir.listdir())<eq>2<assert_stmt>len(<await>aiostream.stream.list(s.list()))<eq>1<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_ignore_tmp_files_empty_fileext self tmpdir<block_start>"""Test that files with .tmp suffix are ignored with empty fileext."""<line_sep>s=self.storage_class(str(tmpdir) "")<line_sep><await>s.upload(Item("UID:xyzxyz"))<line_sep>(item_file )=tmpdir.listdir()<line_sep>item_file.copy(item_file.new(ext="tmp"))<assert_stmt>len(tmpdir.listdir())<eq>2<line_sep># assert False, tmpdir.listdir() # enable to see the created filename <assert_stmt>len(<await>aiostream.stream.list(s.list()))<eq>1<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_ignore_files_typical_backup self tmpdir<block_start>"""Test file-name ignorance with typical backup ending ~."""<line_sep>ignorext="~"# without dot storage=self.storage_class(str(tmpdir) "" fileignoreext=ignorext)<line_sep><await>storage.upload(Item("UID:xyzxyz"))<line_sep>(item_file )=tmpdir.listdir()<line_sep>item_file.copy(item_file.new(basename=item_file.basename+ignorext))<assert_stmt>len(tmpdir.listdir())<eq>2<assert_stmt>len(<await>aiostream.stream.list(storage.list()))<eq>1<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_too_long_uid self tmpdir<block_start>storage=self.storage_class(str(tmpdir) ".txt")<line_sep>item=Item("UID:"+"hue"<times>600)<line_sep>href,etag=<await>storage.upload(item)<assert_stmt>item.uid<not><in>href<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_post_hook_inactive self tmpdir monkeypatch<block_start><def_stmt>check_call_mock *args 
**kwargs<block_start><raise>AssertionError()<block_end>monkeypatch.setattr(subprocess "call" check_call_mock)<line_sep>s=self.storage_class(str(tmpdir) ".txt" post_hook=<none>)<line_sep><await>s.upload(Item("UID:a/b/c"))<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_post_hook_active self tmpdir monkeypatch<block_start>calls=[]<line_sep>exe="foo"<def_stmt>check_call_mock call *args **kwargs<block_start>calls.append(<true>)<assert_stmt>len(call)<eq>2<assert_stmt>call[0]<eq>exe<block_end>monkeypatch.setattr(subprocess "call" check_call_mock)<line_sep>s=self.storage_class(str(tmpdir) ".txt" post_hook=exe)<line_sep><await>s.upload(Item("UID:a/b/c"))<assert_stmt>calls<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_ignore_git_dirs self tmpdir<block_start>tmpdir.mkdir(".git").mkdir("foo")<line_sep>tmpdir.mkdir("a")<line_sep>tmpdir.mkdir("b")<line_sep>expected={"a" "b"}<line_sep>actual={c["collection"]<async_keyword><for>c self.storage_class.discover(str(tmpdir))}<assert_stmt>actual<eq>expected<block_end><block_end>
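A standalone pathlib sketch of the listing behaviour these tests exercise: item files matching the configured extension are returned, while .tmp and backup-suffix neighbours are skipped. This is an illustration, not vdirsyncer's actual FilesystemStorage code; the function name list_items and its parameters are hypothetical.

from pathlib import Path

def list_items(directory, fileext=".ics", fileignoreext=".tmp"):
    """Yield item files, skipping temporary/backup files written next to them."""
    for path in Path(directory).iterdir():
        if not path.is_file():
            continue                      # e.g. skip .git/ and collection directories
        if path.name.endswith(fileignoreext):
            continue                      # skip editor/backup/temp files
        if fileext and not path.name.endswith(fileext):
            continue                      # honour the configured file extension
        yield path

# Toy usage:
# for item in list_items("/tmp/vdir", fileext=".txt"):
#     print(item)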
""" Model select class1 single allele models. """<import_stmt>argparse<import_stmt>os<import_stmt>signal<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_stmt>random<import_from_stmt>functools partial<import_from_stmt>pprint pprint<import_stmt>numpy<import_stmt>pandas<import_from_stmt>scipy.stats kendalltau percentileofscore pearsonr<import_from_stmt>sklearn.metrics roc_auc_score<import_stmt>tqdm# progress bar tqdm.monitor_interval=0# see https://github.com/tqdm/tqdm/issues/481 <import_from_stmt>.class1_affinity_predictor Class1AffinityPredictor<import_from_stmt>.common normalize_allele_name<import_from_stmt>.encodable_sequences EncodableSequences<import_from_stmt>.common configure_logging random_peptides<import_from_stmt>.local_parallelism worker_pool_with_gpu_assignments_from_args add_local_parallelism_args<import_from_stmt>.regression_target from_ic50<line_sep># To avoid pickling large matrices to send to child processes when running in # parallel, we use this global variable as a place to store data. Data that is # stored here before creating the thread pool will be inherited to the child # processes upon fork() call, allowing us to share large data with the workers # via shared memory. GLOBAL_DATA={}<line_sep>parser=argparse.ArgumentParser(usage=__doc__)<line_sep>parser.add_argument("--data" metavar="FILE.csv" required=<false> help=("Model selection data CSV. Expected columns: "<concat>"allele, peptide, measurement_value"))<line_sep>parser.add_argument("--exclude-data" metavar="FILE.csv" required=<false> help=("Data to EXCLUDE from model selection. Useful to specify the original "<concat>"training data used"))<line_sep>parser.add_argument("--models-dir" metavar="DIR" required=<true> help="Directory to read models")<line_sep>parser.add_argument("--out-models-dir" metavar="DIR" required=<true> help="Directory to write selected models")<line_sep>parser.add_argument("--out-unselected-predictions" metavar="FILE.csv" help="Write predictions for validation data using unselected predictor to "<concat>"FILE.csv")<line_sep>parser.add_argument("--unselected-accuracy-scorer" metavar="SCORER" default="combined:mass-spec,mse")<line_sep>parser.add_argument("--unselected-accuracy-scorer-num-samples" type=int default=1000)<line_sep>parser.add_argument("--unselected-accuracy-percentile-threshold" type=float metavar="X" default=95)<line_sep>parser.add_argument("--allele" default=<none> nargs="+" help="Alleles to select models for. If not specified, all alleles with "<concat>"enough measurements will be used.")<line_sep>parser.add_argument("--combined-min-models" type=int default=8 metavar="N" help="Min number of models to select per allele when using combined selector")<line_sep>parser.add_argument("--combined-max-models" type=int default=1000 metavar="N" help="Max number of models to select per allele when using combined selector")<line_sep>parser.add_argument("--combined-min-contribution-percent" type=float default=1.0 metavar="X" help="Use only model selectors that can contribute at least X %% to the "<concat>"total score. 
Default: %(default)s")<line_sep>parser.add_argument("--mass-spec-min-measurements" type=int metavar="N" default=1 help="Min number of measurements required for an allele to use mass-spec model "<concat>"selection")<line_sep>parser.add_argument("--mass-spec-min-models" type=int default=8 metavar="N" help="Min number of models to select per allele when using mass-spec selector")<line_sep>parser.add_argument("--mass-spec-max-models" type=int default=1000 metavar="N" help="Max number of models to select per allele when using mass-spec selector")<line_sep>parser.add_argument("--mse-min-measurements" type=int metavar="N" default=1 help="Min number of measurements required for an allele to use MSE model "<concat>"selection")<line_sep>parser.add_argument("--mse-min-models" type=int default=8 metavar="N" help="Min number of models to select per allele when using MSE selector")<line_sep>parser.add_argument("--mse-max-models" type=int default=1000 metavar="N" help="Max number of models to select per allele when using MSE selector")<line_sep>parser.add_argument("--scoring" nargs="+" default=["mse" "consensus"] help="Scoring procedures to use in order")<line_sep>parser.add_argument("--consensus-min-models" type=int default=8 metavar="N" help="Min number of models to select per allele when using consensus selector")<line_sep>parser.add_argument("--consensus-max-models" type=int default=1000 metavar="N" help="Max number of models to select per allele when using consensus selector")<line_sep>parser.add_argument("--consensus-num-peptides-per-length" type=int default=10000 help="Num peptides per length to use for consensus scoring")<line_sep>parser.add_argument("--mass-spec-regex" metavar="REGEX" default="mass[- ]spec" help="Regular expression for mass-spec data. Runs on measurement_source col."<concat>"Default: %(default)s.")<line_sep>parser.add_argument("--verbosity" type=int help="Keras verbosity. Default: %(default)s" default=0)<line_sep>add_local_parallelism_args(parser)<def_stmt>run argv=sys.argv[1:]<block_start><global>GLOBAL_DATA<line_sep># On sigusr1 print stack trace print("To show stack trace, run:\nkill -s USR1 %d"%os.getpid())<line_sep>signal.signal(signal.SIGUSR1 <lambda>sig frame:traceback.print_stack())<line_sep>args=parser.parse_args(argv)<line_sep>args.out_models_dir=os.path.abspath(args.out_models_dir)<line_sep>configure_logging(verbose=args.verbosity<g>1)<line_sep>input_predictor=Class1AffinityPredictor.load(args.models_dir)<line_sep>print("Loaded: %s"%input_predictor)<if_stmt>args.allele<block_start>alleles=[normalize_allele_name(a)<for>a args.allele]<block_end><else_stmt><block_start>alleles=input_predictor.supported_alleles<block_end>metadata_dfs={}<if_stmt>args.data<block_start>df=pandas.read_csv(args.data)<line_sep>print("Loaded data: %s"%(str(df.shape)))<line_sep>df=df.loc[(df.peptide.str.len()<ge>8)&(df.peptide.str.len()<le>15)]<line_sep>print("Subselected to 8-15mers: %s"%(str(df.shape)))<line_sep># Allele names in data are assumed to be already normalized. 
df=df.loc[df.allele.isin(alleles)].dropna()<line_sep>print("Selected %d alleles: %s"%(len(alleles) ' '.join(alleles)))<if_stmt>args.exclude_data<block_start>exclude_df=pandas.read_csv(args.exclude_data)<line_sep>metadata_dfs["model_selection_exclude"]=exclude_df<line_sep>print("Loaded exclude data: %s"%(str(df.shape)))<line_sep>df["_key"]=df.allele+"__"+df.peptide<line_sep>exclude_df["_key"]=exclude_df.allele+"__"+exclude_df.peptide<line_sep>df["_excluded"]=df._key.isin(exclude_df._key.unique())<line_sep>print("Excluding measurements per allele (counts): ")<line_sep>print(df.groupby("allele")._excluded.sum())<line_sep>print("Excluding measurements per allele (fractions): ")<line_sep>print(df.groupby("allele")._excluded.mean())<line_sep>df=df.loc[~df._excluded]<del_stmt>df["_excluded"]<del_stmt>df["_key"]<line_sep>print("Reduced data to: %s"%(str(df.shape)))<block_end>metadata_dfs["model_selection_data"]=df<line_sep>df["mass_spec"]=df.measurement_source.str.contains(args.mass_spec_regex)<block_end><else_stmt><block_start>df=<none><block_end><if_stmt>args.out_unselected_predictions<block_start>df["unselected_prediction"]=input_predictor.predict(alleles=df.allele.values peptides=df.peptide.values)<line_sep>df.to_csv(args.out_unselected_predictions)<line_sep>print("Wrote: %s"%args.out_unselected_predictions)<block_end>selectors={}<line_sep>selector_to_model_selection_kwargs={}<def_stmt>make_selector scoring combined_min_contribution_percent=args.combined_min_contribution_percent<block_start><if_stmt>scoring<in>selectors<block_start><return>(selectors[scoring] selector_to_model_selection_kwargs[scoring])<block_end>start=time.time()<if_stmt>scoring.startswith("combined:")<block_start>model_selection_kwargs={'min_models':args.combined_min_models 'max_models':args.combined_max_models }<line_sep>component_selectors=[]<for_stmt>component_selector scoring.split(":" 1)[1].split(",")<block_start>component_selectors.append(make_selector(component_selector)[0])<block_end>selector=CombinedModelSelector(component_selectors min_contribution_percent=combined_min_contribution_percent)<block_end><elif_stmt>scoring<eq>"mse"<block_start>model_selection_kwargs={'min_models':args.mse_min_models 'max_models':args.mse_max_models }<line_sep>min_measurements=args.mse_min_measurements<line_sep>selector=MSEModelSelector(df=df.loc[~df.mass_spec] predictor=input_predictor min_measurements=min_measurements)<block_end><elif_stmt>scoring<eq>"mass-spec"<block_start>mass_spec_df=df.loc[df.mass_spec]<line_sep>model_selection_kwargs={'min_models':args.mass_spec_min_models 'max_models':args.mass_spec_max_models }<line_sep>min_measurements=args.mass_spec_min_measurements<line_sep>selector=MassSpecModelSelector(df=mass_spec_df predictor=input_predictor min_measurements=min_measurements)<block_end><elif_stmt>scoring<eq>"consensus"<block_start>model_selection_kwargs={'min_models':args.consensus_min_models 'max_models':args.consensus_max_models }<line_sep>selector=ConsensusModelSelector(predictor=input_predictor num_peptides_per_length=args.consensus_num_peptides_per_length)<block_end><else_stmt><block_start><raise>ValueError("Unsupported scoring method: %s"%scoring)<block_end>print("Instantiated model selector %s in %0.2f sec."%(scoring time.time()-start))<line_sep><return>(selector model_selection_kwargs)<block_end><for_stmt>scoring args.scoring<block_start>(selector 
model_selection_kwargs)=make_selector(scoring)<line_sep>selectors[scoring]=selector<line_sep>selector_to_model_selection_kwargs[scoring]=model_selection_kwargs<block_end>unselected_accuracy_scorer=<none><if_stmt>args.unselected_accuracy_scorer# Force running all selectors by setting combined_min_contribution_percent=0. <block_start>unselected_accuracy_scorer=make_selector(args.unselected_accuracy_scorer combined_min_contribution_percent=0.0)[0]<line_sep>print("Using unselected accuracy scorer: %s"%unselected_accuracy_scorer)<block_end>GLOBAL_DATA["unselected_accuracy_scorer"]=unselected_accuracy_scorer<line_sep>print("Selectors for alleles:")<line_sep>allele_to_selector={}<line_sep>allele_to_model_selection_kwargs={}<for_stmt>allele alleles<block_start>selector=<none><for_stmt>possible_selector args.scoring<block_start><if_stmt>selectors[possible_selector].usable_for_allele(allele=allele)<block_start>selector=selectors[possible_selector]<line_sep>print("%20s %s"%(allele selector.plan_summary(allele)))<line_sep><break><block_end><block_end><if_stmt>selector<is><none><block_start><raise>ValueError("No selectors usable for allele: %s"%allele)<block_end>allele_to_selector[allele]=selector<line_sep>allele_to_model_selection_kwargs[allele]=(selector_to_model_selection_kwargs[possible_selector])<block_end>GLOBAL_DATA["args"]=args<line_sep>GLOBAL_DATA["input_predictor"]=input_predictor<line_sep>GLOBAL_DATA["unselected_accuracy_scorer"]=unselected_accuracy_scorer<line_sep>GLOBAL_DATA["allele_to_selector"]=allele_to_selector<line_sep>GLOBAL_DATA["allele_to_model_selection_kwargs"]=allele_to_model_selection_kwargs<if_stmt><not>os.path.exists(args.out_models_dir)<block_start>print("Attempting to create directory: %s"%args.out_models_dir)<line_sep>os.mkdir(args.out_models_dir)<line_sep>print("Done.")<block_end>result_predictor=Class1AffinityPredictor(metadata_dataframes=metadata_dfs)<line_sep>worker_pool=worker_pool_with_gpu_assignments_from_args(args)<line_sep>start=time.time()<if_stmt>worker_pool<is><none># Serial run <block_start>print("Running in serial.")<line_sep>results=(model_select(allele)<for>allele alleles)<block_end><else_stmt># Parallel run <block_start>random.shuffle(alleles)<line_sep>results=worker_pool.imap_unordered(partial(model_select constant_data=GLOBAL_DATA) alleles chunksize=1)<block_end>unselected_summary=[]<line_sep>model_selection_dfs=[]<for_stmt>result tqdm.tqdm(results total=len(alleles))<block_start>pprint(result)<line_sep>summary_dict=dict(result)<line_sep>summary_dict["retained"]=result["selected"]<is><not><none><del_stmt>summary_dict["selected"]<line_sep>unselected_summary.append(summary_dict)<if_stmt>result['selected']<is><not><none><block_start>model_selection_dfs.append(result['selected'].metadata_dataframes['model_selection'])<line_sep>result_predictor.merge_in_place([result['selected']])<block_end><block_end><if_stmt>model_selection_dfs<block_start>model_selection_df=pandas.concat(model_selection_dfs ignore_index=<true>)<line_sep>model_selection_df["selector"]=model_selection_df.allele.map(allele_to_selector)<line_sep>result_predictor.metadata_dataframes["model_selection"]=(model_selection_df)<block_end>result_predictor.metadata_dataframes["unselected_summary"]=(pandas.DataFrame(unselected_summary))<line_sep>print("Done model selecting for %d alleles."%len(alleles))<line_sep>result_predictor.save(args.out_models_dir)<line_sep>model_selection_time=time.time()-start<if_stmt>worker_pool<block_start>worker_pool.close()<line_sep>worker_pool.join()<block_end>print("Model 
selection time %0.2f min."%(model_selection_time/60.0))<line_sep>print("Predictor written to: %s"%args.out_models_dir)<block_end><class_stmt>ScrambledPredictor(object)<block_start><def_stmt>__init__ self predictor<block_start>self.predictor=predictor<line_sep>self._predictions={}<line_sep>self._allele=<none><block_end><def_stmt>predict self peptides allele<block_start><if_stmt>peptides<not><in>self._predictions<block_start>self._predictions[peptides]=pandas.Series(self.predictor.predict(peptides=peptides allele=allele))<line_sep>self._allele=allele<block_end><assert_stmt>allele<eq>self._allele<line_sep><return>self._predictions[peptides].sample(frac=1.0).values<block_end><block_end><def_stmt>model_select allele constant_data=GLOBAL_DATA<block_start>unselected_accuracy_scorer=constant_data["unselected_accuracy_scorer"]<line_sep>selector=constant_data["allele_to_selector"][allele]<line_sep>model_selection_kwargs=constant_data["allele_to_model_selection_kwargs"][allele]<line_sep>predictor=constant_data["input_predictor"]<line_sep>args=constant_data["args"]<line_sep>unselected_accuracy_scorer_samples=constant_data["args"].unselected_accuracy_scorer_num_samples<line_sep>result_dict={"allele":allele}<line_sep>unselected_score=<none><line_sep>unselected_score_percentile=<none><line_sep>unselected_score_scrambled_mean=<none><if_stmt>unselected_accuracy_scorer<block_start>unselected_score_function=(unselected_accuracy_scorer.score_function(allele))<line_sep>additional_metadata={}<line_sep>unselected_score=unselected_score_function(predictor additional_metadata_out=additional_metadata)<line_sep>scrambled_predictor=ScrambledPredictor(predictor)<line_sep>scrambled_scores=numpy.array([unselected_score_function(scrambled_predictor)<for>_ range(unselected_accuracy_scorer_samples)])<line_sep>unselected_score_scrambled_mean=scrambled_scores.mean()<line_sep>unselected_score_percentile=percentileofscore(scrambled_scores unselected_score)<line_sep>print("Unselected score and percentile" allele unselected_score unselected_score_percentile additional_metadata)<line_sep>result_dict.update(dict(("unselected_%s"%key value)<for>(key value) additional_metadata.items()))<block_end>selected=<none><line_sep>threshold=args.unselected_accuracy_percentile_threshold<if_stmt>unselected_score_percentile<is><none><or>unselected_score_percentile<ge>threshold<block_start>selected=predictor.model_select(score_function=selector.score_function(allele=allele) alleles=[allele] **model_selection_kwargs)<block_end>result_dict["unselected_score_plan"]=(unselected_accuracy_scorer.plan_summary(allele)<if>unselected_accuracy_scorer<else><none>)<line_sep>result_dict["selector_score_plan"]=selector.plan_summary(allele)<line_sep>result_dict["unselected_accuracy_score_percentile"]=unselected_score_percentile<line_sep>result_dict["unselected_score"]=unselected_score<line_sep>result_dict["unselected_score_scrambled_mean"]=unselected_score_scrambled_mean<line_sep>result_dict["selected"]=selected<line_sep>result_dict["num_models"]=len(selected.neural_networks)<if>selected<else><none><line_sep><return>result_dict<block_end><def_stmt>cache_encoding predictor peptides# Encode the peptides for each neural network, so the encoding # becomes cached. <block_start><for_stmt>network predictor.neural_networks<block_start>network.peptides_to_network_input(peptides)<block_end><block_end><class_stmt>ScoreFunction(object)<block_start>""" Thin wrapper over a score function (Class1AffinityPredictor -> float). 
Used to keep a summary string associated with the function. """<def_stmt>__init__ self function summary=<none><block_start>self.function=function<line_sep>self.summary=summary<if>summary<else>"(n/a)"<block_end><def_stmt>__call__ self *args **kwargs<block_start><return>self.function(*args **kwargs)<block_end><block_end><class_stmt>CombinedModelSelector(object)<block_start>""" Model selector that computes a weighted average over other model selectors. """<def_stmt>__init__ self model_selectors weights=<none> min_contribution_percent=1.0<block_start><if_stmt>weights<is><none><block_start>weights=numpy.ones(shape=(len(model_selectors) ))<block_end>self.model_selectors=model_selectors<line_sep>self.selector_to_weight=dict(zip(self.model_selectors weights))<line_sep>self.min_contribution_percent=min_contribution_percent<block_end><def_stmt>usable_for_allele self allele<block_start><return>any(selector.usable_for_allele(allele)<for>selector self.model_selectors)<block_end><def_stmt>plan_summary self allele<block_start><return>self.score_function(allele dry_run=<true>).summary<block_end><def_stmt>score_function self allele dry_run=<false><block_start>selector_to_max_weighted_score={}<for_stmt>selector self.model_selectors<block_start>weight=self.selector_to_weight[selector]<if_stmt>selector.usable_for_allele(allele)<block_start>max_weighted_score=selector.max_absolute_value(allele)<times>weight<block_end><else_stmt><block_start>max_weighted_score=0<block_end>selector_to_max_weighted_score[selector]=max_weighted_score<block_end>max_total_score=sum(selector_to_max_weighted_score.values())<line_sep># Use only selectors that can contribute >1% to the total score selectors_to_use=[selector<for>selector self.model_selectors<if>(selector_to_max_weighted_score[selector]<g>max_total_score<times>self.min_contribution_percent/100.0)]<line_sep>summary=", ".join(["%s(|%.3f|)"%(selector.plan_summary(allele) selector_to_max_weighted_score[selector])<for>selector selectors_to_use])<if_stmt>dry_run<block_start>score=<none><block_end><else_stmt><block_start>score_functions_and_weights=[(selector.score_function(allele=allele) self.selector_to_weight[selector])<for>selector selectors_to_use]<def_stmt>score predictor additional_metadata_out=<none><block_start>scores=numpy.array([score_function(predictor additional_metadata_out=additional_metadata_out)<times>weight<for>(score_function weight) score_functions_and_weights])<if_stmt>additional_metadata_out<is><not><none><block_start>additional_metadata_out["combined_score_terms"]=str(list(scores))<block_end><return>scores.sum()<block_end><block_end><return>ScoreFunction(score summary=summary)<block_end><block_end><class_stmt>ConsensusModelSelector(object)<block_start>""" Model selector that scores sub-ensembles based on their Kendall tau consistency with the full ensemble over a set of random peptides. 
"""<def_stmt>__init__ self predictor num_peptides_per_length=10000 multiply_score_by_value=10.0<block_start>(min_length max_length)=predictor.supported_peptide_lengths<line_sep>peptides=[]<for_stmt>length range(min_length max_length+1)<block_start>peptides.extend(random_peptides(num_peptides_per_length length=length))<block_end>self.peptides=EncodableSequences.create(peptides)<line_sep>self.predictor=predictor<line_sep>self.multiply_score_by_value=multiply_score_by_value<line_sep>cache_encoding(self.predictor self.peptides)<block_end><def_stmt>usable_for_allele self allele<block_start><return><true><block_end><def_stmt>max_absolute_value self allele<block_start><return>self.multiply_score_by_value<block_end><def_stmt>plan_summary self allele<block_start><return>"consensus (%d points)"%len(self.peptides)<block_end><def_stmt>score_function self allele<block_start>full_ensemble_predictions=self.predictor.predict(allele=allele peptides=self.peptides)<def_stmt>score predictor additional_metadata_out=<none><block_start>predictions=predictor.predict(allele=allele peptides=self.peptides )<line_sep>tau=kendalltau(predictions full_ensemble_predictions).correlation<if_stmt>additional_metadata_out<is><not><none><block_start>additional_metadata_out["score_consensus_tau"]=tau<block_end><return>tau<times>self.multiply_score_by_value<block_end><return>ScoreFunction(score summary=self.plan_summary(allele))<block_end><block_end><class_stmt>MSEModelSelector(object)<block_start>""" Model selector that uses mean-squared error to score models. Inequalities are supported. """<def_stmt>__init__ self df predictor min_measurements=1 multiply_score_by_data_size=<true><block_start>self.df=df<line_sep>self.predictor=predictor<line_sep>self.min_measurements=min_measurements<line_sep>self.multiply_score_by_data_size=multiply_score_by_data_size<block_end><def_stmt>usable_for_allele self allele<block_start><return>(self.df.allele<eq>allele).sum()<ge>self.min_measurements<block_end><def_stmt>max_absolute_value self allele<block_start><if_stmt>self.multiply_score_by_data_size<block_start><return>(self.df.allele<eq>allele).sum()<block_end><else_stmt><block_start><return>1.0<block_end><block_end><def_stmt>plan_summary self allele<block_start><return>self.score_function(allele).summary<block_end><def_stmt>score_function self allele<block_start>sub_df=self.df.loc[self.df.allele<eq>allele].reset_index(drop=<true>)<line_sep>peptides=EncodableSequences.create(sub_df.peptide.values)<def_stmt>score predictor additional_metadata_out=<none><block_start>predictions=predictor.predict(allele=allele peptides=peptides )<line_sep>deviations=from_ic50(predictions)-from_ic50(sub_df.measurement_value)<if_stmt>'measurement_inequality'<in>sub_df.columns# Must reverse meaning of inequality since we are working with # transformed 0-1 values, which are anti-correlated with the ic50s. # The measurement_inequality column is given in terms of ic50s. 
<block_start>deviations.loc[((sub_df.measurement_inequality<eq>"<")&(deviations<g>0))|((sub_df.measurement_inequality<eq>">")&(deviations<l>0))]=0.0<block_end>score_mse=(1-(deviations<power>2).mean())<if_stmt>additional_metadata_out<is><not><none><block_start>additional_metadata_out["score_MSE"]=1-score_mse<line_sep># We additionally include other scores on (=) measurements as # a convenience eq_df=sub_df<if_stmt>'measurement_inequality'<in>sub_df.columns<block_start>eq_df=sub_df.loc[sub_df.measurement_inequality<eq>"="]<block_end>additional_metadata_out["score_pearsonr"]=(pearsonr(numpy.log(eq_df.measurement_value.values) numpy.log(predictions[eq_df.index.values]))[0])<for_stmt>threshold [500 5000 15000]<block_start><if_stmt>(eq_df.measurement_value<l>threshold).nunique()<eq>2<block_start>additional_metadata_out["score_AUC@%d"%threshold]=(roc_auc_score((eq_df.measurement_value<l>threshold).values -1<times>predictions[eq_df.index.values]))<block_end><block_end><block_end><return>score_mse<times>(len(sub_df)<if>self.multiply_score_by_data_size<else>1)<block_end>summary="mse (%d points)"%(len(sub_df))<line_sep><return>ScoreFunction(score summary=summary)<block_end><block_end><class_stmt>MassSpecModelSelector(object)<block_start>""" Model selector that uses PPV of differentiating decoys from hits from mass-spec experiments. """<def_stmt>__init__ self df predictor decoys_per_length=0 min_measurements=100 multiply_score_by_data_size=<true># Index is peptide, columns are alleles <block_start>hit_matrix=df.groupby(["peptide" "allele"]).measurement_value.count().unstack().fillna(0).astype(bool)<if_stmt>decoys_per_length<block_start>(min_length max_length)=predictor.supported_peptide_lengths<line_sep>decoys=[]<for_stmt>length range(min_length max_length+1)<block_start>decoys.extend(random_peptides(decoys_per_length length=length))<block_end>decoy_matrix=pandas.DataFrame(index=decoys columns=hit_matrix.columns dtype=bool)<line_sep>decoy_matrix[:]=<false><line_sep>full_matrix=pandas.concat([hit_matrix decoy_matrix])<block_end><else_stmt><block_start>full_matrix=hit_matrix<block_end><if_stmt>len(full_matrix)<g>0<block_start>full_matrix=full_matrix.sample(frac=1.0).astype(float)<block_end>self.df=full_matrix<line_sep>self.predictor=predictor<line_sep>self.min_measurements=min_measurements<line_sep>self.multiply_score_by_data_size=multiply_score_by_data_size<line_sep>self.peptides=EncodableSequences.create(full_matrix.index.values)<line_sep>cache_encoding(self.predictor self.peptides)<block_end>@staticmethod<def_stmt>ppv y_true predictions<block_start>df=pandas.DataFrame({"prediction":predictions "y_true":y_true})<line_sep><return>df.sort_values("prediction" ascending=<true>)[:int(y_true.sum())].y_true.mean()<block_end><def_stmt>usable_for_allele self allele<block_start><return>allele<in>self.df.columns<and>(self.df[allele].sum()<ge>self.min_measurements)<block_end><def_stmt>max_absolute_value self allele<block_start><if_stmt>self.multiply_score_by_data_size<block_start><return>self.df[allele].sum()<block_end><else_stmt><block_start><return>1.0<block_end><block_end><def_stmt>plan_summary self allele<block_start><return>self.score_function(allele).summary<block_end><def_stmt>score_function self allele<block_start>total_hits=self.df[allele].sum()<line_sep>total_decoys=(self.df[allele]<eq>0).sum()<line_sep>multiplier=total_hits<if>self.multiply_score_by_data_size<else>1<def_stmt>score predictor additional_metadata_out=<none><block_start>predictions=predictor.predict(allele=allele peptides=self.peptides 
)<line_sep>ppv=self.ppv(self.df[allele] predictions)<if_stmt>additional_metadata_out<is><not><none><block_start>additional_metadata_out["score_mass_spec_PPV"]=ppv<line_sep># We additionally compute AUC score. additional_metadata_out["score_mass_spec_AUC"]=roc_auc_score(self.df[allele].values -1<times>predictions)<block_end><return>ppv<times>multiplier<block_end>summary="mass-spec (%d hits / %d decoys)"%(total_hits total_decoys)<line_sep><return>ScoreFunction(score summary=summary)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>run()<block_end>
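A minimal sketch of the weighted-average scoring idea implemented by CombinedModelSelector above, using stand-in callables instead of real selectors; the selector names, weights and score values here are assumptions for illustration only.

import numpy

def combined_score_function(score_functions, weights, max_scores, min_contribution_percent=1.0):
    # Keep only selectors whose best possible weighted score exceeds the
    # contribution threshold, then sum the weighted scores they return.
    max_total = sum(w * m for (w, m) in zip(weights, max_scores))
    kept = [
        (f, w) for (f, w, m) in zip(score_functions, weights, max_scores)
        if w * m > max_total * min_contribution_percent / 100.0
    ]
    def score(predictor):
        return float(numpy.sum([w * f(predictor) for (f, w) in kept]))
    return score

# Stand-ins for e.g. a consensus (Kendall tau) score and an MSE score.
consensus = lambda predictor: 7.5
mse = lambda predictor: 120.0
score = combined_score_function([consensus, mse], weights=[1.0, 1.0], max_scores=[10.0, 150.0])
print(score(predictor=None))  # 127.5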
<import_stmt>time<import_from_stmt>adafruit_circuitplayground.express cpx<import_stmt>simpleio<line_sep>cpx.pixels.auto_write=<false><line_sep>cpx.pixels.brightness=0.3<line_sep># Set these based on your ambient temperature for best results! minimum_temp=24<line_sep>maximum_temp=30<while_stmt><true># temperature value remapped to pixel position <block_start>peak=simpleio.map_range(cpx.temperature minimum_temp maximum_temp 0 10)<line_sep>print(cpx.temperature)<line_sep>print(int(peak))<for_stmt>i range(0 10 1)<block_start><if_stmt>i<le>peak<block_start>cpx.pixels[i]=(0 255 255)<block_end><else_stmt><block_start>cpx.pixels[i]=(0 0 0)<block_end><block_end>cpx.pixels.show()<line_sep>time.sleep(0.05)<block_end>
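For reference, a plain-Python sketch of what simpleio.map_range is doing in the loop above: a linear remap from the temperature range onto pixel indices 0-10, clamped to the output range. The sample temperature is an assumption.

def map_range(value, in_min, in_max, out_min, out_max):
    # Linear interpolation followed by clamping to the output interval.
    scaled = (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
    return max(min(scaled, max(out_min, out_max)), min(out_min, out_max))

print(map_range(27.0, 24, 30, 0, 10))  # 5.0 -> pixels 0..5 lit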
<import_from_stmt>collections namedtuple<import_stmt>io<import_stmt>json<import_from_stmt>furl furl<import_from_stmt>django.core.handlers.wsgi WSGIRequest<import_from_stmt>django.http.request QueryDict<import_from_stmt>django.template Variable VariableDoesNotExist<import_from_stmt>django.test.client MULTIPART_CONTENT<import_from_stmt>django.urls resolve<import_from_stmt>django.urls.exceptions Resolver404<import_from_stmt>mayan.apps.organizations.settings setting_organization_url_base_path<import_from_stmt>mayan.apps.templating.classes Template<import_from_stmt>.literals API_VERSION<class_stmt>BatchResponse<block_start><def_stmt>__init__ self name status_code data headers<block_start>self.name=name<line_sep>self.status_code=status_code<line_sep>self.data=data<line_sep>self.headers=headers<block_end><block_end><class_stmt>NestableLazyIterator<block_start><def_stmt>__init__ self iterable_string context context_list_index parent_iterator=<none><block_start>self.iterable_string=iterable_string<line_sep>self.context=context<line_sep>self.context_list_index=context_list_index<line_sep>self.parent_iterator=parent_iterator<line_sep>self.items=<none><line_sep>self.index=0<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>__next__ self# Setup the initial values on the initial access. <block_start><if_stmt><not>self.items<block_start><if_stmt>self.parent_iterator<block_start>next(self.parent_iterator)<block_end>self.update_iterable_object()<block_end><if_stmt>self.index<eq>len(self.items)<block_start>self.index=0<if_stmt>self.parent_iterator<block_start>next(self.parent_iterator)<block_end><else_stmt><block_start><raise>StopIteration<block_end>self.update_iterable_object()<block_end>value=self.items[self.index]<line_sep>self.context['iterables'][self.context_list_index]=value<line_sep>self.index<augadd>1<line_sep><return>value<block_end><def_stmt>update_iterable_object self<block_start>self.items=Variable(var=self.iterable_string).resolve(context=self.context)<block_end><block_end>RenderedContent=namedtuple(typename='RenderedContent' field_names=('body' 'include' 'method' 'name' 'url'))<class_stmt>BatchRequest<block_start><def_stmt>__init__ self collection name url body=<none> group_name=<none> include='true' iterables=<none> method='GET'<block_start>self.collection=collection<line_sep>self.body=body<or>{}<line_sep>self.include=include<line_sep>self.group_name=group_name<line_sep>self.iterables=iterables<line_sep>self.method=method<line_sep>self.name=name<line_sep>self.url=url<block_end><def_stmt>execute self<block_start><if_stmt>self.iterables# Initialize the iterables list to allow using any index. 
<block_start>self.collection.context['iterables']=[<none>]<times>len(self.iterables)<line_sep>iterator=<none><for_stmt>iterable_index,iterable enumerate(self.iterables)<block_start>iterator=NestableLazyIterator(context=self.collection.context context_list_index=iterable_index iterable_string=iterable parent_iterator=iterator)<block_end><while_stmt><true><block_start><try_stmt><block_start>next(iterator)<block_end><except_stmt>StopIteration<block_start><break><block_end><except_stmt>VariableDoesNotExist<as>exception<block_start>self.collection.responses[self.name]={'data':{'error':str(exception)} 'include':'true' 'is_response':<true>}<line_sep><return><block_end><else_stmt><block_start>rendered_content=self.render_content()<line_sep>BatchRequest(collection=self.collection body=rendered_content.body group_name=self.group_name include=rendered_content.include method=rendered_content.method name=rendered_content.name url=rendered_content.url).execute()<block_end><block_end><block_end><else_stmt><block_start>rendered_content=self.render_content()<line_sep>url_parts=furl(rendered_content.url)<try_stmt><block_start>resolver_match=resolve(path=url_parts.pathstr)<block_end><except_stmt>Resolver404<as>exception<block_start>self.collection.responses[rendered_content.name]={'data':{'error':'"{}" not found'.format(exception.args[0]['path'])} 'include':'true' 'is_response':<true> 'status_code':404}<line_sep><return><block_end><else_stmt><block_start>environ=getattr(self.collection.view_request 'environ' {}).copy()<line_sep>environ['REQUEST_METHOD']=rendered_content.method<line_sep>environ['PATH_INFO']=self.url<line_sep>environ['QUERY_STRING']=url_parts.querystr<line_sep>post_query_dict=QueryDict(mutable=<true>)<line_sep>post_query_dict.update(rendered_content.body)<line_sep>json_body=json.dumps(post_query_dict)<line_sep>request_data=json_body.encode('utf-8')<line_sep>environ['wsgi.input']=io.BytesIO(request_data)<line_sep>environ['CONTENT_LENGTH']=str(len(request_data))<if_stmt>rendered_content.method<eq>'POST'<block_start>environ['CONTENT_TYPE']=MULTIPART_CONTENT<block_end><else_stmt><block_start>environ['CONTENT_TYPE']='application/octet-stream'<block_end>request=WSGIRequest(environ=environ)<line_sep>request.LANGUAGE_CODE=getattr(self.collection.view_request 'LANGUAGE_CODE' <none>)<line_sep>request.POST=post_query_dict<line_sep>request._read_started=<true><line_sep>request.auth=getattr(self.collection.view_request 'auth' <none>)<line_sep>request.csrf_processing_done=<true><line_sep>request.session=getattr(self.collection.view_request 'session' <none>)<line_sep>request.user=getattr(self.collection.view_request 'user' <none>)<line_sep>response=resolver_match.func(request=request **resolver_match.kwargs)<line_sep>result={'data':response.data 'headers':{key:value<for>key,value response.items()} 'include':rendered_content.include 'is_response':<true> 'status_code':response.status_code}<line_sep>self.collection.context[rendered_content.name]=result<line_sep>self.collection.responses[rendered_content.name]=result<block_end><if_stmt>self.group_name<block_start>self.collection.context.setdefault('groups' {})<line_sep>self.collection.context['groups'].setdefault(self.group_name [])<line_sep>self.collection.context['groups'][self.group_name].append(result)<block_end><block_end><block_end><def_stmt>render_content self<block_start>rendered_body={}<for_stmt>key,value 
self.body.items()<block_start>rendered_key=Template(template_string=key).render(context=self.collection.context)<line_sep>rendered_value=Template(template_string=value).render(context=self.collection.context)<line_sep>rendered_body[rendered_key]=rendered_value<block_end>rendered_include=Template(template_string=self.include).render(context=self.collection.context)<line_sep>rendered_method=Template(template_string=self.method).render(context=self.collection.context)<line_sep>rendered_name=Template(template_string=self.name).render(context=self.collection.context)<line_sep>rendered_url=Template(template_string=self.url).render(context=self.collection.context)<line_sep><return>RenderedContent(body=rendered_body include=rendered_include method=rendered_method name=rendered_name url=rendered_url)<block_end><block_end><class_stmt>BatchRequestCollection<block_start><def_stmt>__init__ self request_list=<none><block_start>self.requests=[]<for_stmt>request_index,request_dict enumerate(request_list)<block_start>request_dict.update({'collection':self})<try_stmt><block_start>self.requests.append(BatchRequest(**request_dict))<block_end><except_stmt>Exception<as>exception<block_start><raise>ValueError('Error instantiating request #{}; {}'.format(request_index exception))<from>exception<block_end><block_end><block_end><def_stmt>execute self view_request<block_start>self.context={'view_request':view_request}<line_sep>self.responses={}<line_sep>self.view_request=view_request<for_stmt>request self.requests<block_start>request.execute()<block_end># Convert responses in context into response class instances. result=[]<for_stmt>key,value self.responses.items()<block_start><if_stmt>json.loads(s=value.get('include' 'true'))<block_start>result.append(BatchResponse(name=key status_code=value.get('status_code' 0) data=value.get('data' {}) headers=value.get('headers' {}) ))<block_end><block_end><return>result<block_end><block_end><class_stmt>Endpoint<block_start><def_stmt>__init__ self label viewname=<none> kwargs=<none><block_start>self.label=label<line_sep>self.kwargs=kwargs<if_stmt>viewname<block_start>self.viewname=viewname<block_end><else_stmt><block_start>installation_base_url=setting_organization_url_base_path.value<if_stmt>installation_base_url<block_start>installation_base_url='/{}'.format(installation_base_url)<block_end><else_stmt><block_start>installation_base_url=''<block_end>self.url='{}/api/v{}/{}/'.format(installation_base_url API_VERSION self.label)<try_stmt><block_start>self.viewname=resolve(path=self.url).view_name<block_end><except_stmt>Resolver404<block_start>self.viewname=<none><block_end><block_end><block_end><block_end>
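A hypothetical request_list for the BatchRequestCollection above, showing a plain request plus a templated request driven by iterables; the endpoint URLs, template variables and payload fields are assumptions, not documented API.

request_list = [
    {
        # Plain request; its response becomes available in the template
        # context under the key "document_types".
        "name": "document_types",
        "url": "/api/v4/document_types/",
        "method": "GET",
    },
    {
        # Templated request repeated once per item of the iterable; the
        # iterable string is resolved against the context built so far.
        "name": "documents_for_type_{{ iterables.0.id }}",
        "url": "/api/v4/document_types/{{ iterables.0.id }}/documents/",
        "method": "GET",
        "iterables": ["document_types.data.results"],
        "group_name": "documents_per_type",
    },
]
# collection = BatchRequestCollection(request_list=request_list)
# responses = collection.execute(view_request=request)  # inside a view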
# -*- coding: utf-8 -*- # Copyright (c) 2013 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # """ envelopes.envelope ================== This module contains the Envelope class. """<import_stmt>sys<if_stmt>sys.version_info[0]<eq>2<block_start><import_from_stmt>email Encoders<as>email_encoders<block_end><elif_stmt>sys.version_info[0]<eq>3<block_start><import_from_stmt>email encoders<as>email_encoders<line_sep>basestring=str<def_stmt>unicode _str _charset<block_start><return>str(_str.encode(_charset) _charset)<block_end><block_end><else_stmt><block_start><raise>RuntimeError('Unsupported Python version: %d.%d.%d'%(sys.version_info[0] sys.version_info[1] sys.version_info[2]))<block_end><import_from_stmt>email.header Header<import_from_stmt>email.mime.base MIMEBase<import_from_stmt>email.mime.multipart MIMEMultipart<import_from_stmt>email.mime.application MIMEApplication<import_from_stmt>email.mime.audio MIMEAudio<import_from_stmt>email.mime.image MIMEImage<import_from_stmt>email.mime.text MIMEText<import_stmt>mimetypes<import_stmt>os<import_stmt>re<import_from_stmt>.conn SMTP<import_from_stmt>.compat encoded<class_stmt>MessageEncodeError(Exception)<block_start><pass><block_end><class_stmt>Envelope(object)<block_start>""" The Envelope class. **Address formats** The following formats are supported for e-mail addresses: * ``"<EMAIL>"`` - just the e-mail address part as a string, * ``"Some User <<EMAIL>>"`` - name and e-mail address parts as a string, * ``("<EMAIL>", "Some User")`` - e-mail address and name parts as a tuple. Whenever you come to manipulate addresses feel free to use any (or all) of the formats above. 
:param to_addr: ``To`` address or list of ``To`` addresses :param from_addr: ``From`` address :param subject: message subject :param html_body: optional HTML part of the message :param text_body: optional plain text part of the message :param cc_addr: optional single CC address or list of CC addresses :param bcc_addr: optional single BCC address or list of BCC addresses :param headers: optional dictionary of headers :param charset: message charset """<line_sep>ADDR_FORMAT='%s <%s>'<line_sep>ADDR_REGEXP=re.compile(r'^(.*) <([^@]+@[^@]+)>$')<def_stmt>__init__ self to_addr=<none> from_addr=<none> subject=<none> html_body=<none> text_body=<none> cc_addr=<none> bcc_addr=<none> headers=<none> charset='utf-8'<block_start><if_stmt>to_addr<block_start><if_stmt>isinstance(to_addr list)<block_start>self._to=to_addr<block_end><else_stmt><block_start>self._to=[to_addr]<block_end><block_end><else_stmt><block_start>self._to=[]<block_end>self._from=from_addr<line_sep>self._subject=subject<line_sep>self._parts=[]<if_stmt>text_body<block_start>self._parts.append(('text/plain' text_body charset))<block_end><if_stmt>html_body<block_start>self._parts.append(('text/html' html_body charset))<block_end><if_stmt>cc_addr<block_start><if_stmt>isinstance(cc_addr list)<block_start>self._cc=cc_addr<block_end><else_stmt><block_start>self._cc=[cc_addr]<block_end><block_end><else_stmt><block_start>self._cc=[]<block_end><if_stmt>bcc_addr<block_start><if_stmt>isinstance(bcc_addr list)<block_start>self._bcc=bcc_addr<block_end><else_stmt><block_start>self._bcc=[bcc_addr]<block_end><block_end><else_stmt><block_start>self._bcc=[]<block_end><if_stmt>headers<block_start>self._headers=headers<block_end><else_stmt><block_start>self._headers={}<block_end>self._charset=charset<line_sep>self._addr_format=unicode(self.ADDR_FORMAT charset)<block_end><def_stmt>__repr__ self<block_start><return>u'<Envelope from="%s" to="%s" subject="%s">'%(self._addrs_to_header([self._from]) self._addrs_to_header(self._to) self._subject)<block_end>@property<def_stmt>to_addr self<block_start>"""List of ``To`` addresses."""<line_sep><return>self._to<block_end><def_stmt>add_to_addr self to_addr<block_start>"""Adds a ``To`` address."""<line_sep>self._to.append(to_addr)<block_end><def_stmt>clear_to_addr self<block_start>"""Clears list of ``To`` addresses."""<line_sep>self._to=[]<block_end>@property<def_stmt>from_addr self<block_start><return>self._from<block_end>@from_addr.setter<def_stmt>from_addr self from_addr<block_start>self._from=from_addr<block_end>@property<def_stmt>cc_addr self<block_start>"""List of CC addresses."""<line_sep><return>self._cc<block_end><def_stmt>add_cc_addr self cc_addr<block_start>"""Adds a CC address."""<line_sep>self._cc.append(cc_addr)<block_end><def_stmt>clear_cc_addr self<block_start>"""Clears list of CC addresses."""<line_sep>self._cc=[]<block_end>@property<def_stmt>bcc_addr self<block_start>"""List of BCC addresses."""<line_sep><return>self._bcc<block_end><def_stmt>add_bcc_addr self bcc_addr<block_start>"""Adds a BCC address."""<line_sep>self._bcc.append(bcc_addr)<block_end><def_stmt>clear_bcc_addr self<block_start>"""Clears list of BCC addresses."""<line_sep>self._bcc=[]<block_end>@property<def_stmt>charset self<block_start>"""Message charset."""<line_sep><return>self._charset<block_end>@charset.setter<def_stmt>charset self charset<block_start>self._charset=charset<line_sep>self._addr_format=unicode(self.ADDR_FORMAT charset)<block_end><def_stmt>_addr_tuple_to_addr self 
addr_tuple<block_start>addr=''<if_stmt>len(addr_tuple)<eq>2<and>addr_tuple[1]<block_start>addr=self._addr_format%(self._header(addr_tuple[1]<or>'') addr_tuple[0]<or>'')<block_end><elif_stmt>addr_tuple[0]<block_start>addr=addr_tuple[0]<block_end><return>addr<block_end>@property<def_stmt>headers self<block_start>"""Dictionary of custom headers."""<line_sep><return>self._headers<block_end><def_stmt>add_header self key value<block_start>"""Adds a custom header."""<line_sep>self._headers[key]=value<block_end><def_stmt>clear_headers self<block_start>"""Clears custom headers."""<line_sep>self._headers={}<block_end><def_stmt>_addrs_to_header self addrs<block_start>_addrs=[]<for_stmt>addr addrs<block_start><if_stmt><not>addr<block_start><continue><block_end><if_stmt>isinstance(addr basestring)<block_start><if_stmt>self._is_ascii(addr)<block_start>_addrs.append(self._encoded(addr))<block_end><else_stmt># these headers need special care when encoding, see: # http://tools.ietf.org/html/rfc2047#section-8 # Need to break apart the name from the address if there are # non-ascii chars <block_start>m=self.ADDR_REGEXP.match(addr)<if_stmt>m<block_start>t=(m.group(2) m.group(1))<line_sep>_addrs.append(self._addr_tuple_to_addr(t))<block_end><else_stmt># What can we do? Just pass along what the user gave us and hope they did it right <block_start>_addrs.append(self._encoded(addr))<block_end><block_end><block_end><elif_stmt>isinstance(addr tuple)<block_start>_addrs.append(self._addr_tuple_to_addr(addr))<block_end><else_stmt><block_start>self._raise(MessageEncodeError '%s is not a valid address'%str(addr))<block_end><block_end>_header=','.join(_addrs)<line_sep><return>_header<block_end><def_stmt>_raise self exc_class message<block_start><raise>exc_class(self._encoded(message))<block_end><def_stmt>_header self _str<block_start><if_stmt>self._is_ascii(_str)<block_start><return>_str<block_end><return>Header(_str self._charset).encode()<block_end><def_stmt>_is_ascii self _str<block_start><return>all(ord(c)<l>128<for>c _str)<block_end><def_stmt>_encoded self _str<block_start><return>encoded(_str self._charset)<block_end><def_stmt>to_mime_message self<block_start>"""Returns the envelope as :py:class:`email.mime.multipart.MIMEMultipart`."""<line_sep>msg=MIMEMultipart('alternative')<line_sep>msg['Subject']=self._header(self._subject<or>'')<line_sep>msg['From']=self._encoded(self._addrs_to_header([self._from]))<line_sep>msg['To']=self._encoded(self._addrs_to_header(self._to))<if_stmt>self._cc<block_start>msg['CC']=self._addrs_to_header(self._cc)<block_end><if_stmt>self._headers<block_start><for_stmt>key,value self._headers.items()<block_start>msg[key]=self._header(value)<block_end><block_end><for_stmt>part self._parts<block_start>type_maj,type_min=part[0].split('/')<if_stmt>type_maj<eq>'text'<and>type_min<in>('html' 'plain')<block_start>msg.attach(MIMEText(part[1] type_min self._charset))<block_end><else_stmt><block_start>msg.attach(part[1])<block_end><block_end><return>msg<block_end><def_stmt>add_attachment self file_path mimetype=<none><block_start>"""Attaches a file located at *file_path* to the envelope. If *mimetype* is not specified an attempt to guess it is made. 
If nothing is guessed then `application/octet-stream` is used."""<if_stmt><not>mimetype<block_start>mimetype,_=mimetypes.guess_type(file_path)<block_end><if_stmt>mimetype<is><none><block_start>mimetype='application/octet-stream'<block_end>type_maj,type_min=mimetype.split('/')<with_stmt>open(file_path 'rb')<as>fh<block_start>part_data=fh.read()<line_sep>part=MIMEBase(type_maj type_min)<line_sep>part.set_payload(part_data)<line_sep>email_encoders.encode_base64(part)<line_sep>part_filename=os.path.basename(self._encoded(file_path))<line_sep>part.add_header('Content-Disposition' 'attachment; filename="%s"'%part_filename)<line_sep>self._parts.append((mimetype part))<block_end><block_end><def_stmt>send self *args **kwargs<block_start>"""Sends the envelope using a freshly created SMTP connection. *args* and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP` constructor. Returns a tuple of SMTP object and whatever its send method returns."""<line_sep>conn=SMTP(*args **kwargs)<line_sep>send_result=conn.send(self)<line_sep><return>conn send_result<block_end><block_end>
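A short usage sketch for the Envelope class above; the addresses, file name and SMTP host are placeholders, and sending is left commented out.

envelope = Envelope(
    from_addr=("sender@example.com", "Sender Name"),   # tuple form: (address, name)
    to_addr="Some User <user@example.com>",            # string form also accepted
    subject="Monthly report",
    text_body="See the attached file.",
    html_body="<p>See the attached file.</p>",
)
envelope.add_cc_addr("copy@example.com")
envelope.add_attachment("report.pdf")   # assumes report.pdf exists; mimetype is guessed
# conn, result = envelope.send("smtp.example.com", login="user", password="secret", tls=True)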
<import_from_stmt>PIL Image ImageDraw ImageFont<import_from_stmt>numpy asarray<import_from_stmt>cv2 cvtColor COLOR_RGB2BGR imshow waitKey<import_from_stmt>os getcwd<def_stmt>getFontSize_name resolution<block_start>x=resolution[0]<if_stmt>x<le>1024<block_start><return>(16 (1024 576))<block_end><elif_stmt>x<le>1280<block_start><return>(21 (1280 720))<block_end><elif_stmt>x<le>1440<block_start><return>(23 (1440 810))<block_end><elif_stmt>x<le>1600<block_start><return>(26 (1600 900))<block_end><else_stmt><block_start><return>(31 (1920 1080))<block_end><block_end><def_stmt>getTemplatePic_CH words fontsize#Typical font sizes: base operator names 23, stationing overview room names 28 (1440*810); base operator names 30, stationing overview room names 38 (1920*1080) <block_start>ttf=ImageFont.truetype(getcwd()+"/res/fonts/SourceHanSansCN-Regular.otf" fontsize)#Uses the Source Han Sans font wordsPic=Image.new('RGB' ttf.getsize(words))<line_sep>wordsDraw=ImageDraw.Draw(wordsPic)<line_sep>wordsDraw.text((0 0) words font=ttf fill=(255 255 255))#Create the matching template #temp = cvtColor(asarray(wordsPic), COLOR_RGB2BGR) #imshow('test', temp) #waitKey(0) <return>cvtColor(asarray(wordsPic) COLOR_RGB2BGR)<block_end><def_stmt>getTemplatePic_NUM num fontsize#Typical font size: operator mood in the stationing overview 28 <block_start>num=str(num)<line_sep>ttf=ImageFont.truetype(getcwd()+"/res/fonts/Bender.otf" fontsize)#Uses the Bender font wordsPic=Image.new('RGB' ttf.getsize(num) color=(255 255 255))<line_sep>wordsDraw=ImageDraw.Draw(wordsPic)<line_sep>wordsDraw.text((0 0) num font=ttf fill=(0 0 0))#Create the matching template <return>cvtColor(asarray(wordsPic) COLOR_RGB2BGR)<block_end>
########################################################################## # Copyright (c) 2009, ETH Zurich. # All rights reserved. # # This file is distributed under the terms in the attached LICENSE file. # If you do not find this file, copies can be found by writing to: # ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group. ########################################################################## <import_stmt>re<import_stmt>tests<import_from_stmt>common TestCommon<import_from_stmt>results PassFailResult<line_sep>@tests.add_test<class_stmt>MemTest(TestCommon)<block_start>'''prints out free and total memory after system boot up'''<line_sep>name="freemem"<def_stmt>get_modules self build machine<block_start>modules=super(MemTest self).get_modules(build machine)<line_sep>modules.add_module("freemem")<line_sep><return>modules<block_end><def_stmt>get_finish_string self<block_start><return>"freemem done!"<block_end><def_stmt>process_data self testdir rawiter# the test passed iff the last line is the finish string <block_start>lastline=''<for_stmt>line rawiter<block_start>lastline=line<block_end>passed=lastline.startswith(self.get_finish_string())<line_sep><return>PassFailResult(passed)<block_end><block_end>
""" enclosure_tags ~~~~~~~~~~~~~~ Fix tags for MP3 enclosures (e.g. podcasts). Adds a "with tags" link to a version of the file with tags set as follows: * the entry title as title * the feed title as album * the entry/feed author as author This plugin needs additional dependencies, use the ``unstable-plugins`` extra to install them: .. code-block:: bash pip install reader[unstable-plugins] To load:: READER_APP_PLUGIN='reader._plugins.enclosure_tags:init' \\ python -m reader serve Implemented for https://github.com/lemon24/reader/issues/50. Became a plugin in https://github.com/lemon24/reader/issues/52. """<import_stmt>tempfile<import_from_stmt>urllib.parse urlparse<import_stmt>mutagen.mp3<import_stmt>requests<import_from_stmt>flask Blueprint<import_from_stmt>flask request<import_from_stmt>flask Response<import_from_stmt>flask stream_with_context<import_from_stmt>flask url_for<line_sep>blueprint=Blueprint('enclosure_tags' __name__)<line_sep>ALL_TAGS=('album' 'title' 'artist')<line_sep>SET_ONLY_IF_MISSING_TAGS={'artist'}<line_sep>@blueprint.route('/enclosure-tags' defaults={'filename':<none>})@blueprint.route('/enclosure-tags/<filename>')<def_stmt>enclosure_tags filename<block_start><def_stmt>update_tags file<block_start>emp3=mutagen.mp3.EasyMP3(file)<line_sep>changed=<false><for_stmt>key ALL_TAGS<block_start><if_stmt>key<in>SET_ONLY_IF_MISSING_TAGS<and>emp3.get(key)<block_start><continue><block_end>value=request.args.get(key)<if_stmt><not>value<block_start><continue><block_end>emp3[key]=[value]<line_sep>changed=<true><block_end><if_stmt>changed<block_start>emp3.save(file)<block_end>file.seek(0)<block_end><def_stmt>chunks req# Send the headers as soon as possible. # Some browsers wait for the headers before showing the "Save As" dialog. <block_start><yield>''<line_sep>tmp=tempfile.TemporaryFile()<for_stmt>chunk req.iter_content(chunk_size=2<power>20)<block_start>tmp.write(chunk)<block_end>tmp.seek(0)<line_sep>update_tags(tmp)<try_stmt><block_start><while_stmt><true><block_start>data=tmp.read(2<power>20)<if_stmt><not>data<block_start><break><block_end><yield>data<block_end><block_end><finally_stmt><block_start>tmp.close()<block_end><block_end>url=request.args['url']<line_sep>req=requests.get(url stream=<true>)<line_sep>headers={}<for_stmt>name ('Content-Type' 'Content-Disposition')<block_start><if_stmt>name<in>req.headers<block_start>headers[name]=req.headers[name]<block_end><block_end><return>Response(stream_with_context(chunks(req)) headers=headers)<block_end><def_stmt>enclosure_tags_filter enclosure entry<block_start>filename=urlparse(enclosure.href).path.split('/')[-1]<if_stmt><not>filename.endswith('.mp3')<block_start><return>[]<block_end>args={'url':enclosure.href 'filename':filename}<if_stmt>entry.title<block_start>args['title']=entry.title<block_end><if_stmt>entry.feed.title<block_start>args['album']=entry.feed.title<block_end><if_stmt>entry.author<or>entry.feed.author<block_start>args['artist']=entry.author<or>entry.feed.author<block_end><return>[('with tags' url_for('enclosure_tags.enclosure_tags' **args))]<block_end><def_stmt>init app<block_start>app.register_blueprint(blueprint)<line_sep>app.reader_additional_enclosure_links.append(enclosure_tags_filter)<block_end>
<class_stmt>Workset(WorksetPreview IDisposable)<block_start>""" Represents a workset in the document. """<line_sep>@staticmethod<def_stmt>Create document name<block_start>""" Create(document: Document,name: str) -> Workset Creates a new workset. document: The document in which the new instance is created. name: The workset name. Returns: Returns the newly created workset. """<line_sep><pass><block_end><def_stmt>Dispose self<block_start>""" Dispose(self: WorksetPreview,A_0: bool) """<line_sep><pass><block_end><def_stmt>ReleaseUnmanagedResources self *args<block_start>""" ReleaseUnmanagedResources(self: WorksetPreview,disposing: bool) """<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>""" __enter__(self: IDisposable) -> object """<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end>IsEditable=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Whether the workset is editable. Get: IsEditable(self: Workset) -> bool """<line_sep>IsOpen=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Whether the workset is open (rather than closed). Get: IsOpen(self: Workset) -> bool """<line_sep>IsVisibleByDefault=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Whether the workset is visible by default. Get: IsVisibleByDefault(self: Workset) -> bool """<line_sep>Kind=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Kind of the workset. Get: Kind(self: Workset) -> WorksetKind """<block_end>
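A hypothetical usage of the Workset stub above from a Revit Python environment (e.g. pyRevit or RevitPythonShell); it assumes a workshared document is open and that `doc` is obtained by whatever convention the host provides.

from Autodesk.Revit.DB import Transaction, Workset

def create_workset(doc, name):
    # Worksets can only be created in workshared documents, inside a transaction.
    if not doc.IsWorkshared:
        raise RuntimeError("Document is not workshared")
    t = Transaction(doc, "Create workset")
    t.Start()
    try:
        workset = Workset.Create(doc, name)
        t.Commit()
        return workset
    except Exception:
        t.RollBack()
        raise

# workset = create_workset(doc, "Shared Levels and Grids - Copy")
# print(workset.Id, workset.IsEditable)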
# Lint as: python3 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Date-related constants and enums."""<import_stmt>enum<class_stmt>Month(enum.Enum)<block_start>"""Months. Values are one-based."""<line_sep>JANUARY=1<line_sep>FEBUARY=2<line_sep>MARCH=3<line_sep>APRIL=4<line_sep>MAY=5<line_sep>JUNE=6<line_sep>JULY=7<line_sep>AUGUST=8<line_sep>SEPTEMBER=9<line_sep>OCTOBER=10<line_sep>NOVEMBER=11<line_sep>DECEMBER=12<block_end><class_stmt>WeekDay(enum.Enum)<block_start>"""Named days of the week. Values are zero-based with Monday = 0."""<line_sep># We follow Python datetime convention of starting from 0. MONDAY=0<line_sep>TUESDAY=1<line_sep>WEDNESDAY=2<line_sep>THURSDAY=3<line_sep>FRIDAY=4<line_sep>SATURDAY=5<line_sep>SUNDAY=6<block_end><class_stmt>PeriodType(enum.Enum)<block_start>"""Periods that can be added or subtracted from DateTensors."""<line_sep>DAY=0<line_sep>WEEK=1<line_sep>MONTH=2<line_sep>YEAR=3<block_end><class_stmt>BusinessDayConvention(enum.Enum)<block_start>"""Conventions that determine how to roll dates that fall on holidays. * `NONE`: No adjustment * `FOLLOWING`: Choose the first business day after the given holiday. * `MODIFIED_FOLLOWING`: Choose the first business day after the given holiday unless that day falls in the next calendar month, in which case choose the first business day before the holiday. * `PRECEDING`: Choose the first business day before the given holiday. * `MODIFIED_PRECEDING`: Choose the first business day before the given holiday unless that day falls in the previous calendar month, in which case choose the first business day after the holiday. """<line_sep>NONE=0<line_sep>FOLLOWING=1<line_sep>MODIFIED_FOLLOWING=2<line_sep>PRECEDING=3<line_sep>MODIFIED_PRECEDING=4<block_end># TODO(b/148011715): add NEAREST convention. <class_stmt>WeekendMask(object)<block_start>"""Provides weekend masks for some of the common weekend patterns."""<line_sep># E.g. US/UK/Europe etc. SATURDAY_SUNDAY=(0 0 0 0 0 1 1)<line_sep># E.g. Most countries in the Middle East. FRIDAY_SATURDAY=(0 0 0 0 1 1 0)<line_sep># E.g. India, Nepal. SUNDAY_ONLY=(0 0 0 0 0 0 1)<line_sep># Default value. NONE=(0 0 0 0 0 0 0)<block_end>
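A small check, assuming the enums above are importable as a module, showing how a WeekendMask tuple lines up with WeekDay values: position i in the mask is 1 exactly when WeekDay(i) is a weekend day.

for day in WeekDay:
    is_weekend = bool(WeekendMask.SATURDAY_SUNDAY[day.value])
    print(day.name, is_weekend)   # SATURDAY/SUNDAY print True, the rest False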
<import_stmt>argparse pdb<import_stmt>gym<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pickle<import_stmt>random<import_stmt>torch<import_stmt>scipy.misc<import_from_stmt>gym.envs.registration register<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('-display' type=int default=0)<line_sep>parser.add_argument('-seed' type=int default=1)<line_sep>parser.add_argument('-lanes' type=int default=3)<line_sep>parser.add_argument('-traffic_rate' type=int default=15)<line_sep>parser.add_argument('-state_image' type=int default=1)<line_sep>parser.add_argument('-save_images' type=int default=0)<line_sep>parser.add_argument('-store' type=int default=1)<line_sep>parser.add_argument('-data_dir' type=str default='traffic-data/state-action-cost/')<line_sep>parser.add_argument('-fps' type=int default=30)<line_sep>parser.add_argument('-time_slot' type=int default=0)<line_sep>parser.add_argument('-map' type=str default='i80' choices={'ai' 'i80' 'us101' 'lanker' 'peach'})<line_sep>parser.add_argument('-delta_t' type=float default=0.1)<line_sep>opt=parser.parse_args()<line_sep>opt.state_image=(opt.state_image<eq>1)<line_sep>opt.store=(opt.store<eq>1)<line_sep>random.seed(opt.seed)<line_sep>np.random.seed(opt.seed)<line_sep>torch.manual_seed(opt.seed)<line_sep>os.system("mkdir -p "+opt.data_dir)<line_sep>kwargs=dict(display=opt.display state_image=opt.state_image store=opt.store fps=opt.fps nb_lanes=opt.lanes traffic_rate=opt.traffic_rate data_dir=opt.data_dir delta_t=opt.delta_t )<line_sep>register(id='Traffic-v0' entry_point='traffic_gym:Simulator' kwargs=kwargs)<line_sep>register(id='I-80-v0' entry_point='map_i80:I80' kwargs=kwargs)<line_sep>gym.envs.registration.register(id='US-101-v0' entry_point='map_us101:US101' kwargs=kwargs )<line_sep>gym.envs.registration.register(id='Lankershim-v0' entry_point='map_lanker:Lankershim' kwargs=kwargs )<line_sep>gym.envs.registration.register(id='Peachtree-v0' entry_point='map_peach:Peachtree' kwargs=kwargs )<line_sep>env_names={'ai':'Traffic-v0' 'i80':'I-80-v0' 'us101':'US-101-v0' 'lanker':'Lankershim-v0' 'peach':'Peachtree-v0' }<line_sep>print('Building the environment (loading data, if any)')<line_sep>env=gym.make(env_names[opt.map])<line_sep>env.reset(frame=0 time_slot=opt.time_slot)<line_sep>done=<false><while_stmt><not>done<block_start>observation,reward,done,info=env.step()<line_sep>env.render()<block_end>print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')<line_sep>
<import_from_stmt>curriculum.utils set_env_no_gpu format_experiment_prefix<line_sep>set_env_no_gpu()<import_stmt>argparse<import_stmt>math<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>sys<import_stmt>random<import_from_stmt>multiprocessing cpu_count<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>rllab.misc.instrument run_experiment_lite<import_from_stmt>rllab config<import_from_stmt>rllab.misc.instrument VariantGenerator<import_from_stmt>rllab.algos.trpo TRPO<import_from_stmt>rllab.baselines.linear_feature_baseline LinearFeatureBaseline<import_from_stmt>curriculum.envs.ndim_point.point_env PointEnv<import_from_stmt>rllab.envs.normalized_env normalize<import_from_stmt>rllab.policies.gaussian_mlp_policy GaussianMLPPolicy<import_from_stmt>curriculum.envs.goal_env GoalExplorationEnv evaluate_goal_env<import_from_stmt>curriculum.envs.base FixedStateGenerator UniformStateGenerator<import_from_stmt>curriculum.state.evaluator *<import_from_stmt>curriculum.logging.html_report format_dict HTMLReport<import_from_stmt>curriculum.logging.visualization *<import_from_stmt>curriculum.logging.logger ExperimentLogger<import_from_stmt>curriculum.experiments.goals.point_nd.utils plot_policy_performance<line_sep>EXPERIMENT_TYPE=osp.basename(__file__).split('.')[0]<def_stmt>run_task v<block_start>random.seed(v['seed'])<line_sep>np.random.seed(v['seed'])<line_sep># goal generators logger.log("Initializing the goal generators and the inner env...")<line_sep>inner_env=normalize(PointEnv(dim=v['goal_size'] state_bounds=v['state_bounds']))<line_sep>print("the state_bounds are: " v['state_bounds'])<line_sep>center=np.zeros(v['goal_size'])<line_sep>uniform_goal_generator=UniformStateGenerator(state_size=v['goal_size'] bounds=v['goal_range'] center=center)<line_sep>feasible_goal_ub=np.array(v['state_bounds'])[:v['goal_size']]<line_sep>print("the feasible_goal_ub is: " feasible_goal_ub)<line_sep>uniform_feasible_goal_generator=UniformStateGenerator(state_size=v['goal_size'] bounds=[-1<times>feasible_goal_ub feasible_goal_ub])<line_sep>env=GoalExplorationEnv(env=inner_env goal_generator=uniform_goal_generator obs2goal_transform=<lambda>x:x[:int(len(x)/2)] terminal_eps=v['terminal_eps'] only_feasible=v['only_feasible'] distance_metric=v['distance_metric'] terminate_env=<true> goal_weight=v['goal_weight'] )<line_sep># this goal_generator will be updated by a uniform after <if_stmt>v['sample_unif_feas']<block_start>env.update_goal_generator(uniform_feasible_goal_generator)<block_end>policy=GaussianMLPPolicy(env_spec=env.spec hidden_sizes=(32 32) # Fix the variance since different goals will require different variances, making this parameter hard to learn. 
learn_std=<false> output_gain=v['output_gain'] init_std=v['policy_init_std'] )<line_sep>baseline=LinearFeatureBaseline(env_spec=env.spec)<line_sep>n_traj=3<line_sep># feasible_goals = generate_initial_goals(env, policy, v['goal_range'], horizon=v['horizon'], size=10000) #v['horizon']) # print(feasible_goals) # uniform_list_goal_generator = UniformListStateGenerator(goal_list=feasible_goals.tolist()) # env.update_goal_generator(uniform_list_goal_generator) # env.update_goal_generator(fixed_goal_generator) logger.log("Initializing report and plot_policy_reward...")<line_sep>log_dir=logger.get_snapshot_dir()<line_sep>inner_log_dir=osp.join(log_dir 'inner_iters')<line_sep>report=HTMLReport(osp.join(log_dir 'report.html') images_per_row=3)<line_sep>report.add_header("{}".format(EXPERIMENT_TYPE))<line_sep>report.add_text(format_dict(v))<line_sep>logger.log("Starting the outer iterations")<for_stmt>outer_iter range(v['outer_iters'])<block_start>logger.log("Outer itr # %i"%outer_iter)<line_sep>logger.log("Perform TRPO with UniformListStateGenerator...")<with_stmt>ExperimentLogger(inner_log_dir outer_iter snapshot_mode='last' hold_outter_log=<true>)<block_start>algo=TRPO(env=env policy=policy baseline=baseline batch_size=v['pg_batch_size'] max_path_length=v['horizon'] n_itr=v['inner_iters'] discount=0.995 step_size=0.01 plot=<false> )<line_sep>algo.train()<block_end>report.add_image(plot_policy_performance(policy env v['horizon']))<line_sep># log some more on how the pendulum performs the upright and general task old_goal_generator=env.goal_generator<line_sep>logger.log("Evaluating performance on Unif and Fix Goal Gen...")<with_stmt>logger.tabular_prefix('UnifFeasGoalGen_')<block_start>env.update_goal_generator(uniform_feasible_goal_generator)<line_sep>evaluate_goal_env(env policy=policy horizon=v['horizon'] n_goals=50 fig_prefix='UnifFeasGoalGen_itr%d'%outer_iter report=report n_traj=n_traj)<block_end># back to old goal generator <with_stmt>logger.tabular_prefix("UnifGoalGen_")<block_start>env.update_goal_generator(old_goal_generator)<line_sep>evaluate_goal_env(env policy=policy horizon=v['horizon'] n_goals=50 fig_prefix='UnifGoalGen_itr%d'%outer_iter report=report n_traj=n_traj)<block_end>logger.dump_tabular(with_prefix=<false>)<line_sep>report.save()<line_sep>report.new_row()<block_end><with_stmt>logger.tabular_prefix('FINALUnifFeasGoalGen_')<block_start>env.update_goal_generator(uniform_feasible_goal_generator)<line_sep>evaluate_goal_env(env policy=policy horizon=v['horizon'] n_goals=5e3 fig_prefix='FINAL1UnifFeasGoalGen_' report=report n_traj=n_traj)<line_sep>evaluate_goal_env(env policy=policy horizon=v['horizon'] n_goals=5e3 fig_prefix='FINAL2UnifFeasGoalGen_' report=report n_traj=n_traj)<block_end>logger.dump_tabular(with_prefix=<false>)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--ec2' '-e' action='store_true' default=<false> help="add flag to run in ec2")<line_sep>parser.add_argument('--clone' '-c' action='store_true' default=<false> help="add flag to copy file and checkout current")<line_sep>parser.add_argument('--local_docker' '-d' action='store_true' default=<false> help="add flag to run in local dock")<line_sep>parser.add_argument('--type' '-t' type=str default='' help='set instance type')<line_sep>parser.add_argument('--price' '-p' type=str default='' help='set betting price')<line_sep>parser.add_argument('--subnet' '-sn' type=str default='' help='set subnet like 
us-west-1a')<line_sep>parser.add_argument('--name' '-n' type=str default='' help='set exp prefix name and new file name')<line_sep>parser.add_argument('--debug' action='store_true' default=<false> help="run code without multiprocessing")<line_sep>parser.add_argument('--prefix' type=str default=<none> help='set the additional name for experiment prefix')<line_sep>args=parser.parse_args()<line_sep># setup ec2 ec2_instance=args.type<if>args.type<else>'m4.4xlarge'<line_sep># configure instance info=config.INSTANCE_TYPE_INFO[ec2_instance]<line_sep>config.AWS_INSTANCE_TYPE=ec2_instance<line_sep>config.AWS_SPOT_PRICE=str(info["price"])<line_sep>n_parallel=int(info["vCPU"])# make the default 4 if not using ec2 <if_stmt>args.ec2<block_start>mode='ec2'<block_end><elif_stmt>args.local_docker<block_start>mode='local_docker'<line_sep>n_parallel=cpu_count()<if><not>args.debug<else>1<block_end><else_stmt><block_start>mode='local'<line_sep>n_parallel=cpu_count()<if><not>args.debug<else>1<block_end>default_prefix='goal-point-nd-trpo'<if_stmt>args.prefix<is><none><block_start>exp_prefix=format_experiment_prefix(default_prefix)<block_end><elif_stmt>args.prefix<eq>''<block_start>exp_prefix=default_prefix<block_end><else_stmt><block_start>exp_prefix='{}_{}'.format(default_prefix args.prefix)<block_end>vg=VariantGenerator()<line_sep>vg.add('seed' range(30 90 20))<line_sep># # GeneratorEnv params vg.add('goal_size' [2 3 4 5 6])# this is the ultimate goal we care about: getting the pendulum upright vg.add('terminal_eps' <lambda>goal_size:[math.sqrt(goal_size)/math.sqrt(2)<times>0.3])<line_sep>vg.add('only_feasible' [<true>])<line_sep>vg.add('goal_range' [5])# this will be used also as bound of the state_space vg.add('state_bounds' <lambda>goal_range goal_size terminal_eps:[(1 goal_range)+(0.3 )<times>(goal_size-2)+(goal_range )<times>goal_size])<line_sep>vg.add('sample_unif_feas' [<true>])<line_sep>vg.add('distance_metric' ['L2'])<line_sep>vg.add('goal_weight' [1])<line_sep>############################################# vg.add('min_reward' <lambda>goal_weight:[goal_weight<times>0.1])# now running it with only the terminal reward of 1! vg.add('max_reward' <lambda>goal_weight:[goal_weight<times>0.9])<line_sep>vg.add('horizon' [200])<line_sep>vg.add('outer_iters' [200])<line_sep>vg.add('inner_iters' [5])<line_sep>vg.add('pg_batch_size' [20000])<line_sep># policy initialization vg.add('output_gain' [1])<line_sep>vg.add('policy_init_std' [1])<line_sep>print('Running {} inst. on type {}, with price {}, parallel {}'.format(vg.size config.AWS_INSTANCE_TYPE config.AWS_SPOT_PRICE n_parallel))<for_stmt>vv vg.variants()<block_start><if_stmt>mode<in>['ec2' 'local_docker']<block_start>run_experiment_lite(# use_cloudpickle=False, stub_method_call=run_task variant=vv mode=mode # Number of parallel workers for sampling n_parallel=n_parallel # Only keep the snapshot parameters for the last iteration snapshot_mode="last" seed=vv['seed'] # plot=True, exp_prefix=exp_prefix # exp_name=exp_name, sync_s3_pkl=<true> # for sync the pkl file also during the training sync_s3_png=<true> sync_s3_html=<true> # # use this ONLY with ec2 or local_docker!!! 
pre_commands=['export MPLBACKEND=Agg' 'pip install --upgrade pip' 'pip install --upgrade -I tensorflow' 'pip install git+https://github.com/tflearn/tflearn.git' 'pip install dominate' 'pip install multiprocessing_on_dill' 'pip install scikit-image' 'conda install numpy -n rllab3 -y' ] )<if_stmt>mode<eq>'local_docker'<block_start>sys.exit()<block_end><block_end><else_stmt><block_start>run_experiment_lite(# use_cloudpickle=False, stub_method_call=run_task variant=vv mode='local' n_parallel=n_parallel # Only keep the snapshot parameters for the last iteration snapshot_mode="last" seed=vv['seed'] exp_prefix=exp_prefix print_command=<false> )<if_stmt>args.debug<block_start>sys.exit()<block_end><block_end><block_end><block_end>
<def_stmt>decode_lines lines_bytes encoding:str<block_start><if_stmt>isinstance(lines_bytes[0] bytes)<block_start>lines_str=[line.decode(encoding)<for>line lines_bytes]<block_end><elif_stmt>isinstance(lines_bytes[0] str)<block_start>lines_str=lines_bytes<block_end><else_stmt><block_start><raise>TypeError(type(lines_bytes[0]))<block_end><return>lines_str<block_end>
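A quick usage sketch for decode_lines above; the byte strings are made up.

raw = [b"first line\n", b"second line\n"]
print(decode_lines(raw, encoding="utf-8"))     # ['first line\n', 'second line\n']
print(decode_lines(["already str"], "utf-8"))  # strings pass through unchanged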
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests whether modules produce similar output given np.ndarray inputs."""<import_stmt>functools<import_from_stmt>typing Tuple<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>haiku<as>hk<import_from_stmt>haiku._src test_utils<import_from_stmt>haiku._src.integration descriptors<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<line_sep>ModuleFn=descriptors.ModuleFn<def_stmt>tree_assert_allclose a b * atol=1e-6<block_start>jax.tree_multimap(functools.partial(np.testing.assert_allclose atol=atol) a b)<block_end><class_stmt>NumpyInputsTest(parameterized.TestCase)<block_start>@test_utils.combined_named_parameters(descriptors.ALL_MODULES test_utils.named_bools('np_inputs') test_utils.named_bools('np_params') test_utils.named_bools('close_over_params'))<def_stmt>test_numpy_and_jax_results_close self module_fn:ModuleFn shape:Tuple[int <ellipsis>] dtype:jnp.dtype np_params:bool np_inputs:bool close_over_params:bool <block_start><if_stmt><not>(np_params<or>np_inputs)<block_start>self.skipTest('Pure JAX variants tested elsewhere')<block_end>f=hk.transform_with_state(<lambda>x:module_fn()(x))# pylint: disable=unnecessary-lambda rng=jax.random.PRNGKey(42)<line_sep>x=jnp.ones(shape dtype)<line_sep>params,state=f.init(rng x)<if_stmt>close_over_params<block_start>apply_fn=functools.partial(f.apply params state)<line_sep>out,new_state=jax.jit(apply_fn)(rng x)<block_end><else_stmt><block_start>out,new_state=jax.jit(f.apply)(params state rng x)<block_end><if_stmt>np_inputs<block_start>rng,x=jax.device_get((rng x))<with_stmt>self.subTest('init')<block_start>params2,state2=f.init(rng x)<line_sep>tree_assert_allclose(params params2)<line_sep>tree_assert_allclose(state state2)<block_end><block_end><with_stmt>self.subTest('apply')<block_start><if_stmt>np_params<block_start>params,state=jax.device_get((params state))<block_end><if_stmt>close_over_params<block_start>apply_fn=functools.partial(f.apply params state)<line_sep>out2,new_state2=jax.jit(apply_fn)(rng x)<block_end><else_stmt><block_start>out2,new_state2=jax.jit(f.apply)(params state rng x)<block_end>tree_assert_allclose(out out2)<line_sep>tree_assert_allclose(new_state new_state2)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
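A standalone sketch, assuming only jax and numpy are installed, of the property the test above exercises: jax.device_get returns plain numpy arrays, and feeding those back into a jitted function matches the device-array result.

import jax
import jax.numpy as jnp
import numpy as np

@jax.jit
def f(x):
    return jnp.sum(x ** 2)

x_dev = jnp.ones((3,))          # device-backed array
x_np = jax.device_get(x_dev)    # converted to np.ndarray
assert isinstance(x_np, np.ndarray)
np.testing.assert_allclose(f(x_dev), f(x_np), atol=1e-6)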
# -*- coding: utf-8 -*- <import_stmt>pytest<def_stmt>test_translator <block_start><def_stmt>translator string<block_start>translations={'String value is too long.':'Tamanho de texto muito grande.'}<line_sep><return>translations.get(string string)<block_end><import_from_stmt>schematics.translator register_translator<line_sep>register_translator(translator)<import_from_stmt>schematics.types StringType<import_from_stmt>schematics.exceptions ValidationError<with_stmt>pytest.raises(ValidationError)<as>exc<block_start>StringType(max_length=1).validate_length('Abc')<block_end><assert_stmt>exc.value<eq>['Tamanho de texto muito grande.']<block_end>
<import_stmt>json<import_stmt>logging<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.db transaction<import_from_stmt>osf.models AbstractProvider PreprintProvider Preprint Subject<import_from_stmt>osf.models.provider rules_to_subjects<import_from_stmt>scripts utils<as>script_utils<import_from_stmt>osf.models.validators validate_subject_hierarchy<import_from_stmt>website.preprints.tasks on_preprint_updated<line_sep>logger=logging.getLogger(__name__)<line_sep>BEPRESS_PROVIDER=<none><def_stmt>validate_input custom_provider data provider_type='osf.preprintprovider' copy=<false> add_missing=<false># This function may be run outside of this command (e.g. in the admin app) so we # need to make sure that BEPRESS_PROVIDER is set <block_start><global>BEPRESS_PROVIDER<line_sep>BEPRESS_PROVIDER=AbstractProvider.objects.filter(_id='osf' type='osf.preprintprovider').first()<line_sep>logger.info('Validating data')<line_sep>includes=data.get('include' [])<line_sep>excludes=data.get('exclude' [])<line_sep>customs=data.get('custom' {})<line_sep>merges=data.get('merge' {})<if_stmt>copy<block_start>included_subjects=rules_to_subjects(custom_provider.subjects_acceptable)<block_end><else_stmt><block_start><assert_stmt><not>set(includes)&set(excludes) 'There must be no overlap between includes and excludes'<for_stmt>text includes<block_start><assert_stmt>Subject.objects.filter(provider=BEPRESS_PROVIDER text=text).exists() 'Unable to find included subject with text {}'.format(text)<block_end>included_subjects=Subject.objects.filter(provider=BEPRESS_PROVIDER text__in=includes).include_children()<line_sep>logger.info('Successfully validated `include`')<for_stmt>text excludes<block_start><try_stmt><block_start>Subject.objects.get(provider=BEPRESS_PROVIDER text=text)<block_end><except_stmt>Subject.DoesNotExist<block_start><raise>RuntimeError('Unable to find excluded subject with text {}'.format(text))<block_end><assert_stmt>included_subjects.filter(text=text).exists() 'Excluded subject with text {} was not included'.format(text)<block_end>included_subjects=included_subjects.exclude(text__in=excludes)<line_sep>logger.info('Successfully validated `exclude`')<block_end><for_stmt>cust_name,map_dict customs.items()<block_start><assert_stmt><not>included_subjects.filter(text=cust_name).exists() 'Custom text {} already exists in mapped set'.format(cust_name)<assert_stmt>Subject.objects.filter(provider=BEPRESS_PROVIDER text=map_dict.get('bepress')).exists() 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))<if_stmt>map_dict.get('parent')# Null parent possible <block_start><assert_stmt>map_dict['parent']<in>set(customs.keys())|set(included_subjects.values_list('text' flat=<true>)) 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])<line_sep># TODO: hierarchy length validation? 
Probably more trouble than worth here, done on .save <block_end><block_end>logger.info('Successfully validated `custom`')<line_sep>included_subjects=included_subjects|Subject.objects.filter(text__in=[map_dict['bepress']<for>map_dict customs.values()])<for_stmt>merged_from,merged_into merges.items()<block_start><assert_stmt><not>included_subjects.filter(text=merged_from).exists() 'Cannot merge subject "{}" that will be included'.format(merged_from)<assert_stmt>merged_into<in>set(included_subjects.values_list('text' flat=<true>))|set(customs.keys()) 'Unable to determine merge target for "{}"'.format(merged_into)<block_end>included_subjects=included_subjects|Subject.objects.filter(text__in=merges.keys())<line_sep>missing_subjects=Subject.objects.filter(id__in=set([hier[-1].id<for>ps Preprint.objects.filter(provider=custom_provider)<for>hier ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id' flat=<true>))<if_stmt><not>add_missing<block_start><assert_stmt><not>missing_subjects.exists() 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text' flat=<true>)))<block_end><if_stmt>isinstance(custom_provider PreprintProvider)<block_start><assert_stmt>custom_provider.share_title<not><in>[<none> '' 'bepress'] 'share title not set; please set the share title on this provider before creating a custom taxonomy.'<block_end>logger.info('Successfully validated mapping completeness')<line_sep><return>list(missing_subjects)<if>add_missing<else><none><block_end><def_stmt>create_subjects_recursive custom_provider root_text exclude_texts parent=<none><block_start>logger.info('Duplicating BePress subject {} on {}'.format(root_text custom_provider._id))<line_sep>bepress_subj=Subject.objects.get(provider=BEPRESS_PROVIDER text=root_text)<line_sep>custom_subj=Subject(text=root_text parent=parent bepress_subject=bepress_subj provider=custom_provider)<line_sep>custom_subj.save()<line_sep># This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded. # It could also be problematic if they didn't, if any of those children are used by existing preprints. # TODO: Determine correct resolution <for_stmt>child_text bepress_subj.children.exclude(text__in=exclude_texts).values_list('text' flat=<true>)<block_start>create_subjects_recursive(custom_provider child_text exclude_texts parent=custom_subj)<block_end><block_end><def_stmt>create_from_subjects_acceptable custom_provider add_missing=<false> missing=<none><block_start>tries=0<line_sep>subjects_to_copy=list(rules_to_subjects(custom_provider.subjects_acceptable))<if_stmt>missing<and>add_missing<block_start>subjects_to_copy=subjects_to_copy+missing<block_end><while_stmt>len(subjects_to_copy)<block_start>previous_len=len(subjects_to_copy)<line_sep>tries<augadd>1<if_stmt>tries<eq>10<block_start><raise>RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))<block_end><for_stmt>subj list(subjects_to_copy)<block_start><if_stmt>map_custom_subject(custom_provider subj.text subj.parent.text<if>subj.parent<else><none> subj.text)<block_start>subjects_to_copy.remove(subj)<block_end><elif_stmt>add_missing<and>subj.parent<and>subj.parent<not><in>subjects_to_copy# Dirty <block_start>subjects_to_copy.append(subj.parent)<line_sep>previous_len<augadd>1<block_end><else_stmt><block_start>logger.warn('Failed. 
Retrying next iteration')<block_end><block_end>new_len=len(subjects_to_copy)<if_stmt>new_len<eq>previous_len<block_start><raise>RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))<block_end><block_end><block_end><def_stmt>do_create_subjects custom_provider includes excludes copy=<false> add_missing=<false> missing=<none><block_start><if_stmt>copy<block_start>create_from_subjects_acceptable(custom_provider add_missing=add_missing missing=missing)<block_end><else_stmt><block_start><for_stmt>root_text includes<block_start>create_subjects_recursive(custom_provider root_text excludes)<block_end><block_end><block_end><def_stmt>map_custom_subject custom_provider name parent mapping<block_start>logger.info('Attempting to create subject {} on {} from {} with {}'.format(name custom_provider._id mapping 'parent {}'.format(parent)<if>parent<else>'no parent'))<if_stmt>parent<block_start>parent_subject=Subject.objects.filter(provider=custom_provider text=parent).first()<block_end><else_stmt><block_start>parent_subject=<none><block_end>bepress_subject=Subject.objects.get(provider=BEPRESS_PROVIDER text=mapping)<if_stmt>parent<and><not>parent_subject<block_start><return><false><block_end>custom_subject=Subject(provider=custom_provider text=name parent=parent_subject bepress_subject=bepress_subject)<line_sep>custom_subject.save()<line_sep><return><true><block_end><def_stmt>do_custom_mapping custom_provider customs<block_start>tries=0<line_sep>unmapped_customs=customs<while_stmt>len(unmapped_customs)<block_start>previous_len=len(unmapped_customs)<line_sep>tries<augadd>1<if_stmt>tries<eq>10<block_start><raise>RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')<block_end>successes=[]<for_stmt>cust_name,map_dict unmapped_customs.items()<block_start><if_stmt>map_custom_subject(custom_provider cust_name map_dict.get('parent') map_dict.get('bepress'))<block_start>successes.append(cust_name)<block_end><else_stmt><block_start>logger.warn('Failed. 
Retrying next iteration')<block_end><block_end>[unmapped_customs.pop(key)<for>key successes]<line_sep>new_len=len(unmapped_customs)<if_stmt>new_len<eq>previous_len<block_start><raise>RuntimeError('Unable to map any custom subjects on iteration -- invalid input')<block_end><block_end><block_end><def_stmt>map_preprints_to_custom_subjects custom_provider merge_dict dry_run=<false><block_start><for_stmt>preprint Preprint.objects.filter(provider=custom_provider)<block_start>logger.info('Preparing to migrate preprint {}'.format(preprint.id))<line_sep>old_hier=preprint.subject_hierarchy<line_sep>subjects_to_map=[hier[-1]<for>hier old_hier]<line_sep>merged_subject_ids=set(Subject.objects.filter(provider=custom_provider text__in=[merge_dict[k]<for>k set(merge_dict.keys())&set([s.text<for>s subjects_to_map])]).values_list('id' flat=<true>))<line_sep>subject_ids_to_map=set(s.id<for>s subjects_to_map<if>s.text<not><in>merge_dict.keys())<line_sep>aliased_subject_ids=set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map provider=custom_provider).values_list('id' flat=<true>))|merged_subject_ids<line_sep>aliased_hiers=[s.object_hierarchy<for>s Subject.objects.filter(id__in=aliased_subject_ids)]<line_sep>old_subjects=list(preprint.subjects.values_list('id' flat=<true>))<line_sep>preprint.subjects.clear()<for_stmt>hier aliased_hiers<block_start>validate_subject_hierarchy([s._id<for>s hier])<for_stmt>s hier<block_start>preprint.subjects.add(s)<block_end><block_end># Update preprint in SHARE <if_stmt><not>dry_run<block_start>on_preprint_updated(preprint._id old_subjects=old_subjects)<block_end>preprint.reload()<line_sep>new_hier=[s.object_hierarchy<for>s preprint.subjects.exclude(children__in=preprint.subjects.all())]<line_sep>logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id old_hier new_hier))<block_end><block_end><def_stmt>migrate provider=<none> provider_type='osf.preprintprovider' share_title=<none> data=<none> dry_run=<false> copy=<false> add_missing=<false># This function may be run outside of this command (e.g. 
in the admin app) so we # need to make sure that BEPRESS_PROVIDER is set <block_start><global>BEPRESS_PROVIDER<if_stmt><not>BEPRESS_PROVIDER<block_start>BEPRESS_PROVIDER=AbstractProvider.objects.filter(_id='osf' type='osf.preprintprovider').first()<block_end>custom_provider=AbstractProvider.objects.filter(_id=provider type=provider_type).first()<assert_stmt>custom_provider 'Unable to find specified provider: {}'.format(provider)<assert_stmt>custom_provider.id<ne>BEPRESS_PROVIDER.id 'Cannot add custom mapping to BePress provider'<assert_stmt><not>custom_provider.subjects.exists() 'Provider aldready has a custom taxonomy'<if_stmt>isinstance(custom_provider PreprintProvider)<and>custom_provider.share_title<in>[<none> '' 'bepress']<block_start><if_stmt><not>share_title<block_start><raise>RuntimeError('`--share-title` is required if not already set on the provider')<block_end>custom_provider.share_title=share_title<line_sep>custom_provider.save()<block_end>missing=validate_input(custom_provider data provider_type=provider_type copy=copy add_missing=add_missing)<line_sep>do_create_subjects(custom_provider data['include'] data.get('exclude' []) copy=copy add_missing=add_missing missing=missing)<line_sep>do_custom_mapping(custom_provider data.get('custom' {}))<line_sep>map_preprints_to_custom_subjects(custom_provider data.get('merge' {}) dry_run=dry_run)<block_end><class_stmt>Command(BaseCommand)<block_start><def_stmt>add_arguments self parser<block_start>super(Command self).add_arguments(parser)<line_sep>parser.add_argument('--dry' action='store_true' dest='dry_run' help='Run migration and roll back changes to db' )<line_sep>parser.add_argument('--data' action='store' dest='data' help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'<concat>'\n"exclude": [<list of children to exclude from included trees>],'<concat>'\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]'<concat>'\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}' )<line_sep>parser.add_argument('--provider' action='store' dest='provider' required=<true> help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.')<line_sep>parser.add_argument('--from-subjects-acceptable' action='store_true' dest='from_subjects_acceptable' help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used')<line_sep>parser.add_argument('--add-missing' action='store_true' dest='add_missing' help='Adds "used-but-not-included" subjects.')<line_sep>parser.add_argument('--share-title' action='store' type=str dest='share_title' help='Sets <provider>.share_title. 
Ignored if already set on provider, required if not.')<line_sep>parser.add_argument('--type' action='store' type=str dest='provider_type' help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]')<block_end><def_stmt>handle self *args **options<block_start><global>BEPRESS_PROVIDER<line_sep>provider_type=options.get('provider_type')<or>'osf.preprintprovider'<line_sep>BEPRESS_PROVIDER=AbstractProvider.objects.filter(_id='osf' type='osf.preprintprovider').first()<line_sep>dry_run=options.get('dry_run')<line_sep>provider=options['provider']<line_sep>data=json.loads(options['data']<or>'{}')<line_sep>share_title=options.get('share_title')<line_sep>copy=options.get('from_subjects_acceptable')<line_sep>add_missing=options.get('add_missing')<if_stmt>copy<block_start>data['include']=list(Subject.objects.filter(provider=BEPRESS_PROVIDER parent__isnull=<true>).values_list('text' flat=<true>))<block_end><if_stmt><not>dry_run<block_start>script_utils.add_file_logger(logger __file__)<block_end><with_stmt>transaction.atomic()<block_start>migrate(provider=provider share_title=share_title provider_type=provider_type data=data dry_run=dry_run copy=copy add_missing=add_missing)<if_stmt>dry_run<block_start><raise>RuntimeError('Dry run, transaction rolled back.')<block_end><block_end><block_end><block_end>
<import_stmt>os<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>modules.base ModuleProcessor<import_from_stmt>opta.core.terraform get_terraform_outputs<import_from_stmt>opta.exceptions UserErrors<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>opta.layer Layer<import_from_stmt>opta.module Module<block_end><class_stmt>MongodbAtlasProcessor(ModuleProcessor)<block_start><def_stmt>__init__ self module:"Module" layer:"Layer"<block_start><if_stmt>module.data["type"]<ne>"mongodb-atlas"<block_start><raise>Exception(f"The module {module.name} was expected to be of type mongodb-atlas")<block_end>super(MongodbAtlasProcessor self).__init__(module layer)<block_end><def_stmt>pre_hook self module_idx:int<arrow><none><block_start>required_env_set=set(["MONGODB_ATLAS_PUBLIC_KEY" "MONGODB_ATLAS_PRIVATE_KEY"])<if_stmt><not>required_env_set.issubset(set(os.environ.keys()))<block_start><raise>UserErrors("Opta did not find environment variable(s), please set them and retry: {}".format(required_env_set-set(os.environ.keys())))<block_end>super(MongodbAtlasProcessor self).pre_hook(module_idx)<block_end><def_stmt>process self module_idx:int<arrow><none><block_start>self.module.data["cloud_provider"]=self.layer.cloud.upper()<if_stmt>self.module.data["cloud_provider"]<eq>"LOCAL"<block_start>self.module.data["cloud_provider"]="AWS"# For local, always spin up in AWS self.module.data["region"]="US_EAST_1"<block_end>base_layer=self.layer.root()<line_sep>root_outputs=get_terraform_outputs(base_layer)<line_sep>self.module.data["public_nat_ips"]=root_outputs["public_nat_ips"]<line_sep>super(MongodbAtlasProcessor self).process(module_idx)<block_end><block_end>
<import_from_stmt>girder.exceptions ValidationException<import_from_stmt>girder.utility setting_utilities<class_stmt>PluginSettings<block_start>AUTO_COMPUTE='hashsum_download.auto_compute'<block_end>@setting_utilities.default(PluginSettings.AUTO_COMPUTE)<def_stmt>_defaultAutoCompute <block_start><return><false><block_end>@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)<def_stmt>_validateAutoCompute doc<block_start><if_stmt><not>isinstance(doc['value'] bool)<block_start><raise>ValidationException('Auto-compute hash setting must be true or false.')<block_end><block_end>
<import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>yaml<import_from_stmt>ccmlib common extension repository<import_from_stmt>ccmlib.cluster Cluster<import_from_stmt>ccmlib.dse_cluster DseCluster<import_from_stmt>ccmlib.node Node<import_from_stmt>distutils.version LooseVersion#pylint: disable=import-error, no-name-in-module <class_stmt>ClusterFactory()<block_start>@staticmethod<def_stmt>load path name<block_start>cluster_path=os.path.join(path name)<line_sep>filename=os.path.join(cluster_path 'cluster.conf')<with_stmt>open(filename 'r')<as>f<block_start>data=yaml.safe_load(f)<block_end><try_stmt><block_start>install_dir=<none><if_stmt>'install_dir'<in>data<block_start>install_dir=data['install_dir']<line_sep>repository.validate(install_dir)<block_end><if_stmt>install_dir<is><none><and>'cassandra_dir'<in>data<block_start>install_dir=data['cassandra_dir']<line_sep>repository.validate(install_dir)<block_end>cassandra_version=<none><if_stmt>'cassandra_version'<in>data<block_start>cassandra_version=LooseVersion(data['cassandra_version'])<block_end><if_stmt>common.isDse(install_dir)<block_start>cluster=DseCluster(path data['name'] install_dir=install_dir create_directory=<false> derived_cassandra_version=cassandra_version)<block_end><else_stmt><block_start>cluster=Cluster(path data['name'] install_dir=install_dir create_directory=<false> derived_cassandra_version=cassandra_version)<block_end>node_list=data['nodes']<line_sep>seed_list=data['seeds']<if_stmt>'partitioner'<in>data<block_start>cluster.partitioner=data['partitioner']<block_end><if_stmt>'config_options'<in>data<block_start>cluster._config_options=data['config_options']<block_end><if_stmt>'dse_config_options'<in>data<block_start>cluster._dse_config_options=data['dse_config_options']<block_end><if_stmt>'misc_config_options'<in>data<block_start>cluster._misc_config_options=data['misc_config_options']<block_end><if_stmt>'log_level'<in>data<block_start>cluster.__log_level=data['log_level']<block_end><if_stmt>'use_vnodes'<in>data<block_start>cluster.use_vnodes=data['use_vnodes']<block_end><if_stmt>'datadirs'<in>data<block_start>cluster.data_dir_count=int(data['datadirs'])<block_end>extension.load_from_cluster_config(cluster data)<block_end><except_stmt>KeyError<as>k<block_start><raise>common.LoadError("Error Loading "+filename+", missing property:"+k)<block_end><for_stmt>node_name node_list<block_start>cluster.nodes[node_name]=Node.load(cluster_path node_name cluster)<block_end><for_stmt>seed seed_list<block_start>cluster.seeds.append(seed)<block_end><return>cluster<block_end><block_end>
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo # (either separately or in combination, "QuantumBlack Trademarks") are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. """ ``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior. """<import_stmt>itertools<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>copy deepcopy<import_from_stmt>typing Dict List Tuple<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>causalnex.structure.structuremodel StructureModel<class_stmt>DistTypeBase(metaclass=ABCMeta)<block_start>"""Base class defining the distribution default behavior and interface"""<def_stmt>__init__ self idx:int<block_start>""" Default constructor for the DistTypeBase class. Unless overridden, provides default behavior to all subclasses. Args: idx: Positional index in data passed to the NOTEARS algorithm which correspond to this datatype. """<line_sep>self.idx=idx<block_end><def_stmt>get_columns self X:np.ndarray <arrow>np.ndarray<block_start>""" Gets the column(s) associated with the instantiated DistType. Args: X: Full dataset to be selected from. Returns: 1d or 2d np.ndarray of columns. """<line_sep><return>X[: self.idx]<block_end># pylint: disable=no-self-use # pylint: disable=unused-argument <def_stmt>preprocess_X self X:np.ndarray fit_transform:bool=<true><arrow>np.ndarray<block_start>""" Overload this method to perform any required preprocessing of the data matrix. This can include data conversion, column expansion etc. Changes to the tabu parameters should also be done here. **WARN** This preprocessing CANNOT reorder the columns of X. Args: X: The original passed-in data. fit_transform: Whether the class first fits then transforms the data, or just transforms. Just transforming is used to preprocess new data after the initial NOTEARS fit. Returns: Preprocessed X """<line_sep><return>X<block_end># pylint: disable=no-self-use <def_stmt>preprocess_tabu_edges self tabu_edges:List[Tuple[int int]]<arrow>List[Tuple[int int]]<block_start>""" Overload this method to perform any required preprocessing of the tabu_edges. Args: tabu_edges: The original tabu_edges. Returns: Preprocessed tabu_edges. 
"""<line_sep><return>tabu_edges<block_end># pylint: disable=no-self-use <def_stmt>preprocess_tabu_nodes self tabu_nodes:List[int]<arrow>List[int]<block_start>""" Overload this method to perform any required preprocessing of the tabu_nodes. Args: tabu_nodes: The original tabu_nodes. Returns: Preprocessed tabu_nodes. """<line_sep><return>tabu_nodes<block_end># pylint: disable=no-self-use <def_stmt>update_idx_col self idx_col:Dict[int str]<arrow>Dict[int str]<block_start>""" Overload this method to update the idx_col dict with expanded colnames. Args: idx_col: The original index to column mapping. Returns: Updated index to column mapping. """<line_sep><return>idx_col<block_end><def_stmt>add_to_node self sm:StructureModel<arrow>StructureModel<block_start>""" Adds self to a node of a structure model corresponding to self.idx. Args: sm: The input StructureModel Returns: Updated StructureModel """<line_sep>sm.nodes[self.idx]["dist_type"]=self<line_sep><return>sm<block_end># pylint: disable=no-self-use <def_stmt>modify_h self square_weight_mat:torch.Tensor<arrow>torch.Tensor<block_start>""" Overload this method to apply updates to the W matrix in h(W). Typically used to prevent spurious cycles when using expended columns. Args: square_weight_mat: The weight matrix used in h(W). Returns: Updated weight matrix used in h(W). """<line_sep><return>square_weight_mat<block_end># pylint: disable=no-self-use <def_stmt>collapse_adj self adj:np.ndarray<arrow>np.ndarray<block_start>""" Overload this method to apply updates to collapse the W matrix of a multi-parameter distribution Likely has the same impact as modify_h. Args: adj: The adjacency matrix. Returns: Updated adjacency matrix. """<line_sep><return>adj<block_end>@abstractmethod<def_stmt>loss self X:torch.Tensor X_hat:torch.Tensor<arrow>torch.Tensor<block_start>""" Args: X: The original data passed into NOTEARS (i.e. the reconstruction target). X_hat: The reconstructed data. Returns: Scalar pytorch tensor of the reconstruction loss between X and X_hat. """<line_sep><raise>NotImplementedError("Must implement the loss() method")<block_end>@abstractmethod<def_stmt>inverse_link_function self X_hat:torch.Tensor<arrow>torch.Tensor<block_start>""" Convert the transformed data from the latent space to the original dtype using the inverse link function. Args: X_hat: Reconstructed data in the latent space. Returns: Modified X_hat. MUST be same shape as passed in data. Projects the self.idx column from the latent space to the dist_type space. """<line_sep><raise>NotImplementedError("Must implement the inverse_link_function() method")<block_end><block_end><class_stmt>ExpandColumnsMixin<block_start>""" Mixin class providing convenience methods for column expansion. """<line_sep>@staticmethod<def_stmt>_expand_columns X:np.ndarray new_columns:np.ndarray<arrow>np.ndarray<block_start>""" Expands the data matrix columns without reordering the indices. Args: X: Base dataset to expand. new_columns: The columns to expand the dataset by. Returns: Expanded dataset. """<line_sep><return>np.hstack([X new_columns])<block_end>@staticmethod<def_stmt>update_tabu_edges idx_group:List[int] tabu_edges:List[Tuple[int int]] tabu_idx_group:bool <arrow>List[Tuple[int int]]<block_start>""" Tabu edges are: 1. all user defined connections to original feature column 2. all inter-feature connections (optional) Args: idx_group: The group of indices which correspond to a single expanded column. tabu_edges: The list of tabu_edges to be updated. 
tabu_idx_group: Whether inter-group edges should also be considered tabu. I.e if a result of a column expansion, often want to prevent edges being learned between parameters. Returns: Updated tabu_edges """<if_stmt>tabu_edges<is><none><block_start>tabu_edges=[]<block_end># copy to prevent mutations tabu_edges=deepcopy(tabu_edges)<line_sep># handle 1. new_tabu_edges=[]<line_sep># for each original tabu pair <for_stmt>(i j) tabu_edges# idx_group[0] is the original column index <block_start><if_stmt>i<eq>idx_group[0]<block_start>new_tabu_edges<augadd>[(idx j)<for>idx idx_group[1:]]<block_end><elif_stmt>j<eq>idx_group[0]<block_start>new_tabu_edges<augadd>[(i idx)<for>idx idx_group[1:]]<block_end><block_end># all new edges added to tabu_edges tabu_edges<augadd>new_tabu_edges<line_sep># handle 2. <if_stmt>tabu_idx_group# add on all pairwise permutations of particular feature group # NOTE: permutations are needed for edge directionality <block_start>tabu_edges<augadd>list(itertools.permutations(idx_group 2))<block_end><return>tabu_edges<block_end>@staticmethod<def_stmt>update_tabu_nodes idx_group:List[int] tabu_nodes:List[int]<arrow>List[Tuple[int int]]<block_start>""" Tabu nodes are: 1. all user defined connections to original feature column Args: idx_group: The group of indices which correspond to a single expanded column. tabu_nodes: The list of tabu_nodes to be updated. Returns: Updated tabu_nodes """<if_stmt>tabu_nodes<is><none><block_start><return>tabu_nodes<block_end># copy to prevent mutations tabu_nodes=deepcopy(tabu_nodes)<line_sep>new_tabu_nodes=[]<for_stmt>i tabu_nodes# NOTE: the first element in the idx_group is guaranteed as self.idx <block_start><if_stmt>i<eq>idx_group[0]<block_start>new_tabu_nodes<augadd>idx_group[1:]<block_end><block_end># add on the new tabu nodes tabu_nodes<augadd>new_tabu_nodes<line_sep><return>tabu_nodes<block_end><block_end>
<import_from_stmt>raw.ndfd *<line_sep>
<import_stmt>json<import_stmt>kfp.dsl<as>_kfp_dsl<import_stmt>kfp.components<as>_kfp_components<import_from_stmt>collections OrderedDict<import_from_stmt>kubernetes client<as>k8s_client<def_stmt>step1 <block_start><import_from_stmt>kale.common mlmdutils<as>_kale_mlmdutils<line_sep>_kale_mlmdutils.init_metadata()<import_from_stmt>kale.marshal.decorator marshal<as>_kale_marshal<import_from_stmt>kale.common.runutils link_artifacts<as>_kale_link_artifacts<line_sep>_kale_pipeline_parameters={}<line_sep>@_kale_marshal([] ['_b' '_a'] _kale_pipeline_parameters "/marshal")<def_stmt>step1 <block_start>a=1<line_sep>b=2<line_sep><return>a b<block_end>step1()<line_sep>_kale_artifacts={}<line_sep>_kale_link_artifacts(_kale_artifacts)<line_sep>_kale_mlmdutils.call("mark_execution_complete")<block_end><def_stmt>step2 <block_start><import_from_stmt>kale.common mlmdutils<as>_kale_mlmdutils<line_sep>_kale_mlmdutils.init_metadata()<import_from_stmt>kale.common.runutils ttl<as>_kale_ttl<import_from_stmt>kale.marshal.decorator marshal<as>_kale_marshal<import_from_stmt>kale.common.runutils link_artifacts<as>_kale_link_artifacts<line_sep>_kale_pipeline_parameters={}<line_sep>@_kale_ttl(5)@_kale_marshal(['_b' '_a'] ['_c'] _kale_pipeline_parameters "/marshal")<def_stmt>step2 a b<block_start>c=a+b<line_sep>print(c)<line_sep><return>c<block_end>step2()<line_sep>_kale_artifacts={}<line_sep>_kale_link_artifacts(_kale_artifacts)<line_sep>_kale_mlmdutils.call("mark_execution_complete")<block_end><def_stmt>step3 <block_start><import_from_stmt>kale.common mlmdutils<as>_kale_mlmdutils<line_sep>_kale_mlmdutils.init_metadata()<import_from_stmt>kale.marshal.decorator marshal<as>_kale_marshal<import_from_stmt>kale.common.runutils link_artifacts<as>_kale_link_artifacts<line_sep>_kale_pipeline_parameters={}<line_sep>@_kale_marshal(['_a' '_c'] [] _kale_pipeline_parameters "/marshal")<def_stmt>step3 a c<block_start>d=c+a<line_sep>print(d)<block_end>step3()<line_sep>_kale_artifacts={}<line_sep>_kale_link_artifacts(_kale_artifacts)<line_sep>_kale_mlmdutils.call("mark_execution_complete")<block_end>_kale_step1_op=_kfp_components.func_to_container_op(step1)<line_sep>_kale_step2_op=_kfp_components.func_to_container_op(step2)<line_sep>_kale_step3_op=_kfp_components.func_to_container_op(step3)<line_sep>@_kfp_dsl.pipeline(name='test' description='')<def_stmt>auto_generated_pipeline <block_start>_kale_pvolumes_dict=OrderedDict()<line_sep>_kale_volume_step_names=[]<line_sep>_kale_volume_name_parameters=[]<line_sep>_kale_marshal_vop=_kfp_dsl.VolumeOp(name="kale-marshal-volume" resource_name="kale-marshal-pvc" modes=['ReadWriteMany'] size="1Gi")<line_sep>_kale_volume_step_names.append(_kale_marshal_vop.name)<line_sep>_kale_volume_name_parameters.append(_kale_marshal_vop.outputs["name"].full_name)<line_sep>_kale_pvolumes_dict['/marshal']=_kale_marshal_vop.volume<line_sep>_kale_volume_step_names.sort()<line_sep>_kale_volume_name_parameters.sort()<line_sep>_kale_step1_task=_kale_step1_op().add_pvolumes(_kale_pvolumes_dict).after()<line_sep>_kale_step_labels={'common-label':'true'}<for_stmt>_kale_k,_kale_v _kale_step_labels.items()<block_start>_kale_step1_task.add_pod_label(_kale_k _kale_v)<block_end>_kale_step_limits={'amd/gpu':'1'}<for_stmt>_kale_k,_kale_v _kale_step_limits.items()<block_start>_kale_step1_task.container.add_resource_limit(_kale_k 
_kale_v)<block_end>_kale_step1_task.container.working_dir="/test"<line_sep>_kale_step1_task.container.set_security_context(k8s_client.V1SecurityContext(run_as_user=0))<line_sep>_kale_output_artifacts={}<line_sep>_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)<line_sep>_kale_step1_task.add_pod_label("pipelines.kubeflow.org/metadata_written" "true")<line_sep>_kale_dep_names=(_kale_step1_task.dependent_names+_kale_volume_step_names)<line_sep>_kale_step1_task.add_pod_annotation("kubeflow-kale.org/dependent-templates" json.dumps(_kale_dep_names))<if_stmt>_kale_volume_name_parameters<block_start>_kale_step1_task.add_pod_annotation("kubeflow-kale.org/volume-name-parameters" json.dumps(_kale_volume_name_parameters))<block_end>_kale_step2_task=_kale_step2_op().add_pvolumes(_kale_pvolumes_dict).after(_kale_step1_task)<line_sep>_kale_step_labels={'common-label':'true'}<for_stmt>_kale_k,_kale_v _kale_step_labels.items()<block_start>_kale_step2_task.add_pod_label(_kale_k _kale_v)<block_end>_kale_step2_task.set_retry_strategy(num_retries=5 retry_policy="Always" backoff_duration="20" backoff_factor=2 backoff_max_duration=<none>)<line_sep>_kale_step2_task.container.working_dir="/test"<line_sep>_kale_step2_task.container.set_security_context(k8s_client.V1SecurityContext(run_as_user=0))<line_sep>_kale_output_artifacts={}<line_sep>_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)<line_sep>_kale_step2_task.add_pod_label("pipelines.kubeflow.org/metadata_written" "true")<line_sep>_kale_dep_names=(_kale_step2_task.dependent_names+_kale_volume_step_names)<line_sep>_kale_step2_task.add_pod_annotation("kubeflow-kale.org/dependent-templates" json.dumps(_kale_dep_names))<if_stmt>_kale_volume_name_parameters<block_start>_kale_step2_task.add_pod_annotation("kubeflow-kale.org/volume-name-parameters" json.dumps(_kale_volume_name_parameters))<block_end>_kale_step3_task=_kale_step3_op().add_pvolumes(_kale_pvolumes_dict).after(_kale_step2_task _kale_step1_task)<line_sep>_kale_step_annotations={'step3-annotation':'test'}<for_stmt>_kale_k,_kale_v _kale_step_annotations.items()<block_start>_kale_step3_task.add_pod_annotation(_kale_k _kale_v)<block_end>_kale_step_labels={'common-label':'true'}<for_stmt>_kale_k,_kale_v _kale_step_labels.items()<block_start>_kale_step3_task.add_pod_label(_kale_k _kale_v)<block_end>_kale_step3_task.container.working_dir="/test"<line_sep>_kale_step3_task.container.set_security_context(k8s_client.V1SecurityContext(run_as_user=0))<line_sep>_kale_output_artifacts={}<line_sep>_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)<line_sep>_kale_step3_task.add_pod_label("pipelines.kubeflow.org/metadata_written" "true")<line_sep>_kale_dep_names=(_kale_step3_task.dependent_names+_kale_volume_step_names)<line_sep>_kale_step3_task.add_pod_annotation("kubeflow-kale.org/dependent-templates" json.dumps(_kale_dep_names))<if_stmt>_kale_volume_name_parameters<block_start>_kale_step3_task.add_pod_annotation("kubeflow-kale.org/volume-name-parameters" json.dumps(_kale_volume_name_parameters))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>pipeline_func=auto_generated_pipeline<line_sep>pipeline_filename=pipeline_func.__name__+'.pipeline.tar.gz'<import_stmt>kfp.compiler<as>compiler<line_sep>compiler.Compiler().compile(pipeline_func pipeline_filename)<line_sep># Get or create an experiment and submit a pipeline run <import_stmt>kfp<line_sep>client=kfp.Client()<line_sep>experiment=client.create_experiment('test')<line_sep># Submit a pipeline run 
<import_from_stmt>kale.common kfputils<line_sep>pipeline_id,version_id=kfputils.upload_pipeline(pipeline_filename "test")<line_sep>run_result=kfputils.run_pipeline(experiment_name=experiment.name pipeline_id=pipeline_id version_id=version_id)<block_end>
<import_from_stmt>gaphor.diagram.connectors Connector<import_from_stmt>gaphor.diagram.presentation Classified<import_from_stmt>gaphor.RAAML.raaml RelevantTo<import_from_stmt>gaphor.RAAML.stpa RelevantToItem<import_from_stmt>gaphor.SysML.requirements.connectors DirectedRelationshipPropertyPathConnect<line_sep>@Connector.register(Classified RelevantToItem)<class_stmt>RelevantToConnect(DirectedRelationshipPropertyPathConnect)<block_start>relation_type=RelevantTo<block_end>
# Copyright 2021 Sony Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>nnabla<as>nn<import_stmt>nnabla.functions<as>F<import_stmt>nnabla.parametric_functions<as>PF<def_stmt>test_show_graph <block_start><try_stmt><block_start><import_from_stmt>nnabla.experimental.tb_graph_writer TBGraphWriter<block_end><except_stmt><block_start>pytest.skip('Skip because tensorboardX and tensorflow is not installed.')<block_end>nn.clear_parameters()<line_sep>x=nn.Variable((2 3 4 4))<with_stmt>nn.parameter_scope('c1')<block_start>h=PF.convolution(x 8 (3 3) pad=(1 1))<line_sep>h=F.relu(PF.batch_normalization(h))<block_end><with_stmt>nn.parameter_scope('f1')<block_start>y=PF.affine(h 10)<block_end><with_stmt>TBGraphWriter(log_dir='log_out')<as>tb<block_start>tb.from_variable(y output_name="y")<block_end><block_end><def_stmt>test_show_curve <block_start><try_stmt><block_start><import_from_stmt>nnabla.experimental.tb_graph_writer TBGraphWriter<block_end><except_stmt><block_start>pytest.skip('Skip because tensorboardX and tensorflow is not installed.')<block_end><with_stmt>TBGraphWriter(log_dir='log_out')<as>tb<block_start>values=[]<for_stmt>i range(360)<block_start>s=np.sin(i/180.0<times>np.pi)<line_sep>tb.add_scalar("show_curve/sin" s i)<line_sep>values.append(s)<block_end>nd_values=np.array(values)<for_stmt>i range(10)<block_start>tb.add_histogram("histogram" nd_values i)<line_sep>nd_values<augadd>0.05<block_end><block_end><block_end>
__author__='calvin'<import_stmt>re<import_stmt>sys<import_from_stmt>math log10<if_stmt>sys.version[0]<eq>'3'<block_start><pass><block_end><else_stmt><block_start>range=xrange<block_end>classdef_regex=re.compile(r"\S*def .*#!|class .*#!")<line_sep>tagged_line_regex=re.compile(r".*#!")<def_stmt>convert_time_units t<block_start>""" Convert time in seconds into reasonable time units. """<if_stmt>t<eq>0<block_start><return>'0 s'<block_end>order=log10(t)<if_stmt>-9<l>order<l>-6<block_start>time_units='ns'<line_sep>factor=1000000000<block_end><elif_stmt>-6<le>order<l>-3<block_start>time_units='us'<line_sep>factor=1000000<block_end><elif_stmt>-3<le>order<l>-1<block_start>time_units='ms'<line_sep>factor=1000.<block_end><elif_stmt>-1<le>order<block_start>time_units='s'<line_sep>factor=1<block_end><return>"{:.3f} {}".format(factor<times>t time_units)<block_end><def_stmt>globalize_indentation src<block_start>""" Strip the indentation level so the code runs in the global scope. """<line_sep>lines=src.splitlines()<line_sep>indent=len(lines[0])-len(lines[0].strip(' '))<line_sep>func_src=''<for_stmt>ii,l enumerate(src.splitlines())<block_start>line=l[indent:]<line_sep>func_src<augadd>line+'\n'<block_end><return>func_src<block_end><def_stmt>remove_decorators src<block_start>""" Remove decorators from the source code """<line_sep>src=src.strip()<line_sep>src_lines=src.splitlines()<line_sep>multi_line=<false><line_sep>n_deleted=0<for_stmt>n range(len(src_lines))<block_start>line=src_lines[n-n_deleted].strip()<if_stmt>(line.startswith('@')<and>'Benchmark'<in>line)<or>multi_line<block_start><del_stmt>src_lines[n-n_deleted]<line_sep>n_deleted<augadd>1<if_stmt>line.endswith(')')<block_start>multi_line=<false><block_end><else_stmt><block_start>multi_line=<true><block_end><block_end><block_end>setup_src='\n'.join(src_lines)<line_sep><return>setup_src<block_end><def_stmt>get_tagged_imports fp<block_start>imports=[]<line_sep>inside_def=<false><line_sep>def_lines=[]<line_sep>def_indent=0<with_stmt>open(fp 'r')<as>f<block_start>lastLine=f.readline()<for_stmt>line f<block_start>tagged_class_or_def=re.findall(classdef_regex lastLine)<line_sep>tagged_line=re.findall(tagged_line_regex lastLine)<line_sep># Find the indentation level of the function/class definition and capture all source code lines # until we get a line that is the same indentation level (end of function/class definition). 
<if_stmt>tagged_class_or_def<or>inside_def<block_start><if_stmt>tagged_class_or_def<and>def_lines<block_start>imports.append(''.join(def_lines))<line_sep>def_lines=[]<line_sep>inside_def=<false><block_end><if_stmt>inside_def# For lines within the definition <block_start>indent=len(lastLine)-len(lastLine.lstrip(' '))<if_stmt>indent<eq>def_indent<and>lastLine<ne>'\n'<block_start>imports.append(''.join(def_lines))<line_sep>def_lines=[]<line_sep>inside_def=<false><line_sep>def_indent=0<if_stmt>tagged_line<block_start>imports.append(lastLine)<block_end><block_end><else_stmt><block_start><if_stmt>lastLine<ne>'\n'<block_start>def_lines.append(lastLine)<block_end><block_end><block_end><else_stmt># For the definition line <block_start>inside_def=<true><line_sep>def_indent=len(lastLine)-len(lastLine.lstrip(' '))<line_sep>def_lines.append(lastLine)<block_end><block_end><elif_stmt>tagged_line<block_start>imports.append(lastLine)<block_end>lastLine=line<block_end><block_end># Examine the last line tagged_line=re.findall(tagged_line_regex lastLine)<if_stmt>inside_def<block_start>def_lines.append(line)<line_sep>imports.append(''.join(def_lines))<block_end><elif_stmt>tagged_line<block_start>imports.append(line)<block_end>src='\n'.join(imports)+'\n'<line_sep><return>src<block_end><def_stmt>generate_call_statement func is_class_method *args **kwargs# Create the call statement <block_start><if_stmt>is_class_method<block_start>stmt='instance.'+func.__name__+'('<block_end><else_stmt><block_start>stmt=func.__name__+'('<block_end><for_stmt>arg args<block_start>stmt<augadd>arg.__repr__()+', '<block_end><for_stmt>kw,val kwargs.items()<block_start>stmt<augadd>'{0}={1}, '.format(kw val.__repr__())<block_end>stmt=stmt.strip(', ')<line_sep>stmt<augadd>')'<line_sep><return>stmt<block_end><def_stmt>walk_tree start attr<block_start>""" Recursively walk through a tree relationship. This iterates a tree in a top-down approach, fully reaching the end of a lineage before moving onto the next sibling of that generation. """<line_sep>path=[start]<for_stmt>child path<block_start><yield>child<line_sep>idx=path.index(child)<for_stmt>grandchild reversed(getattr(child attr))<block_start>path.insert(idx+1 grandchild)<block_end><block_end><block_end>
# Copyright 2021 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver. Inspired by the GLPK:s model color.mod ''' COLOR, Graph Coloring Problem Written in GNU MathProg by <NAME> <<EMAIL>> Given an undirected loopless graph G = (V, E), where V is a set of nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set of colors whose cardinality is as small as possible, such that F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must be assigned different colors. ''' This is a port of my old OR-tools CP solver coloring_ip.py This model was created by <NAME> (<EMAIL>) Also see my other OR-tols models: http://www.hakank.org/or_tools/ """<import_from_future_stmt> print_function<import_from_stmt>ortools.sat.python cp_model<as>cp<import_stmt>math sys<line_sep># from cp_sat_utils import * <def_stmt>main <block_start>model=cp.CpModel()<line_sep># max number of colors # [we know that 4 suffices for normal maps] nc=5<line_sep># number of nodes n=11<line_sep># set of nodes V=list(range(n))<line_sep>num_edges=20<line_sep># # Neighbours # # This data correspond to the instance myciel3.col from: # http://mat.gsia.cmu.edu/COLOR/instances.html # # Note: 1-based (adjusted below) E=[[1 2] [1 4] [1 7] [1 9] [2 3] [2 6] [2 8] [3 5] [3 7] [3 10] [4 5] [4 6] [4 10] [5 8] [5 9] [6 11] [7 11] [8 11] [9 11] [10 11]]<line_sep># # declare variables # # x[i,c] = 1 means that node i is assigned color c x={}<for_stmt>v V<block_start><for_stmt>j range(nc)<block_start>x[v j]=model.NewIntVar(0 1 'v[%i,%i]'%(v j))<block_end><block_end># u[c] = 1 means that color c is used, i.e. assigned to some node u=[model.NewIntVar(0 1 'u[%i]'%i)<for>i range(nc)]<line_sep># number of colors used, to minimize num_colors=model.NewIntVar(0 nc "num_colors")<line_sep>model.Add(num_colors<eq>sum(u))<line_sep># # constraints # # each node must be assigned exactly one color <for_stmt>i V<block_start>model.Add(sum([x[i c]<for>c range(nc)])<eq>1)<block_end># adjacent nodes cannot be assigned the same color # (and adjust to 0-based) <for_stmt>i range(num_edges)<block_start><for_stmt>c range(nc)<block_start>model.Add(x[E[i][0]-1 c]+x[E[i][1]-1 c]<le>u[c])<block_end><block_end># objective model.Minimize(num_colors)<line_sep># # solution # solver=cp.CpSolver()<line_sep>status=solver.Solve(model)<if_stmt>status<eq>cp.OPTIMAL<block_start>print()<line_sep>print('number of colors:' solver.Value(num_colors))<line_sep>print('colors used:' [solver.Value(u[i])<for>i range(nc)])<line_sep>print()<for_stmt>v V<block_start>print('v%i'%v ' color ' end=' ')<for_stmt>c range(nc)<block_start><if_stmt>solver.Value(x[v c])<eq>1<block_start>print(c)<block_end><block_end><block_end><block_end>print()<line_sep>print('NumConflicts:' solver.NumConflicts())<line_sep>print('NumBranches:' solver.NumBranches())<line_sep>print('WallTime:' solver.WallTime())<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_future_stmt> print_function absolute_import<import_stmt>unittest math<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>. *<class_stmt>T(base_pandas_extensions_tester.BasePandasExtensionsTester)<block_start><def_stmt>test_concat self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'c_2':['d' 'e' 'f']})<line_sep>df.engineer('concat(c_1, c_2)')<line_sep>self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values np.array(['ad' 'be' 'cf'] 'object')))<block_end><def_stmt>test_concat_3_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'c_2':['d' 'e' 'f'] 'c_3':['h' 'i' 'j']})<line_sep>df.engineer('concat(c_3, c_1, c_2)')<line_sep>self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values np.array(['had' 'ibe' 'jcf'] 'object')))<block_end><def_stmt>test_concat_with_numerical_col self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3]})<line_sep>df.engineer('concat(c_1,n_2)')<line_sep>self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values np.array(['a1' 'b2' 'c3'] 'object')))<block_end><def_stmt>test_concat_with_numerical_col_3_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6]})<line_sep>df.engineer('concat(n_3,c_1,n_2)')<line_sep>self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values np.array(['4a1' '5b2' '6c3'] 'object')))<block_end><def_stmt>test_multiplication self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('mult(n_2, n_3)')<line_sep>self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values np.array([4 10 18] long)))<block_end><def_stmt>test_multiplication_3_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('mult(n_2, n_3, n_4)')<line_sep>self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values np.array([4<times>7 80 18<times>9] long)))<block_end><def_stmt>test_square_on_whole_data_frame self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('pow(2)')<line_sep>np.testing.assert_array_equal(df.values np.array([['a' 1 4 7 1<times>1 4<times>4 7<times>7] ['b' 2 5 8 2<times>2 5<times>5 8<times>8] ['c' 3 6 9 3<times>3 6<times>6 9<times>9] ] 'object'))<block_end><def_stmt>test_square_on_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('pow(n_3, 2)')<line_sep>np.testing.assert_array_equal(df.values np.array([['a' 1 4 7 4<times>4] ['b' 2 5 8 5<times>5] ['c' 3 6 9 6<times>6] ] 'object'))<block_end><def_stmt>test_log_on_whole_data_frame self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('lg()')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 1 4 7 math.log(1) math.log(4) math.log(7)] ['b' 2 5 8 math.log(2) math.log(5) math.log(8)] ['c' 3 6 9 math.log(3) math.log(6) math.log(9)] ] 'object')))<block_end><def_stmt>test_log_on_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('lg(n_3)')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 1 4 7 math.log(4)] ['b' 2 5 8 math.log(5)] ['c' 3 6 9 math.log(6)] ] 'object')))<block_end><def_stmt>test_sqrt_on_whole_data_frame self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 
9]})<line_sep>df.engineer('sqrt()')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 1 4 7 math.sqrt(1) math.sqrt(4) math.sqrt(7)] ['b' 2 5 8 math.sqrt(2) math.sqrt(5) math.sqrt(8)] ['c' 3 6 9 math.sqrt(3) math.sqrt(6) math.sqrt(9)] ] 'object')))<block_end><def_stmt>test_sqrt_on_cols self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('sqrt(n_3)')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 1 4 7 math.sqrt(4)] ['b' 2 5 8 math.sqrt(5)] ['c' 3 6 9 math.sqrt(6)] ] 'object')))<block_end><def_stmt>test_rolling_sum_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_sum(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 35 40 30 29 48] df['n_'+col])<block_end><def_stmt>test_rolling_mean_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_mean(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 11.66 13.33 10 9.66 16] df['n_'+col] rtol=1e-3)<block_end><def_stmt>test_rolling_median_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_median(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 12 13 13 12 12] df['n_'+col])<block_end><def_stmt>test_rolling_min_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_min(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 10 12 2 2 2] df['n_'+col])<block_end><def_stmt>test_rolling_max_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_max(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 13 15 15 15 34] df['n_'+col])<block_end><def_stmt>test_rolling_std_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_std(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 1.528 1.528 7 6.807 16.371] df['n_'+col] rtol=1e-3)<block_end><def_stmt>test_rolling_var_on_single_col self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34]})<line_sep>col='rolling_var(n_1,3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 2.333 2.333 49 46.333 268] df['n_'+col] rtol=1e-3)<block_end># Multiple Columns <def_stmt>test_rolling_sum_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_sum(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 35 40 30 29 48] df['n_rolling_sum(n_1,3)'])<line_sep>np.testing.assert_array_equal([np.nan np.nan 6 10 10 9 8] df['n_rolling_sum(n_2,3)'])<block_end><def_stmt>test_rolling_mean_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_mean(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 11.66 13.33 10 9.66 16] df['n_rolling_mean(n_1,3)'] rtol=1e-3)<line_sep>np.testing.assert_allclose([np.nan np.nan 2 3.333 3.333 3 2.666] df['n_rolling_mean(n_2,3)'] rtol=1e-3)<block_end><def_stmt>test_rolling_median_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_median(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 12 13 13 12 12] 
df['n_rolling_median(n_1,3)'])<line_sep>np.testing.assert_array_equal([np.nan np.nan 2 3 3 2 2] df['n_rolling_median(n_2,3)'])<block_end><def_stmt>test_rolling_min_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_min(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 10 12 2 2 2] df['n_rolling_min(n_1,3)'])<line_sep>np.testing.assert_array_equal([np.nan np.nan 1 2 2 2 2] df['n_rolling_min(n_2,3)'])<block_end><def_stmt>test_rolling_max_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_max(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_array_equal([np.nan np.nan 13 15 15 15 34] df['n_rolling_max(n_1,3)'])<line_sep>np.testing.assert_array_equal([np.nan np.nan 3 5 5 5 4] df['n_rolling_max(n_2,3)'])<block_end><def_stmt>test_rolling_std_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_std(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 1.528 1.528 7 6.807 16.371] df['n_rolling_std(n_1,3)'] rtol=1e-3)<line_sep>np.testing.assert_allclose([np.nan np.nan 1 1.528 1.528 1.732 1.1547] df['n_rolling_std(n_2,3)'] rtol=1e-3)<block_end><def_stmt>test_rolling_var_on_multi_cols self<block_start>df=pd.DataFrame({'n_1':[10 12 13 15 2 12 34] 'n_2':[1 2 3 5 2 2 4]})<line_sep>col='rolling_var(3)'<line_sep>df.engineer(col)<line_sep>np.testing.assert_allclose([np.nan np.nan 2.333 2.333 49 46.333 268] df['n_rolling_var(n_1,3)'] rtol=1e-3)<line_sep>np.testing.assert_allclose([np.nan np.nan 1 2.333 2.333 3 1.333] df['n_rolling_var(n_2,3)'] rtol=1e-3)<block_end><def_stmt>test_method_chaining self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'c_2':['d' 'e' 'f'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('concat(c_1, c_2)').engineer('concat(c_1, n_2)').engineer('mult(n_2, n_3)').engineer('lg(n_2)').engineer('pow(n_3, 2)')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 'd' 1 4 7 'ad' 'a1' 4 math.log(1) 4<times>4] ['b' 'e' 2 5 8 'be' 'b2' 10 math.log(2) 5<times>5] ['c' 'f' 3 6 9 'cf' 'c3' 18 math.log(3) 6<times>6]] 'object')))<block_end><def_stmt>test_chaining_single_call_semi_col_sep self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'c_2':['d' 'e' 'f'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 'd' 1 4 7 'ad' 'a1' 4 math.log(1) 4<times>4] ['b' 'e' 2 5 8 'be' 'b2' 10 math.log(2) 5<times>5] ['c' 'f' 3 6 9 'cf' 'c3' 18 math.log(3) 6<times>6]] 'object')))<block_end><def_stmt>test_chaining_single_with_arr_arg self<block_start>df=pd.DataFrame({'c_1':['a' 'b' 'c'] 'c_2':['d' 'e' 'f'] 'n_2':[1 2 3] 'n_3':[4 5 6] 'n_4':[7 8 9]})<line_sep>df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))<line_sep>self.assertTrue(np.array_equal(df.values np.array([['a' 'd' 1 4 7 'ad' 'a1' 4 math.log(1) 4<times>4] ['b' 'e' 2 5 8 'be' 'b2' 10 math.log(2) 5<times>5] ['c' 'f' 3 6 9 'cf' 'c3' 18 math.log(3) 6<times>6]] 'object')))<block_end><def_stmt>test_long_method_chains self<block_start>df1=pd.DataFrame({'n_1':[1 2 3] 'n_2':[4 5 6]})<line_sep>df2=pd.DataFrame({'n_1':[1 2 3] 'n_2':[4 5 6]})<line_sep>df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 
3)))')<line_sep>df2.engineer('mult(n_1,n_2);pow(n_1,3)')<line_sep>df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')<line_sep>df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')<line_sep>np.testing.assert_array_equal(df1.columns.values.sort() df2.columns.values.sort())<line_sep>np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values df2['n_mult(n_1,n_2)'].values)<line_sep>np.testing.assert_array_equal(df1['n_pow(n_1,3)'] df2['n_pow(n_1,3)'])<line_sep>np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'] df2['n_lg(pow(n_1,3))'])<line_sep>np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'] df2['n_lg(mult(n_1,n_2))'])<line_sep>np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'] df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])<line_sep><block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>math<import_stmt>pyrobot.utils.util<as>prutil<import_stmt>rospy<import_stmt>habitat_sim.agent<as>habAgent<import_stmt>habitat_sim.utils<as>habUtils<import_from_stmt>habitat_sim.agent.controls ActuationSpec<import_stmt>habitat_sim.errors<import_stmt>quaternion<import_from_stmt>tf.transformations euler_from_quaternion euler_from_matrix<class_stmt>LoCoBotBase(object)<block_start>"""docstring for SimpleBase"""<def_stmt>__init__ self configs simulator<block_start>self.configs=configs<line_sep>self.sim=simulator.sim<line_sep>self.agent=self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)<line_sep>self.transform=<none><line_sep>self.init_state=self.get_full_state()<block_end><def_stmt>execute_action self action_name actuation# actions = "turn_right" or "turn_left" or "move_forward" # returns a bool showing if collided or not <block_start><return>self._act(action_name actuation)<block_end><def_stmt>get_full_state self# Returns habitat_sim.agent.AgentState <block_start><return>self.agent.get_state()<block_end><def_stmt>_rot_matrix self habitat_quat<block_start>quat_list=[habitat_quat.x habitat_quat.y habitat_quat.z habitat_quat.w]<line_sep><return>prutil.quat_to_rot_mat(quat_list)<block_end><def_stmt>get_state self state_type="odom"# Returns (x, y, yaw) <block_start><assert_stmt>state_type<eq>"odom" "Error: Only Odom state is available"<line_sep>cur_state=self.get_full_state()<line_sep>init_rotation=self._rot_matrix(self.init_state.rotation)<line_sep># true position here refers to the relative position from # where `self.init_state` is treated as origin true_position=cur_state.position-self.init_state.position<line_sep>true_position=np.matmul(init_rotation.transpose() true_position dtype=np.float64)<line_sep>cur_rotation=self._rot_matrix(cur_state.rotation)<line_sep>cur_rotation=np.matmul(init_rotation.transpose() cur_rotation dtype=np.float64)<line_sep>(r pitch yaw)=euler_from_matrix(cur_rotation axes="sxzy")<line_sep># Habitat has y perpendicular to map where as ROS has z perpendicular # to the map. Where as x is same. # Here ROS_X = -1 * habitat_z and ROS_Y = -1*habitat_x <return>(-1<times>true_position[2] -1<times>true_position[0] yaw)<block_end><def_stmt>stop self<block_start><raise>NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")<block_end><def_stmt>set_vel self fwd_speed turn_speed exe_time=1<block_start><raise>NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")<block_end><def_stmt>go_to_relative self xyt_position use_map=<false> close_loop=<false> smooth=<false><block_start>""" Moves the robot to the robot to given goal state relative to its initial pose. :param xyt_position: The relative goal state of the form (x,y,t) :param use_map: When set to "True", ensures that controler is using only free space on the map to move the robot. :param close_loop: When set to "True", ensures that controler is operating in open loop by taking account of odometry. :param smooth: When set to "True", ensures that the motion leading to the goal is a smooth one. :type xyt_position: list :type use_map: bool :type close_loop: bool :type smooth: bool :return: True if successful; False otherwise (timeout, etc.) 
:rtype: bool """<try_stmt><block_start><if_stmt>use_map<block_start><raise>NotImplementedError("Using map feature is not yet supported for Habitat-Sim")<block_end><if_stmt>close_loop<block_start><raise>NotImplementedError("Closed-loop postion control is not supported in Habitat-Sim!")<block_end><if_stmt>smooth<block_start><raise>NotImplementedError("Smooth position control feature is not yet for Habitat-Sim")<block_end><block_end><except_stmt>Exception<as>error<block_start>print(error)<line_sep><return><false><block_end>(cur_x cur_y cur_yaw)=self.get_state()<line_sep>abs_yaw=cur_yaw+xyt_position[2]<line_sep><return>self._go_to_relative_pose(xyt_position[0] xyt_position[1] abs_yaw)<block_end><def_stmt>go_to_absolute self xyt_position use_map=<false> close_loop=<false> smooth=<false><block_start>""" Moves the robot to the robot to given goal state in the world frame. :param xyt_position: The goal state of the form (x,y,t) in the world (map) frame. :param use_map: When set to "True", ensures that controler is using only free space on the map to move the robot. :param close_loop: When set to "True", ensures that controler is operating in open loop by taking account of odometry. :param smooth: When set to "True", ensures that the motion leading to the goal is a smooth one. :type xyt_position: list :type use_map: bool :type close_loop: bool :type smooth: bool :return: True if successful; False otherwise (timeout, etc.) :rtype: bool """<try_stmt><block_start><if_stmt>use_map<block_start><raise>NotImplementedError("Using map feature is not yet supported for Habitat-Sim")<block_end><if_stmt>close_loop<block_start><raise>NotImplementedError("Closed-loop postion control is not supported in Habitat-Sim!")<block_end><if_stmt>smooth<block_start><raise>NotImplementedError("Smooth position control feature is not yet for Habitat-Sim")<block_end><block_end><except_stmt>Exception<as>error<block_start>print(error)<line_sep><return><false><block_end>(cur_x cur_y cur_yaw)=self.get_state()<line_sep>rel_X=xyt_position[0]-cur_x<line_sep>rel_Y=xyt_position[1]-cur_y<line_sep>abs_yaw=xyt_position[2]<line_sep># convert rel_X & rel_Y from global frame to current frame R=np.array([[np.cos(cur_yaw) np.sin(cur_yaw)] [-np.sin(cur_yaw) np.cos(cur_yaw)]])<line_sep>rel_x,rel_y=np.matmul(R np.array([rel_X rel_Y]).reshape(-1 1))<line_sep><return>self._go_to_relative_pose(rel_x[0] rel_y[0] abs_yaw)<block_end><def_stmt>_act self action_name actuation<block_start>"""Take the action specified by action_id :param action_id: ID of the action. 
Retrieves the action from `agent_config.action_space <AgentConfiguration.action_space>` :return: Whether or not the action taken resulted in a collision """<line_sep>did_collide=<false><line_sep>act_spec=ActuationSpec(actuation)<line_sep>did_collide=self.agent.controls.action(self.agent.scene_node action_name act_spec apply_filter=<true>)<line_sep><return>did_collide<block_end><def_stmt>_go_to_relative_pose self rel_x rel_y abs_yaw# clip relative movements beyond 10 micrometer precision # this is done to improve determinism, as habitat-sim doesn't # seem to precisely move the robot beyond sub-millimeter precision anyway <block_start><if_stmt>abs(rel_x)<l>1e-5<block_start>rel_x=0<block_end><if_stmt>abs(rel_y)<l>1e-5<block_start>rel_y=0<block_end><if_stmt>math.sqrt(rel_x<power>2+rel_y<power>2)<g>0.0# rotate to point to (x, y) point <block_start>action_name="turn_left"<if_stmt>rel_y<l>0.0<block_start>action_name="turn_right"<block_end>v1=np.asarray([1 0] dtype=np.float64)<line_sep>v2=np.asarray([rel_x rel_y] dtype=np.float64)<line_sep>cosine_angle=np.dot(v1 v2)/(np.linalg.norm(v1)<times>np.linalg.norm(v2))<line_sep>angle=np.arccos(cosine_angle)<line_sep>did_collide=self._act(action_name math.degrees(angle))<if_stmt>did_collide<block_start>print("Error: Collision occurred while first rotating!")<line_sep><return><false><block_end># move to (x,y) point did_collide=self._act("move_forward" math.sqrt(rel_x<power>2+rel_y<power>2))<if_stmt>did_collide<block_start>print("Error: Collision occurred while moving straight!")<line_sep><return><false><block_end><block_end># rotate to match the final yaw! (cur_x cur_y cur_yaw)=self.get_state()<line_sep>rel_yaw=abs_yaw-cur_yaw<line_sep># clip to micro-degree precision to preserve determinism <if_stmt>abs(rel_yaw)<l>1e-4<block_start>rel_yaw=0<block_end>action_name="turn_left"<if_stmt>rel_yaw<l>0.0<block_start>action_name="turn_right"<line_sep>rel_yaw<augmul>-1<block_end>did_collide=self._act(action_name math.degrees(rel_yaw))<if_stmt>did_collide<block_start>print("Error: Collision occurred while rotating!")<line_sep><return><false><block_end><return><true><block_end><def_stmt>track_trajectory self states controls close_loop<block_start>""" State trajectory that the robot should track. :param states: sequence of (x,y,t) states that the robot should track. :param controls: optionally specify control sequence as well. :param close_loop: whether to close loop on the computed control sequence or not. :type states: list :type controls: list :type close_loop: bool :return: True if successful; False otherwise (timeout, etc.) :rtype: bool """<line_sep><raise>NotImplementedError<block_end><block_end>
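# Minimal usage sketch for the LoCoBotBase wrapper above; `configs` and `simulator`
# are assumed to come from the surrounding pyrobot/Habitat world setup and are not
# constructed here.
# base = LoCoBotBase(configs, simulator)
# base.go_to_relative([0.5, 0.0, 0.0])   # drive 0.5 m forward, keep current heading
# x, y, yaw = base.get_state("odom")     # pose relative to the initial state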
<import_from_future_stmt> division<import_stmt>logging<import_from_stmt>nanpy.i2c I2C_Master<import_from_stmt>nanpy.memo memoized<import_stmt>time<line_sep>log=logging.getLogger(__name__)<def_stmt>to_s16 n<block_start><return>(n+2<power>15)%2<power>16-2<power>15<block_end><class_stmt>Bmp180(object)<block_start>"""Control of BMP180 Digital pressure sensor (I2C) calculation is based on Bosch datasheet."""<def_stmt>__init__ self wire address=0x77 oss=3<block_start>self.i2c=I2C_Master(wire)<line_sep>self.address=address<line_sep>self.oss=oss<block_end><def_stmt>read_bytes self address count<block_start>self.i2c.send(self.address [address])<line_sep>x=self.i2c.request(self.address count)<line_sep><return>x<block_end><def_stmt>write_byte self address data<block_start>self.i2c.send(self.address [address data])<block_end>@property@memoized<def_stmt>eeprom self<block_start><return>self.read_bytes(0xaa 22)<block_end><def_stmt>read_temperature_raw self<block_start>self.write_byte(0xf4 0x2e)<line_sep>time.sleep(0.005)<line_sep>MSB,LSB=self.read_bytes(0xf6 2)<line_sep>UT=(MSB<lshift>8)+LSB<line_sep><return>UT<block_end><def_stmt>read_pressure_raw self<block_start>self.write_byte(0xf4 0x34+(self.oss<lshift>6))<line_sep>time.sleep(0.005)<line_sep>MSB,LSB,XLSB=self.read_bytes(0xf6 3)<line_sep>UP=((MSB<lshift>16)+(LSB<lshift>8)+XLSB)<rshift>(8-self.oss)<line_sep><return>UP<block_end>@classmethod<def_stmt>calculate cls pressure_raw temperature_raw oss eeprom<block_start>''' return: Pascal, Celsius '''<line_sep>UT=temperature_raw<line_sep>UP=pressure_raw<def_stmt>ushort i<block_start><return>(eeprom[2<times>i]<lshift>8)+eeprom[2<times>i+1]<block_end><def_stmt>short i<block_start><return>to_s16(ushort(i))<block_end>AC1=short(0)<line_sep>AC2=short(1)<line_sep>AC3=short(2)<line_sep>AC4=ushort(3)<line_sep>AC5=ushort(4)<line_sep>AC6=ushort(5)<line_sep>B1=short(6)<line_sep>B2=short(7)<line_sep># MB = short(8) MC=short(9)<line_sep>MD=short(10)<line_sep>X1=((UT-AC6)<times>AC5)<rshift>15<line_sep>X2=(MC<lshift>11)<floordiv>(X1+MD)<line_sep>B5=X1+X2<line_sep>T=(B5+8)<rshift>4<line_sep>B6=B5-4000<line_sep>X1=(B2<times>((B6<times>B6)<rshift>12))<rshift>11<line_sep>X2=(AC2<times>B6)<rshift>11<line_sep>X3=X1+X2<line_sep>B3=(((AC1<times>4+X3)<lshift>oss)+2)<floordiv>4<line_sep>X1=(AC3<times>B6)<rshift>13<line_sep>X2=(B1<times>((B6<times>B6)<rshift>12))<rshift>16<line_sep>X3=((X1+X2)+2)<floordiv>4<line_sep>B4=(AC4<times>(X3+32768))<rshift>15<line_sep>B7=(UP-B3)<times>(50000<rshift>oss)<line_sep>p=(B7<times>2)<floordiv>B4<if>B7<l>0x80000000<else>(B7<floordiv>B4)<times>2<line_sep>X1=(p<rshift>8)<times>(p<rshift>8)<line_sep>X1=(X1<times>3038)<rshift>16<line_sep>X2=(-7357<times>p)<rshift>16<line_sep>p<augadd>(X1+X2+3791)<rshift>4<line_sep><return>p T/10<block_end><def_stmt>read self<block_start>''' return: Pascal, Celsius '''<line_sep>temperature_raw=self.read_temperature_raw()<line_sep>pressure_raw=self.read_pressure_raw()<line_sep><return>self.calculate(pressure_raw temperature_raw self.oss self.eeprom )<block_end><block_end>
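# Minimal usage sketch for the Bmp180 driver above (assumes `wire` is the Wire/I2C
# handle of an already-connected nanpy ArduinoApi board):
# sensor = Bmp180(wire, oss=3)
# pressure_pa, temp_c = sensor.read()   # Pascal, degrees Celsius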
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>qf_lib.backtesting.contract.contract Contract<import_from_stmt>qf_lib.backtesting.order.execution_style ExecutionStyle<import_from_stmt>qf_lib.backtesting.order.time_in_force TimeInForce<class_stmt>Order(object)<block_start>""" Order generated by a strategy, then processed by PositionSizer. Finally executed by ExecutionHandler. """<def_stmt>__init__ self contract:Contract quantity:int execution_style:ExecutionStyle time_in_force:TimeInForce order_state=""<block_start>""" This __init__ shouldn't be used anywhere beyond this module. Use OrderFactory for creating Order objects. """<line_sep>self.id=<none># type:int self.contract=contract<line_sep>self.quantity=quantity<line_sep>self.time_in_force=time_in_force<line_sep>self.execution_style=execution_style<line_sep>self.order_state=order_state<block_end><def_stmt>__str__ self<block_start><return>'\nOrder:\n'<concat>'\tid: {}\n'<concat>'\tcontract: {}\n'<concat>'\tquantity: {}\n'<concat>'\ttif: {}\n'<concat>'\texecution_style: {}\n'<concat>'\torder_state: {}'.format(self.id str(self.contract) self.quantity str(self.time_in_force) self.execution_style self.order_state)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>self<is>other<block_start><return><true><block_end><if_stmt><not>isinstance(other Order)<block_start><return><false><block_end># one Order has id and another hasn't <if_stmt>(self.id<is><none>)<ne>(other.id<is><none>)<block_start><return><false><block_end><if_stmt>self.id<is><not><none><and>other.id<eq>self.id<block_start><return><true><block_end># when both ids are none -> compare the values <return>(self.contract self.quantity self.time_in_force self.execution_style)<eq>(other.contract other.quantity other.time_in_force other.execution_style)<block_end><def_stmt>__hash__ self<block_start><return>hash((self.contract self.quantity self.time_in_force self.execution_style))<block_end><block_end>
<class_stmt>BaseHandler<block_start><def_stmt>send self data p<block_start><pass><block_end><def_stmt>recv self data p<block_start><pass><block_end><def_stmt>shutdown self p direction=2<block_start><pass><block_end><def_stmt>close self<block_start><pass><block_end><block_end>
""" Script for serving a trained chatbot model over http """<import_stmt>datetime<import_stmt>click<import_from_stmt>os path<import_from_stmt>flask Flask request send_from_directory<import_from_stmt>flask_cors CORS<import_from_stmt>flask_restful Resource Api<import_stmt>general_utils<import_stmt>chat_command_handler<import_from_stmt>chat_settings ChatSettings<import_from_stmt>chatbot_model ChatbotModel<import_from_stmt>vocabulary Vocabulary<line_sep>app=Flask(__name__)<line_sep>CORS(app)<line_sep>@app.cli.command()@click.argument("checkpointfile")@click.option("-p" "--port" type=int)<def_stmt>serve_chat checkpointfile port<block_start>api=Api(app)<line_sep>#Read the hyperparameters and configure paths model_dir,hparams,checkpoint=general_utils.initialize_session_server(checkpointfile)<line_sep>#Load the vocabulary print()<line_sep>print("Loading vocabulary...")<if_stmt>hparams.model_hparams.share_embedding<block_start>shared_vocab_filepath=path.join(model_dir Vocabulary.SHARED_VOCAB_FILENAME)<line_sep>input_vocabulary=Vocabulary.load(shared_vocab_filepath)<line_sep>output_vocabulary=input_vocabulary<block_end><else_stmt><block_start>input_vocab_filepath=path.join(model_dir Vocabulary.INPUT_VOCAB_FILENAME)<line_sep>input_vocabulary=Vocabulary.load(input_vocab_filepath)<line_sep>output_vocab_filepath=path.join(model_dir Vocabulary.OUTPUT_VOCAB_FILENAME)<line_sep>output_vocabulary=Vocabulary.load(output_vocab_filepath)<block_end>#Create the model print("Initializing model...")<line_sep>print()<with_stmt>ChatbotModel(mode="infer" model_hparams=hparams.model_hparams input_vocabulary=input_vocabulary output_vocabulary=output_vocabulary model_dir=model_dir)<as>model#Load the weights <block_start>print()<line_sep>print("Loading model weights...")<line_sep>model.load(checkpoint)<line_sep># Setting up the chat chatlog_filepath=path.join(model_dir "chat_logs" "web_chatlog_{0}.txt".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))<line_sep>chat_settings=ChatSettings(hparams.model_hparams hparams.inference_hparams)<line_sep>chat_command_handler.print_commands()<class_stmt>Answer(Resource)<block_start><def_stmt>get self question<block_start>is_command,terminate_chat,_=chat_command_handler.handle_command(question model chat_settings)<if_stmt>terminate_chat<block_start>answer="[Can't terminate from http request]"<block_end><elif_stmt>is_command<block_start>answer="[Command processed]"<block_end><else_stmt>#If it is not a command (it is a question), pass it on to the chatbot model to get the answer <block_start>_,answer=model.chat(question chat_settings)<if_stmt>chat_settings.inference_hparams.log_chat<block_start>chat_command_handler.append_to_chatlog(chatlog_filepath question answer)<block_end><block_end><return>answer<block_end><block_end><class_stmt>UI(Resource)<block_start><def_stmt>get self<block_start><return>send_from_directory("." "chat_ui.html")<block_end><block_end>api.add_resource(Answer "/chat/<string:question>")<line_sep>api.add_resource(UI "/chat_ui/")<line_sep>app.run(debug=<false> port=port)<block_end><block_end>
"""Trigonometric and Hyperbolic Functions"""<import_from_stmt>typing Callable<import_stmt>numpy<import_from_stmt>pipda register_func<import_from_stmt>..core.contexts Context<import_from_stmt>..core.types FloatOrIter<import_from_stmt>.constants pi<def_stmt>_register_trig_hb_func name:str np_name:str doc:str<arrow>Callable<block_start>"""Register trigonometric and hyperbolic function"""<line_sep>np_fun=getattr(numpy np_name)<if_stmt>name.endswith("pi")<block_start>func=<lambda>x:np_fun(x<times>pi)<block_end><else_stmt># ufunc cannot set context <block_start>func=<lambda>x:np_fun(x)<block_end>func=register_func(<none> context=Context.EVAL func=func)<line_sep>func.__name__=name<line_sep>func.__doc__=doc<line_sep><return>func<block_end>sin=_register_trig_hb_func("sin" "sin" doc="""The sine function Args: x: a numeric value or iterable Returns: The sine value of `x` """ )<line_sep>cos=_register_trig_hb_func("cos" "cos" doc="""The cosine function Args: x: a numeric value or iterable Returns: The cosine value of `x` """ )<line_sep>tan=_register_trig_hb_func("tan" "tan" doc="""The tangent function Args: x: a numeric value or iterable Returns: The tangent value of `x` """ )<line_sep>acos=_register_trig_hb_func("acos" "arccos" doc="""The arc-cosine function Args: x: a numeric value or iterable Returns: The arc-cosine value of `x` """ )<line_sep>asin=_register_trig_hb_func("acos" "arcsin" doc="""The arc-sine function Args: x: a numeric value or iterable Returns: The arc-sine value of `x` """ )<line_sep>atan=_register_trig_hb_func("acos" "arctan" doc="""The arc-sine function Args: x: a numeric value or iterable Returns: The arc-sine value of `x` """ )<line_sep>sinpi=_register_trig_hb_func("sinpi" "sin" doc="""The sine function Args: x: a numeric value or iterable, which is the multiple of pi Returns: The sine value of `x` """ )<line_sep>cospi=_register_trig_hb_func("cospi" "cos" doc="""The cosine function Args: x: a numeric value or iterable, which is the multiple of pi Returns: The cosine value of `x` """ )<line_sep>tanpi=_register_trig_hb_func("tanpi" "tan" doc="""The tangent function Args: x: a numeric value or iterable, which is the multiple of pi Returns: The tangent value of `x` """ )<line_sep>cosh=_register_trig_hb_func("cosh" "cosh" doc="""Hyperbolic cosine Args: x: a numeric value or iterable Returns: The hyperbolic cosine value of `x` """ )<line_sep>sinh=_register_trig_hb_func("sinh" "sinh" doc="""Hyperbolic sine Args: x: a numeric value or iterable Returns: The hyperbolic sine value of `x` """ )<line_sep>tanh=_register_trig_hb_func("tanh" "tanh" doc="""Hyperbolic tangent Args: x: a numeric value or iterable Returns: The hyperbolic tangent value of `x` """ )<line_sep>acosh=_register_trig_hb_func("acosh" "arccosh" doc="""Hyperbolic arc-cosine Args: x: a numeric value or iterable Returns: The hyperbolic arc-cosine value of `x` """ )<line_sep>asinh=_register_trig_hb_func("asinh" "arcsinh" doc="""Hyperbolic arc-sine Args: x: a numeric value or iterable Returns: The hyperbolic arc-sine value of `x` """ )<line_sep>atanh=_register_trig_hb_func("atanh" "arctanh" doc="""Hyperbolic arc-tangent Args: x: a numeric value or iterable Returns: The hyperbolic arc-tangent value of `x` """ )<line_sep>@register_func(<none> context=Context.EVAL)<def_stmt>atan2 y:FloatOrIter x:FloatOrIter<arrow>FloatOrIter<block_start>"""Calculates the angle between the x-axis and the vector (0,0) -> (x,y) Args: y: and x: The end coordinates of the vector Returns: The angle between x-axis and vector (0,0) -> (x,y) 
"""<line_sep><return>numpy.arctan2(y x)<block_end>
<import_from_stmt>anchorecli.cli repo<line_sep>
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>numpy<as>np<import_stmt>pyrobot.utils.util<as>prutil<import_from_stmt>pyrobot.core Camera<import_from_stmt>pyrobot.utils.util try_cv2_import<line_sep>cv2=try_cv2_import()<import_from_stmt>cv_bridge CvBridge CvBridgeError<import_from_stmt>pyrep.objects.vision_sensor VisionSensor<import_from_stmt>pyrep.const ObjectType PerspectiveMode RenderMode<import_from_stmt>pyrep.objects.joint Joint<class_stmt>LoCoBotCamera(Camera)<block_start>"""docstring for SimpleCamera"""<def_stmt>__init__ self configs simulator<block_start>self.sim=simulator.sim<line_sep>self.rgb_cam=VisionSensor("kinect_rgb")<line_sep>self.depth_cam=VisionSensor("kinect_depth")<line_sep>self.rgb_cam.set_render_mode(RenderMode.OPENGL3)<line_sep>self.depth_cam.set_render_mode(RenderMode.OPENGL3)<line_sep># Pan and tilt related variables. self.pan_joint=Joint("LoCoBot_head_pan_joint")<line_sep>self.tilt_joint=Joint("LoCoBot_head_tilt_joint")<block_end><def_stmt>get_rgb self<block_start><return>self.rgb_cam.capture_rgb()<block_end><def_stmt>get_depth self<block_start><return>self.depth_cam.capture_depth()<block_end><def_stmt>get_rgb_depth self<block_start><return>self.get_rgb() self.get_depth()<block_end><def_stmt>get_intrinsics self# Todo: Remove this after we fix intrinsics <block_start><raise>NotImplementedError<line_sep>""" Returns the instrinsic matrix of the camera :return: the intrinsic matrix (shape: :math:`[3, 3]`) :rtype: np.ndarray """<line_sep># fx = self.configs['Camera.fx'] # fy = self.configs['Camera.fy'] # cx = self.configs['Camera.cx'] # cy = self.configs['Camera.cy'] Itc=np.array([[fx 0 cx] [0 fy cy] [0 0 1]])<line_sep><return>Itc<block_end><def_stmt>pix_to_3dpt self rs cs in_cam=<false><block_start>""" Get the 3D points of the pixels in RGB images. :param rs: rows of interest in the RGB image. It can be a list or 1D numpy array which contains the row indices. The default value is None, which means all rows. :param cs: columns of interest in the RGB image. It can be a list or 1D numpy array which contains the column indices. The default value is None, which means all columns. :param in_cam: return points in camera frame, otherwise, return points in base frame :type rs: list or np.ndarray :type cs: list or np.ndarray :type in_cam: bool :returns: tuple (pts, colors) pts: point coordinates in world frame (shape: :math:`[N, 3]`) colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`) :rtype: tuple(np.ndarray, np.ndarray) """<line_sep><raise>NotImplementedError<block_end><def_stmt>get_current_pcd self in_cam=<true><block_start>""" Return the point cloud at current time step (one frame only) :param in_cam: return points in camera frame, otherwise, return points in base frame :type in_cam: bool :returns: tuple (pts, colors) pts: point coordinates in world frame (shape: :math:`[N, 3]`) colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`) :rtype: tuple(np.ndarray, np.ndarray) """<line_sep><raise>NotImplementedError<block_end>@property<def_stmt>state self<block_start>""" Return the current pan and tilt joint angles of the robot camera. :return: pan_tilt: A list the form [pan angle, tilt angle] :rtype: list """<line_sep><return>self.get_state()<block_end><def_stmt>get_state self<block_start>""" Return the current pan and tilt joint angles of the robot camera. 
:return: pan_tilt: A list the form [pan angle, tilt angle] :rtype: list """<line_sep><return>[self.get_pan() self.get_tilt()]<block_end><def_stmt>get_pan self<block_start>""" Return the current pan joint angle of the robot camera. :return: pan: Pan joint angle :rtype: float """<line_sep><return>self.pan_joint.get_joint_position()<block_end><def_stmt>get_tilt self<block_start>""" Return the current tilt joint angle of the robot camera. :return: tilt: Tilt joint angle :rtype: float """<line_sep><return>self.tilt_joint.get_joint_position()<block_end><def_stmt>set_pan self pan wait=<true><block_start>""" Sets the pan joint angle to the specified value. :param pan: value to be set for pan joint :param wait: wait until the pan angle is set to the target angle. :type pan: float :type wait: bool """<line_sep>self.pan_joint.set_joint_position(pan)<line_sep># [self.sim.step() for _ in range(50)] <block_end><def_stmt>set_tilt self tilt wait=<true><block_start>""" Sets the tilt joint angle to the specified value. :param tilt: value to be set for the tilt joint :param wait: wait until the tilt angle is set to the target angle. :type tilt: float :type wait: bool """<line_sep>self.tilt_joint.set_joint_position(tilt)<block_end><def_stmt>set_pan_tilt self pan tilt wait=<true><block_start>""" Sets both the pan and tilt joint angles to the specified values. :param pan: value to be set for pan joint :param tilt: value to be set for the tilt joint :param wait: wait until the pan and tilt angles are set to the target angles. :type pan: float :type tilt: float :type wait: bool """<line_sep>self.set_pan(pan)<line_sep>self.set_tilt(tilt)<block_end><def_stmt>reset self<block_start>""" This function resets the pan and tilt joints by actuating them to their home configuration. """<line_sep>self.set_pan_tilt(self.configs.CAMERA.RESET_PAN self.configs.CAMERA.RESET_TILT)<block_end><block_end>
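# Minimal usage sketch for the LoCoBotCamera wrapper above (assumes `configs` and the
# PyRep-backed `simulator` object from the surrounding pyrobot setup):
# camera = LoCoBotCamera(configs, simulator)
# camera.set_pan_tilt(0.0, 0.4)
# rgb, depth = camera.get_rgb_depth()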
<import_from_stmt>lib action<class_stmt>RGBAction(action.BaseAction)<block_start><def_stmt>run self light_id red green blue transition_time<block_start>light=self.hue.lights.get(light_id)<line_sep>light.rgb(red green blue transition_time)<block_end><block_end>
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base utilities for loading datasets."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>os<import_stmt>random<import_stmt>time<import_stmt>shutil<import_from_stmt>six.moves urllib<line_sep>Dataset=collections.namedtuple('Dataset' ['data' 'target'])<line_sep>Datasets=collections.namedtuple('Datasets' ['train' 'validation' 'test'])<def_stmt>retry initial_delay max_delay factor=2.0 jitter=0.25 is_retriable=<none><block_start>"""Simple decorator for wrapping retriable functions. Args: initial_delay: the initial delay. factor: each subsequent retry, the delay is multiplied by this value. (must be >= 1). jitter: to avoid lockstep, the returned delay is multiplied by a random number between (1-jitter) and (1+jitter). To add a 20% jitter, set jitter = 0.2. Must be < 1. max_delay: the maximum delay allowed (actual max is max_delay * (1 + jitter). is_retriable: (optional) a function that takes an Exception as an argument and returns true if retry should be applied. """<if_stmt>factor<l>1<block_start><raise>ValueError('factor must be >= 1; was %f'%(factor ))<block_end><if_stmt>jitter<ge>1<block_start><raise>ValueError('jitter must be < 1; was %f'%(jitter ))<block_end># Generator to compute the individual delays <def_stmt>delays <block_start>delay=initial_delay<while_stmt>delay<le>max_delay<block_start><yield>delay<times>random.uniform(1-jitter 1+jitter)<line_sep>delay<augmul>factor<block_end><block_end><def_stmt>wrap fn<block_start>"""Wrapper function factory invoked by decorator magic."""<def_stmt>wrapped_fn *args **kwargs<block_start>"""The actual wrapper function that applies the retry logic."""<for_stmt>delay delays()<block_start><try_stmt><block_start><return>fn(*args **kwargs)<block_end><except_stmt>Exception<as>e# pylint: disable=broad-except) <block_start><if_stmt>is_retriable<is><none><block_start><continue><block_end><if_stmt>is_retriable(e)<block_start>time.sleep(delay)<block_end><else_stmt><block_start><raise><block_end><block_end><block_end><return>fn(*args **kwargs)<block_end><return>wrapped_fn<block_end><return>wrap<block_end>_RETRIABLE_ERRNOS={110 # Connection timed out [socket.py] }<def_stmt>_is_retriable e<block_start><return>isinstance(e IOError)<and>e.errno<in>_RETRIABLE_ERRNOS<block_end>@retry(initial_delay=1.0 max_delay=16.0 is_retriable=_is_retriable)<def_stmt>urlretrieve_with_retry url filename=<none><block_start><return>urllib.request.urlretrieve(url filename)<block_end><def_stmt>maybe_download filename work_directory source_url<block_start>"""Download the data from source url, unless it's already here. Args: filename: string, name of the file in the directory. work_directory: string, path to working directory. source_url: url to download from if file doesn't exist. Returns: Path to resulting file. 
"""<if_stmt><not>os.path.exists(work_directory)<block_start>os.makedirs(work_directory)<block_end>filepath=os.path.join(work_directory filename)<if_stmt><not>os.path.exists(filepath)<block_start>temp_file_name,_=urlretrieve_with_retry(source_url)<line_sep>shutil.copy(temp_file_name filepath)<line_sep>size=os.path.getsize(filepath)<line_sep>print('Successfully downloaded' filename size 'bytes.')<block_end><return>filepath<block_end>
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>AlgorithmImports *<line_sep>### <summary> ### This algorithm is a regression test for issue #2018 and PR #2038. ### </summary> <class_stmt>OptionDataNullReferenceRegressionAlgorithm(QCAlgorithm)<block_start><def_stmt>Initialize self<block_start>self.SetStartDate(2016 12 1)<line_sep>self.SetEndDate(2017 1 1)<line_sep>self.SetCash(500000)<line_sep>self.AddEquity("DUST")<line_sep>option=self.AddOption("DUST")<line_sep>option.SetFilter(self.UniverseFunc)<block_end><def_stmt>UniverseFunc self universe<block_start><return>universe.IncludeWeeklys().Strikes(-1 +1).Expiration(timedelta(25) timedelta(100))<block_end><block_end>
<import_from_stmt>django.db models<import_from_stmt>django.contrib.auth.models User<class_stmt>Link(models.Model)<block_start>url=models.URLField()<line_sep>title=models.CharField(max_length=255)<line_sep>reporter=models.ForeignKey(User on_delete=models.SET_NULL related_name='reported_links' null=<true> blank=<false> )<def_stmt>__str__ self<block_start><return>'{self.title} ({self.url})'.format(self=self)<block_end><def_stmt>get_num_of_positive_votes self<block_start><return>self.votes.filter(positive=<true>).count()<block_end><def_stmt>get_num_of_negative_votes self<block_start><return>self.votes.filter(negative=<true>).count()<block_end><block_end><class_stmt>LinkVote(models.Model)<block_start><class_stmt>Meta<block_start>unique_together=(('link' 'voter') )<block_end>link=models.ForeignKey(Link on_delete=models.CASCADE related_name='votes' )<line_sep>voter=models.ForeignKey(User on_delete=models.SET_NULL related_name='votes' null=<true> blank=<false> )<line_sep>positive=models.BooleanField()<line_sep>negative=models.BooleanField()<def_stmt>__str__ self<block_start><if_stmt>self.positive<block_start>vote='positive'<block_end><elif_stmt>self.negative<block_start>vote='negative'<block_end><else_stmt><block_start>vote='neutral'<block_end><return>'{vote} vote for {self.link} by {self.voter}'.format(vote=vote self=self)<block_end><block_end>
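# Minimal usage sketch for the models above (assumes a configured Django project and
# an existing `User` instance named `user`):
# link = Link.objects.create(url='https://example.com', title='Example', reporter=user)
# LinkVote.objects.create(link=link, voter=user, positive=True, negative=False)
# link.get_num_of_positive_votes()   # -> 1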
# -*- coding: utf-8 -*- """ clint.textui.core ~~~~~~~~~~~~~~~~~ Core TextUI functionality for Puts/Indent/Writer. """<import_from_future_stmt> absolute_import<import_stmt>sys<import_from_stmt>contextlib contextmanager<import_from_stmt>.formatters max_width min_width _get_max_width_context<import_from_stmt>.cols columns<import_from_stmt>..utils tsplit<line_sep>__all__=('puts' 'puts_err' 'indent' 'dedent' 'columns' 'max_width' 'min_width' 'STDOUT' 'STDERR')<line_sep>STDOUT=sys.stdout.write<line_sep>STDERR=sys.stderr.write<line_sep>NEWLINES=('\n' '\r' '\r\n')<line_sep>INDENT_STRINGS=[]<line_sep># Private <def_stmt>_indent indent=0 quote='' indent_char=' '<block_start>"""Indent util function, compute new indent_string"""<if_stmt>indent<g>0<block_start>indent_string=''.join((str(quote) (indent_char<times>(indent-len(quote)))))<block_end><else_stmt><block_start>indent_string=''.join((('\x08'<times>(-1<times>(indent-len(quote)))) str(quote)))<block_end><if_stmt>len(indent_string)<block_start>INDENT_STRINGS.append(indent_string)<block_end><block_end># Public <def_stmt>puts s='' newline=<true> stream=STDOUT<block_start>"""Prints given string to stdout."""<line_sep>max_width_ctx=_get_max_width_context()<if_stmt>max_width_ctx<block_start>cols,separator=max_width_ctx[-1]<line_sep>s=max_width(s cols separator)<block_end><if_stmt>newline<block_start>s=tsplit(s NEWLINES)<line_sep>s=map(str s)<line_sep>indent=''.join(INDENT_STRINGS)<line_sep>s=(str('\n'+indent)).join(s)<block_end>_str=''.join((''.join(INDENT_STRINGS) str(s) '\n'<if>newline<else>''))<line_sep>stream(_str)<block_end><def_stmt>puts_err s='' newline=<true> stream=STDERR<block_start>"""Prints given string to stderr."""<line_sep>puts(s newline stream)<block_end><def_stmt>dedent <block_start>"""Dedent next strings, use only if you use indent otherwise than as a context."""<line_sep>INDENT_STRINGS.pop()<block_end>@contextmanager<def_stmt>_indent_context <block_start>"""Indentation context manager."""<try_stmt><block_start><yield><block_end><finally_stmt><block_start>dedent()<block_end><block_end><def_stmt>indent indent=4 quote=''<block_start>"""Indentation manager, return an indentation context manager."""<line_sep>_indent(indent quote)<line_sep><return>_indent_context()<block_end>
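# Minimal usage sketch for puts/indent as exposed above:
# puts('top level')
# with indent(4, quote='> '):
#     puts('indented and quoted')
# dedent() is only needed when indent() is used without the `with` block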
<import_from_stmt>ethereum tester<as>t<import_from_stmt>ethereum utils<def_stmt>test <block_start>s=t.state()<line_sep>test_company=s.abi_contract('company.se' ADMIN_ACCOUNT=utils.decode_int(t.a0))<line_sep>order_book=s.abi_contract('orders.se')<line_sep>test_currency=s.abi_contract('currency.se' sender=t.k0)<assert_stmt>test_company.getAdmin()<eq>t.a0.encode('hex')<line_sep># Issue 1000 shares to user a1 test_company.issueShares(1000 t.a1 sender=t.k0)<line_sep># Issue 50000 coins to users a2 and a3 test_currency.sendCoin(50000 t.a2 sender=t.k0)<line_sep>test_currency.sendCoin(50000 t.a3 sender=t.k0)<line_sep># User a1 can have as many shares as he wants, but must retain at # least 800 test_company.setShareholderMaxShares(t.a1 2<power>100 sender=t.k0)<line_sep>test_company.setShareholderMinShares(t.a1 800 sender=t.k0)<line_sep># User a2 can have up to 500 shares test_company.setShareholderMaxShares(t.a2 500 sender=t.k0)<line_sep># User a2 tries to give himself the right to unlimited shares, # fails because he is not the admin test_company.setShareholderMaxShares(t.a2 2<power>100 sender=t.k2)<line_sep># A few sanity checks <assert_stmt>test_company.getCurrentShareholdingsOf(t.a1)<eq>1000<assert_stmt>test_company.getShareholderMinShares(t.a1)<eq>800<assert_stmt>test_company.getShareholderMaxShares(t.a2)<eq>500<line_sep># User a1 transfers 150 shares to a2 <assert_stmt>test_company.sendCoin(150 t.a2 sender=t.k1)<is><true><line_sep># User a1 tries to transfer 150 shares to a2 again, fails because # such a transaction would result a1 having 700 shares, which is # below his limit <assert_stmt>test_company.sendCoin(150 t.a2 sender=t.k1)<is><false><line_sep># Check shareholdings <assert_stmt>test_company.getCurrentShareholdingsOf(t.a1)<eq>850<assert_stmt>test_company.getCurrentShareholdingsOf(t.a2)<eq>150<line_sep># Authorize the order book contract to accept lockups test_company.setContractAuthorized(order_book.address <true>)<line_sep># User a1 puts up 50 shares for sale; however, he tries to do # this without first authorizing the order book to withdraw so # the operation fails <assert_stmt>order_book.mkSellOrder(test_company.address 50 test_currency.address 10000 sender=t.k1)<eq>-1<line_sep># Now, try to create the order properly test_company.authorizeLockup(order_book.address 50 sender=t.k1)<line_sep>_id=order_book.mkSellOrder(test_company.address 50 test_currency.address 10000 sender=t.k1)<assert_stmt>_id<ge>0<assert_stmt>test_company.getLockedShareholdingsOf(t.a1)<eq>50<line_sep># Accept the order by a3. 
This should fail because a3 has not # authorized the order_book to withdraw coins <assert_stmt>order_book.claimSellOrder(_id sender=t.k3)<is><false><line_sep># Do the authorization test_currency.approveOnce(order_book.address 10000 sender=t.k3)<line_sep># It should still fail because a3 is not authorized to hold shares <assert_stmt>order_book.claimSellOrder(_id sender=t.k3)<is><false><line_sep># Now do it properly test_currency.approveOnce(order_book.address 10000 sender=t.k2)<assert_stmt>order_book.claimSellOrder(_id sender=t.k2)<is><true><line_sep># Check shareholdings and balances <assert_stmt>test_company.getCurrentShareholdingsOf(t.a1)<eq>800<assert_stmt>test_company.getCurrentShareholdingsOf(t.a2)<eq>200<assert_stmt>test_company.getLockedShareholdingsOf(t.a1)<eq>0<assert_stmt>test_currency.coinBalanceOf(t.a1)<eq>10000<assert_stmt>test_currency.coinBalanceOf(t.a2)<eq>40000<assert_stmt>test_currency.coinBalanceOf(t.a3)<eq>50000<line_sep># Authorize a3 to hold shares test_company.setShareholderMaxShares(t.a3 500)<line_sep># A3 buys shares test_currency.approveOnce(order_book.address 20000 sender=t.k3)<line_sep>_id2=order_book.mkBuyOrder(test_company.address 100 test_currency.address 20000 sender=t.k3)<assert_stmt>_id2<ge>0 _id2<line_sep>test_company.authorizeLockup(order_book.address 100 sender=t.k2)<assert_stmt>order_book.claimBuyOrder(_id2 sender=t.k2)<is><true><line_sep># Check shareholdings and balances <assert_stmt>test_company.getCurrentShareholdingsOf(t.a1)<eq>800<assert_stmt>test_company.getCurrentShareholdingsOf(t.a2)<eq>100<assert_stmt>test_company.getCurrentShareholdingsOf(t.a3)<eq>100<assert_stmt>test_company.getLockedShareholdingsOf(t.a1)<eq>0<assert_stmt>test_currency.coinBalanceOf(t.a1)<eq>10000<assert_stmt>test_currency.coinBalanceOf(t.a2)<eq>60000<assert_stmt>test_currency.coinBalanceOf(t.a3)<eq>30000<block_end><if_stmt>__name__<eq>'__main__'<block_start>test()<block_end>
""" Dependency graph: Evaluated ID example ++++++++++++++++++++++++++++++++++++++ This example demonstrates access to the evaluated ID (such as object, material, etc.) state from an original ID. This is needed every time one needs to access state with animation, constraints, and modifiers taken into account. """<import_stmt>bpy<class_stmt>OBJECT_OT_evaluated_example(bpy.types.Operator)<block_start>"""Access evaluated object state and do something with it"""<line_sep>bl_label="DEG Access Evaluated Object"<line_sep>bl_idname="object.evaluated_example"<def_stmt>execute self context# This is an original object. Its data does not have any modifiers applied. <block_start>obj=context.object<if_stmt>obj<is><none><or>obj.type<ne>'MESH'<block_start>self.report({'INFO'} "No active mesh object to get info from")<line_sep><return>{'CANCELLED'}<block_end># Evaluated object exists within a specific dependency graph. # We will request evaluated object from the dependency graph which corresponds to the # current scene and view layer. # # NOTE: This call ensure the dependency graph is fully evaluated. This might be expensive # if changes were made made to the scene, but is needed to ensure no dangling or incorrect # pointers are exposed. depsgraph=context.evaluated_depsgraph_get()<line_sep># Actually request evaluated object. # # This object has animation and drivers applied on it, together with constraints and # modifiers. # # For mesh objects the object.data will be a mesh with all modifiers applied. # This means that in access to vertices or faces after modifier stack happens via fields of # object_eval.object. # # For other types of objects the object_eval.data does not have modifiers applied on it, # but has animation applied. # # NOTE: All ID types have `evaluated_get()`, including materials, node trees, worlds. object_eval=obj.evaluated_get(depsgraph)<line_sep>mesh_eval=object_eval.data<line_sep>self.report({'INFO'} f"Number of evaluated vertices: {len(mesh_eval.vertices)}")<line_sep><return>{'FINISHED'}<block_end><block_end><def_stmt>register <block_start>bpy.utils.register_class(OBJECT_OT_evaluated_example)<block_end><def_stmt>unregister <block_start>bpy.utils.unregister_class(OBJECT_OT_evaluated_example)<block_end><if_stmt>__name__<eq>"__main__"<block_start>register()<block_end>
<import_stmt>os<import_stmt>re<import_from_stmt>base64 b64decode<import_from_stmt>pathlib Path<import_stmt>requests<line_sep>username=os.environ["GITHUB_USERNAME"]<line_sep>password=os.environ["GITHUB_PERSONAL_ACCESS_TOKEN"]<line_sep>auth=requests.auth.HTTPBasicAuth(username password)<line_sep>directory=Path(__file__).resolve().parent.parent/"github"<line_sep>directory.mkdir(exist_ok=<true>)<line_sep>start_url="https://api.github.com/search/code?q=view+language:lookml"<line_sep>next_url=<none><line_sep>page=1<with_stmt>requests.Session()<as>session<block_start>session.auth=auth<while_stmt><true><block_start>response=session.get(next_url<or>start_url)<line_sep>response.raise_for_status()<line_sep>links=response.headers["Link"]<line_sep>finds=re.findall(r"<(https://api.github.com/search/code\?"<concat>r'q=view\+language%3Alookml&page=\d+)>; rel="next"' links )<if_stmt>finds<block_start>next_url=finds[0]<block_end><else_stmt><block_start>next_url=<none><block_end>print(next_url)<line_sep>urls=[item["url"]<for>item response.json()["items"]]<line_sep>print(f"Downloading all content from page {page}")<for_stmt>url urls<block_start>response=session.get(url)<line_sep>response.raise_for_status()<line_sep>response_json=response.json()<line_sep>name=response_json["name"]<line_sep>encoded=response_json["content"]<line_sep>content=b64decode(encoded).decode("utf-8")<if_stmt>(name.endswith(".lookml")<or>content.startswith("-")<or>"- view"<in>content)<block_start><continue><block_end>file_path=directory/name<with_stmt>file_path.open("w+")<as>file<block_start>file.write(content)<block_end><block_end><if_stmt>next_url<is><none><block_start><break><block_end><else_stmt><block_start>page<augadd>1<block_end><block_end><block_end>
<import_stmt>logging<import_from_stmt>iemit_plugin IEmitter<import_from_stmt>plugins.emitters.base_http_emitter BaseHttpEmitter<line_sep>logger=logging.getLogger('crawlutils')<class_stmt>HttpEmitter(BaseHttpEmitter IEmitter)<block_start><def_stmt>get_emitter_protocol self<block_start><return>'http'<block_end><block_end>
<import_stmt>chainer<import_from_stmt>chainer functions<import_from_stmt>chainer links<import_stmt>chainer_chemistry<import_from_stmt>chainer_chemistry.links.connection.graph_linear GraphLinear<import_from_stmt>chainer_chemistry.utils is_sparse<class_stmt>GGNNUpdate(chainer.Chain)<block_start>"""GGNN submodule for update part. Args: in_channels (int or None): input dim of feature vector for each node hidden_channels (int): dimension of feature vector for each node out_channels (int or None): output dime of feature vector for each node When `None`, `hidden_channels` is used. n_edge_types (int): number of types of edge """<def_stmt>__init__ self in_channels=<none> hidden_channels=16 out_channels=<none> n_edge_types=4 **kwargs<block_start><if_stmt>out_channels<is><none><block_start>out_channels=hidden_channels<block_end>super(GGNNUpdate self).__init__()<if_stmt>in_channels<is><none><block_start>gru_in_channels=<none><block_end><else_stmt><block_start>gru_in_channels=in_channels+hidden_channels<block_end><with_stmt>self.init_scope()<block_start>self.graph_linear=GraphLinear(in_channels n_edge_types<times>hidden_channels)<line_sep>self.update_layer=links.GRU(gru_in_channels out_channels)<block_end>self.n_edge_types=n_edge_types<line_sep>self.in_channels=in_channels<line_sep>self.hidden_channels=hidden_channels<line_sep>self.out_channels=out_channels<block_end><def_stmt>__call__ self h adj **kwargs<block_start>hidden_ch=self.hidden_channels<line_sep># --- Message part --- mb,atom,in_ch=h.shape<line_sep>m=functions.reshape(self.graph_linear(h) (mb atom hidden_ch self.n_edge_types))<line_sep># m: (minibatch, atom, ch, edge_type) # Transpose m=functions.transpose(m (0 3 1 2))<line_sep># m: (minibatch, edge_type, atom, ch) # (minibatch * edge_type, atom, out_ch) m=functions.reshape(m (mb<times>self.n_edge_types atom hidden_ch))<if_stmt>is_sparse(adj)<block_start>m=functions.sparse_matmul(adj m)<block_end><else_stmt><block_start>adj=functions.reshape(adj (mb<times>self.n_edge_types atom atom))<line_sep>m=chainer_chemistry.functions.matmul(adj m)<block_end># (minibatch * edge_type, atom, out_ch) m=functions.reshape(m (mb self.n_edge_types atom hidden_ch))<line_sep>m=functions.sum(m axis=1)<line_sep># (minibatch, atom, out_ch) # --- Update part --- # Contraction h=functions.reshape(h (mb<times>atom in_ch))<line_sep># Contraction m=functions.reshape(m (mb<times>atom hidden_ch))<line_sep>out_h=self.update_layer(functions.concat((h m) axis=1))<line_sep># Expansion out_h=functions.reshape(out_h (mb atom self.out_channels))<line_sep><return>out_h<block_end><def_stmt>reset_state self<block_start>self.update_layer.reset_state()<block_end><block_end>
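# Minimal shape sketch for GGNNUpdate above; the arrays are illustrative dummies:
# import numpy as np
# update = GGNNUpdate(in_channels=8, hidden_channels=16, n_edge_types=4)
# h = np.zeros((2, 5, 8), dtype=np.float32)       # (minibatch, atom, in_channels)
# adj = np.zeros((2, 4, 5, 5), dtype=np.float32)  # (minibatch, edge_type, atom, atom)
# out = update(h, adj)                            # -> shape (2, 5, 16)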
<def_stmt>func a b<block_start><return>a+b<block_end><def_stmt>func2 a<block_start>print(a)<block_end>print("Hello")<line_sep>
<import_stmt>os<import_from_stmt>.default DefaultModelConfig<class_stmt>ModelConfig(DefaultModelConfig)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.MODEL_NAME='AOTT'<block_end><block_end>
<def_stmt>_do_set env name value<block_start><if_stmt>env.contains(name)<block_start>env.set(name value)<block_end><elif_stmt>env.parent<is><not><none><block_start>_do_set(env.parent name value)<block_end><else_stmt><block_start><raise>Exception("Attempted to set name '%s' but it does not exist."%name)<block_end><block_end><def_stmt>set_ env symbol_name value<block_start><if_stmt>symbol_name[0]<ne>"string"<block_start><raise>Exception("set() takes a string as its first argument, but was: %s"%str(symbol_name))<block_end>_do_set(env symbol_name[1] value)<line_sep><return>value<block_end>
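# Minimal semantics sketch for set_ above; an `Env` type with `contains`, `set` and
# `parent` is assumed to be defined by the surrounding interpreter:
# set_(env, ["string", "x"], 42)   # rebinds x in the nearest enclosing scope
# an exception is raised when x is not bound in env or any parent environment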
<import_stmt>itertools<import_from_stmt>..quant_utils QuantizedValue QuantizedValueType attribute_to_kwarg quantize_nparray<import_from_stmt>.base_operator QuantOperatorBase<class_stmt>QDQOperatorBase<block_start><def_stmt>__init__ self onnx_quantizer onnx_node<block_start>self.quantizer=onnx_quantizer<line_sep>self.node=onnx_node<line_sep>self.disable_qdq_for_node_output=(<true><if>onnx_node.op_type<in>onnx_quantizer.op_types_to_exclude_output_quantization<else><false>)<block_end><def_stmt>quantize self<block_start>node=self.node<if_stmt>self.disable_qdq_for_node_output<block_start>tensors_to_quantize=node.input<block_end><else_stmt><block_start>tensors_to_quantize=itertools.chain(node.input node.output)<block_end><for_stmt>tensor_name tensors_to_quantize<block_start>self.quantizer.quantize_tensor(tensor_name)<block_end><block_end><block_end>
<import_stmt>numpy<def_stmt>layer_method <block_start><return>{"pi":"{0:.2f}".format(numpy.pi)}<block_end>
<import_from_stmt>uliweb.i18n ugettext_lazy<as>_<def_stmt>test_1 <block_start>""" >>> x = _('Hello') >>> print repr(x) ugettext_lazy('Hello') """<block_end><def_stmt>test_2 <block_start>""" >>> x = _('Hello {0}') >>> print x.format('name') Hello name """<block_end>
# Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>mock<import_stmt>pytest<import_from_stmt>f5.bigip.tm.vcmp.virtual_disk Virtual_Disk<import_from_stmt>f5.sdk_exception UnsupportedMethod<line_sep>@pytest.fixture<def_stmt>FakeResource <block_start>mo=mock.MagicMock()<line_sep><return>Virtual_Disk(mo)<block_end><def_stmt>test_create FakeResource<block_start><with_stmt>pytest.raises(UnsupportedMethod)<as>ex<block_start>FakeResource.create()<block_end><assert_stmt>"does not support the create method"<in>str(ex.value)<block_end><def_stmt>test_update FakeResource<block_start><with_stmt>pytest.raises(UnsupportedMethod)<as>ex<block_start>FakeResource.update()<block_end><assert_stmt>"does not support the update method"<in>str(ex.value)<block_end><def_stmt>test_modify FakeResource<block_start><with_stmt>pytest.raises(UnsupportedMethod)<as>ex<block_start>FakeResource.modify()<block_end><assert_stmt>"does not support the modify method"<in>str(ex.value)<block_end>
"""Datatest main program"""<import_stmt>sys<as>_sys<import_from_stmt>unittest TestProgram<as>_TestProgram<import_from_stmt>unittest defaultTestLoader<as>_defaultTestLoader<try_stmt><block_start><import_from_stmt>unittest.signals installHandler<block_end><except_stmt>ImportError<block_start>installHandler=<none><block_end><import_from_stmt>datatest DataTestRunner<line_sep>__unittest=<true><line_sep>__datatest=<true><class_stmt>DataTestProgram(_TestProgram)<block_start><def_stmt>__init__ self module='__main__' defaultTest=<none> argv=<none> testRunner=DataTestRunner testLoader=_defaultTestLoader exit=<true> verbosity=1 failfast=<none> catchbreak=<none> buffer=<none> ignore=<false><block_start>self.ignore=ignore<line_sep>_TestProgram.__init__(self module=module defaultTest=defaultTest argv=argv testRunner=testRunner testLoader=testLoader exit=exit verbosity=verbosity failfast=failfast catchbreak=catchbreak buffer=buffer)<block_end><def_stmt>runTests self<block_start><try_stmt><block_start><if_stmt>self.catchbreak<and>installHandler<block_start>installHandler()<block_end><block_end><except_stmt>AttributeError<block_start><pass><block_end># does not have catchbreak attribute <if_stmt>self.testRunner<is><none><block_start>self.testRunner=DataTestRunner<block_end><if_stmt>isinstance(self.testRunner type)<block_start><try_stmt><block_start>kwds=['verbosity' 'failfast' 'buffer' 'warnings' 'ignore']<line_sep>kwds=[attr<for>attr kwds<if>hasattr(self attr)]<line_sep>kwds=dict((attr getattr(self attr))<for>attr kwds)<line_sep>testRunner=self.testRunner(**kwds)<block_end><except_stmt>TypeError<block_start><if_stmt>'warnings'<in>kwds<block_start><del_stmt>kwds['warnings']<block_end>testRunner=self.testRunner(**kwds)<block_end><block_end><else_stmt># assumed to be a TestRunner instance <block_start>testRunner=self.testRunner<block_end>self.result=testRunner.run(self.test)<if_stmt>self.exit<block_start>_sys.exit(<not>self.result.wasSuccessful())<block_end><block_end><block_end><if_stmt>_sys.version_info[:2]<eq>(3 1)# Patch methods for Python 3.1. <block_start><def_stmt>__init__ self module='__main__' defaultTest=<none> argv=<none> testRunner=DataTestRunner testLoader=_defaultTestLoader exit=<true> ignore=<false><block_start>self.ignore=ignore<line_sep>_TestProgram.__init__(self module=module defaultTest=defaultTest argv=argv testRunner=testRunner testLoader=testLoader exit=exit)<block_end>DataTestProgram.__init__=__init__<block_end><elif_stmt>_sys.version_info[:2]<eq>(2 6)# Patch runTests() for Python 2.6. <block_start><def_stmt>__init__ self module='__main__' defaultTest=<none> argv=<none> testRunner=DataTestRunner testLoader=_defaultTestLoader exit=<true> ignore=<false><block_start>self.exit=exit# <- 2.6 does not handle exit argument. self.ignore=ignore<line_sep>_TestProgram.__init__(self module=module defaultTest=defaultTest argv=argv testRunner=testRunner testLoader=testLoader)<block_end>DataTestProgram.__init__=__init__<block_end>main=DataTestProgram<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>chest_sizes path<block_start>"""Chest measurements of 5738 Scottish Militiamen Quetelet's data on chest measurements of 5738 Scottish Militiamen. Quetelet (1846) used this data as a demonstration of the normal distribution of physical characteristics. A data frame with 16 observations on the following 2 variables. `chest` Chest size (in inches) `count` Number of soldiers with this chest size <NAME>. and <NAME>. (1981). *Applications, Basics, and Computing of Exploratory Data Analysis*. Belmont. CA: Wadsworth. Retrieved from Statlib: `https://www.stat.cmu.edu/StatDat/Datafiles/MilitiamenChests.html` Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `chest_sizes.csv`. Returns: Tuple of np.ndarray `x_train` with 16 rows and 2 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='chest_sizes.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/HistData/ChestSizes.csv'<line_sep>maybe_download_and_extract(path url save_file_name='chest_sizes.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
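# Minimal usage sketch for chest_sizes above:
# x_train, metadata = chest_sizes('~/data')
# x_train.shape          # -> (16, 2): chest size in inches, soldier count
# metadata['columns']    # -> Index(['chest', 'count'], ...)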
# -*- coding: utf-8 -*- <import_from_stmt>beacontools parse_packet<line_sep># Eddystone UID packet uid_packet=b"\x02\x01\x06\x03\x03\xaa\xfe\x17\x16\xaa\xfe\x00\xe3\x12\x34\x56\x78\x90\x12"<concat>b"\x34\x67\x89\x01\x00\x00\x00\x00\x00\x01\x00\x00"<line_sep>uid_frame=parse_packet(uid_packet)<line_sep>print("Namespace: %s"%uid_frame.namespace)<line_sep>print("Instance: %s"%uid_frame.instance)<line_sep>print("TX Power: %s"%uid_frame.tx_power)<line_sep>print("-----")<line_sep># Eddystone URL packet url_packet=b"\x03\x03\xAA\xFE\x13\x16\xAA\xFE\x10\xF8\x03github\x00citruz"<line_sep>url_frame=parse_packet(url_packet)<line_sep>print("TX Power: %d"%url_frame.tx_power)<line_sep>print("URL: %s"%url_frame.url)<line_sep>print("-----")<line_sep># Eddystone TLM packet (unencrypted) tlm_packet=b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x00\x0b\x18\x13\x00\x00\x00"<concat>b"\x14\x67\x00\x00\x2a\xc4\xe4"<line_sep>tlm_frame=parse_packet(tlm_packet)<line_sep>print("Voltage: %d mV"%tlm_frame.voltage)<line_sep>print("Temperature: %f °C"%tlm_frame.temperature)<line_sep>print("Advertising count: %d"%tlm_frame.advertising_count)<line_sep>print("Seconds since boot: %d"%tlm_frame.seconds_since_boot)<line_sep>print("-----")<line_sep># Eddystone TLM packet (encrypted) enc_tlm_packet=b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x01\x41\x41\x41\x41\x41"<concat>b"\x41\x41\x41\x41\x41\x41\x41\xDE\xAD\xBE\xFF"<line_sep>enc_tlm_frame=parse_packet(enc_tlm_packet)<line_sep>print("Data: %s"%enc_tlm_frame.encrypted_data)<line_sep>print("Salt: %d"%enc_tlm_frame.salt)<line_sep>print("Mic: %d"%enc_tlm_frame.mic)<line_sep>print("-----")<line_sep># iBeacon Advertisement ibeacon_packet=b"\x02\x01\x06\x1a\xff\x4c\x00\x02\x15\x41\x41\x41\x41\x41\x41\x41\x41\x41"<concat>b"\x41\x41\x41\x41\x41\x41\x41\x00\x01\x00\x01\xf8"<line_sep>adv=parse_packet(ibeacon_packet)<line_sep>print("UUID: %s"%adv.uuid)<line_sep>print("Major: %d"%adv.major)<line_sep>print("Minor: %d"%adv.minor)<line_sep>print("TX Power: %d"%adv.tx_power)<line_sep>print("-----")<line_sep># Cypress iBeacon Sensor cypress_packet=b"\x02\x01\x04\x1a\xff\x4c\x00\x02\x15\x00\x05\x00\x01\x00\x00\x10\x00\x80"<concat>b"\x00\x00\x80\x5f\x9b\x01\x31\x00\x02\x6c\x66\xc3"<line_sep>sensor=parse_packet(cypress_packet)<line_sep>print("UUID: %s"%sensor.uuid)<line_sep>print("Major: %d"%sensor.major)<line_sep>print("Temperature: %d °C"%sensor.cypress_temperature)<line_sep>print("Humidity: %d %%"%sensor.cypress_humidity)<line_sep>print("TX Power: %d"%sensor.tx_power)<line_sep>print("-----")<line_sep># Estimote Telemetry Packet (Subframe A) telemetry_a_packet=b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"<concat>b"\xeb\x03\x26\x40\x00\x00\x01\x41\x44\x47\xfa\xff\xff\xff\xff"<line_sep>telemetry=parse_packet(telemetry_a_packet)<line_sep>print("Identifier: %s"%telemetry.identifier)<line_sep>print("Protocol Version: %d"%telemetry.protocol_version)<line_sep>print("Acceleration (g): (%f, %f, %f)"%telemetry.acceleration)<line_sep>print("Is moving: %s"%telemetry.is_moving)<line_sep># ... 
see packet_types/estimote.py for all available attributes and units print("-----")<line_sep># Estimote Telemetry Packet (Subframe B) telemetry_b_packet=b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"<concat>b"\xeb\x03\x26\x40\x01\xd8\x42\xed\x73\x49\x25\x66\xbc\x2e\x50"<line_sep>telemetry_b=parse_packet(telemetry_b_packet)<line_sep>print("Identifier: %s"%telemetry_b.identifier)<line_sep>print("Protocol Version: %d"%telemetry_b.protocol_version)<line_sep>print("Magnetic field: (%f, %f, %f)"%telemetry_b.magnetic_field)<line_sep>print("Temperature: %f °C"%telemetry_b.temperature)<line_sep># ... see packet_types/estimote.py for all available attributes and units # Estimote Nearable Advertisement nearable_packet=b"\x02\x01\x04\x03\x03\x0f\x18\x17\xff\x5d"<concat>b"\x01\x01\x1e\xfe\x42\x7e\xb6\xf4\xbc\x2f"<concat>b"\x04\x01\x68\xa1\xaa\xfe\x05\xc1\x45\x25"<concat>b"\x53\xb5"<line_sep>nearable_adv=parse_packet(nearable_packet)<line_sep>print("Identifier: %s"%nearable_adv.identifier)<line_sep>print("Hardware_version: %d"%nearable_adv.hardware_version)<line_sep>print("Firmware_version: %d"%nearable_adv.firmware_version)<line_sep>print("Temperature: %d"%nearable_adv.temperature)<line_sep>print("Is moving: %i"%nearable_adv.is_moving)<line_sep>print("-----")<line_sep># CJ Monitor packet cj_monitor_packet=b"\x02\x01\x06\x05\x02\x1A\x18\x00\x18"<concat>b"\x09\xFF\x72\x04\xFE\x10\xD1\x0C\x33\x61"<concat>b"\x09\x09\x4D\x6F\x6E\x20\x35\x36\x34\x33"<line_sep>cj_monitor=parse_packet(cj_monitor_packet)<line_sep>print("Name: %s"%cj_monitor.name)<line_sep>print("Temperature: %f °C"%cj_monitor.temperature)<line_sep>print("Humidity: %d %%"%cj_monitor.humidity)<line_sep>print("Light: %f"%cj_monitor.light)<line_sep>
r"""Train an EfficientNet classifier. Currently implementation of multi-label multi-class classification is non-functional. During training, start tensorboard from within the classification/ directory: tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0 Example usage: python train_classifier_tf.py run_idfg /ssd/crops_sq \ -m "efficientnet-b0" --pretrained --finetune --label-weighted \ --epochs 50 --batch-size 512 --lr 1e-4 \ --seed 123 \ --logdir run_idfg """<import_from_future_stmt> annotations<import_stmt>argparse<import_from_stmt>collections defaultdict<import_from_stmt>collections.abc Callable Mapping MutableMapping Sequence<import_from_stmt>datetime datetime<import_stmt>json<import_stmt>os<import_from_stmt>typing Any Optional<import_stmt>uuid<import_stmt>numpy<as>np<import_stmt>sklearn.metrics<import_stmt>tensorflow<as>tf<import_from_stmt>tensorboard.plugins.hparams api<as>hp<import_stmt>tqdm<import_from_stmt>classification.train_utils HeapItem recall_from_confusion_matrix add_to_heap fig_to_img imgs_with_confidences load_dataset_csv prefix_all_keys <import_from_stmt>visualization plot_utils<line_sep>AUTOTUNE=tf.data.experimental.AUTOTUNE<line_sep># match pytorch EfficientNet model names EFFICIENTNET_MODELS:Mapping[str Mapping[str Any]]={'efficientnet-b0':dict(cls='EfficientNetB0' img_size=224 dropout=0.2) 'efficientnet-b1':dict(cls='EfficientNetB1' img_size=240 dropout=0.2) 'efficientnet-b2':dict(cls='EfficientNetB2' img_size=260 dropout=0.3) 'efficientnet-b3':dict(cls='EfficientNetB3' img_size=300 dropout=0.3) 'efficientnet-b4':dict(cls='EfficientNetB4' img_size=380 dropout=0.4) 'efficientnet-b5':dict(cls='EfficientNetB5' img_size=456 dropout=0.4) 'efficientnet-b6':dict(cls='EfficientNetB6' img_size=528 dropout=0.5) 'efficientnet-b7':dict(cls='EfficientNetB7' img_size=600 dropout=0.5)}<def_stmt>create_dataset img_files:Sequence[str] labels:Sequence[Any] sample_weights:Optional[Sequence[float]]=<none> img_base_dir:str='' transform:Optional[Callable[[tf.Tensor] Any]]=<none> target_transform:Optional[Callable[[Any] Any]]=<none> cache:bool|str=<false><arrow>tf.data.Dataset<block_start>"""Create a tf.data.Dataset. The dataset returns elements (img, label, img_file, sample_weight) if sample_weights is not None, or (img, label, img_file) if sample_weights=None. 
img: tf.Tensor, shape [H, W, 3], type uint8 label: tf.Tensor img_file: tf.Tensor, scalar, type str sample_weight: tf.Tensor, scalar, type float32 Possible TODO: oversample the imbalanced classes see tf.data.experimental.sample_from_datasets Args: img_files: list of str, relative paths from img_base_dir labels: list of int if multilabel=False sample_weights: optional list of float img_base_dir: str, base directory for images transform: optional transform to apply to a single uint8 JPEG image target_transform: optional transform to apply to a single label cache: bool or str, cache images in memory if True, cache images to a file on disk if a str Returns: tf.data.Dataset """<line_sep># images dataset img_ds=tf.data.Dataset.from_tensor_slices(img_files)<line_sep>img_ds=img_ds.map(<lambda>p:tf.io.read_file(img_base_dir+os.sep+p) num_parallel_calls=AUTOTUNE)<line_sep># for smaller disk / memory usage, we cache the raw JPEG bytes instead # of the decoded Tensor <if_stmt>isinstance(cache str)<block_start>img_ds=img_ds.cache(cache)<block_end><elif_stmt>cache<block_start>img_ds=img_ds.cache()<block_end># convert JPEG bytes to a 3D uint8 Tensor # keras EfficientNet already includes normalization from [0, 255] to [0, 1], # so we don't need to do that here img_ds=img_ds.map(<lambda>img:tf.io.decode_jpeg(img channels=3))<if_stmt>transform<block_start>img_ds=img_ds.map(transform num_parallel_calls=AUTOTUNE)<block_end># labels dataset labels_ds=tf.data.Dataset.from_tensor_slices(labels)<if_stmt>target_transform<block_start>labels_ds=labels_ds.map(target_transform num_parallel_calls=AUTOTUNE)<block_end># img_files dataset img_files_ds=tf.data.Dataset.from_tensor_slices(img_files)<if_stmt>sample_weights<is><none><block_start><return>tf.data.Dataset.zip((img_ds labels_ds img_files_ds))<block_end># weights dataset weights_ds=tf.data.Dataset.from_tensor_slices(sample_weights)<line_sep><return>tf.data.Dataset.zip((img_ds labels_ds img_files_ds weights_ds))<block_end><def_stmt>create_dataloaders dataset_csv_path:str label_index_json_path:str splits_json_path:str cropped_images_dir:str img_size:int multilabel:bool label_weighted:bool weight_by_detection_conf:bool|str batch_size:int augment_train:bool cache_splits:Sequence[str]<arrow>tuple[dict[str tf.data.Dataset] list[str]]<block_start>""" Args: dataset_csv_path: str, path to CSV file with columns ['dataset', 'location', 'label'], where label is a comma-delimited list of labels splits_json_path: str, path to JSON file augment_train: bool, whether to shuffle/augment the training set cache_splits: list of str, splits to cache training set is cached at /mnt/tempds/random_file_name validation and test sets are cached in memory Returns: datasets: dict, maps split to DataLoader label_names: list of str, label names in order of label id """<line_sep>df,label_names,split_to_locs=load_dataset_csv(dataset_csv_path label_index_json_path splits_json_path multilabel=multilabel label_weighted=label_weighted weight_by_detection_conf=weight_by_detection_conf)<line_sep># define the transforms # efficientnet data preprocessing: # - train: # 1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0) # 2) bicubic resize to img_size # 3) random horizontal flip # - test: # 1) center crop # 2) bicubic resize to img_size @tf.function<def_stmt>train_transform img:tf.Tensor<arrow>tf.Tensor<block_start>"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""<line_sep>img=tf.image.resize_with_pad(img img_size img_size 
method=tf.image.ResizeMethod.BICUBIC)<line_sep>img=tf.image.random_flip_left_right(img)<line_sep>img=tf.image.random_brightness(img max_delta=0.25)<line_sep>img=tf.image.random_contrast(img lower=0.75 upper=1.25)<line_sep>img=tf.image.random_saturation(img lower=0.75 upper=1.25)<line_sep><return>img<block_end>@tf.function<def_stmt>test_transform img:tf.Tensor<arrow>tf.Tensor<block_start>"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""<line_sep>img=tf.image.resize_with_pad(img img_size img_size method=tf.image.ResizeMethod.BICUBIC)<line_sep><return>img<block_end>dataloaders={}<for_stmt>split,locs split_to_locs.items()<block_start>is_train=(split<eq>'train')<and>augment_train<line_sep>split_df=df[df['dataset_location'].isin(locs)]<line_sep>weights=<none><if_stmt>label_weighted<or>weight_by_detection_conf# weights sums to: # - if weight_by_detection_conf: (# images in split - conf delta) # - otherwise: (# images in split) <block_start>weights=split_df['weights'].tolist()<if_stmt><not>weight_by_detection_conf<block_start><assert_stmt>np.isclose(sum(weights) len(split_df))<block_end><block_end>cache:bool|str=(split<in>cache_splits)<if_stmt>split<eq>'train'<and>'train'<in>cache_splits<block_start>unique_filename=str(uuid.uuid4())<line_sep>os.makedirs('/mnt/tempds/' exist_ok=<true>)<line_sep>cache=f'/mnt/tempds/{unique_filename}'<block_end>ds=create_dataset(img_files=split_df['path'].tolist() labels=split_df['label_index'].tolist() sample_weights=weights img_base_dir=cropped_images_dir transform=train_transform<if>is_train<else>test_transform target_transform=<none> cache=cache)<if_stmt>is_train<block_start>ds=ds.shuffle(1000 reshuffle_each_iteration=<true>)<block_end>ds=ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)<line_sep>dataloaders[split]=ds<block_end><return>dataloaders label_names<block_end><def_stmt>build_model model_name:str num_classes:int img_size:int pretrained:bool finetune:bool<arrow>tf.keras.Model<block_start>"""Creates a model with an EfficientNet base."""<line_sep>class_name=EFFICIENTNET_MODELS[model_name]['cls']<line_sep>dropout=EFFICIENTNET_MODELS[model_name]['dropout']<line_sep>model_class=tf.keras.applications.__dict__[class_name]<line_sep>weights='imagenet'<if>pretrained<else><none><line_sep>inputs=tf.keras.layers.Input(shape=(img_size img_size 3))<line_sep>base_model=model_class(input_tensor=inputs weights=weights include_top=<false> pooling='avg')<if_stmt>finetune# freeze the base model's weights, including BatchNorm statistics # https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning <block_start>base_model.trainable=<false><block_end># rebuild output x=tf.keras.layers.Dropout(dropout name='top_dropout')(base_model.output)<line_sep>outputs=tf.keras.layers.Dense(num_classes kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1./3. 
mode='fan_out' distribution='uniform') name='logits')(x)<line_sep>model=tf.keras.Model(inputs outputs name='complete_model')<line_sep>model.base_model=base_model# cache this so that we can turn off finetune <return>model<block_end><def_stmt>main dataset_dir:str cropped_images_dir:str multilabel:bool model_name:str pretrained:bool finetune:int label_weighted:bool weight_by_detection_conf:bool|str epochs:int batch_size:int lr:float weight_decay:float seed:Optional[int]=<none> logdir:str='' cache_splits:Sequence[str]=()<arrow><none><block_start>"""Main function."""<line_sep># input validation <assert_stmt>os.path.exists(dataset_dir)<assert_stmt>os.path.exists(cropped_images_dir)<if_stmt>isinstance(weight_by_detection_conf str)<block_start><assert_stmt>os.path.exists(weight_by_detection_conf)<block_end># set seed seed=np.random.randint(10_000)<if>seed<is><none><else>seed<line_sep>np.random.seed(seed)<line_sep>tf.random.set_seed(seed)<line_sep># create logdir and save params params=dict(locals())# make a copy timestamp=datetime.now().strftime('%Y%m%d_%H%M%S')# '20200722_110816' logdir=os.path.join(logdir timestamp)<line_sep>os.makedirs(logdir exist_ok=<true>)<line_sep>print('Created logdir:' logdir)<with_stmt>open(os.path.join(logdir 'params.json') 'w')<as>f<block_start>json.dump(params f indent=1)<block_end>gpus=tf.config.experimental.list_physical_devices('GPU')<for_stmt>gpu gpus<block_start>tf.config.experimental.set_memory_growth(gpu <true>)<block_end>img_size=EFFICIENTNET_MODELS[model_name]['img_size']<line_sep># create dataloaders and log the index_to_label mapping loaders,label_names=create_dataloaders(dataset_csv_path=os.path.join(dataset_dir 'classification_ds.csv') label_index_json_path=os.path.join(dataset_dir 'label_index.json') splits_json_path=os.path.join(dataset_dir 'splits.json') cropped_images_dir=cropped_images_dir img_size=img_size multilabel=multilabel label_weighted=label_weighted weight_by_detection_conf=weight_by_detection_conf batch_size=batch_size augment_train=<true> cache_splits=cache_splits)<line_sep>writer=tf.summary.create_file_writer(logdir)<line_sep>writer.set_as_default()<line_sep>model=build_model(model_name num_classes=len(label_names) img_size=img_size pretrained=pretrained finetune=finetune<g>0)<line_sep># define loss function and optimizer loss_fn:tf.keras.losses.Loss<if_stmt>multilabel<block_start>loss_fn=tf.keras.losses.BinaryCrossentropy(from_logits=<true> reduction=tf.keras.losses.Reduction.NONE)<block_end><else_stmt><block_start>loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true> reduction=tf.keras.losses.Reduction.NONE)<block_end># using EfficientNet training defaults # - batch norm momentum: 0.99 # - optimizer: RMSProp, decay 0.9 and momentum 0.9 # - epochs: 350 # - learning rate: 0.256, decays by 0.97 every 2.4 epochs # - weight decay: 1e-5 lr_schedule=tf.keras.optimizers.schedules.ExponentialDecay(lr decay_steps=1 decay_rate=0.97 staircase=<true>)<line_sep>optimizer=tf.keras.optimizers.RMSprop(learning_rate=lr rho=0.9 momentum=0.9)<line_sep>best_epoch_metrics:dict[str float]={}<for_stmt>epoch range(epochs)<block_start>print(f'Epoch: {epoch}')<line_sep>optimizer.learning_rate=lr_schedule(epoch)<line_sep>tf.summary.scalar('lr' optimizer.learning_rate epoch)<if_stmt>epoch<g>0<and>finetune<eq>epoch<block_start>print('Turning off fine-tune!')<line_sep>model.base_model.trainable=<true><block_end>print('- train:')<line_sep># TODO: change weighted to False if oversampling minority classes 
train_metrics,train_heaps,train_cm=run_epoch(model loader=loaders['train'] weighted=label_weighted loss_fn=loss_fn weight_decay=weight_decay optimizer=optimizer finetune=finetune<g>epoch return_extreme_images=<true>)<line_sep>train_metrics=prefix_all_keys(train_metrics prefix='train/')<line_sep>log_run('train' epoch writer label_names metrics=train_metrics heaps=train_heaps cm=train_cm)<line_sep>print('- val:')<line_sep>val_metrics,val_heaps,val_cm=run_epoch(model loader=loaders['val'] weighted=label_weighted loss_fn=loss_fn return_extreme_images=<true>)<line_sep>val_metrics=prefix_all_keys(val_metrics prefix='val/')<line_sep>log_run('val' epoch writer label_names metrics=val_metrics heaps=val_heaps cm=val_cm)<if_stmt>val_metrics['val/acc_top1']<g>best_epoch_metrics.get('val/acc_top1' 0)# pylint: disable=line-too-long <block_start>filename=os.path.join(logdir f'ckpt_{epoch}.h5')<line_sep>print(f'New best model! Saving checkpoint to {filename}')<line_sep>model.save(filename)<line_sep>best_epoch_metrics.update(train_metrics)<line_sep>best_epoch_metrics.update(val_metrics)<line_sep>best_epoch_metrics['epoch']=epoch<line_sep>print('- test:')<line_sep>test_metrics,test_heaps,test_cm=run_epoch(model loader=loaders['test'] weighted=label_weighted loss_fn=loss_fn return_extreme_images=<true>)<line_sep>test_metrics=prefix_all_keys(test_metrics prefix='test/')<line_sep>log_run('test' epoch writer label_names metrics=test_metrics heaps=test_heaps cm=test_cm)<block_end># stop training after 8 epochs without improvement <if_stmt>epoch<ge>best_epoch_metrics['epoch']+8<block_start><break><block_end><block_end>hparams_dict={'model_name':model_name 'multilabel':multilabel 'finetune':finetune 'batch_size':batch_size 'epochs':epochs}<line_sep>hp.hparams(hparams_dict)<line_sep>writer.close()<block_end><def_stmt>log_run split:str epoch:int writer:tf.summary.SummaryWriter label_names:Sequence[str] metrics:MutableMapping[str float] heaps:Mapping[str Mapping[int list[HeapItem]]] cm:np.ndarray<arrow><none><block_start>"""Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a single epoch run to Tensorboard. 
Args: metrics: dict, keys already prefixed with {split}/ """<line_sep>per_class_recall=recall_from_confusion_matrix(cm label_names)<line_sep>metrics.update(prefix_all_keys(per_class_recall f'{split}/label_recall/'))<line_sep># log metrics <for_stmt>metric,value metrics.items()<block_start>tf.summary.scalar(metric value epoch)<block_end># log confusion matrix cm_fig=plot_utils.plot_confusion_matrix(cm classes=label_names normalize=<true>)<line_sep>cm_fig_img=tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis <ellipsis>])<line_sep>tf.summary.image(f'confusion_matrix/{split}' cm_fig_img step=epoch)<line_sep># log tp/fp/fn images <for_stmt>heap_type,heap_dict heaps.items()<block_start>log_images_with_confidence(heap_dict label_names epoch=epoch tag=f'{split}/{heap_type}')<block_end>writer.flush()<block_end><def_stmt>log_images_with_confidence heap_dict:Mapping[int list[HeapItem]] label_names:Sequence[str] epoch:int tag:str<arrow><none><block_start>""" Args: heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem data is a list [img, target, top3_conf, top3_preds, img_file], and img is a tf.Tensor of shape [H, W, 3] label_names: list of str, label names in order of label id epoch: int tag: str """<for_stmt>label_id,heap heap_dict.items()<block_start>label_name=label_names[label_id]<line_sep>sorted_heap=sorted(heap reverse=<true>)# sort largest to smallest imgs_list=[item.data<for>item sorted_heap]<line_sep>fig,img_files=imgs_with_confidences(imgs_list label_names)<line_sep># tf.summary.image requires input of shape [N, H, W, C] fig_img=tf.convert_to_tensor(fig_to_img(fig)[np.newaxis <ellipsis>])<line_sep>tf.summary.image(f'{label_name}/{tag}' fig_img step=epoch)<line_sep>tf.summary.text(f'{label_name}/{tag}_files' '\n\n'.join(img_files) step=epoch)<block_end><block_end><def_stmt>track_extreme_examples tp_heaps:dict[int list[HeapItem]] fp_heaps:dict[int list[HeapItem]] fn_heaps:dict[int list[HeapItem]] inputs:tf.Tensor labels:tf.Tensor img_files:tf.Tensor logits:tf.Tensor<arrow><none><block_start>"""Updates the 5 most extreme true-positive (tp), false-positive (fp), and false-negative (fn) examples with examples from this batch. 
Each HeapItem's data attribute is a tuple with: - img: np.ndarray, shape [H, W, 3], type uint8 - label: int - top3_conf: list of float - top3_preds: list of float - img_file: str Args: *_heaps: dict, maps label_id (int) to heap of HeapItems inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32 labels: tf.Tensor, shape [batch_size] img_files: tf.Tensor, shape [batch_size], type tf.string logits: tf.Tensor, shape [batch_size, num_classes] """<line_sep>labels=labels.numpy().tolist()<line_sep>inputs=inputs.numpy().astype(np.uint8)<line_sep>img_files=img_files.numpy().astype(str).tolist()<line_sep>batch_probs=tf.nn.softmax(logits axis=1)<line_sep>iterable=zip(labels inputs img_files batch_probs)<for_stmt>label,img,img_file,confs iterable<block_start>label_conf=confs[label].numpy().item()<line_sep>top3_conf,top3_preds=tf.math.top_k(confs k=3 sorted=<true>)<line_sep>top3_conf=top3_conf.numpy().tolist()<line_sep>top3_preds=top3_preds.numpy().tolist()<line_sep>data=(img label top3_conf top3_preds img_file)<if_stmt>top3_preds[0]<eq>label# true positive <block_start>item=HeapItem(priority=label_conf-top3_conf[1] data=data)<line_sep>add_to_heap(tp_heaps[label] item k=5)<block_end><else_stmt># false positive for top3_pred[0] # false negative for label <block_start>item=HeapItem(priority=top3_conf[0]-label_conf data=data)<line_sep>add_to_heap(fp_heaps[top3_preds[0]] item k=5)<line_sep>add_to_heap(fn_heaps[label] item k=5)<block_end><block_end><block_end><def_stmt>run_epoch model:tf.keras.Model loader:tf.data.Dataset weighted:bool top:Sequence[int]=(1 3) loss_fn:Optional[tf.keras.losses.Loss]=<none> weight_decay:float=0 finetune:bool=<false> optimizer:Optional[tf.keras.optimizers.Optimizer]=<none> return_extreme_images:bool=<false><arrow>tuple[dict[str float] dict[str dict[int list[HeapItem]]] np.ndarray]<block_start>"""Runs for 1 epoch. 
Args: model: tf.keras.Model loader: tf.data.Dataset weighted: bool, whether to use sample weights in calculating loss and accuracy top: tuple of int, list of values of k for calculating top-K accuracy loss_fn: optional loss function, calculates the mean loss over a batch weight_decay: float, L2-regularization constant finetune: bool, if true sets model's dropout and BN layers to eval mode optimizer: optional optimizer Returns: metrics: dict, metrics from epoch, contains keys: 'loss': float, mean per-example loss over entire epoch, only included if loss_fn is not None 'acc_top{k}': float, accuracy@k over the entire epoch heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts, each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with data attribute (img, target, top3_conf, top3_preds, img_file) - 'tp': priority is the difference between target confidence and 2nd highest confidence - 'fp': priority is the difference between highest confidence and target confidence - 'fn': same as 'fp' confusion_matrix: np.ndarray, shape [num_classes, num_classes], C[i, j] = # of samples with true label i, predicted as label j """<line_sep># if evaluating or finetuning, set dropout & BN layers to eval mode is_train=<false><line_sep>train_dropout_and_bn=<false><if_stmt>optimizer<is><not><none><block_start><assert_stmt>loss_fn<is><not><none><line_sep>is_train=<true><if_stmt><not>finetune<block_start>train_dropout_and_bn=<true><line_sep>reg_vars=[v<for>v model.trainable_variables<if>'kernel'<in>v.name]<block_end><block_end><if_stmt>loss_fn<is><not><none><block_start>losses=tf.keras.metrics.Mean()<block_end>accuracies_topk={k:tf.keras.metrics.SparseTopKCategoricalAccuracy(k)<for>k top}<line_sep># for each label, track 5 most-confident and least-confident examples tp_heaps:dict[int list[HeapItem]]=defaultdict(list)<line_sep>fp_heaps:dict[int list[HeapItem]]=defaultdict(list)<line_sep>fn_heaps:dict[int list[HeapItem]]=defaultdict(list)<line_sep>all_labels=[]<line_sep>all_preds=[]<line_sep>tqdm_loader=tqdm.tqdm(loader)<for_stmt>batch tqdm_loader<block_start><if_stmt>weighted<block_start>inputs,labels,img_files,weights=batch<block_end><else_stmt># even if batch contains sample weights, don't use them <block_start>inputs,labels,img_files=batch[0:3]<line_sep>weights=<none><block_end>all_labels.append(labels.numpy())<line_sep>desc=[]<with_stmt>tf.GradientTape(watch_accessed_variables=is_train)<as>tape<block_start>outputs=model(inputs training=train_dropout_and_bn)<if_stmt>loss_fn<is><not><none><block_start>loss=loss_fn(labels outputs)<if_stmt>weights<is><not><none><block_start>loss<augmul>weights<block_end># we do not track L2-regularization loss in the loss metric losses.update_state(loss sample_weight=weights)<line_sep>desc.append(f'Loss {losses.result().numpy():.4f}')<block_end><if_stmt>optimizer<is><not><none><block_start>loss=tf.math.reduce_mean(loss)<if_stmt><not>finetune# only regularize layers before the final FC <block_start>loss<augadd>weight_decay<times>tf.add_n(tf.nn.l2_loss(v)<for>v reg_vars)<block_end><block_end><block_end>all_preds.append(tf.math.argmax(outputs axis=1).numpy())<if_stmt>optimizer<is><not><none><block_start>gradients=tape.gradient(loss model.trainable_variables)<line_sep>optimizer.apply_gradients(zip(gradients model.trainable_variables))<block_end><for_stmt>k,acc accuracies_topk.items()<block_start>acc.update_state(labels outputs sample_weight=weights)<line_sep>desc.append(f'Acc@{k} {acc.result().numpy()<times>100:.3f}')<block_end>tqdm_loader.set_description(' 
'.join(desc))<if_stmt>return_extreme_images<block_start>track_extreme_examples(tp_heaps fp_heaps fn_heaps inputs labels img_files outputs)<block_end><block_end>confusion_matrix=sklearn.metrics.confusion_matrix(y_true=np.concatenate(all_labels) y_pred=np.concatenate(all_preds))<line_sep>metrics={}<if_stmt>loss_fn<is><not><none><block_start>metrics['loss']=losses.result().numpy().item()<block_end><for_stmt>k,acc accuracies_topk.items()<block_start>metrics[f'acc_top{k}']=acc.result().numpy().item()<times>100<block_end>heaps={'tp':tp_heaps 'fp':fp_heaps 'fn':fn_heaps}<line_sep><return>metrics heaps confusion_matrix<block_end><def_stmt>_parse_args <arrow>argparse.Namespace<block_start>"""Parses arguments."""<line_sep>parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter description='Trains classifier.')<line_sep>parser.add_argument('dataset_dir' help='path to directory containing: 1) classification dataset CSV, '<concat>'2) label index JSON, 3) splits JSON')<line_sep>parser.add_argument('cropped_images_dir' help='path to local directory where image crops are saved')<line_sep>parser.add_argument('--multilabel' action='store_true' help='for multi-label, multi-class classification')<line_sep>parser.add_argument('-m' '--model-name' default='efficientnet-b0' choices=list(EFFICIENTNET_MODELS.keys()) help='which EfficientNet model')<line_sep>parser.add_argument('--pretrained' action='store_true' help='start with pretrained model')<line_sep>parser.add_argument('--finetune' type=int default=0 help='only fine tune the final fully-connected layer for the first '<concat>'<finetune> epochs')<line_sep>parser.add_argument('--label-weighted' action='store_true' help='weight training samples to balance labels')<line_sep>parser.add_argument('--weight-by-detection-conf' nargs='?' const=<true> default=<false> help='weight training examples by detection confidence. '<concat>'Optionally takes a .npz file for isotonic calibration.')<line_sep>parser.add_argument('--epochs' type=int default=0 help='number of epochs for training, 0 for eval-only')<line_sep>parser.add_argument('--batch-size' type=int default=256 help='batch size for both training and eval')<line_sep>parser.add_argument('--lr' type=float default=<none> help='initial learning rate, defaults to (0.016 * batch_size / 256)')<line_sep>parser.add_argument('--weight-decay' type=float default=1e-5 help='weight decay')<line_sep>parser.add_argument('--seed' type=int help='random seed')<line_sep>parser.add_argument('--logdir' default='.' help='directory where TensorBoard logs and a params file are saved')<line_sep>parser.add_argument('--cache' nargs='*' choices=['train' 'val' 'test'] default=() help='which splits of the dataset to cache')<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=_parse_args()<if_stmt>args.lr<is><none><block_start>args.lr=0.016<times>args.batch_size/256# based on TF models repo <block_end>main(dataset_dir=args.dataset_dir cropped_images_dir=args.cropped_images_dir multilabel=args.multilabel model_name=args.model_name pretrained=args.pretrained finetune=args.finetune label_weighted=args.label_weighted weight_by_detection_conf=args.weight_by_detection_conf epochs=args.epochs batch_size=args.batch_size lr=args.lr weight_decay=args.weight_decay seed=args.seed logdir=args.logdir cache_splits=args.cache)<block_end>
""" Initialization methods. """<line_sep># pylint: disable=wildcard-import <import_from_stmt>.methods *<import_from_stmt>. methods<line_sep>__all__=[]<line_sep>__all__<augadd>methods.__all__<line_sep>
"""This module contains custom data-structures."""<import_stmt>six<class_stmt>TreeMap(dict)<block_start>"""Tree structure implemented with nested dictionaries."""<def_stmt>get_paths self<block_start>"""Get all paths from the root to the leaves. For example, given a chain like `{'a':{'b':{'c':None}}}`, this method would return `[['a', 'b', 'c']]`. Returns: A list of lists of paths. """<line_sep>paths=[]<for_stmt>key,child six.iteritems(self)<block_start><if_stmt>isinstance(child TreeMap)<and>child# current child is an intermediate node <block_start><for_stmt>path child.get_paths()<block_start>path.insert(0 key)<line_sep>paths.append(path)<block_end><block_end><else_stmt># current child is an endpoint <block_start>paths.append([key])<block_end><block_end><return>paths<block_end><def_stmt>insert self parts leaf_value update=<false><block_start>"""Add a list of nodes into the tree. The list will be converted into a TreeMap (chain) and then merged with the current TreeMap. For example, this method would insert `['a','b','c']` as `{'a':{'b':{'c':{}}}}`. Arguments: parts: List of nodes representing a chain. leaf_value: Value to insert into the leaf of the chain. update: Whether or not to update the leaf with the given value or to replace the value. Returns: self """<line_sep>tree=self<if_stmt><not>parts<block_start><return>tree<block_end>cur=tree<line_sep>last=len(parts)-1<for_stmt>i,part enumerate(parts)<block_start><if_stmt>part<not><in>cur<block_start>cur[part]=TreeMap()<if>i<ne>last<else>leaf_value<block_end><elif_stmt>i<eq>last# found leaf <block_start><if_stmt>update<block_start>cur[part].update(leaf_value)<block_end><else_stmt><block_start>cur[part]=leaf_value<block_end><block_end>cur=cur[part]<block_end><return>self<block_end><block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>datasets.registry<import_from_stmt>foundations hparams<import_from_stmt>foundations.step Step<import_from_stmt>lottery.branch base<import_stmt>models.registry<import_from_stmt>pruning.mask Mask<import_from_stmt>pruning.pruned_model PrunedModel<import_from_stmt>training train<class_stmt>Branch(base.Branch)<block_start><def_stmt>branch_function self retrain_d:hparams.DatasetHparams retrain_t:hparams.TrainingHparams start_at_step_zero:bool=<false># Get the mask and model. <block_start>m=models.registry.load(self.level_root self.lottery_desc.train_start_step self.lottery_desc.model_hparams)<line_sep>m=PrunedModel(m Mask.load(self.level_root))<line_sep>start_step=Step.from_iteration(0<if>start_at_step_zero<else>self.lottery_desc.train_start_step.iteration datasets.registry.iterations_per_epoch(retrain_d))<line_sep>train.standard_train(m self.branch_root retrain_d retrain_t start_step=start_step verbose=self.verbose)<block_end>@staticmethod<def_stmt>description <block_start><return>"Retrain the model with different hyperparameters."<block_end>@staticmethod<def_stmt>name <block_start><return>'retrain'<block_end><block_end>
# Time: O(|V| + |E|) # Space: O(|V|) # """ # This is HtmlParser's API interface. # You should not implement it, or speculate about its implementation # """ <class_stmt>HtmlParser(object)<block_start><def_stmt>getUrls self url<block_start>""" :type url: str :rtype List[str] """<line_sep><pass><block_end><block_end><class_stmt>Solution(object)<block_start><def_stmt>crawl self startUrl htmlParser<block_start>""" :type startUrl: str :type htmlParser: HtmlParser :rtype: List[str] """<line_sep>SCHEME="http://"<def_stmt>hostname url<block_start>pos=url.find('/' len(SCHEME))<if_stmt>pos<eq>-1<block_start><return>url<block_end><return>url[:pos]<block_end>result=[startUrl]<line_sep>lookup=set(result)<for_stmt>from_url result<block_start>name=hostname(from_url)<for_stmt>to_url htmlParser.getUrls(from_url)<block_start><if_stmt>to_url<not><in>lookup<and>name<eq>hostname(to_url)<block_start>result.append(to_url)<line_sep>lookup.add(to_url)<block_end><block_end><block_end><return>result<block_end><block_end>
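To see the same-hostname BFS in action without a real HtmlParser backend, the hedged sketch below stubs getUrls with a hard-coded link map; the stub class and URLs are illustrative only.

# Hedged usage sketch: drive Solution.crawl with a stubbed HtmlParser.
class FakeHtmlParser(object):
    """Stub that serves links from an in-memory map (illustrative data)."""
    LINKS = {
        "http://news.example.org": ["http://news.example.org/a",
                                    "http://other.example.com"],
        "http://news.example.org/a": ["http://news.example.org/b"],
        "http://news.example.org/b": [],
    }

    def getUrls(self, url):
        return self.LINKS.get(url, [])

print(Solution().crawl("http://news.example.org", FakeHtmlParser()))
# Only same-hostname URLs are kept:
# ['http://news.example.org', 'http://news.example.org/a', 'http://news.example.org/b']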
""" Settings for re-running the experiments from the paper "Layer-wise relevance propagation for explaining deep neural network decisions in MRI-based Alzheimer’s disease classification". Please note that you need to download the ADNI data from http://adni.loni.usc.edu/ and preprocess it using https://github.com/ANTsX/ANTs/blob/master/Scripts/antsRegistrationSyNQuick.sh Please prepare the data, such that you will get three HDF5 files, consisting of a training, a validation and a holdout (test) set. Each HDF5 file is required to have 2 datasets, namely X and y, containing the data matrix and label vector accordingly. We have included the "Data Split ADNI.ipynb" file as a guideline for data splitting. Please note that it is highly dependent on the format of your data storage and needs to be individualized as such. Furthermore you will need SPM12 https://www.fil.ion.ucl.ac.uk/spm/software/spm12/ in order to access the Neuromorphometrics atlas. Arguments: model_path: Path to the trained pytorch model parameters data_path: Path where the outputs will be stored and retrieved ADNI_DIR: Path to the root of your downloaded ADNI data train_h5: Path to the training set HDF5 file val_h5: Path to the validation set HDF5 file holdout_h5: Path to the holdout set HDF5 file binary_brain_mask: Path to the mask used for masking the images, included in the repository. nmm_mask_path: Path to the Neuromorphometrics mask. Needs to be acquired from SPM12. Typically located under ~/spm12/tpm/labels_Neuromorphometrics.nii nmm_mask_path_scaled: Path to the rescaled Neuromorphometrics mask. """<line_sep>settings={"model_path":INSERT "data_path":INSERT "ADNI_DIR":INSERT "train_h5":INSERT "val_h5":INSERT "holdout_h5":INSERT "binary_brain_mask":"binary_brain_mask.nii.gz" "nmm_mask_path":"~/spm12/tpm/labels_Neuromorphometrics.nii" "nmm_mask_path_scaled":"nmm_mask_rescaled.nii"}<line_sep>
<import_stmt>math<import_stmt>unittest<import_stmt>torch<import_from_stmt>nuscenes.prediction.models mtp<class_stmt>TestMTPLoss(unittest.TestCase)<block_start>""" Test each component of MTPLoss as well as the __call__ method. """<def_stmt>test_get_trajectories_and_modes self<block_start>loss_n_modes_5=mtp.MTPLoss(5 0 0)<line_sep>loss_n_modes_1=mtp.MTPLoss(1 0 0)<line_sep>xy_pred=torch.arange(60).view(1 -1).repeat(1 5).view(-1 60)<line_sep>mode_pred=torch.arange(5).view(1 -1)<line_sep>prediction_bs_1=torch.cat([xy_pred.reshape(1 -1) mode_pred] dim=1)<line_sep>prediction_bs_2=prediction_bs_1.repeat(2 1)<line_sep># Testing many modes with batch size 1. traj,modes=loss_n_modes_5._get_trajectory_and_modes(prediction_bs_1)<line_sep>self.assertTrue(torch.allclose(traj xy_pred.unsqueeze(0).reshape(1 5 30 2)))<line_sep>self.assertTrue(torch.allclose(modes mode_pred))<line_sep># Testing many modes with batch size > 1. traj,modes=loss_n_modes_5._get_trajectory_and_modes(prediction_bs_2)<line_sep>self.assertTrue(torch.allclose(traj xy_pred.repeat(1 2).unsqueeze(0).reshape(2 5 30 2)))<line_sep>self.assertTrue(torch.allclose(modes mode_pred.repeat(2 1)))<line_sep>xy_pred=torch.arange(60).view(1 -1).repeat(1 1).view(-1 60)<line_sep>mode_pred=torch.arange(1).view(1 -1)<line_sep>prediction_bs_1=torch.cat([xy_pred.reshape(1 -1) mode_pred] dim=1)<line_sep>prediction_bs_2=prediction_bs_1.repeat(2 1)<line_sep># Testing one mode with batch size 1. traj,modes=loss_n_modes_1._get_trajectory_and_modes(prediction_bs_1)<line_sep>self.assertTrue(torch.allclose(traj xy_pred.unsqueeze(0).reshape(1 1 30 2)))<line_sep>self.assertTrue(torch.allclose(modes mode_pred))<line_sep># Testing one mode with batch size > 1. traj,modes=loss_n_modes_1._get_trajectory_and_modes(prediction_bs_2)<line_sep>self.assertTrue(torch.allclose(traj xy_pred.repeat(1 2).unsqueeze(0).reshape(2 1 30 2)))<line_sep>self.assertTrue(torch.allclose(modes mode_pred.repeat(2 1)))<block_end><def_stmt>test_angle_between_trajectories self<block_start><def_stmt>make_trajectory last_point<block_start>traj=torch.zeros((12 2))<line_sep>traj[-1]=torch.Tensor(last_point)<line_sep><return>traj<block_end>loss=mtp.MTPLoss(0 0 0)<line_sep># test angle is 0. self.assertEqual(loss._angle_between(make_trajectory([0 0]) make_trajectory([0 0])) 0.)<line_sep>self.assertEqual(loss._angle_between(make_trajectory([15 15]) make_trajectory([15 15])) 0.)<line_sep># test angle is 15. self.assertAlmostEqual(loss._angle_between(make_trajectory([1 1]) make_trajectory([math.sqrt(3)/2 0.5])) 15. places=4)<line_sep># test angle is 30. self.assertAlmostEqual(loss._angle_between(make_trajectory([1 0]) make_trajectory([math.sqrt(3)/2 0.5])) 30. places=4)<line_sep># test angle is 45. self.assertAlmostEqual(loss._angle_between(make_trajectory([1 1]) make_trajectory([0 1])) 45. places=4)<line_sep># test angle is 90. self.assertAlmostEqual(loss._angle_between(make_trajectory([1 1]) make_trajectory([-1 1])) 90. places=4)<line_sep>self.assertAlmostEqual(loss._angle_between(make_trajectory([1 0]) make_trajectory([0 1])) 90. places=4)<line_sep># test angle is 180. self.assertAlmostEqual(loss._angle_between(make_trajectory([1 0]) make_trajectory([-1 0])) 180. places=4)<line_sep>self.assertAlmostEqual(loss._angle_between(make_trajectory([0 1]) make_trajectory([0 -1])) 180. places=4)<line_sep>self.assertAlmostEqual(loss._angle_between(make_trajectory([3 1]) make_trajectory([-3 -1])) 180. 
places=4)<block_end><def_stmt>test_compute_best_mode_nothing_below_threshold self<block_start>angles=[(90 0) (80 1) (70 2)]<line_sep>target=<none><line_sep>traj=<none><line_sep>loss=mtp.MTPLoss(3 0 5)<line_sep>self.assertTrue(loss._compute_best_mode(angles target traj)<in>{0 1 2})<line_sep>loss=mtp.MTPLoss(3 0 65)<line_sep>self.assertTrue(loss._compute_best_mode(angles target traj)<in>{0 1 2})<block_end><def_stmt>test_compute_best_mode_only_one_below_threshold self<block_start>angles=[(30 1) (3 0) (25 2)]<line_sep>target=torch.ones((1 6 2))<line_sep>trajectory=torch.zeros((3 6 2))<line_sep>loss=mtp.MTPLoss(3 0 5)<line_sep>self.assertEqual(loss._compute_best_mode(angles target trajectory) 0)<block_end><def_stmt>test_compute_best_mode_multiple_below_threshold self<block_start>angles=[(2 2) (4 1) (10 0)]<line_sep>target=torch.ones((1 6 2))<line_sep>trajectory=torch.zeros((3 6 2))<line_sep>trajectory[1]=1<line_sep>loss=mtp.MTPLoss(3 0 5)<line_sep>self.assertEqual(loss._compute_best_mode(angles target trajectory) 1)<block_end><def_stmt>test_compute_best_mode_only_one_mode self<block_start>angles=[(25 0)]<line_sep>target=torch.ones((1 6 2))<line_sep>trajectory=torch.zeros((1 6 2))<line_sep>loss=mtp.MTPLoss(1 0 5)<line_sep>self.assertEqual(loss._compute_best_mode(angles target trajectory) 0)<line_sep>trajectory[0]=1<line_sep>self.assertEqual(loss._compute_best_mode(angles target trajectory) 0)<block_end><def_stmt>test_loss_single_mode self<block_start>targets=torch.zeros((16 1 30 2))<line_sep>targets[: : : 1]=torch.arange(start=0 end=3 step=0.1)<line_sep>predictions=torch.ones((16 61))<line_sep>predictions[: :60]=targets[0 0 : :].reshape(-1 60)<line_sep>predictions[: 60]=1/10<line_sep>loss=mtp.MTPLoss(1 1 angle_threshold_degrees=20)<line_sep># Only regression loss in single mode case. self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) 0 places=4)<line_sep># Now the best mode differs by 1 from the ground truth. # Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1. predictions[: :60]<augadd>1<line_sep>self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) 0.5 places=4)<line_sep># In this case, one element has perfect regression, the others are off by 1. predictions[1 :60]<augsub>1<line_sep>self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) (15/16)<times>0.5 places=4)<block_end><def_stmt>test_loss_many_modes self<block_start>targets=torch.zeros((16 1 30 2))<line_sep>targets[: : : 1]=torch.arange(start=0 end=3 step=0.1)<line_sep>predictions=torch.ones((16 610))<line_sep>predictions[: 540:600]=targets[0 0 : :].reshape(-1 60)<line_sep>predictions[: -10:]=1/10<line_sep>loss=mtp.MTPLoss(10 1 angle_threshold_degrees=20)<line_sep># Since one mode exactly matches gt, loss should only be classification error. self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) -math.log(1/10) places=4)<line_sep># Now the best mode differs by 1 from the ground truth. # Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1. predictions[: 540:600]<augadd>1<line_sep>self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) -math.log(1/10)+0.5 places=4)<line_sep># In this case, one element has perfect regression, the others are off by 1. predictions[1 540:600]<augsub>1<line_sep>self.assertAlmostEqual(float(loss(predictions targets).detach().numpy()) -math.log(1/10)+(15/16)<times>0.5 places=4)<block_end><block_end>
<import_stmt>os<import_stmt>sys<line_sep>compiler=r'../Binary/RelWithDebInfo/ShaderCompiler'<line_sep>#compiler = r'../Binary/Debug/ShaderCompiler' shader_dirs=['.' './Editor']<line_sep>count=0<for_stmt>d shader_dirs<block_start><for_stmt>fn os.listdir(d)<block_start>print(fn)<line_sep>ext=fn.split('.')[-1]<if_stmt>ext<in>['surf' 'shader']<block_start>cmd=compiler+' '+os.path.abspath(os.path.join(d fn))<line_sep>print(cmd)<if_stmt>os.system(cmd)<ne>0<block_start>print("Compile ERROR: " fn)<line_sep>sys.exit()<block_end>count<augadd>1<block_end><block_end><block_end>print("Done. {} shaders compiled.".format(count))<line_sep>
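A variant of the same loop using subprocess instead of os.system gives clearer error reporting; this is a hedged alternative sketch, not part of the original build script, and it reuses the compiler path and shader_dirs defined above.

# Hedged alternative: same compile loop with explicit return-code handling.
import os
import subprocess
import sys

count = 0
for d in shader_dirs:
    for fn in os.listdir(d):
        if fn.split('.')[-1] not in ('surf', 'shader'):
            continue
        path = os.path.abspath(os.path.join(d, fn))
        result = subprocess.run([compiler, path])
        if result.returncode != 0:
            print("Compile ERROR:", fn)
            sys.exit(1)
        count += 1
print("Done. {} shaders compiled.".format(count))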
# SPDX-License-Identifier: Apache-2.0 """Graph Optimizer Base"""<import_stmt>copy<import_from_stmt>.. logging utils<class_stmt>GraphOptimizerBase(object)<block_start>"""optimizer graph to improve performance """<def_stmt>__init__ self<block_start>self._logger=logging.getLogger('.'.join(__name__.split('.')[:-1]+[self.__class__.__name__]))<line_sep>self._graph_been_opt=<false><line_sep>self.opt_iteration=0<block_end>@property<def_stmt>logger self<block_start><return>self._logger<block_end>@property<def_stmt>is_debug_mode self<block_start><return>utils.is_debug_mode()<block_end>@property<def_stmt>graph_been_opt self<block_start><return>self._graph_been_opt<block_end>@graph_been_opt.setter<def_stmt>graph_been_opt self value<block_start>self._graph_been_opt=value<block_end><def_stmt>optimize self graph iteration<block_start>""" Optimize graph, return optimized graph. """<line_sep>before=graph.dump_node_statistics()<line_sep>self.opt_iteration=iteration<line_sep>graph=self._optimize(graph)<line_sep>graph.update_proto()<line_sep>graph.delete_unused_nodes(graph.outputs)<line_sep>after=graph.dump_node_statistics()<line_sep>self._print_stat_diff(before after)<line_sep><return>graph<block_end><def_stmt>_optimize self graph<block_start>""" Derived class should override this function. """<line_sep><raise>NotImplementedError<block_end>@staticmethod<def_stmt>_apply_optimization graph optimize_func<block_start>""" optimize graph will also optimize graph of nodes' Args: graph: the top level graph to be optimized optimize_func: function to optimize graph """<line_sep>graph=optimize_func(graph)<for_stmt>node graph.get_nodes()<block_start>body_graphs=node.get_body_graphs()<if_stmt>body_graphs<block_start><for_stmt>attr,b_g body_graphs.items()<block_start>b_g=GraphOptimizerBase._apply_optimization(b_g optimize_func)<line_sep>node.set_body_graph_as_attr(attr b_g)<block_end><block_end><block_end><return>graph<block_end><def_stmt>_print_stat_diff self before after<block_start>diff=copy.deepcopy(after)<line_sep>diff.subtract(before)<line_sep>diff=["{} {} ({}->{})".format(k str(v)<if>v<l>0<else>'+'+str(v) before.get(k 0) after.get(k 0))<for>k,v sorted(diff.items())<if>v<ne>0]<line_sep>self.logger.verbose(', '.join(diff)<if>diff<else>"no change")<block_end><block_end>
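A subclass only has to implement _optimize; the hedged sketch below shows the minimal shape of such an optimizer. It returns every graph unchanged and routes through _apply_optimization so nested body graphs are visited too; the class and helper-method names are made up for illustration.

# Hedged sketch of the minimal subclass contract (no real rewriting is done).
class NoopOptimizer(GraphOptimizerBase):
    """Example optimizer that visits every (sub)graph and leaves it unchanged."""

    def __init__(self):
        super(NoopOptimizer, self).__init__()

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # A real optimizer would inspect graph.get_nodes() here and rewrite them.
        return graph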
# # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # # BootPriority.py # # This file defines the BootPriority helper class. # The boot priority class defines helper methods associated with boot priority <class_stmt>BootPriority# Returns the appropriate boot priority based on the name and type of # register provided along with if the register is write only <block_start><def_stmt>getBootPriority aName=<none> aType=<none> aWriteOnly=0<block_start><return>1<block_end><block_end>
<import_stmt>pytest<line_sep># from pytest_factoryboy import register <def_stmt>pytest_addoption parser<block_start>parser.addoption("--slow" action="store_true" default=<false> help="run slow tests" )<line_sep>parser.addoption("--all" action="store_true" default=<false> help="run all tests" )<block_end><def_stmt>_is_connection_psql <block_start><import_from_stmt>django.db connection<line_sep><return>connection.vendor<eq>'postgresql'<block_end><def_stmt>pytest_collection_modifyitems config items<block_start>skip_pg=pytest.mark.skip(reason="connection is not a postgres database")<if_stmt><not>_is_connection_psql()<block_start><for_stmt>item items<block_start><if_stmt>"postgres"<in>item.keywords<block_start>item.add_marker(skip_pg)<block_end><block_end><block_end><if_stmt>config.getoption("--all")<block_start><return><block_end><elif_stmt>config.getoption("--slow")<block_start>skip_non_slow=pytest.mark.skip(reason="need --slow option to run")<for_stmt>item items<block_start><if_stmt>"slow"<not><in>item.keywords<block_start>item.add_marker(skip_non_slow)<block_end><block_end><block_end><else_stmt><block_start>skip_slow=pytest.mark.skip(reason="need --slow option to run")<for_stmt>item items<block_start><if_stmt>"slow"<in>item.keywords<block_start>item.add_marker(skip_slow)<block_end><block_end><block_end><block_end>
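Tests opt into this collection behaviour through markers; the hedged sketch below shows how a test module would use them. Registering the "slow" and "postgres" markers in pytest.ini or pyproject.toml is assumed and not shown.

# Hedged example of a test module relying on the conftest above.
import pytest


@pytest.mark.slow
def test_full_reindex():
    # Collected only with --slow or --all.
    assert True


@pytest.mark.postgres
def test_jsonb_query():
    # Skipped automatically unless the Django connection is postgresql.
    assert True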
<import_stmt>os<import_stmt>random<line_sep># initialize a list called emails_list emails_list=[]<line_sep>Directory='/home/azureuser/spam_filter/enron1/emails/'<line_sep>Dir_list=os.listdir(Directory)<line_sep># read every email file in the directory into emails_list <for_stmt>file Dir_list<block_start>f=open(Directory+file 'r')<line_sep>emails_list.append(f.read())<line_sep>f.close()<block_end>
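The unused random import suggests the corpus is meant to be shuffled before splitting; a hedged sketch of that next step follows, with the 80/20 ratio and variable names being assumptions.

# Hedged follow-up: shuffle and split the loaded emails (80/20 split is an assumption).
random.shuffle(emails_list)
split_point = int(0.8 * len(emails_list))
train_emails = emails_list[:split_point]
test_emails = emails_list[split_point:]
print("train:", len(train_emails), "test:", len(test_emails))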
<import_from_stmt>math pi sin cos<import_from_stmt>panda3d.core *<import_from_stmt>direct.showbase.ShowBase ShowBase<import_from_stmt>direct.task Task<import_from_stmt>floorplan Floorplan<import_stmt>numpy<as>np<import_stmt>random<import_stmt>copy<class_stmt>Viewer(ShowBase)<block_start><def_stmt>__init__ self<block_start>ShowBase.__init__(self)<line_sep>#self.scene = self.loader.loadModel("floorplan_1.txt-floor.obj") #self.scene = base.loader.loadModel("floorplan_1.txt-floor.egg") #self.scene = base.loader.loadModel("panda.egg") #self.scene = base.loader.loadModel("environment") base.setBackgroundColor(0 0 0)<line_sep>self.angle=0.0<line_sep>lens=PerspectiveLens()<line_sep>lens.setFov(60)<line_sep>lens.setNear(0.01)<line_sep>lens.setFar(100000)<line_sep>base.cam.node().setLens(lens)<line_sep>floorplan=Floorplan('test/floorplan_7')<line_sep>#floorplan.setFilename('test/floorplan_2') floorplan.read()<line_sep>self.scene=floorplan.generateEggModel()<line_sep>self.scene.reparentTo(self.render)<line_sep>#self.scene.setScale(0.01, 0.01, 0.01) #self.scene.setTwoSided(True) self.scene.setTwoSided(<true>)<line_sep>#self.scene.setPos(0, 0, 3) #texture = loader.loadTexture("floorplan_1.png") #self.scene.setTexture(texture) #self.scene.setHpr(0, 0, 0) # angleDegrees = 0 # angleRadians = angleDegrees * (pi / 180.0) # self.camera.setPos(20 * sin(angleRadians), -20 * cos(angleRadians), 3) # self.camera.setHpr(angleDegrees, 0, 0) #self.camera.lookAt(0, 0, 0) self.alight=AmbientLight('alight')<line_sep>self.alight.setColor(VBase4(0.2 0.2 0.2 1))<line_sep>self.alnp=self.render.attachNewNode(self.alight)<line_sep>self.render.setLight(self.alnp)<line_sep>dlight=DirectionalLight('dlight')<line_sep>dlight.setColor(VBase4(1 1 1 1))<line_sep>dlnp=self.render.attachNewNode(dlight)<line_sep>#dlnp.setHpr(0, -90, 0) dlnp.setPos(0.5 0.5 3)<line_sep>dlnp.lookAt(0.5 0.5 2)<line_sep>self.render.setLight(dlnp)<for_stmt>i xrange(10)<block_start>plight=PointLight('plight')<line_sep>plight.setAttenuation((1 0 1))<line_sep>color=random.randint(10 15)<line_sep>plight.setColor(VBase4(color color color 1))<line_sep>plnp=self.render.attachNewNode(plight)<if_stmt>i<eq>0<block_start>plnp.setPos(0.5 0.5 3)<block_end><else_stmt><block_start>plnp.setPos(1<times>random.random() 1<times>random.random() 0.3)<line_sep><pass><block_end>self.render.setLight(plnp)<block_end>#base.useTrackball() #base.trackball.node().setPos(2.0, 0, 3) #base.trackball.node().setHpr(0, 0, 3) #base.enableMouse() #base.useDrive() base.disableMouse()<line_sep>self.taskMgr.add(self.spinCameraTask "SpinCameraTask")<line_sep>#self.accept('arrow_up', self.moveForward) #self.accept('arrow_up_-repeat', self.moveForward) self.topDownCameraPos=[0.5 0.5 1.5]<line_sep>self.topDownTarget=[0.5 0.499 0.5]<line_sep>self.topDownH=0<line_sep>self.startCameraPos=floorplan.startCameraPos<line_sep>self.startTarget=floorplan.startTarget<line_sep>self.startH=0<line_sep>self.cameraPos=self.topDownCameraPos<line_sep>self.target=self.topDownTarget<line_sep>self.H=self.topDownH<line_sep>self.accept('space' self.openDoor)<line_sep>self.accept('enter' self.startChangingView)<line_sep>self.viewMode='T'<line_sep>self.viewChangingProgress=1.02<line_sep>ceiling=self.scene.find("**/ceiling")<line_sep>ceiling.hide()<line_sep><return><block_end><def_stmt>moveForward self<block_start>self.cameraPos[0]<augsub>0.1<block_end><def_stmt>openDoor self<block_start>minDistance=10000<line_sep>doors=self.scene.find("**/doors")<for_stmt>door 
doors.getChildren()<block_start>mins,maxs=door.getTightBounds()<line_sep>vec_1=(mins+maxs)/2-Vec3(self.target[0] self.target[1] (mins[2]+maxs[2])/2)<line_sep>vec_2=(mins+maxs)/2-Vec3(self.cameraPos[0] self.cameraPos[1] (mins[2]+maxs[2])/2)<if_stmt>(vec_1.dot(vec_2)<g>0<and>vec_1.length()<g>vec_2.length())<or>np.arccos(abs(vec_1.dot(vec_2))/(vec_1.length()<times>vec_2.length()))<g>np.pi/4<block_start><continue><block_end>distance=pow(pow(self.cameraPos[0]-(mins[0]+maxs[0])/2 2)+pow(self.cameraPos[1]-(mins[1]+maxs[1])/2 2)+pow(self.cameraPos[2]-(mins[2]+maxs[2])/2 2) 0.5)<if_stmt>distance<l>minDistance<block_start>minDistanceDoor=door<line_sep>minDistance=distance<line_sep><pass><block_end><continue><block_end><if_stmt>minDistance<g>1<block_start><return><block_end>mins,maxs=minDistanceDoor.getTightBounds()<if_stmt>abs(maxs[0]-mins[0])<g>abs(maxs[1]-mins[1])<block_start>minsExpected=Vec3(mins[0]-(maxs[1]-mins[1]) mins[1] mins[2])<line_sep>maxsExpected=Vec3(mins[0] mins[1]+(maxs[0]-mins[0]) maxs[2])<block_end><else_stmt><block_start>minsExpected=Vec3(mins[0]-(maxs[1]-mins[1])+(maxs[0]-mins[0]) mins[1]-(maxs[0]-mins[0]) mins[2])<line_sep>maxsExpected=Vec3(mins[0]+(maxs[0]-mins[0]) mins[1]+(maxs[0]-mins[0])-(maxs[0]-mins[0]) maxs[2])<line_sep><pass><block_end>minDistanceDoor.setH(minDistanceDoor 90)<line_sep>mins,maxs=minDistanceDoor.getTightBounds()<line_sep>minDistanceDoor.setPos(minDistanceDoor minsExpected[1]-mins[1] -minsExpected[0]+mins[0] 0)<line_sep>#print(scene.findAllMatches('doors')) <return><block_end><def_stmt>startChangingView self<block_start>self.viewChangingProgress=0<line_sep>self.prevCameraPos=copy.deepcopy(self.cameraPos)<line_sep>self.prevTarget=copy.deepcopy(self.target)<line_sep>self.prevH=self.camera.getR()<if_stmt>self.viewMode<eq>'T'<block_start>self.newCameraPos=self.startCameraPos<line_sep>self.newTarget=self.startTarget<line_sep>self.newH=self.startH<line_sep>self.viewMode='C'<block_end><else_stmt><block_start>self.newCameraPos=self.topDownCameraPos<line_sep>self.newTarget=self.topDownTarget<line_sep>self.newH=self.topDownH<line_sep>self.startCameraPos=copy.deepcopy(self.cameraPos)<line_sep>self.startTarget=copy.deepcopy(self.target)<line_sep>self.startH=self.camera.getR()<line_sep>self.viewMode='T'<line_sep><pass><block_end><return><block_end><def_stmt>changeView self<block_start>self.cameraPos=[]<line_sep>self.target=[]<for_stmt>c xrange(3)<block_start>self.cameraPos.append(self.prevCameraPos[c]+(self.newCameraPos[c]-self.prevCameraPos[c])<times>self.viewChangingProgress)<line_sep>self.target.append(self.prevTarget[c]+(self.newTarget[c]-self.prevTarget[c])<times>self.viewChangingProgress)<line_sep><continue><block_end>self.H=self.prevH+(self.newH-self.prevH)<times>self.viewChangingProgress<if_stmt>self.viewChangingProgress+0.02<ge>1<and>self.viewMode<eq>'C'<block_start>ceiling=self.scene.find("**/ceiling")<line_sep>ceiling.show()<line_sep><pass><block_end><if_stmt>self.viewChangingProgress<le>0.02<and>self.viewMode<eq>'T'<block_start>ceiling=self.scene.find("**/ceiling")<line_sep>ceiling.hide()<line_sep><pass><block_end><return><block_end><def_stmt>spinCameraTask self task#print(task.time) #angleDegrees = task.time * 6.0 <block_start>movementStep=0.003<if_stmt>self.viewChangingProgress<le>1.01<block_start>self.changeView()<line_sep>self.viewChangingProgress<augadd>0.02<line_sep><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('w')<block_start><for_stmt>c 
xrange(2)<block_start>step=movementStep<times>(self.target[c]-self.cameraPos[c])<line_sep>self.cameraPos[c]<augadd>step<line_sep>self.target[c]<augadd>step<line_sep><continue><block_end><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('s')<block_start><for_stmt>c xrange(2)<block_start>step=movementStep<times>(self.target[c]-self.cameraPos[c])<line_sep>self.cameraPos[c]<augsub>step<line_sep>self.target[c]<augsub>step<line_sep><continue><block_end><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('a')<block_start>step=movementStep<times>(self.target[0]-self.cameraPos[0])<line_sep>self.cameraPos[1]<augadd>step<line_sep>self.target[1]<augadd>step<line_sep>step=movementStep<times>(self.target[1]-self.cameraPos[1])<line_sep>self.cameraPos[0]<augsub>step<line_sep>self.target[0]<augsub>step<line_sep><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('d')<block_start>step=movementStep<times>(self.target[0]-self.cameraPos[0])<line_sep>self.cameraPos[1]<augsub>step<line_sep>self.target[1]<augsub>step<line_sep>step=movementStep<times>(self.target[1]-self.cameraPos[1])<line_sep>self.cameraPos[0]<augadd>step<line_sep>self.target[0]<augadd>step<line_sep><pass><block_end>rotationStep=0.02<if_stmt>base.mouseWatcherNode.is_button_down('arrow_left')<block_start>angle=np.angle(complex(self.target[0]-self.cameraPos[0] self.target[1]-self.cameraPos[1]))<line_sep>angle<augadd>rotationStep<line_sep>self.target[0]=self.cameraPos[0]+np.cos(angle)<line_sep>self.target[1]=self.cameraPos[1]+np.sin(angle)<line_sep><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('arrow_right')<block_start>angle=np.angle(complex(self.target[0]-self.cameraPos[0] self.target[1]-self.cameraPos[1]))<line_sep>angle<augsub>rotationStep<line_sep>self.target[0]=self.cameraPos[0]+np.cos(angle)<line_sep>self.target[1]=self.cameraPos[1]+np.sin(angle)<line_sep><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('arrow_up')<block_start>angle=np.arcsin(self.target[2]-self.cameraPos[2])<line_sep>angle<augadd>rotationStep<line_sep>self.target[2]=self.cameraPos[2]+np.sin(angle)<line_sep><pass><block_end><if_stmt>base.mouseWatcherNode.is_button_down('arrow_down')<block_start>angle=np.arcsin(self.target[2]-self.cameraPos[2])<line_sep>angle<augsub>rotationStep<line_sep>self.target[2]=self.cameraPos[2]+np.sin(angle)<line_sep><pass><block_end>angleDegrees=self.angle<line_sep>angleRadians=angleDegrees<times>(pi/180.0)<line_sep>#self.camera.setPos(2.0 * sin(angleRadians), -2.0 * cos(angleRadians), 3) self.camera.setPos(self.cameraPos[0] self.cameraPos[1] self.cameraPos[2])<line_sep>#self.camera.setHpr(angleDegrees, 0, 0) #self.camera.lookAt(0, 0, 0) self.camera.lookAt(self.target[0] self.target[1] self.target[2])<line_sep>self.camera.setR(self.H)<line_sep>#if base.mouseWatcherNode.hasMouse() <return>Task.cont<block_end><block_end>app=Viewer()<line_sep>app.run()<line_sep>
<import_stmt>numpy<as>np<class_stmt>KF1D# this EKF assumes constant covariance matrix, so calculations are much simpler # the Kalman gain also needs to be precomputed using the control module <block_start><def_stmt>__init__ self x0 A C K<block_start>self.x=x0<line_sep>self.A=A<line_sep>self.C=C<line_sep>self.K=K<line_sep>self.A_K=self.A-np.dot(self.K self.C)<line_sep># K matrix needs to be pre-computed as follow: # import control # (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R) # self.K = np.transpose(K) <block_end><def_stmt>update self meas<block_start>self.x=np.dot(self.A_K self.x)+np.dot(self.K meas)<line_sep><return>self.x<block_end><block_end>
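Following the recipe in the class's own comment, here is a hedged usage sketch: it assumes the python-control package is installed, uses a toy constant-velocity model, and the Q/R noise values are illustrative.

# Hedged usage sketch for KF1D, following the comment inside the class.
import control
import numpy as np

dt = 0.05
A = np.array([[1.0, dt], [0.0, 1.0]])     # constant-velocity state transition
C = np.array([[1.0, 0.0]])                # only position is measured
Q = np.diag([0.1, 1.0])                   # process noise (illustrative)
R = np.array([[1.0]])                     # measurement noise (illustrative)

# Pre-compute the steady-state Kalman gain exactly as the class comment describes.
_, _, K = control.dare(np.transpose(A), np.transpose(C), Q, R)
K = np.transpose(K)

kf = KF1D(x0=np.array([[0.0], [0.0]]), A=A, C=C, K=K)
for z in [0.1, 0.22, 0.29, 0.41]:         # fake position measurements
    x = kf.update(np.array([[z]]))
print("state estimate:", x.ravel())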
# -*- coding: utf-8 -*- # SPDX-License-Identifier: GPL-2.0+ # # Copyright 2017 Google, Inc # <import_stmt>contextlib<import_stmt>os<import_stmt>re<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_stmt>unittest<import_stmt>gitutil<import_stmt>patchstream<import_stmt>settings<line_sep>@contextlib.contextmanager<def_stmt>capture <block_start><import_stmt>sys<import_from_stmt>cStringIO StringIO<line_sep>oldout,olderr=sys.stdout sys.stderr<try_stmt><block_start>out=[StringIO() StringIO()]<line_sep>sys.stdout,sys.stderr=out<line_sep><yield>out<block_end><finally_stmt><block_start>sys.stdout,sys.stderr=oldout olderr<line_sep>out[0]=out[0].getvalue()<line_sep>out[1]=out[1].getvalue()<block_end><block_end><class_stmt>TestFunctional(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.tmpdir=tempfile.mkdtemp(prefix='patman.')<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.tmpdir)<block_end>@staticmethod<def_stmt>GetPath fname<block_start><return>os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])) 'test' fname)<block_end>@classmethod<def_stmt>GetText self fname<block_start><return>open(self.GetPath(fname)).read()<block_end>@classmethod<def_stmt>GetPatchName self subject<block_start>fname=re.sub('[ :]' '-' subject)<line_sep><return>fname.replace('--' '-')<block_end><def_stmt>CreatePatchesForTest self series<block_start>cover_fname=<none><line_sep>fname_list=[]<for_stmt>i,commit enumerate(series.commits)<block_start>clean_subject=self.GetPatchName(commit.subject)<line_sep>src_fname='%04d-%s.patch'%(i+1 clean_subject[:52])<line_sep>fname=os.path.join(self.tmpdir src_fname)<line_sep>shutil.copy(self.GetPath(src_fname) fname)<line_sep>fname_list.append(fname)<block_end><if_stmt>series.get('cover')<block_start>src_fname='0000-cover-letter.patch'<line_sep>cover_fname=os.path.join(self.tmpdir src_fname)<line_sep>fname=os.path.join(self.tmpdir src_fname)<line_sep>shutil.copy(self.GetPath(src_fname) fname)<block_end><return>cover_fname fname_list<block_end><def_stmt>testBasic self<block_start>"""Tests the basic flow of patman This creates a series from some hard-coded patches build from a simple tree with the following metadata in the top commit: Series-to: u-boot Series-prefix: RFC Series-cc: <NAME> <<EMAIL>> Cover-letter-cc: Lord Mëlchett <<EMAIL>> Series-version: 2 Series-changes: 4 - Some changes Cover-letter: test: A test patch series This is a test of how the cover leter works END and this in the first commit: Series-notes: some notes about some things from the first commit END Commit-notes: Some notes about the first commit END with the following commands: git log -n2 --reverse >/path/to/tools/patman/test/test01.txt git format-patch --subject-prefix RFC --cover-letter HEAD~2 mv 00* /path/to/tools/patman/test It checks these aspects: - git log can be processed by patchstream - emailing patches uses the correct command - CC file has information on each commit - cover letter has the expected text and subject - each patch has the correct subject - dry-run information prints out correctly - unicode is handled correctly - Series-to, Series-cc, Series-prefix, Cover-letter - Cover-letter-cc, Series-version, Series-changes, Series-notes - Commit-notes """<line_sep>process_tags=<true><line_sep>ignore_bad_tags=<true><line_sep>stefan=u'<NAME> <<EMAIL>>'<line_sep>rick='<NAME> <<EMAIL>>'<line_sep>mel=u'<NAME> <<EMAIL>>'<line_sep>ed=u'Lond Edmund Blackaddër <<EMAIL>'<line_sep>fred='<NAME> <<EMAIL>>'<line_sep>add_maintainers=[stefan 
rick]<line_sep>dry_run=<true><line_sep>in_reply_to=mel<line_sep>count=2<line_sep>settings.alias={'fdt':['simon'] 'u-boot':['<EMAIL>'] 'simon':[ed] 'fred':[fred] }<line_sep>text=self.GetText('test01.txt')<line_sep>series=patchstream.GetMetaDataForTest(text)<line_sep>cover_fname,args=self.CreatePatchesForTest(series)<with_stmt>capture()<as>out<block_start>patchstream.FixPatches(series args)<if_stmt>cover_fname<and>series.get('cover')<block_start>patchstream.InsertCoverLetter(cover_fname series count)<block_end>series.DoChecks()<line_sep>cc_file=series.MakeCcFile(process_tags cover_fname <not>ignore_bad_tags add_maintainers <none>)<line_sep>cmd=gitutil.EmailPatches(series cover_fname args dry_run <not>ignore_bad_tags cc_file in_reply_to=in_reply_to thread=<none>)<line_sep>series.ShowActions(args cmd process_tags)<block_end>cc_lines=open(cc_file).read().splitlines()<line_sep>os.remove(cc_file)<line_sep>lines=out[0].splitlines()<line_sep>#print '\n'.join(lines) self.assertEqual('Cleaned %s patches'%len(series.commits) lines[0])<line_sep>self.assertEqual('Change log missing for v2' lines[1])<line_sep>self.assertEqual('Change log missing for v3' lines[2])<line_sep>self.assertEqual('Change log for unknown version v4' lines[3])<line_sep>self.assertEqual("Alias 'pci' not found" lines[4])<line_sep>self.assertIn('Dry run' lines[5])<line_sep>self.assertIn('Send a total of %d patches'%count lines[7])<line_sep>line=8<for_stmt>i,commit enumerate(series.commits)<block_start>self.assertEqual(' %s'%args[i] lines[line+0])<line_sep>line<augadd>1<while_stmt>'Cc:'<in>lines[line]<block_start>line<augadd>1<block_end><block_end>self.assertEqual('To: <EMAIL>' lines[line])<line_sep>self.assertEqual('Cc: %s'%stefan.encode('utf-8') lines[line+1])<line_sep>self.assertEqual('Version: 3' lines[line+2])<line_sep>self.assertEqual('Prefix:\t RFC' lines[line+3])<line_sep>self.assertEqual('Cover: 4 lines' lines[line+4])<line_sep>line<augadd>5<line_sep>self.assertEqual(' Cc: %s'%mel.encode('utf-8') lines[line+0])<line_sep>self.assertEqual(' Cc: %s'%rick lines[line+1])<line_sep>self.assertEqual(' Cc: %s'%fred lines[line+2])<line_sep>self.assertEqual(' Cc: %s'%ed.encode('utf-8') lines[line+3])<line_sep>expected=('Git command: git send-email --annotate '<concat>'--in-reply-to="%s" --to "<EMAIL>" '<concat>'--cc "%s" --cc-cmd "%s --cc-cmd %s" %s %s'%(in_reply_to stefan sys.argv[0] cc_file cover_fname ' '.join(args))).encode('utf-8')<line_sep>line<augadd>4<line_sep>self.assertEqual(expected lines[line])<line_sep>self.assertEqual(('%s %s, %s'%(args[0] rick stefan)).encode('utf-8') cc_lines[0])<line_sep>self.assertEqual(('%s %s, %s, %s, %s'%(args[1] fred rick stefan ed)).encode('utf-8') cc_lines[1])<line_sep>expected=''' This is a test of how the cover leter works some notes about some things from the first commit Changes in v4: - Some changes <NAME> (2): pci: Correct cast for sandbox fdt: Correct cast for sandbox in fdtdec_setup_memory_size() cmd/pci.c | 3 ++- fs/fat/fat.c | 1 + lib/efi_loader/efi_memory.c | 1 + lib/fdtdec.c | 3 ++- 4 files changed, 6 insertions(+), 2 deletions(-) --\x20 2.7.4 '''<line_sep>lines=open(cover_fname).read().splitlines()<line_sep>#print '\n'.join(lines) self.assertEqual('Subject: [RFC PATCH v3 0/2] test: A test patch series' lines[3])<line_sep>self.assertEqual(expected.splitlines() lines[7:])<for_stmt>i,fname enumerate(args)<block_start>lines=open(fname).read().splitlines()<line_sep>#print '\n'.join(lines) subject=[line<for>line lines<if>line.startswith('Subject')]<line_sep>self.assertEqual('Subject: [RFC 
%d/%d]'%(i+1 count) subject[0][:18])<if_stmt>i<eq>0# Check that we got our commit notes <block_start>self.assertEqual('---' lines[17])<line_sep>self.assertEqual('Some notes about' lines[18])<line_sep>self.assertEqual('the first commit' lines[19])<block_end><block_end><block_end><block_end>
<import_stmt>re<line_sep>COMMENT_REGEX=re.compile(r'(checkov:skip=|bridgecrew:skip=) *([A-Z_\d]+)(:[^\n]+)?')<line_sep>
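A quick illustration of what the pattern above is meant to match (the check ID and reason below are invented for the example); group 2 carries the check ID and group 3 the optional colon-prefixed skip reason:

m = COMMENT_REGEX.search("# checkov:skip=CKV_AWS_20: public read access is intentional")
assert m.group(2) == "CKV_AWS_20"
assert m.group(3) == ": public read access is intentional"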
<import_stmt>logging<import_stmt>os<import_stmt>platform<import_from_stmt>datetime datetime<import_from_stmt>moabb.analysis plotting<as>plt<import_from_stmt>moabb.analysis.meta_analysis # noqa: E501 compute_dataset_statistics find_significant_differences <import_from_stmt>moabb.analysis.results Results# noqa: F401 log=logging.getLogger(__name__)<def_stmt>analyze results out_path name="analysis" plot=<false><block_start>"""Analyze results. Given a results dataframe, generates a folder with results and a dataframe of the exact data used to generate those results, aswell as introspection to return information on the computer parameters ---------- out_path: location to store analysis folder results: Dataframe generated from Results object path: string/None plot: whether to plot results Either path or results is necessary """<line_sep># input checks # <if_stmt><not>isinstance(out_path str)<block_start><raise>ValueError("Given out_path argument is not string")<block_end><elif_stmt><not>os.path.isdir(out_path)<block_start><raise>IOError("Given directory does not exist")<block_end><else_stmt><block_start>analysis_path=os.path.join(out_path name)<block_end>unique_ids=[plt._simplify_names(x)<for>x results.pipeline.unique()]<line_sep>simplify=<true><line_sep>print(unique_ids)<line_sep>print(set(unique_ids))<if_stmt>len(unique_ids)<ne>len(set(unique_ids))<block_start>log.warning("Pipeline names are too similar, turning off name shortening")<line_sep>simplify=<false><block_end>os.makedirs(analysis_path exist_ok=<true>)<line_sep># TODO: no good cross-platform way of recording CPU info? <with_stmt>open(os.path.join(analysis_path "info.txt") "a")<as>f<block_start>dt=datetime.now()<line_sep>f.write("Date: {:%Y-%m-%d}\n Time: {:%H:%M}\n".format(dt dt))<line_sep>f.write("System: {}\n".format(platform.system()))<line_sep>f.write("CPU: {}\n".format(platform.processor()))<block_end>results.to_csv(os.path.join(analysis_path "data.csv"))<line_sep>stats=compute_dataset_statistics(results)<line_sep>stats.to_csv(os.path.join(analysis_path "stats.csv"))<line_sep>P,T=find_significant_differences(stats)<if_stmt>plot<block_start>fig,color_dict=plt.score_plot(results)<line_sep>fig.savefig(os.path.join(analysis_path "scores.pdf"))<line_sep>fig=plt.summary_plot(P T simplify=simplify)<line_sep>fig.savefig(os.path.join(analysis_path "ordering.pdf"))<block_end><block_end>
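A hedged usage sketch for analyze(); how the `results` DataFrame is produced is assumed here and is not part of the module above:

# `results` is expected to be the DataFrame returned by a moabb evaluation run, e.g.:
# results = evaluation.process(pipelines)
analyze(results, out_path="./benchmarks", name="left_vs_right", plot=True)
# Expected side effects: ./benchmarks/left_vs_right/ containing info.txt, data.csv,
# stats.csv and, with plot=True, scores.pdf and ordering.pdf.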
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>sys<import_from_stmt>argparse ArgumentParser<import_from_stmt>typing NamedTuple Any<import_stmt>torch<import_from_stmt>os listdir makedirs<import_from_stmt>os.path isfile join exists<import_from_stmt>shutil copyfile<import_from_stmt>nncf.torch.quantization.layers SymmetricQuantizer AsymmetricQuantizer<class_stmt>ParameterToAdd(NamedTuple)<block_start>name:str<line_sep>value:Any<block_end><def_stmt>main argv<block_start>parser=ArgumentParser()<line_sep>parser.add_argument('-i' '--input-folder' help='Path to directory with given checkpoints to modify' required=<true>)<line_sep>parser.add_argument('-o' '--output-folder' help='Path to directory to save modified checkpoints' required=<true>)<line_sep>parser.add_argument('-b' '--bitwidth' help='Bitwidth to initialize quantizer' required=<false> default=8 type=int)<line_sep>parser.add_argument('-v' '--verbose' help='Print all new names of parameters' required=<false> action='store_true')<line_sep>args=parser.parse_args(args=argv)<line_sep>src_dir=args.input_folder<line_sep>dst_dir=args.output_folder<if_stmt><not>exists(dst_dir)<block_start>makedirs(dst_dir)<block_end>param_list=[ParameterToAdd('_num_bits' torch.IntTensor([args.bitwidth])) ParameterToAdd('enabled' torch.IntTensor([1]))]<line_sep>pth_files=[(join(src_dir f) join(dst_dir f))<for>f listdir(src_dir)<if>isfile(join(src_dir f))<and>('.pth'<in>f<or>'.sd'<in>f)]<line_sep>files_to_copy=[]<for_stmt>pair pth_files<block_start>src_file,dst_file=pair<if_stmt>'binarization'<in>src_file<block_start>files_to_copy.append(pair)<line_sep><continue><block_end>sd=pth=torch.load(src_file)<if_stmt>'state_dict'<in>pth<block_start>sd=pth['state_dict']<block_end>hooks=[SymmetricQuantizer.SCALE_PARAM_NAME AsymmetricQuantizer.INPUT_LOW_PARAM_NAME]<line_sep>new_keys={}<for_stmt>new_parameter param_list<block_start>old_keys=list(sd.keys())<for_stmt>k sd.keys()<block_start><for_stmt>h hooks<block_start>new_key=k.replace(h new_parameter.name)<if_stmt>('.'+h<in>k)<and>('.'+new_parameter.name<not><in>k)<and>(new_key<not><in>old_keys)<block_start>new_keys[new_key]=new_parameter.value<block_end><block_end><block_end><block_end><if_stmt>new_keys<block_start>print(f'\nAdding #{len(new_keys)} of new keys')<if_stmt>args.verbose<block_start>print('New keys:' new_keys sep='\n')<block_end><for_stmt>new_key,value new_keys.items()<block_start>sd[new_key]=value<block_end>pth['state_dict']=sd<line_sep>torch.save(pth dst_file)<block_end><else_stmt><block_start>files_to_copy.append(pair)<block_end><block_end><for_stmt>src_file,dst_file files_to_copy<block_start>print("\nCopying {}".format(dst_file))<line_sep>copyfile(src_file dst_file)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main(sys.argv[1:])<block_end>
# fmt: off <import_stmt>logging<import_from_stmt>pathlib Path<import_from_stmt>farm.data_handler.data_silo DataSilo<import_from_stmt>farm.data_handler.processor RegressionProcessor TextPairClassificationProcessor<import_from_stmt>farm.experiment initialize_optimizer<import_from_stmt>farm.infer Inferencer<import_from_stmt>farm.modeling.adaptive_model AdaptiveModel<import_from_stmt>farm.modeling.language_model LanguageModel<import_from_stmt>farm.modeling.prediction_head RegressionHead TextClassificationHead<import_from_stmt>farm.modeling.tokenization Tokenizer<import_from_stmt>farm.train Trainer<import_from_stmt>farm.utils set_all_seeds MLFlowLogger initialize_device_settings reformat_msmarco_train reformat_msmarco_dev write_msmarco_results<import_from_stmt>farm.evaluation.msmarco_passage_farm msmarco_evaluation<def_stmt>text_pair_classification <block_start>logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" datefmt="%m/%d/%Y %H:%M:%S" level=logging.INFO)<line_sep>ml_logger=MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")<line_sep>ml_logger.init_experiment(experiment_name="Public_FARM" run_name="Run_text_pair_classification")<line_sep>########################## ########## Settings ########################## set_all_seeds(seed=42)<line_sep>device,n_gpu=initialize_device_settings(use_cuda=<true>)<line_sep>n_epochs=2<line_sep>batch_size=64<line_sep>evaluate_every=500<line_sep>lang_model="bert-base-cased"<line_sep>label_list=["0" "1"]<line_sep>train_filename="train.tsv"<line_sep>dev_filename="dev_200k.tsv"<line_sep># The source data can be found here https://github.com/microsoft/MSMARCO-Passage-Ranking generate_data=<false><line_sep>data_dir=Path("../data/msmarco_passage")<line_sep>predictions_raw_filename="predictions_raw.txt"<line_sep>predictions_filename="predictions.txt"<line_sep>train_source_filename="triples.train.1m.tsv"<line_sep>qrels_filename="qrels.dev.tsv"<line_sep>queries_filename="queries.dev.tsv"<line_sep>passages_filename="collection.tsv"<line_sep>top1000_filename="top1000.dev"<line_sep># 0. Preprocess and save MSMarco data in a format that can be ingested by FARM models. Only needs to be done once! # The final format is a tsv file with 3 columns (text, text_b and label) <if_stmt>generate_data<block_start>reformat_msmarco_train(data_dir/train_source_filename data_dir/train_filename)<line_sep>reformat_msmarco_dev(data_dir/queries_filename data_dir/passages_filename data_dir/qrels_filename data_dir/top1000_filename data_dir/dev_filename)<block_end># 1.Create a tokenizer tokenizer=Tokenizer.load(pretrained_model_name_or_path=lang_model do_lower_case=<false>)<line_sep># 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset # Evaluation during training will be performed on a slice of the train set # We will be using the msmarco dev set as our final evaluation set processor=TextPairClassificationProcessor(tokenizer=tokenizer label_list=label_list metric="f1_macro" train_filename=train_filename test_filename=<none> dev_split=0.001 max_seq_len=128 data_dir=data_dir delimiter="\t")<line_sep># 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets data_silo=DataSilo(processor=processor batch_size=batch_size)<line_sep># 4. 
Create an AdaptiveModel # a) which consists of a pretrained language model as a basis language_model=LanguageModel.load(lang_model)<line_sep># b) and a prediction head on top that is suited for our task prediction_head=TextClassificationHead(num_labels=len(label_list) class_weights=data_silo.calculate_class_weights(task_name="text_classification") )<line_sep>model=AdaptiveModel(language_model=language_model prediction_heads=[prediction_head] embeds_dropout_prob=0.1 lm_output_types=["per_sequence_continuous"] device=device)<line_sep># 5. Create an optimizer model,optimizer,lr_schedule=initialize_optimizer(model=model learning_rate=1e-5 device=device n_batches=len(data_silo.loaders["train"]) n_epochs=n_epochs)<line_sep># 6. Feed everything to the Trainer, which keeps care of growing our model into powerful plant and evaluates it from time to time trainer=Trainer(model=model optimizer=optimizer data_silo=data_silo epochs=n_epochs n_gpu=n_gpu lr_schedule=lr_schedule evaluate_every=evaluate_every device=device)<line_sep># 7. Let it grow trainer.train()<line_sep># 8. Hooray! You have a model. Store it: save_dir=Path("saved_models/passage_ranking_model")<line_sep>model.save(save_dir)<line_sep>processor.save(save_dir)<line_sep># 9. Load it & harvest your fruits (Inference) # Add your own text adapted to the dataset you provide model=Inferencer.load(save_dir gpu=<true> max_seq_len=128 batch_size=128)<line_sep>result=model.inference_from_file(data_dir/dev_filename)<line_sep>write_msmarco_results(result save_dir/predictions_raw_filename)<line_sep>msmarco_evaluation(preds_file=save_dir/predictions_raw_filename dev_file=data_dir/dev_filename qrels_file=data_dir/qrels_filename output_file=save_dir/predictions_filename)<line_sep>model.close_multiprocessing_pool()<block_end><if_stmt>__name__<eq>"__main__"<block_start>text_pair_classification()<block_end># fmt: on
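To make the expected input format concrete (values invented for illustration), each line of the reformatted train.tsv described in the data-preparation comment above carries three tab-separated columns named text, text_b and label:

# One query/passage pair per line, tab-separated: text (query), text_b (passage), label.
example_line = "\t".join([
    "what is the capital of france",
    "Paris is the capital and most populous city of France.",
    "1",     # assumed label convention: 1 = relevant passage, 0 = not relevant
])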
<import_stmt>os<import_stmt>pytest<import_stmt>torch<import_stmt>torch.distributed<as>dist<import_from_stmt>ignite.distributed.comp_models has_native_dist_support<if_stmt><not>has_native_dist_support<block_start>pytest.skip("Skip if no native dist support" allow_module_level=<true>)<block_end><else_stmt><block_start><import_from_stmt>ignite.distributed.comp_models.native _expand_hostlist _NativeDistModel _setup_ddp_vars_from_slurm_env<block_end># tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py @pytest.mark.parametrize("hostlist, expected" [("localhost" "localhost") ("compute!:b24_[1-2].r" "compute!:b24_1.r,compute!:b24_2.r") ("quartz[4-8]" "quartz4,quartz5,quartz6,quartz7,quartz8") ("c1001a-[11,17]" "c1001a-11,c1001a-17") ("c1001a-s[11,17]" "c1001a-s11,c1001a-s17") ("c1009a-s17,c1010a-s11" "c1009a-s17,c1010a-s11") ("gpu-compute-on-demand-dy-g4dnxlarge-[1-4]" "gpu-compute-on-demand-dy-g4dnxlarge-1,"<concat>"gpu-compute-on-demand-dy-g4dnxlarge-2,"<concat>"gpu-compute-on-demand-dy-g4dnxlarge-3,"<concat>"gpu-compute-on-demand-dy-g4dnxlarge-4" ) ("node[18-19,1-16,21-22]" "node1,node2,node3,node4,node5,"<concat>"node6,node7,node8,node9,node10,"<concat>"node11,node12,node13,node14,node15,"<concat>"node16,node18,node19,node21,node22" ) ("node[4-8,12,16-20,22,24-26]" "node4,node5,node6,node7,node8,"<concat>"node12,node16,node17,node18,"<concat>"node19,node20,node22,node24,"<concat>"node25,node26" ) ("machine2-[02-4]vm1" "machine2-02vm1,machine2-03vm1,machine2-04vm1") ("machine2-[02-3]vm1, machine4-[0003-5].vml2" "machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2" ) ("machine2-[009-11]vm1" "machine2-009vm1,machine2-010vm1,machine2-011vm1") ("node[1,2,3]" "node1,node2,node3") ("compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]" "compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"<concat>"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"<concat>"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"<concat>"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13" ) ] )<def_stmt>test_expand_hostlist hostlist expected<block_start><assert_stmt>_expand_hostlist(hostlist)<eq>expected.split(",")<block_end><def_stmt>test_expand_hostlist_invalid <block_start><with_stmt>pytest.raises(ValueError match=r"hostlist invalid")<block_start>_expand_hostlist("invalid[]")<block_end><block_end>@pytest.mark.distributed<def_stmt>test__native_dist_model <block_start>available_backends=_NativeDistModel.available_backends<if_stmt>dist.is_nccl_available()<block_start><assert_stmt>"nccl"<in>available_backends<block_end><else_stmt><block_start><assert_stmt>"nccl"<not><in>available_backends<block_end><if_stmt>dist.is_gloo_available()<block_start><assert_stmt>"gloo"<in>available_backends<block_end><else_stmt><block_start><assert_stmt>"gloo"<not><in>available_backends<block_end><if_stmt>dist.is_mpi_available()<block_start><assert_stmt>"mpi"<in>available_backends<block_end><else_stmt><block_start><assert_stmt>"mpi"<not><in>available_backends<block_end><with_stmt>pytest.raises(ValueError match=r"Backend should be one of")<block_start>_NativeDistModel.create_from_backend("abc")<block_end><block_end>@pytest.mark.distributed@pytest.mark.skipif(<not>dist.is_nccl_available() reason="Skip if nccl not available")@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")<def_stmt>test__native_nccl_but_no_gpu 
mock_gpu_is_not_available<block_start><with_stmt>pytest.raises(RuntimeError match=r"Nccl backend is required but no cuda capable devices")<block_start>_NativeDistModel(backend="nccl")<block_end><block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")<def_stmt>test__native_dist_model_create_from_backend_bad_config <block_start><import_stmt>os<import_from_stmt>datetime timedelta<line_sep>os.environ["RANK"]="1"<with_stmt>pytest.raises(RuntimeError match=r"PyTorch distributed configuration should define env variables")<block_start>_NativeDistModel.create_from_backend(backend="gloo" timeout=timedelta(seconds=10))<block_end><del_stmt>os.environ["RANK"]<block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")<def_stmt>test__native_dist_model_create_from_backend_bad_slurm_config <block_start><import_stmt>os<import_from_stmt>datetime timedelta<line_sep>os.environ["SLURM_JOB_ID"]="1"<with_stmt>pytest.raises(RuntimeError match=r"SLURM distributed configuration is missing")<block_start>_NativeDistModel.create_from_backend(backend="gloo" timeout=timedelta(seconds=10))<block_end><with_stmt>pytest.raises(ValueError match=r"Arguments rank and world_size should not be specified with SLURM")<block_start>_NativeDistModel.create_from_backend(backend="gloo" timeout=timedelta(seconds=10) rank=1 init_method="" world_size=1)<block_end>os.environ["SLURM_PROCID"]="0"<line_sep>os.environ["SLURM_LOCALID"]="0"<line_sep>os.environ["SLURM_NTASKS"]="1"<line_sep>os.environ["SLURM_JOB_NODELIST"]="localhost"<line_sep>os.environ["SLURM_JOB_NUM_NODES"]="1"<line_sep>os.environ["RANK"]="1"<with_stmt>pytest.warns(UserWarning match=r"We detected the following env variables")<block_start>model=_NativeDistModel.create_from_backend(backend="gloo" timeout=timedelta(seconds=10))<line_sep>model.finalize()<block_end><del_stmt>os.environ["SLURM_JOB_ID"]<del_stmt>os.environ["SLURM_PROCID"]<del_stmt>os.environ["SLURM_LOCALID"]<del_stmt>os.environ["SLURM_NTASKS"]<del_stmt>os.environ["SLURM_JOB_NODELIST"]<del_stmt>os.environ["SLURM_JOB_NUM_NODES"]<del_stmt>os.environ["RANK"]<block_end><def_stmt>_assert_model model true_conf<block_start><assert_stmt>model.device()<eq>torch.device(true_conf["device"])<assert_stmt>model.get_local_rank()<eq>true_conf["local_rank"]<assert_stmt>model.get_rank()<eq>true_conf["rank"]<assert_stmt>model.get_world_size()<eq>true_conf["world_size"]<assert_stmt>model.get_node_rank()<eq>true_conf["node_index"]<assert_stmt>model.get_nnodes()<eq>true_conf["nnodes"]<assert_stmt>model.get_nproc_per_node()<eq>true_conf["nproc_per_node"]<block_end><def_stmt>_test__native_dist_model_create_from_backend_no_dist backend true_device<block_start><import_from_stmt>datetime timedelta<line_sep>model=_NativeDistModel.create_from_backend(backend=backend timeout=timedelta(seconds=20))<assert_stmt>dist.is_available()<and>dist.is_initialized()<assert_stmt>dist.get_backend()<eq>backend<line_sep>_assert_model(model {"device":true_device "local_rank":0 "rank":0 "world_size":1 "node_index":0 "nnodes":1 "nproc_per_node":1 } )<line_sep>model.finalize()<block_end><def_stmt>_test__native_dist_model_create_from_backend_dist init_method local_rank rank world_size backend true_device<block_start><import_stmt>os<import_from_stmt>datetime 
timedelta<line_sep>timeout=timedelta(seconds=20)<line_sep>os.environ["RANK"]=f"{rank}"<assert_stmt>"MASTER_ADDR"<not><in>os.environ<assert_stmt>"MASTER_PORT"<not><in>os.environ<line_sep>model=_NativeDistModel.create_from_backend(backend=backend timeout=timeout init_method=init_method)<assert_stmt>dist.is_available()<and>dist.is_initialized()<assert_stmt>dist.get_backend()<eq>backend<with_stmt>pytest.raises(RuntimeError match=r"Can not create new distributed process group if default one is")<block_start>_NativeDistModel.create_from_backend(backend=backend timeout=timeout)<block_end>_assert_model(model {"device":true_device "local_rank":local_rank "rank":rank "world_size":world_size "node_index":0 "nnodes":1 "nproc_per_node":world_size } )<if_stmt>init_method<is><none><block_start><assert_stmt>model._init_method<eq>"env://"<block_end><else_stmt><block_start><assert_stmt>model._init_method<eq>init_method<block_end>model.finalize()<del_stmt>os.environ["RANK"]<assert_stmt>"MASTER_ADDR"<not><in>os.environ<assert_stmt>"MASTER_PORT"<not><in>os.environ<assert_stmt>"RANK"<not><in>os.environ<block_end><def_stmt>_test__native_dist_model_create_from_backend_slurm local_rank rank world_size backend true_device<block_start><import_stmt>os<import_from_stmt>datetime timedelta<line_sep>timeout=timedelta(seconds=20)<assert_stmt>"MASTER_ADDR"<not><in>os.environ<assert_stmt>"MASTER_PORT"<not><in>os.environ<del_stmt>os.environ["WORLD_SIZE"]<del_stmt>os.environ["LOCAL_RANK"]<line_sep>os.environ["SLURM_JOB_ID"]="15000"<line_sep>os.environ["SLURM_PROCID"]=str(rank)<line_sep>os.environ["SLURM_LOCALID"]=str(local_rank)<line_sep>os.environ["SLURM_NTASKS"]=str(world_size)<line_sep>os.environ["SLURM_JOB_NODELIST"]="localhost"<line_sep>os.environ["SLURM_JOB_NUM_NODES"]="1"<line_sep>model=_NativeDistModel.create_from_backend(backend=backend timeout=timeout)<assert_stmt>dist.is_available()<and>dist.is_initialized()<assert_stmt>dist.get_backend()<eq>backend<with_stmt>pytest.raises(RuntimeError match=r"Can not create new distributed process group if default one is")<block_start>_NativeDistModel.create_from_backend(backend=backend timeout=timeout)<block_end>_assert_model(model {"device":true_device "local_rank":local_rank "rank":rank "world_size":world_size "node_index":0 "nnodes":1 "nproc_per_node":world_size } )<line_sep>model.finalize()<del_stmt>os.environ["SLURM_JOB_ID"]<del_stmt>os.environ["SLURM_PROCID"]<del_stmt>os.environ["SLURM_LOCALID"]<del_stmt>os.environ["SLURM_NTASKS"]<del_stmt>os.environ["SLURM_JOB_NODELIST"]<del_stmt>os.environ["SLURM_JOB_NUM_NODES"]<assert_stmt>"MASTER_ADDR"<not><in>os.environ<assert_stmt>"MASTER_PORT"<not><in>os.environ<assert_stmt>"RANK"<not><in>os.environ<line_sep>os.environ["WORLD_SIZE"]=str(world_size)<line_sep>os.environ["LOCAL_RANK"]=str(local_rank)<block_end><def_stmt>_test__native_dist_model_create_from_context_no_local_rank <block_start><if_stmt>"LOCAL_RANK"<in>os.environ<block_start><del_stmt>os.environ["LOCAL_RANK"]<block_end><import_from_stmt>ignite.distributed.comp_models.base ComputationModel<if_stmt>ComputationModel._ext_local_rank<is><not><none><block_start>ComputationModel._ext_local_rank=<none><block_end><with_stmt>pytest.warns(UserWarning match=r"Local rank information for native distributed setting will be initialized")<block_start>_NativeDistModel.create_from_context()<block_end><block_end><def_stmt>_test__native_dist_model_create_from_context_env_local_rank 
true_conf<block_start><import_stmt>os<line_sep>remove_lrank=<false><if_stmt>"LOCAL_RANK"<not><in>os.environ<block_start>os.environ["LOCAL_RANK"]=str(true_conf["local_rank"])<line_sep>remove_lrank=<true><block_end>model=_NativeDistModel.create_from_context()<line_sep>_assert_model(model true_conf)<if_stmt>remove_lrank<block_start><del_stmt>os.environ["LOCAL_RANK"]<block_end><block_end><def_stmt>_test__native_dist_model_create_from_context_set_local_rank true_conf<block_start><import_from_stmt>ignite.distributed.comp_models.base ComputationModel<line_sep>lrank=<none><if_stmt>"LOCAL_RANK"<in>os.environ<block_start>lrank=os.environ["LOCAL_RANK"]<del_stmt>os.environ["LOCAL_RANK"]<block_end>ComputationModel._ext_local_rank=true_conf["local_rank"]<line_sep>model=_NativeDistModel.create_from_context()<line_sep>_assert_model(model true_conf)<line_sep>ComputationModel._ext_local_rank=<none><if_stmt>lrank<is><not><none><block_start>os.environ["LOCAL_RANK"]=lrank<block_end><block_end><def_stmt>_test__native_dist_model_create_from_context_no_dist true_backend true_device<block_start><assert_stmt>_NativeDistModel.create_from_context()<is><none><line_sep>dist.init_process_group(true_backend "tcp://0.0.0.0:2222" world_size=1 rank=0)<line_sep>dist.barrier()<line_sep>_test__native_dist_model_create_from_context_no_local_rank()<line_sep>true_conf={"device":true_device "local_rank":0 "rank":0 "world_size":1 "node_index":0 "nnodes":1 "nproc_per_node":1 }<line_sep>_test__native_dist_model_create_from_context_env_local_rank(true_conf)<line_sep>_test__native_dist_model_create_from_context_set_local_rank(true_conf)<line_sep>dist.destroy_process_group()<block_end><def_stmt>_test__native_dist_model_create_from_context_dist local_rank rank world_size true_backend true_device<block_start><assert_stmt>_NativeDistModel.create_from_context()<is><none><line_sep>dist.init_process_group(true_backend "tcp://0.0.0.0:2222" world_size=world_size rank=rank)<line_sep>dist.barrier()<if_stmt>torch.cuda.is_available()<block_start>torch.cuda.set_device(local_rank)<block_end>true_conf={"device":true_device "local_rank":local_rank "rank":rank "world_size":world_size "node_index":0 "nnodes":1 "nproc_per_node":world_size }<line_sep>_test__native_dist_model_create_from_context_env_local_rank(true_conf)<line_sep>_test__native_dist_model_create_from_context_set_local_rank(true_conf)<line_sep>dist.destroy_process_group()<block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Should be no-dist config")<def_stmt>test__native_dist_model_create_no_dist_gloo clean_env<block_start>device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<line_sep>_test__native_dist_model_create_from_backend_no_dist("gloo" device)<line_sep>_test__native_dist_model_create_from_context_no_dist("gloo" device)<block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Should be no-dist config")@pytest.mark.skipif(torch.cuda.device_count()<l>1 reason="Skip if no GPU")<def_stmt>test__native_dist_model_create_no_dist_nccl clean_env<block_start>device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<line_sep>_test__native_dist_model_create_from_backend_no_dist("nccl" device)<line_sep>_test__native_dist_model_create_from_context_no_dist("nccl" device)<block_end>@pytest.mark.distributed@pytest.mark.parametrize("init_method" [<none> "tcp://0.0.0.0:22334" "FILE"])<def_stmt>test__native_dist_model_create_dist_gloo_1 init_method get_fixed_dirname local_rank 
world_size<block_start><if_stmt>init_method<eq>"FILE"<block_start>init_method=f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"<block_end>device=torch.device(f"cuda:{local_rank}"<if>torch.cuda.is_available()<else>"cpu")<line_sep>_test__native_dist_model_create_from_backend_dist(init_method local_rank local_rank world_size "gloo" device)<if_stmt>init_method<is><none><block_start>_test__native_dist_model_create_from_backend_slurm(local_rank local_rank world_size "gloo" device)<block_end><block_end>@pytest.mark.distributed<def_stmt>test__native_dist_model_create_dist_gloo_2 local_rank world_size<block_start>device=torch.device(f"cuda:{local_rank}"<if>torch.cuda.is_available()<else>"cpu")<line_sep>_test__native_dist_model_create_from_context_dist(local_rank local_rank world_size "gloo" device)<line_sep>_test__native_dist_model_create_from_backend_slurm(local_rank local_rank world_size "gloo" device)<block_end>@pytest.mark.distributed@pytest.mark.skipif(torch.cuda.device_count()<l>1 reason="Skip if no GPU")@pytest.mark.parametrize("init_method" [<none> "tcp://0.0.0.0:22334" "FILE"])<def_stmt>test__native_dist_model_create_dist_nccl_1 init_method get_fixed_dirname local_rank world_size<block_start><if_stmt>init_method<eq>"FILE"<block_start>init_method=f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"<block_end>_test__native_dist_model_create_from_backend_dist(init_method local_rank local_rank world_size "nccl" f"cuda:{local_rank}")<if_stmt>init_method<is><none><block_start>_test__native_dist_model_create_from_backend_slurm(local_rank local_rank world_size "nccl" f"cuda:{local_rank}")<block_end><block_end>@pytest.mark.distributed@pytest.mark.skipif(torch.cuda.device_count()<l>1 reason="Skip if no GPU")<def_stmt>test__native_dist_model_create_dist_nccl_2 local_rank world_size<block_start>_test__native_dist_model_create_from_context_dist(local_rank local_rank world_size "nccl" f"cuda:{local_rank}")<block_end>@pytest.mark.distributed@pytest.mark.skipif(torch.cuda.device_count()<l>2 reason="Skip if less than 2 GPUs")<def_stmt>test__native_dist_model_warning_index_less_localrank local_rank world_size<block_start><assert_stmt>_NativeDistModel.create_from_context()<is><none><line_sep>dist.init_process_group("nccl" "tcp://0.0.0.0:2222" world_size=world_size rank=local_rank)<line_sep>dist.barrier()<line_sep># We deliberately incorrectly set cuda device to 0 torch.cuda.set_device(0)<line_sep>model=_NativeDistModel.create_from_context()<assert_stmt>isinstance(model _NativeDistModel) f"{type(model)} vs _NativeDistModel"<if_stmt>local_rank<eq>1<block_start><with_stmt>pytest.warns(UserWarning match=r"Current device index is less than current local rank.")<block_start>model.device()<block_end><block_end>dist.destroy_process_group()<block_end><def_stmt>_test_dist_spawn_fn local_rank backend world_size device<block_start><import_from_stmt>ignite.distributed.utils _model<assert_stmt>dist.is_available()<and>dist.is_initialized()<assert_stmt>dist.get_backend()<eq>backend<assert_stmt>isinstance(_model _NativeDistModel) f"{type(_model)} vs _NativeDistModel"<assert_stmt>_model.get_local_rank()<eq>local_rank<assert_stmt>_model.get_world_size()<eq>world_size<assert_stmt>_model.device().type<eq>torch.device(device).type<block_end><def_stmt>_test__native_dist_model_spawn backend num_workers_per_machine device init_method=<none> **spawn_kwargs<block_start>_NativeDistModel.spawn(_test_dist_spawn_fn args=(backend num_workers_per_machine device) kwargs_dict={} 
backend=backend nproc_per_node=num_workers_per_machine init_method=init_method **spawn_kwargs )<block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")@pytest.mark.parametrize("init_method" [<none> "env://" "tcp://0.0.0.0:22334" "FILE"])<def_stmt>test__native_dist_model_spawn_gloo init_method dirname<block_start><if_stmt>init_method<eq>"FILE"<block_start>init_method=f"file://{dirname}/shared"<block_end>nproc=torch.cuda.device_count()<if>torch.cuda.is_available()<else>4<line_sep>device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>_test__native_dist_model_spawn("gloo" num_workers_per_machine=nproc device=device init_method=init_method)<if_stmt>device.type<eq>"cpu"<block_start>_test__native_dist_model_spawn("gloo" num_workers_per_machine=nproc device=device start_method="fork" init_method=init_method)<block_end><block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")@pytest.mark.skipif(torch.cuda.device_count()<l>1 reason="Skip if no GPU")@pytest.mark.parametrize("init_method" [<none> "tcp://0.0.0.0:22334" "FILE"])<def_stmt>test__native_dist_model_spawn_nccl init_method dirname<block_start><if_stmt>init_method<eq>"FILE"<block_start>init_method=f"file://{dirname}/shared"<block_end>num_workers_per_machine=torch.cuda.device_count()<line_sep>_test__native_dist_model_spawn("nccl" num_workers_per_machine=num_workers_per_machine device="cuda" init_method=init_method)<block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")@pytest.mark.skipif(<not>has_native_dist_support reason="Skip if no native dist support")<def_stmt>test__native_dist_model_init_method_is_none world_size<block_start><with_stmt>pytest.raises(ValueError match=r"Arguments rank and world_size should be None")<block_start>_NativeDistModel.create_from_backend(backend="gloo" world_size=world_size)<block_end><block_end>@pytest.mark.distributed@pytest.mark.skipif("WORLD_SIZE"<in>os.environ reason="Skip if launched as multiproc")@pytest.mark.skipif(<not>has_native_dist_support reason="Skip if no native dist support")<def_stmt>test__native_dist_model_init_method_is_not_none world_size local_rank get_fixed_dirname<block_start>init_method=f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"<with_stmt>pytest.raises(ValueError match=r"Both rank and world_size should be provided")<block_start>_NativeDistModel.create_from_backend(backend="gloo" world_size=world_size init_method=init_method)<block_end><with_stmt>pytest.raises(ValueError match=r"Both rank and world_size should be provided")<block_start>_NativeDistModel.create_from_backend(backend="gloo" rank=local_rank init_method=init_method)<block_end><block_end>@pytest.mark.parametrize("environ, expected" [# fmt: off # usual SLURM env ({"SLURM_PROCID":"1" "SLURM_LOCALID":"1" "SLURM_NTASKS":"2" "SLURM_JOB_NUM_NODES":"1" "SLURM_JOB_NODELIST":"c1" "SLURM_JOB_ID":"12345" } [1 1 2 "c1" 17345]) # usual SLURM env mnode ({"SLURM_PROCID":"5" "SLURM_LOCALID":"1" "SLURM_NTASKS":"8" "SLURM_JOB_NUM_NODES":"2" "SLURM_JOB_NODELIST":"c1, c2" "SLURM_JOB_ID":"12345" } [5 1 8 "c1" 17345]) # usual SLURM env 1 node, 1 task + torch.distributed.launch ({"SLURM_PROCID":"0" "SLURM_LOCALID":"0" "SLURM_NTASKS":"1" "SLURM_JOB_NUM_NODES":"1" "SLURM_JOB_NODELIST":"c1" "SLURM_JOB_ID":"12345" "MASTER_ADDR":"127.0.0.1" "MASTER_PORT":"2233" "RANK":"2" "LOCAL_RANK":"2" "WORLD_SIZE":"8" } 
[2 2 8 "127.0.0.1" 2233]) # usual SLURM env + enroot's pytorch hook ({"SLURM_PROCID":"3" "SLURM_LOCALID":"3" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"1" "SLURM_JOB_NODELIST":"c1" "SLURM_JOB_ID":"12345" "MASTER_ADDR":"c1" "MASTER_PORT":"12233" "RANK":"3" "LOCAL_RANK":"3" "WORLD_SIZE":"4" } [3 3 4 "c1" 12233]) # usual SLURM env mnode + enroot's pytorch hook ({"SLURM_PROCID":"3" "SLURM_LOCALID":"1" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"2" "SLURM_JOB_NODELIST":"c1, c2" "SLURM_JOB_ID":"12345" "MASTER_ADDR":"c1" "MASTER_PORT":"12233" "RANK":"3" "LOCAL_RANK":"1" "WORLD_SIZE":"4"} [3 1 4 "c1" 12233]) # fmt: on ] )<def_stmt>test__setup_ddp_vars_from_slurm_env environ expected<block_start>ddp_keys=["RANK" "LOCAL_RANK" "WORLD_SIZE" "MASTER_ADDR" "MASTER_PORT"]<line_sep>ddp_vars=_setup_ddp_vars_from_slurm_env(environ)<for_stmt>key,value zip(ddp_keys expected)<block_start><assert_stmt>key<in>ddp_vars<assert_stmt>ddp_vars[key]<eq>value<block_end><block_end><def_stmt>test__setup_ddp_vars_from_slurm_env_bad_configs <block_start><with_stmt>pytest.raises(RuntimeError match=r"Environment variable defined for PyTorch Distributed context is inconsistent")<block_start>environ={"SLURM_PROCID":"3" "SLURM_LOCALID":"1" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"2" "SLURM_JOB_NODELIST":"c1, c2" "SLURM_JOB_ID":"12345" "MASTER_ADDR":"another-addr" "MASTER_PORT":"12233" "RANK":"1" "LOCAL_RANK":"1" "WORLD_SIZE":"2" }<line_sep>_setup_ddp_vars_from_slurm_env(environ)<block_end><with_stmt>pytest.raises(RuntimeError match=r"Environment variable defined for PyTorch Distributed context is inconsistent")<block_start>environ={"SLURM_PROCID":"1" "SLURM_LOCALID":"1" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"1" "SLURM_JOB_NODELIST":"c1" "SLURM_JOB_ID":"12345" "MASTER_ADDR":"another-addr" "MASTER_PORT":"12233" "RANK":"1" "LOCAL_RANK":"1" "WORLD_SIZE":"2" }<line_sep>_setup_ddp_vars_from_slurm_env(environ)<block_end><with_stmt>pytest.warns(UserWarning match=r"We detected the following env variables")<block_start>environ={"SLURM_PROCID":"3" "SLURM_LOCALID":"1" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"2" "SLURM_JOB_NODELIST":"c1, c2" "SLURM_JOB_ID":"12345" "RANK":"1" "LOCAL_RANK":"1" "WORLD_SIZE":"2" }<line_sep>_setup_ddp_vars_from_slurm_env(environ)<block_end><with_stmt>pytest.raises(RuntimeError match=r"No hostname detected in SLURM_JOB_NODELIST by ignite")<block_start>environ={"SLURM_PROCID":"1" "SLURM_LOCALID":"1" "SLURM_NTASKS":"4" "SLURM_JOB_NUM_NODES":"1" "SLURM_JOB_NODELIST":"[]" "SLURM_JOB_ID":"12345" }<line_sep>_setup_ddp_vars_from_slurm_env(environ)<block_end><block_end>
<import_stmt>re<import_from_stmt>typing Dict Tuple List NamedTuple Optional<import_from_stmt>lib.utils.decorators with_exception_retry<import_from_stmt>.helpers.common split_hostport get_parsed_variables merge_hostport random_choice <import_from_stmt>.helpers.zookeeper get_hostname_and_port_from_zk<line_sep># TODO: make these configurable? MAX_URI_FETCH_ATTEMPTS=10<line_sep>MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC=5<class_stmt>RawHiveConnectionConf(NamedTuple)# Raw Connection Configuration that's from a string -> dict transformation <block_start>hosts:List[Tuple[str Optional[int]]]<line_sep>default_db:str<line_sep>session_variables:Dict[str str]<line_sep>conf_list:Dict[str str]<line_sep>var_list:Dict[str str]<block_end><class_stmt>HiveConnectionConf(NamedTuple)<block_start>host:str<line_sep>port:Optional[int]<line_sep>default_db:str<line_sep>configuration:Dict[str str]<block_end><def_stmt>_extract_connection_url connection_string:str<arrow>RawHiveConnectionConf# Parser for Hive JDBC string # Loosely based on https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC <block_start>match=re.search(r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$" # noqa: E501 connection_string )<line_sep>hosts=match.group(1)<line_sep>default_db=match.group(2)<or>"default"<line_sep>session_variables=match.group(3)<or>""<line_sep>conf_list=match.group(4)<or>""<line_sep>var_list=match.group(5)<or>""<line_sep>parsed_hosts=[]<for_stmt>hostport hosts.split(",")<block_start>parsed_hosts.append(split_hostport(hostport))<block_end>parsed_session_variables=get_parsed_variables(session_variables[1:])<line_sep>parsed_conf_list=get_parsed_variables(conf_list[1:])<line_sep>parsed_var_list=get_parsed_variables(var_list[1:])<line_sep><return>RawHiveConnectionConf(hosts=parsed_hosts default_db=default_db session_variables=parsed_session_variables conf_list=parsed_conf_list var_list=parsed_var_list )<block_end>@with_exception_retry(max_retry=MAX_URI_FETCH_ATTEMPTS get_retry_delay=<lambda>retry:min(MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC retry) )<def_stmt>get_hive_host_port_from_zk connection_conf:RawHiveConnectionConf <arrow>Tuple[str int]<block_start>zk_quorum=",".join(map(<lambda>hostport:merge_hostport(hostport) connection_conf.hosts))<line_sep>zk_namespace=connection_conf.session_variables.get("zooKeeperNamespace")<line_sep>raw_server_uris=get_hostname_and_port_from_zk(zk_quorum zk_namespace)<or>[]<line_sep>server_uri_dicts=filter(<lambda>d:d<is><not><none> [_server_uri_to_dict(raw_server_uri)<for>raw_server_uri raw_server_uris] )<line_sep>server_uris=list(map(<lambda>d:d["serverUri"] server_uri_dicts))<line_sep>random_server_uri=random_choice(server_uris)<if_stmt><not>random_server_uri<block_start><raise>Exception("Failed to get hostname and port from Zookeeper")<block_end><return>split_hostport(random_server_uri)<block_end><def_stmt>_server_uri_to_dict server_uri:str<arrow>Optional[Dict[str str]]<block_start>match=re.search(r"serverUri=(.*);version=(.*);sequence=(.*)" server_uri)<if_stmt>match<block_start><return>{"serverUri":match.group(1) "version":match.group(2) "sequence":match.group(3) }<block_end><block_end><def_stmt>get_hive_connection_conf connection_string:str<arrow>HiveConnectionConf<block_start>hostname=<none><line_sep>port=<none><line_sep>connection_conf=_extract_connection_url(connection_string)<line_sep># We use zookeeper to find host name 
<if_stmt>connection_conf.session_variables.get("serviceDiscoveryMode")<eq>"zooKeeper"<block_start>hostname,port=get_hive_host_port_from_zk(connection_conf)<block_end><else_stmt># We just return a normal host <block_start>hostname,port=random_choice(connection_conf.hosts default=(<none> <none>))<block_end><return>HiveConnectionConf(host=hostname port=port default_db=connection_conf.default_db configuration=connection_conf.conf_list )<block_end>
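A hedged end-to-end example of the parser above; the host names, port and database are placeholders, and serviceDiscoveryMode is omitted so no ZooKeeper lookup is triggered:

conf = get_hive_connection_conf(
    "jdbc:hive2://hs2-a.example.com:10000,hs2-b.example.com:10000/analytics;transportMode=binary"
)
# One of the two hosts is picked at random:
print(conf.host, conf.port)      # e.g. hs2-a.example.com 10000
print(conf.default_db)           # analytics
print(conf.configuration)        # {} -- the URL carries no ?conf_list section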
# Copyright 2021 JD.com, Inc., JD AI """ @author: <NAME> @contact: <EMAIL> """<import_stmt>torch<import_stmt>torch.nn<as>nn<line_sep>__all__=["AttentionPooler"]<class_stmt>AttentionPooler(nn.Module)<block_start><def_stmt>__init__ self * hidden_size:int output_size:int dropout:float use_bn:bool<block_start>super(AttentionPooler self).__init__()<line_sep>self.att=nn.Sequential(nn.Linear(hidden_size hidden_size) nn.ReLU(inplace=<true>) nn.Dropout(p=dropout) nn.Linear(hidden_size 1))<line_sep>self.embed=nn.Linear(hidden_size output_size)<line_sep>self.softmax=nn.Softmax(dim=-1)<line_sep>self.bn=nn.BatchNorm1d(output_size)<if>use_bn<else><none><block_end><def_stmt>forward self hidden_states masks=<none> **kwargs<block_start>score=self.att(hidden_states).squeeze(-1)<if_stmt>masks<is><not><none><block_start>score=score+masks.view(score.size(0) -1)<block_end>score=self.softmax(score)<line_sep>output=score.unsqueeze(1).matmul(hidden_states).squeeze(1)<line_sep>output=self.embed(output)<if_stmt>self.bn<is><not><none><block_start>output=self.bn(output)<block_end><return>output<block_end><block_end>
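A small shape-check sketch for AttentionPooler; the sizes are arbitrary and only meant to illustrate the expected tensor layout:

import torch

pooler = AttentionPooler(hidden_size=768, output_size=512, dropout=0.1, use_bn=False)
hidden_states = torch.randn(4, 20, 768)   # (batch, num_tokens, hidden_size)
masks = torch.zeros(4, 20)                # additive mask: 0 keeps a slot, a large
                                          # negative value suppresses it before softmax
pooled = pooler(hidden_states, masks)     # -> shape (4, 512)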
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi *<import_from_stmt>CalibTracker.SiStripCommon.ShallowDigisProducer_cfi *<import_from_stmt>CalibTracker.SiStripCommon.ShallowClustersProducer_cfi *<import_from_stmt>CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi *<import_from_stmt>CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi *<import_from_stmt>CalibTracker.SiStripCommon.ShallowTracksProducer_cfi *<import_from_stmt>RecoVertex.BeamSpotProducer.BeamSpot_cff *<import_from_stmt>RecoTracker.TrackProducer.TrackRefitters_cff *<line_sep>bigNtupleTrackCollectionTag=cms.InputTag("bigNtupleTracksRefit")<line_sep>bigNtupleClusterCollectionTag=cms.InputTag("siStripClusters")<line_sep>bigNtupleTracksRefit=RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(src="generalTracks")<line_sep>bigNtupleEventRun=shallowEventRun.clone()<line_sep>bigNtupleDigis=shallowDigis.clone()<line_sep>bigNtupleClusters=shallowClusters.clone(Clusters=bigNtupleClusterCollectionTag)<line_sep>bigNtupleRecHits=shallowRechitClusters.clone(Clusters=bigNtupleClusterCollectionTag)<line_sep>bigNtupleTrackClusters=shallowTrackClusters.clone(Tracks=bigNtupleTrackCollectionTag Clusters=bigNtupleClusterCollectionTag)<line_sep>bigNtupleTracks=shallowTracks.clone(Tracks=bigNtupleTrackCollectionTag)<line_sep>bigShallowTree=cms.EDAnalyzer("ShallowTree" outputCommands=cms.untracked.vstring('drop *' 'keep *_bigNtupleEventRun_*_*' 'keep *_bigNtupleDigis_*_*' 'keep *_bigNtupleClusters_*_*' 'keep *_bigNtupleRechits_*_*' 'keep *_bigNtupleTracks_*_*' 'keep *_bigNtupleTrackClusters_*_*'))<import_from_stmt>Configuration.StandardSequences.RawToDigi_Data_cff *<import_from_stmt>Configuration.StandardSequences.Reconstruction_cff *<line_sep>theBigNtuple=cms.Sequence((siPixelRecHits+siStripMatchedRecHits+offlineBeamSpot+bigNtupleTracksRefit)<times>(bigNtupleEventRun+bigNtupleClusters+bigNtupleRecHits+bigNtupleTracks+bigNtupleTrackClusters))<line_sep>theBigNtupleDigi=cms.Sequence(siStripDigis+bigNtupleDigis)<line_sep>
<import_stmt>mod<def_stmt>foo <block_start><return>1<block_end><try_stmt><block_start>mod.foo=foo<block_end><except_stmt>RuntimeError<block_start>print("RuntimeError1")<block_end>print(mod.foo())<try_stmt><block_start>mod.foo=1<block_end><except_stmt>RuntimeError<block_start>print("RuntimeError2")<block_end>print(mod.foo)<try_stmt><block_start>mod.foo=2<block_end><except_stmt>RuntimeError<block_start>print("RuntimeError3")<block_end>print(mod.foo)<def_stmt>__main__ <block_start><pass><block_end>
<import_from_stmt>pathlib Path<line_sep>IPYTHON_STARTUP_FOLDER=Path.home()/".ipython"/"profile_default"/"startup"<line_sep>STARTUP_FILE=IPYTHON_STARTUP_FOLDER/"pyforest_autoimport.py"<def_stmt>_create_or_reset_startup_file <block_start><if_stmt>STARTUP_FILE.exists()<block_start>STARTUP_FILE.unlink()# deletes the old file # this is important if someone messed around with the file # if he calls our method, he expects that we repair everything # therefore, we delete the old file and write a new, valid version <block_end>STARTUP_FILE.touch()<block_end># create a new file <def_stmt>_write_into_startup_file <block_start><with_stmt>STARTUP_FILE.open("w")<as>file<block_start>file.write(f""" # HOW TO DEACTIVATE AUTO-IMPORT: # if you dont want to auto-import pyforest, you have two options: # 0) if you only want to disable the auto-import temporarily and activate it later, # you can uncomment the import statement below # 1) if you never want to auto-import pyforest again, you can delete this file try: import pyforest # uncomment this line if you temporarily dont want to auto-import pyforest pass except: pass """)<block_end><block_end><def_stmt>setup <block_start><if_stmt><not>IPYTHON_STARTUP_FOLDER.exists()<block_start>print(f"Error: Could not find the default IPython startup folder at {IPYTHON_STARTUP_FOLDER}")<line_sep><return><false><block_end>_create_or_reset_startup_file()<line_sep>_write_into_startup_file()<line_sep>print("Success: pyforest is now available in Jupyter Notebook, Jupyter Lab and IPython because it was added to the IPython auto import")<line_sep><return><true><block_end>
<import_from_stmt>common Modules data_strings load_yara_rules AndroidParseModule ModuleMetadata<import_from_stmt>base64 b64decode<import_from_stmt>string printable<class_stmt>dendroid(AndroidParseModule)<block_start><def_stmt>__init__ self<block_start>md=ModuleMetadata(module_name="dendroid" bot_name="Dendroid" description="Android RAT" authors=["<NAME> (@botnet_hunter)"] version="1.0.0" date="August 18, 2014" references=[])<line_sep>AndroidParseModule.__init__(self md)<line_sep>self.yara_rules=<none><line_sep><pass><block_end><def_stmt>_generate_yara_rules self<block_start><if_stmt>self.yara_rules<is><none><block_start>self.yara_rules=load_yara_rules("dendroid.yara")<block_end><return>self.yara_rules<block_end><def_stmt>get_bot_information self file_data<block_start>results={}<line_sep>uri=<none><line_sep>password=<none><for_stmt>s data_strings(file_data charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwx yz0123456789+/=")<block_start><try_stmt><block_start>line=b64decode(s)<if_stmt>len(line)<eq>0<block_start><continue><block_end>valid=<true><for_stmt>c line<block_start><if_stmt>c<not><in>printable<block_start>valid=<false><block_end><block_end><if_stmt><not>valid<block_start><continue><block_end><if_stmt>line.lower().startswith("https://")<or>line.lower().startswith("http://")<block_start>uri=line<line_sep><continue><block_end><if_stmt>uri<is><not><none><block_start>password=line<line_sep><break><block_end><block_end><except_stmt>TypeError<block_start><continue><block_end><block_end><if_stmt>uri<is><not><none><block_start>results["c2_uri"]=uri<if_stmt>password<is><not><none><block_start><try_stmt><block_start>password.decode("utf8")<line_sep>results["password"]=password<block_end><except_stmt>UnicodeDecodeError<block_start>results["password"]="h"+password.encode("hex")<block_end><block_end><block_end><return>results<block_end><block_end>Modules.list.append(dendroid())<line_sep>
<import_stmt>tensorboardX<with_stmt>tensorboardX.SummaryWriter("foo")<as>w<block_start>w.add_scalar("a" 1.0 1)<line_sep>w.add_scalar("a" 2.0 2)<block_end><with_stmt>tensorboardX.SummaryWriter("foo/bar")<as>w<block_start>w.add_scalar("a" 3.0 3)<line_sep>w.add_scalar("a" 4.0 4)<block_end><with_stmt>tensorboardX.SummaryWriter("foo/bar/baz")<as>w<block_start>w.add_scalar("a" 5.0 5)<line_sep>w.add_scalar("a" 6.0 6)<block_end>
<import_from_stmt>collections defaultdict OrderedDict<import_from_stmt>itertools islice<import_stmt>copy os pickle warnings<import_stmt>esda<import_stmt>numpy<import_from_stmt>.analysis GlobalAutoK<import_from_stmt>. util<import_from_stmt>libpysal cg examples weights<import_from_stmt>libpysal.common requires<try_stmt><block_start><import_from_stmt>libpysal open<block_end><except_stmt>ImportError<block_start><import_stmt>libpysal<line_sep>open=libpysal.io.open<block_end>__all__=["Network" "PointPattern" "GlobalAutoK"]<line_sep>SAME_SEGMENT=(-0.1 -0.1)<line_sep>dep_msg=("The next major release of pysal/spaghetti (2.0.0) will "<concat>"drop support for all ``libpysal.cg`` geometries. This change "<concat>"is a first step in refactoring ``spaghetti`` that is "<concat>"expected to result in dramatically reduced runtimes for "<concat>"network instantiation and operations. Users currently "<concat>"requiring network and point pattern input as ``libpysal.cg`` "<concat>"geometries should prepare for this simply by converting "<concat>"to ``shapely`` geometries.")<line_sep>warnings.warn(f"{dep_msg}" FutureWarning)<class_stmt>Network<block_start>"""Spatially-constrained network representation and analytical functionality. Naming conventions are as follows, (1) arcs and vertices for the full network object, and (2) edges and nodes for the simplified graph-theoretic object. The term 'link' is used to refer to a network arc or a graph edge. Parameters ---------- in_data : {str, iterable (list, tuple, numpy.ndarray), libpysal.cg.Chain, geopandas.GeoDataFrame} The input geographic data. Either (1) a path to a shapefile (str); (2) an iterable containing ``libpysal.cg.Chain`` objects; (3) a single ``libpysal.cg.Chain``; or (4) a ``geopandas.GeoDataFrame``. vertex_sig : int Round the x and y coordinates of all vertices to ``vertex_sig`` significant digits (combined significant digits on the left and right of the decimal place). Default is 11. Set to ``None`` for no rounding. unique_arcs : bool If ``True`` (default), keep only unique arcs (i.e., prune out any duplicated arcs). If ``False`` keep all segments. extractgraph : bool If ``True``, extract a graph-theoretic object with no degree 2 nodes. Default is ``True``. w_components : bool Set to ``False`` to not record connected components from a ``libpysal.weights.W`` object. Default is ``True``. weightings : {dict, bool} If dict, lists of weightings for each arc. If bool, ``True`` flags ``self.arc_lengths`` as the weightings, ``False`` sets no weightings. Default is ``False``. weights_kws : dict Keyword arguments for ``libpysal.weights.W``. vertex_atol : {int, None} Precision for vertex absolute tolerance. Round vertex coordinates to ``vertex_atol`` decimal places. Default is ``None``. **ONLY** change the default when there are known issues with digitization. Attributes ---------- adjacencylist : list List of lists storing vertex adjacency. vertex_coords : dict Keys are vertex IDs and values are :math:`(x,y)` coordinates of the vertices. vertex_list : list List of vertex IDs. vertices : dict Keys are tuples of vertex coords and values are the vertex ID. arcs : list List of arcs, where each arc is a sorted tuple of vertex IDs. arc_lengths : dict Keys are tuples of sorted vertex IDs representing an arc and values are the length. pointpatterns : dict Keys are a string name of the pattern and values are ``PointPattern`` class instances. distance_matrix : numpy.ndarray All network vertices (non-observations) distance matrix. 
Distances between vertices in disparate components are recorded as ``inf`` by default. network_trees : dict Keys are the vertex IDs (``int``). Values are dictionaries with the keys being the IDs of the destination vertex and values being lists of vertices along the shortest path. If the destination vertex is a) the origin or b) unreachable (disparate component) it is listed as itself being the neighbor. edges : list Tuples of graph edge IDs. edge_lengths : dict Keys are the graph edge IDs (``tuple``). Values are the graph edge length (``float``). non_articulation_points : list All vertices with degree 2 that are not in an isolated island ring (loop) component. w_network : libpysal.weights.W Weights object created from the network arcs. network_n_components : int Count of connected components in the network. network_fully_connected : bool ``True`` if the network representation is a single connected component, otherwise ``False``. network_component_labels : numpy.ndarray Component labels for network arcs. network_component2arc : dict Lookup in the form {int: list} for arcs comprising network connected components keyed by component labels with arcs in a list as values. network_component_lengths : dict Length of each network component (keyed by component label). network_longest_component : int The ID of the longest component in the network. This is not necessarily equal to ``network_largest_component``. network_component_vertices : dict Lookup in the form {int: list} for vertices comprising network connected components keyed by component labels with vertices in a list as values. network_component_vertex_count : dict The number of vertices in each network component (keyed by component label). network_largest_component : int The ID of the largest component in the network. Within ``spaghetti`` the largest component is the one with the most vertices. This is not necessarily equal to ``network_longest_component``. network_component_is_ring : dict Lookup in the form {int: bool} keyed by component labels with values as ``True`` if the component is a closed ring, otherwise ``False``. w_graph : libpysal.weights.W Weights object created from the graph edges. graph_n_components : int Count of connected components in the network. graph_fully_connected : bool ``True`` if the graph representation is a single connected component, otherwise ``False``. graph_component_labels : numpy.ndarray Component labels for graph edges. graph_component2edge : dict Lookup in the form {int: list} for edges comprising graph connected components keyed by component labels with edges in a list as values. graph_component_lengths : dict Length of each graph component (keyed by component label). graph_longest_component : int The ID of the longest component in the graph. This is not necessarily equal to ``graph_largest_component``. graph_component_vertices : dict Lookup in the form {int: list} for vertices comprising graph connected components keyed by component labels with vertices in a list as values. graph_component_vertex_count : dict The number of vertices in each graph component (keyed by component label). graph_largest_component : int The ID of the largest component in the graph. Within ``spaghetti`` the largest component is the one with the most vertices. This is not necessarily equal to ``graph_longest_component``. graph_component_is_ring : dict Lookup in the form {int: bool} keyed by component labels with values as ``True`` if the component is a closed ring, otherwise ``False``. 
Notes ----- **Important**: The core procedure for generating network representations is performed within the ``_extractnetwork()`` method. Here it is important to note that a ``spaghetti.Network`` instance is built up from the individual, constituent euclidean units of each line segment object. Therefore, the resulting network structure will generally have (1) more vertices and links than may be expected, and (2) many degree-2 vertices, which differs from a truly graph-theoretic object. This is demonstrated in the `Caveats Tutorial <https://pysal.org/spaghetti/notebooks/caveats.html#4.-Understanding-network-generation>`_. See :cite:`Cliff1981`, :cite:`Tansel1983a`, :cite:`AhujaRavindraK`, :cite:`Labbe1995`, :cite:`Kuby2009`, :cite:`Barthelemy2011`, :cite:`daskin2013`, :cite:`Okabe2012`, :cite:`Ducruet2014`, :cite:`Weber2016`, for more in-depth discussion on spatial networks, graph theory, and location along networks. For related network-centric software see `Snkit <https://github.com/tomalrussell/snkit>`_ :cite:`tom_russell_2019_3379659`, `SANET <http://sanet.csis.u-tokyo.ac.jp>`_ :cite:`Okabe2006a`, `NetworkX <https://networkx.github.io>`_ :cite:`Hagberg2008`, `Pandana <http://udst.github.io/pandana/>`_ :cite:`Foti2012`, and `OSMnx <https://osmnx.readthedocs.io/en/stable/>`_ :cite:`Boeing2017`. Examples -------- Create an instance of a network. >>> import spaghetti >>> from libpysal import examples >>> streets_file = examples.get_path("streets.shp") >>> ntw = spaghetti.Network(in_data=streets_file) Fetch the number of connected components in the network. >>> ntw.network_n_components 1 Unique component labels in the network. >>> import numpy >>> list(numpy.unique(ntw.network_component_labels)) [0] Show whether each component of the network is an isolated ring (or not). >>> ntw.network_component_is_ring {0: False} Show how many network arcs are associated with the component. >>> arcs = len(ntw.network_component2arc[ntw.network_component_labels[0]]) >>> arcs 303 Do the same as above, but for the graph-theoretic representation of the network object. >>> ntw.graph_n_components 1 >>> list(numpy.unique(ntw.graph_component_labels)) [0] >>> ntw.graph_component_is_ring {0: False} >>> edges = len(ntw.graph_component2edge[ntw.graph_component_labels[0]]) >>> edges 179 The number of arcs in the network is always greater than or equal to the number of edges in the graph-theoretic representation. >>> arcs >= edges True Snap point observations to the network with attribute information. >>> crimes_file = examples.get_path("crimes.shp") >>> ntw.snapobservations(crimes_file, "crimes", attribute=True) And without attribute information. >>> schools_file = examples.get_path("schools.shp") >>> ntw.snapobservations(schools_file, "schools", attribute=False) Show the point patterns associated with the network. 
>>> ntw.pointpatterns.keys() dict_keys(['crimes', 'schools']) """<def_stmt>__init__ self in_data=<none> vertex_sig=11 unique_arcs=<true> extractgraph=<true> w_components=<true> weightings=<false> weights_kws=dict() vertex_atol=<none> # do this when creating a clean network instance from a # shapefile or a geopandas.GeoDataFrame, otherwise a shell # network instance is created (see `split_arcs()` method) <block_start><if_stmt>in_data<is><not><none># set parameters as attributes <block_start>self.in_data=in_data<line_sep>self.vertex_sig=vertex_sig<line_sep>self.vertex_atol=vertex_atol<line_sep>self.unique_arcs=unique_arcs<line_sep>self.adjacencylist=defaultdict(list)<line_sep>self.vertices={}<line_sep># initialize network arcs and arc_lengths self.arcs=[]<line_sep>self.arc_lengths={}<line_sep># initialize pointpatterns self.pointpatterns={}<line_sep># spatial representation of the network self._extractnetwork()<line_sep>self.arcs=sorted(self.arcs)<line_sep>self.vertex_coords=dict((v k)<for>k,v self.vertices.items())<line_sep># extract connected components <if_stmt>w_components<block_start>as_graph=<false><line_sep>network_weightings=<false><if_stmt>weightings<eq><true># set network arc weights to length if weights are # desired, but no other input in given <block_start>weightings=self.arc_lengths<line_sep>network_weightings=<true><block_end># extract contiguity weights from libpysal self.w_network=self.contiguityweights(graph=as_graph weightings=weightings weights_kws=weights_kws )<line_sep># identify connected components from the `w_network` self.identify_components(self.w_network graph=as_graph)<block_end># extract the graph -- repeat similar as above # for extracting the network <if_stmt>extractgraph<block_start>self.extractgraph()<if_stmt>w_components<block_start>as_graph=<true><if_stmt>network_weightings<block_start>weightings=self.edge_lengths<block_end>self.w_graph=self.contiguityweights(graph=as_graph weightings=weightings weights_kws=weights_kws )<line_sep>self.identify_components(self.w_graph graph=as_graph)<block_end><block_end># sorted list of vertex IDs self.vertex_list=sorted(self.vertices.values())<block_end><block_end><def_stmt>_round_sig self v<block_start>"""Used internally to round the vertex to a set number of significant digits. If ``sig`` is set to 4, then the following are some possible results for a coordinate are as follows. (1) 0.0xxxx, (2) 0.xxxx, (3) x.xxx, (4) xx.xx, (5) xxx.x, (6) xxxx.0, (7) xxxx0.0 Parameters ---------- v : tuple Coordinate (x,y) of the vertex. """<line_sep># set the number of significant digits sig=self.vertex_sig<line_sep># simply return vertex (x,y) coordinates <if_stmt>sig<is><none><block_start><return>v<block_end># for each coordinate in a coordinate pair # if the coordinate location is (0.0) simply return zero # else -- (1) take the absolute value of `val`; (2) take the # base 10 log for [1]; (3) take the floor of [2]; (4) convert # [3] into a negative integer; (5) add `sig - 1` to [4]; # (6) round `val` by [5] out_v=[val<if>val<eq>0<else>round(val -int(numpy.floor(numpy.log10(numpy.fabs(val))))+(sig-1))<for>val v]<if_stmt>self.vertex_atol<block_start>out_v=[round(v self.vertex_atol)<for>v out_v]<block_end><return>tuple(out_v)<block_end><def_stmt>identify_components self w graph=<false><block_start>"""Identify connected component information from a ``libpysal.weights.W`` object Parameters ---------- w : libpysal.weights.W Weights object created from the network segments (either raw or graph-theoretic). 
graph : bool Flag for a raw network (``False``) or graph-theoretic network (``True``). Default is ``False``. """<line_sep># flag network (arcs) or graph (edges) <if_stmt>graph<block_start>links=self.edges<line_sep>obj_type="graph_"<block_end><else_stmt><block_start>links=self.arcs<line_sep>obj_type="network_"<block_end># connected component count and labels n_components=w.n_components<line_sep>component_labels=w.component_labels<line_sep># is the network a single, fully-connected component? <if_stmt>n_components<eq>1<block_start>fully_connected=<true><block_end><else_stmt><block_start>fully_connected=<false><block_end># link to component lookup link2component=dict(zip(links component_labels))<line_sep># component ID lookups: links, lengths, vertices, vertex counts component2link={}<line_sep>component_lengths={}<line_sep>component_vertices={}<line_sep>component_vertex_count={}<line_sep>cp_labs_=set(w.component_labels)<line_sep>l2c_=link2component.items()<for_stmt>cpl cp_labs_<block_start>component2link[cpl]=sorted([k<for>k,v l2c_<if>v<eq>cpl])<line_sep>c2l_=component2link[cpl]<line_sep>arclens_=self.arc_lengths.items()<line_sep>component_lengths[cpl]=sum([v<for>k,v arclens_<if>k<in>c2l_])<line_sep>component_vertices[cpl]=list(set([v<for>l c2l_<for>v l]))<line_sep>component_vertex_count[cpl]=len(component_vertices[cpl])<block_end># longest and largest components longest_component=max(component_lengths key=component_lengths.get)<line_sep>largest_component=max(component_vertex_count key=component_vertex_count.get)<line_sep># component to ring lookup component_is_ring={}<line_sep>adj_=self.adjacencylist.items()<for_stmt>comp,verts component_vertices.items()<block_start>component_is_ring[comp]=<false><line_sep>_2neighs=[len(neighs)<eq>2<for>v,neighs adj_<if>v<in>verts]<if_stmt>all(_2neighs)<block_start>component_is_ring[comp]=<true><block_end><block_end># attribute label name depends on object type <if_stmt>graph<block_start>c2l_attr_name="component2edge"<block_end><else_stmt><block_start>c2l_attr_name="component2arc"<block_end># set all new variables into list extracted_attrs=[["fully_connected" fully_connected] ["n_components" n_components] ["component_labels" component_labels] [c2l_attr_name component2link] ["component_lengths" component_lengths] ["component_vertices" component_vertices] ["component_vertex_count" component_vertex_count] ["longest_component" longest_component] ["largest_component" largest_component] ["component_is_ring" component_is_ring] ]<line_sep># iterate over list and set attribute with # either "network" or "graph" extension <for_stmt>(attr_str attr) extracted_attrs<block_start>setattr(self obj_type+attr_str attr)<block_end><block_end><def_stmt>_extractnetwork self<block_start>"""Used internally to extract a network."""<line_sep># initialize vertex count vertex_count=0<line_sep># determine input network data type in_dtype=str(type(self.in_data)).split("'")[1]<line_sep>is_libpysal_chains=<false><line_sep>supported_iterables=["list" "tuple" "numpy.ndarray"]<line_sep># type error message msg="'%s' not supported for network instantiation."<line_sep># set appropriate geometries 
<if_stmt>in_dtype<eq>"str"<block_start>shps=open(self.in_data)<block_end><elif_stmt>in_dtype<in>supported_iterables<block_start>shps=self.in_data<line_sep>shp_type=str(type(shps[0])).split("'")[1]<if_stmt>shp_type<eq>"libpysal.cg.shapes.Chain"<block_start>is_libpysal_chains=<true><block_end><else_stmt><block_start><raise>TypeError(msg%shp_type)<block_end><block_end><elif_stmt>in_dtype<eq>"libpysal.cg.shapes.Chain"<block_start>shps=[self.in_data]<line_sep>is_libpysal_chains=<true><block_end><elif_stmt>in_dtype<eq>"geopandas.geodataframe.GeoDataFrame"<block_start>shps=self.in_data.geometry<block_end><else_stmt><block_start><raise>TypeError(msg%in_dtype)<block_end># iterate over each record of the network lines <for_stmt>shp shps# if the segments are native pysal geometries <block_start><if_stmt>is_libpysal_chains<block_start>vertices=shp.vertices<block_end><else_stmt># fetch all vertices between euclidean segments # in the line record -- these vertices are # coordinates in an (x, y) tuple. <block_start>vertices=weights._contW_lists._get_verts(shp)<block_end># iterate over each vertex (v) <for_stmt>i,v enumerate(vertices[:-1])# -- For vertex 1 # adjust precision -- this was originally # implemented to handle high-precision # network network vertices <block_start>v=self._round_sig(v)<line_sep># when the vertex already exists in lookup # set it as the current `vid` <try_stmt><block_start>vid=self.vertices[v]<block_end># when the vertex is not present in the lookup # add it and adjust vertex count <except_stmt>KeyError<block_start>self.vertices[v]=vid=vertex_count<line_sep>vertex_count<augadd>1<block_end># -- For vertex 2 # repeat the steps above for vertex 1 v2=self._round_sig(vertices[i+1])<try_stmt><block_start>nvid=self.vertices[v2]<block_end><except_stmt>KeyError<block_start>self.vertices[v2]=nvid=vertex_count<line_sep>vertex_count<augadd>1<block_end># records vertex 1 and vertex 2 adjacency self.adjacencylist[vid].append(nvid)<line_sep>self.adjacencylist[nvid].append(vid)<line_sep># Sort the edges so that mono-directional # keys can be stored. arc_vertices=sorted([vid nvid])<line_sep>arc=tuple(arc_vertices)<line_sep># record the euclidean arc within the network self.arcs.append(arc)<line_sep># record length length=util.compute_length(v vertices[i+1])<line_sep>self.arc_lengths[arc]=length<block_end><block_end><if_stmt>self.unique_arcs# Remove duplicate edges and duplicate adjacent nodes. <block_start>self.arcs=list(set(self.arcs))<for_stmt>k,v self.adjacencylist.items()<block_start>self.adjacencylist[k]=list(set(v))<block_end><block_end><block_end><def_stmt>extractgraph self<block_start>"""Using the existing network representation, create a graph-theoretic representation by removing all vertices with a neighbor incidence of two (non-articulation points). That is, we assume these vertices are bridges between vertices with higher or lower incidence. """<line_sep># initialize edges and edge_lengths self.edges=[]<line_sep>self.edge_lengths={}<line_sep># find all vertices with degree 2 that are not in an isolated # island ring (loop) component. 
These are non-articulation # points on the graph representation non_articulation_points=self._yield_napts()<line_sep># retain non_articulation_points as an attribute self.non_articulation_points=list(non_articulation_points)<line_sep># start with a copy of the spatial representation and # iteratively remove edges deemed to be segments self.edges=copy.deepcopy(self.arcs)<line_sep>self.edge_lengths=copy.deepcopy(self.arc_lengths)<line_sep># mapping all the 'network arcs' contained within a single # 'graph represented' edge self.arcs_to_edges={}<line_sep># build up bridges "rooted" on the initial # non-articulation points bridge_roots=[]<line_sep># iterate over all vertices that are not contained within # isolated loops that have a degree of 2 <for_stmt>s non_articulation_points# initialize bridge with an articulation point <block_start>bridge=[s]<line_sep># fetch all vertices adjacent to point `s` # that are also degree 2 neighbors=self._yieldneighbor(s non_articulation_points bridge)<while_stmt>neighbors# extract the current node in `neighbors` <block_start>cnode=neighbors.pop()<line_sep># remove it from `non_articulation_points` non_articulation_points.remove(cnode)<line_sep># add it to bridge bridge.append(cnode)<line_sep># fetch neighbors for the current node newneighbors=self._yieldneighbor(cnode non_articulation_points bridge)<line_sep># add the new neighbors back into `neighbors` neighbors<augadd>newneighbors<block_end># once all potential neighbors are exhausted add the # current bridge of non-articulation points to the # list of rooted bridges bridge_roots.append(bridge)<block_end># iterate over the list of newly created rooted bridges <for_stmt>bridge bridge_roots# if the vertex is only one non-articulation # point in the bridge <block_start><if_stmt>len(bridge)<eq>1# that the singular element of the bridge <block_start>n=self.adjacencylist[bridge[0]]<line_sep># and create a new graph edge from it new_edge=tuple(sorted([n[0] n[1]]))<line_sep># identify the arcs to be removed e1=tuple(sorted([bridge[0] n[0]]))<line_sep>e2=tuple(sorted([bridge[0] n[1]]))<line_sep># remove the network arcs (spatial) from the # graph-theoretic representation self.edges.remove(e1)<line_sep>self.edges.remove(e2)<line_sep># remove the former network arc lengths from the # graph edge lengths lookup length_e1=self.edge_lengths[e1]<line_sep>length_e2=self.edge_lengths[e2]<line_sep>self.edge_lengths.pop(e1 <none>)<line_sep>self.edge_lengths.pop(e2 <none>)<line_sep># and add the new edge length in their place self.edge_lengths[new_edge]=length_e1+length_e2<line_sep># update the pointers self.arcs_to_edges[e1]=new_edge<line_sep>self.arcs_to_edges[e2]=new_edge<block_end># if there are more than one vertices in the bridge <else_stmt><block_start>cumulative_length=0<line_sep>start_end={}<line_sep># initialize a redundant set of bridge edges redundant=set([])<line_sep># iterate over the current bridge <for_stmt>b bridge# iterate over each node in the bridge <block_start><for_stmt>n self.adjacencylist[b]# start the bridge with this node <block_start><if_stmt>n<not><in>bridge<block_start>start_end[b]=n<block_end># or create a redundant edge with the current # node and `b` <else_stmt><block_start>redundant.add(tuple(sorted([b n])))<block_end><block_end><block_end># initialize a new graph edge new_edge=tuple(sorted(start_end.values()))<line_sep># add start_end redundant edge <for_stmt>k,v start_end.items()<block_start>redundant.add(tuple(sorted([k v])))<block_end># remove all redundant network arcs while # adjusting 
the graph edge lengths lookup # and the edges_to_arcs lookup <for_stmt>r redundant<block_start>self.edges.remove(r)<line_sep>cumulative_length<augadd>self.edge_lengths[r]<line_sep>self.edge_lengths.pop(r <none>)<line_sep>self.arcs_to_edges[r]=new_edge<block_end># finally, add the new cumulative edge length self.edge_lengths[new_edge]=cumulative_length<block_end># add the updated graph edge self.edges.append(new_edge)<block_end># convert the graph edges into a sorted set to prune out # duplicate graph edges created during simplification self.edges=sorted(set(self.edges))<block_end><def_stmt>_yield_napts self<block_start>"""Find all nodes with degree 2 that are not in an isolated island ring (loop) component. These are non-articulation points on the graph representation. Returns ------- napts : list Non-articulation points on a graph representation. """<line_sep># non-articulation points napts=set()<line_sep># network vertices remaining to evaluate unvisited=set(self.vertices.values())<while_stmt>unvisited# iterate over each component <block_start><for_stmt>component_id,ring self.network_component_is_ring.items()# evaluate for non-articulation points <block_start>napts,unvisited=self._evaluate_napts(napts unvisited component_id ring)<block_end><block_end># convert set of non-articulation points into list napts=list(napts)<line_sep><return>napts<block_end><def_stmt>_evaluate_napts self napts unvisited component_id ring<block_start>"""Evaluate one connected component in a network for non-articulation points (``napts``) and return an updated set of ``napts`` and unvisited vertices. Parameters ---------- napts : set Non-articulation points (``napts``) in the network. The ``napts`` here do not include those within an isolated loop island. unvisited : set Vertices left to evaluate in the network. component_id : int ID for the network connected component for the current iteration of the algorithm. ring : bool Network component is isolated island loop ``True`` or not ``False``. Returns ------- napts : set Updated ``napts`` object. unvisited : set Updated ``unvisited`` object. """<line_sep># iterate over each `edge` of the `component` <for_stmt>component self.network_component2arc[component_id]# each `component` has two vertices <block_start><for_stmt>vertex component# if `component` is not an isolated island # and `vertex` has exactly 2 neighbors, # add `vertex` to `napts` <block_start><if_stmt><not>ring<block_start><if_stmt>len(self.adjacencylist[vertex])<eq>2<block_start>napts.add(vertex)<block_end><block_end># remove `vertex` from `unvisited` if # it is still in the set else move along to # the next iteration <try_stmt><block_start>unvisited.remove(vertex)<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><return>napts unvisited<block_end><def_stmt>_yieldneighbor self vtx arc_vertices bridge<block_start>"""Used internally, this method traverses a bridge arc to find the source and destination nodes. Parameters ---------- vtx : int The vertex ID. arc_vertices : list All non-articulation points (``napts``) in the network. These are referred to as degree-2 vertices. bridge : list Initial bridge list containing only ``vtx``. Returns ------- nodes : list Vertices to keep (articulation points). These elements are referred to as nodes. 
"""<line_sep># instantiate empty lis to fill with network articulation # points (nodes with a degree of 1 [endpoints] or greater # than 2 [intersections]) nodes=[]<line_sep># get all nodes adjacent to `vtx` that are not in the # set of 'bridge' vertices <for_stmt>i self.adjacencylist[vtx]<block_start><if_stmt>i<in>arc_vertices<and>i<not><in>bridge<block_start>nodes.append(i)<block_end><block_end><return>nodes<block_end><def_stmt>contiguityweights self graph=<true> weightings=<none> from_split=<false> weights_kws=dict()<block_start>"""Create a contiguity-based ``libpysal.weights.W`` object. Parameters ---------- graph : bool Controls whether the ``libpysal.weights.W`` is generated using the spatial representation (``False``) or the graph representation (``True``). Default is ``True``. weightings : {dict, None} Dictionary of lists of weightings for each arc/edge. Default is ``None``. from_split : bool Flag for whether the method is being called from within ``split_arcs()`` (``True``) or not (``False``). Default is ``False``. weights_kws : dict Keyword arguments for ``libpysal.weights.W``. Returns ------- W : libpysal.weights.W A ``W`` representing the binary adjacency of the network. See also -------- libpysal.weights.W Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> import numpy >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap point observations to the network with attribute information. >>> ntw.snapobservations( ... examples.get_path("crimes.shp"), "crimes", attribute=True ... ) Find counts per network arc. >>> counts = ntw.count_per_link( ... ntw.pointpatterns["crimes"].obs_to_arc, graph=False ... ) >>> counts[(50, 165)] 4 Create a contiguity-based ``W`` object. >>> w = ntw.contiguityweights(graph=False) >>> w.n, w.n_components (303, 1) Notes ----- See :cite:`pysal2007` for more details. 
"""<line_sep># instantiate OrderedDict to record network link # adjacency which will be keyed by the link ID (a tuple) # with values being lists of tuples (contiguous links) neighbors=OrderedDict()<line_sep># flag network (arcs) or graph (edges) <if_stmt>graph<block_start>links=self.edges<block_end><else_stmt><block_start>links=self.arcs<block_end># if weightings are desired instantiate a dictionary # other ignore weightings <if_stmt>weightings<block_start>_weights={}<block_end><else_stmt><block_start>_weights=<none><block_end># iterate over all links until all possibilities # for network link adjacency are exhausted working=<true><while_stmt>working# for each network link (1) <block_start><for_stmt>key links# instantiate a slot in the OrderedDict <block_start>neighbors[key]=[]<if_stmt>weightings<block_start>_weights[key]=[]<block_end># for each network link (2) <for_stmt>neigh links# skip if comparing link to itself <block_start><if_stmt>key<eq>neigh<block_start><continue><block_end># if link(1) and link(2) share any vertex # update neighbors adjacency <if_stmt>(key[0]<eq>neigh[0]<or>key[0]<eq>neigh[1]<or>key[1]<eq>neigh[0]<or>key[1]<eq>neigh[1])<block_start>neighbors[key].append(neigh)<line_sep># and add weights if desired <if_stmt>weightings<block_start>_weights[key].append(weightings[neigh])<block_end><block_end># break condition # -- everything is sorted, so we know when we have # stepped beyond a possible neighbor <if_stmt>key[1]<g>neigh[1]<block_start>working=<false><block_end><block_end><block_end><if_stmt>len(links)<eq>1<or>from_split<block_start>working=<false><block_end><block_end># call libpysal for `W` instance weights_kws["weights"]=_weights<line_sep>w=weights.W(neighbors **weights_kws)<line_sep><return>w<block_end><def_stmt>distancebandweights self threshold n_processes=1 gen_tree=<false><block_start>"""Create distance-based weights. Parameters ---------- threshold : float Distance threshold value. n_processes : {int, str} Specify the number of cores to utilize. Default is 1 core. Use ``"all"`` to request all available cores. Specify the exact number of cores with an integer. gen_tree : bool Rebuild shortest path with ``True``, or skip with ``False``. Default is ``False``. Returns ------- w : libpysal.weights.W A ``W`` object representing the binary adjacency of the network. Notes ----- See :cite:`AnselinRey2014` and :cite:`rey_open_2015` for more details regarding spatial weights. See also -------- libpysal.weights.W Examples -------- Instantiate an instance of a network. >>> import spaghetti >>> from libpysal import examples >>> streets_file = examples.get_path("streets.shp") >>> ntw = spaghetti.Network(in_data=streets_file) Create a contiguity-based ``W`` object based on network distance, ``500`` `US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_. >>> w = ntw.distancebandweights(threshold=500) Show the number of units in the ``W`` object. >>> w.n 230 There are ``8`` units with ``3`` neighbors in the ``W`` object. 
>>> w.histogram[-1] (8, 3) """<line_sep># if a vertex-to-vertex network distance matrix is # not present in the `network.Network` object, calculate # one at this point <if_stmt><not>hasattr(self "distance_matrix")<block_start>self.full_distance_matrix(n_processes gen_tree=gen_tree)<block_end># identify all network vertices which are within the # `threshold` parameter neighbor_query=numpy.where(self.distance_matrix<l>threshold)<line_sep># create an instance for recording neighbors which # inserts a new key if not present in object neighbors=defaultdict(list)<line_sep># iterate over neighbors within the `threshold` # and record all network vertices as neighbors # if the vertex is not being compared to itself <for_stmt>i,n enumerate(neighbor_query[0])<block_start>neigh=neighbor_query[1][i]<if_stmt>n<ne>neigh<block_start>neighbors[n].append(neigh)<block_end><block_end># call libpysal for `W` instance w=weights.W(neighbors)<line_sep><return>w<block_end><def_stmt>snapobservations self in_data name idvariable=<none> attribute=<false><block_start>"""Snap a point pattern shapefile to a network object. The point pattern is stored in the ``network.pointpattern`` attribute of the network object. Parameters ---------- in_data : {geopandas.GeoDataFrame, str} The input geographic data. Either (1) a path to a shapefile (str); or (2) a ``geopandas.GeoDataFrame``. name : str Name to be assigned to the point dataset. idvariable : str Column name to be used as the ID variable. attribute : bool Defines whether attributes should be extracted. ``True`` for attribute extraction. ``False`` for no attribute extraction. Default is ``False``. Notes ----- See :cite:`doi:10.1111/gean.12211` for a detailed discussion on the modeling consequences of snapping points to spatial networks. Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> streets_file = examples.get_path("streets.shp") >>> ntw = spaghetti.Network(in_data=streets_file) Snap observations to the network. >>> pt_str = "crimes" >>> in_data = examples.get_path(pt_str+".shp") >>> ntw.snapobservations(in_data, pt_str, attribute=True) Isolate the number of points in the dataset. >>> ntw.pointpatterns[pt_str].npoints 287 """<line_sep># create attribute of `pointpattern` by instantiating a # `network.PointPattern` class self.pointpatterns[name]=PointPattern(in_data=in_data idvariable=idvariable attribute=attribute)<line_sep># allocate the point observations to the network self._snap_to_link(self.pointpatterns[name])<block_end><def_stmt>compute_distance_to_vertices self x y arc<block_start>"""Given an observation on a network arc, return the distance to the two vertices that bound that arc. Parameters ---------- x : float The x-coordinate of the snapped point. y : float The y-coordinate of the snapped point. arc : tuple The (vtx0, vtx1) representation of the network arc. Returns ------- d1 : float The distance to vtx0. Always the vertex with the lesser ID. d2 : float The distance to vtx1. Always the vertex with the greater ID. """<line_sep># distance to vertex 1 d1=util.compute_length((x y) self.vertex_coords[arc[0]])<line_sep># distance to vertex 2 d2=util.compute_length((x y) self.vertex_coords[arc[1]])<line_sep><return>d1 d2<block_end><def_stmt>compute_snap_dist self pattern idx<block_start>"""Given an observation snapped to a network arc, calculate the distance from the original location to the snapped location. Parameters ----------- pattern : spaghetti.PointPattern The point pattern object. 
idx : int The point ID. Returns ------- dist : float The euclidean distance from original location to the snapped location. """<line_sep># set of original (x,y) point coordinates loc=pattern.points[idx]["coordinates"]<line_sep># set of snapped (x,y) point coordinate snp=pattern.snapped_coordinates[idx]<line_sep># distance from the original location to # the snapped location along the network dist=util.compute_length(loc snp)<line_sep><return>dist<block_end><def_stmt>_snap_to_link self pointpattern<block_start>"""Used internally to snap point observations to network arcs. Parameters ----------- pointpattern : spaghetti.PointPattern The point pattern object. Returns ------- obs_to_arc : dict Dictionary with arcs as keys and lists of points as values. arc_to_obs : dict Dictionary with point IDs as keys and arc tuples as values. dist_to_vertex : dict Dictionary with point IDs as keys and values as dictionaries with keys for vertex IDs and values as distances from point to vertex. dist_snapped : dict Dictionary with point IDs as keys and distance from point to the network arc that it is snapped. """<line_sep># instantiate observations snapped coordinates lookup pointpattern.snapped_coordinates={}<line_sep># record throw-away arcs (pysal.cg.Chain) enumerator arcs_=[]<line_sep># snapped(point)-to-arc lookup s2a={}<line_sep># iterate over network arc IDs <for_stmt>arc self.arcs# record the start and end of the arc <block_start>head=self.vertex_coords[arc[0]]<line_sep>tail=self.vertex_coords[arc[1]]<line_sep># create a pysal.cg.Chain object of the arc # and add it to the arcs enumerator arcs_.append(util._chain_constr(<none> [head tail]))<line_sep># add the arc into the snapped(point)-to-arc lookup s2a[(head tail)]=arc<block_end># instantiate crosswalks points={}# point ID to coordinates lookup obs_to_arc={}# observations to arcs lookup dist_to_vertex={}# distance to vertices lookup dist_snapped={}# snapped distance lookup # fetch and records point coordinates keyed by ID <for_stmt>point_idx,point pointpattern.points.items()<block_start>points[point_idx]=point["coordinates"]<block_end># snap point observations to the network snapped=util.snap_points_to_links(points arcs_)<line_sep># record obs_to_arc, dist_to_vertex, and dist_snapped # -- iterate over the snapped observation points <for_stmt>point_idx,snap_info snapped.items()# fetch the x and y coordinate <block_start>x,y=snap_info[1].tolist()<line_sep># look up the arc from snapped(point)-to-arc arc=s2a[tuple(snap_info[0])]<line_sep># add the arc key to observations to arcs lookup <if_stmt>arc<not><in>obs_to_arc<block_start>obs_to_arc[arc]={}<block_end># add the (x,y) coordinates of the original observation # point location to the observations to arcs lookup obs_to_arc[arc][point_idx]=(x y)<line_sep># add the (x,y) coordinates of the snapped observation # point location to the snapped coordinates lookup pointpattern.snapped_coordinates[point_idx]=(x y)<line_sep># calculate the distance to the left and right vertex # along the network link from the snapped point location d1,d2=self.compute_distance_to_vertices(x y arc)<line_sep># record the distances in the distance to vertices lookup dist_to_vertex[point_idx]={arc[0]:d1 arc[1]:d2}<line_sep># record the snapped distance dist_snapped[point_idx]=self.compute_snap_dist(pointpattern point_idx)<block_end># instantiate observations to network vertex lookup obs_to_vertex=defaultdict(list)<line_sep># iterate over the observations to arcs lookup <for_stmt>k,v obs_to_arc.items()# record the left and right 
vertex ids <block_start>keys=v.keys()<line_sep>obs_to_vertex[k[0]]=keys<line_sep>obs_to_vertex[k[1]]=keys<block_end># iterate over components and assign observations component_to_obs={}<for_stmt>comp,_arcids self.network_component2arc.items()<block_start>component_to_obs[comp]=[]<for_stmt>lk,odict obs_to_arc.items()<block_start><if_stmt>lk<in>_arcids<block_start>component_to_obs[comp].extend(list(odict.keys()))<block_end><block_end><block_end># set crosswalks as attributes of the `pointpattern` class pointpattern.obs_to_arc=obs_to_arc<line_sep>pointpattern.component_to_obs=component_to_obs<line_sep>pointpattern.dist_to_vertex=dist_to_vertex<line_sep>pointpattern.dist_snapped=dist_snapped<line_sep>pointpattern.obs_to_vertex=list(obs_to_vertex)<block_end><def_stmt>count_per_link self obs_on graph=<false><block_start>"""Compute the counts per arc or edge (link). Parameters ---------- obs_on : dict Dictionary of observations on the network. Either in the form ``{(<LINK>):{<POINT_ID>:(<COORDS>)}}`` or ``{<LINK>:[(<COORD>),(<COORD>)]}``. graph : bool Count observations on graph edges (``True``) or network arcs (``False``). Default is ``False``. Returns ------- counts : dict Counts per network link in the form ``{(<LINK>):<COUNT>}``. Examples -------- Note that this passes the ``obs_to_arc`` or ``obs_to_edge`` attribute of a point pattern snapped to the network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations( ... examples.get_path("crimes.shp"), "crimes", attribute=True ... ) >>> counts = ntw.count_per_link( ... ntw.pointpatterns["crimes"].obs_to_arc, graph=False ... ) >>> counts[(140, 142)] 10 >>> s = sum([v for v in list(counts.values())]) >>> s 287 """<line_sep># instantiate observation counts by link lookup counts={}<line_sep># graph-theoretic object of nodes and edges <if_stmt>graph# iterate the links-to-observations lookup <block_start><for_stmt>key,observations obs_on.items()# isolate observation count for the link <block_start>cnt=len(observations)<line_sep># extract link (edges) key <if_stmt>key<in>self.arcs_to_edges.keys()<block_start>key=self.arcs_to_edges[key]<block_end># either add to current count or a dictionary # entry or create new dictionary entry <try_stmt><block_start>counts[key]<augadd>cnt<block_end><except_stmt>KeyError<block_start>counts[key]=cnt<block_end><block_end><block_end># network object of arcs and vertices <else_stmt># simplified version of the above process <block_start><for_stmt>key obs_on.keys()<block_start>counts[key]=len(obs_on[key])<block_end><block_end><return>counts<block_end><def_stmt>_newpoint_coords self arc distance<block_start>"""Used internally to compute new point coordinates during snapping."""<line_sep># extract coordinates for vertex 1 of arc x1=self.vertex_coords[arc[0]][0]<line_sep>y1=self.vertex_coords[arc[0]][1]<line_sep># extract coordinates for vertex 2 of arc x2=self.vertex_coords[arc[1]][0]<line_sep>y2=self.vertex_coords[arc[1]][1]<line_sep># if the network arc is vertical set the (x) coordinate # and proceed to calculating the (y) coordinate <if_stmt>x1<eq>x2<block_start>x0=x1<line_sep># if the vertical direction is positive from # vertex 1 to vertex 2 on the euclidean plane <if_stmt>y1<l>y2<block_start>y0=y1+distance<block_end># if the vertical direction is negative from # vertex 1 to vertex 2 on the euclidean plane # -- this shouldn't happen due to vertex sorting in # -- self._extractnetwork() and 
self.extractgraph() <elif_stmt>y1<g>y2<block_start>y0=y2+distance<block_end># otherwise the link is zero-length # -- this should never happen <else_stmt><block_start>y0=y1<block_end><return>x0 y0<block_end># calculate the slope of the arc, `m` m=(y2-y1)/(x2-x1)<line_sep># if the horizontal direction is negative from # vertex 1 to vertex 2 on the euclidean plane <if_stmt>x1<g>x2<block_start>x0=x1-distance/numpy.sqrt(1+m<power>2)<block_end># if the horizontal direction is positive from # vertex 1 to vertex 2 on the euclidean plane <elif_stmt>x1<l>x2<block_start>x0=x1+distance/numpy.sqrt(1+m<power>2)<block_end># calculate the (y) coordinate y0=m<times>(x0-x1)+y1<line_sep># the new (x,y) coordinates for the snapped observation <return>x0 y0<block_end><def_stmt>simulate_observations self count distribution="uniform"<block_start>"""Generate a simulated point pattern on the network. Parameters ---------- count : int The number of points to create. distribution : str A distribution of random points. Currently, the only supported distribution is uniform. Returns ------- random_pts : dict Keys are the edge tuple. Values are lists of new point coordinates. See also -------- numpy.random.Generator.uniform Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations( ... examples.get_path("crimes.shp"), "crimes", attribute=True ... ) Isolate the number of points in the dataset. >>> npts = ntw.pointpatterns["crimes"].npoints >>> npts 287 Simulate ``npts`` number of points along the network in a `uniform` distribution. >>> sim = ntw.simulate_observations(npts) >>> isinstance(sim, spaghetti.network.SimulatedPointPattern) True >>> sim.npoints 287 """<line_sep># instantiate an empty `SimulatedPointPattern()` simpts=SimulatedPointPattern()<line_sep># record throw-away arcs enumerator arcs_=[]<line_sep># create array and fill each entry as length of network arc lengths=numpy.zeros(len(self.arc_lengths))<for_stmt>i,key enumerate(self.arc_lengths.keys())<block_start>arcs_.append(key)<line_sep>lengths[i]=self.arc_lengths[key]<block_end># cumulative network length stops=numpy.cumsum(lengths)<line_sep>cumlen=stops[-1]<line_sep># create lengths with a uniform distribution <if_stmt>distribution.lower()<eq>"uniform"<block_start>nrandompts=numpy.random.uniform(0 cumlen size=(count ))<block_end><else_stmt><block_start>msg="%s distribution not currently supported."%distribution<line_sep><raise>RuntimeError(msg)<block_end># iterate over random distances created above <for_stmt>i,r enumerate(nrandompts)# take the first element of the index array (arc ID) where the # random distance is greater than that of its value in `stops` <block_start>idx=numpy.where(r<l>stops)[0][0]<line_sep># assign the simulated point to the arc assignment_arc=arcs_[idx]<line_sep># calculate and set the distance from the arc start distance_from_start=stops[idx]-r<line_sep># populate the coordinates dict x0,y0=self._newpoint_coords(assignment_arc distance_from_start)<line_sep># record the snapped coordinates and associated vertices simpts.snapped_coordinates[i]=(x0 y0)<line_sep>simpts.obs_to_vertex[assignment_arc[0]].append(i)<line_sep>simpts.obs_to_vertex[assignment_arc[1]].append(i)<line_sep># calculate and set the distance from the arc end distance_from_end=self.arc_lengths[arcs_[idx]]-distance_from_start<line_sep># populate the distances to vertices 
simpts.dist_to_vertex[i]={assignment_arc[0]:distance_from_start assignment_arc[1]:distance_from_end }<line_sep># set snapped coordinates and point count attributes simpts.points=simpts.snapped_coordinates<line_sep>simpts.npoints=len(simpts.points)<block_end><return>simpts<block_end><def_stmt>enum_links_vertex self v0<block_start>"""Returns the arcs (links) adjacent to vertices. Parameters ----------- v0 : int The vertex ID. Returns ------- links : list List of tuple arcs adjacent to the vertex. Examples -------- Create an instance of a network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Enumerate the links/arcs that are adjacent to vertex ``24``. >>> ntw.enum_links_vertex(24) [(24, 48), (24, 25), (24, 26)] """<line_sep># instantiate links list links=[]<line_sep>neighbor_vertices=self.adjacencylist[v0]<line_sep># enumerate links associated with the current vertex <for_stmt>n neighbor_vertices<block_start>links.append(tuple(sorted([n v0])))<block_end><return>links<block_end><def_stmt>full_distance_matrix self n_processes gen_tree=<false><block_start>"""All vertex-to-vertex distances on a network. This method is called from within ``allneighbordistances()``, ``nearestneighbordistances()``, and ``distancebandweights()``. Parameters ----------- n_processes : int Specify the number of cores to utilize. Default is 1 core. Use ``"all"`` to request all available cores. Specify the exact number of cores with an integer. gen_tree : bool Rebuild shortest path ``True``, or skip ``False``. Default is ``False``. Notes ----- Based on :cite:`Dijkstra1959a` and :cite:`doi:10.1002/9781119967101.ch3`. """<line_sep># create an empty matrix which will store shortest path distance nvtx=len(self.vertex_list)<line_sep>self.distance_matrix=numpy.empty((nvtx nvtx))<line_sep># create `network_trees` attribute that stores # all network path trees (if desired) self.network_trees={}<line_sep># single-core processing <if_stmt>n_processes<eq>1# iterate over each network vertex <block_start><for_stmt>vtx self.vertex_list# calculate the shortest path and preceding # vertices for traversal route <block_start>distance,pred=util.dijkstra(self vtx)<line_sep>pred=numpy.array(pred)<line_sep># generate the shortest path tree <if_stmt>gen_tree<block_start>tree=util.generatetree(pred)<block_end><else_stmt><block_start>tree=<none><block_end># populate distances and paths self.distance_matrix[vtx]=distance<line_sep>self.network_trees[vtx]=tree<block_end><block_end># multiprocessing <else_stmt># set up multiprocessing schema <block_start><import_stmt>multiprocessing<as>mp<import_from_stmt>itertools repeat<if_stmt>n_processes<eq>"all"<block_start>cores=mp.cpu_count()<block_end><else_stmt><block_start>cores=n_processes<block_end>p=mp.Pool(processes=cores)<line_sep># calculate the shortest path and preceding # vertices for traversal route by mapping each process distance_pred=p.map(util.dijkstra_mp zip(repeat(self) self.vertex_list))<line_sep># set range of iterations iterations=range(len(distance_pred))<line_sep># fill shortest paths distance=[distance_pred[itr][0]<for>itr iterations]<line_sep># fill preceding vertices pred=numpy.array([distance_pred[itr][1]<for>itr iterations])<line_sep># iterate of network vertices and generate # the shortest path tree for each <for_stmt>vtx self.vertex_list<block_start><if_stmt>gen_tree<block_start>tree=util.generatetree(pred[vtx])<block_end><else_stmt><block_start>tree=<none><block_end># populate distances and paths 
self.distance_matrix[vtx]=distance[vtx]<line_sep>self.network_trees[vtx]=tree<block_end><block_end><block_end><def_stmt>allneighbordistances self sourcepattern destpattern=<none> fill_diagonal=<none> n_processes=1 gen_tree=<false> snap_dist=<false> <block_start>"""Compute either all distances between :math:`i` and :math:`j` in a single point pattern or all distances between each :math:`i` from a source pattern and all :math:`j` from a destination pattern. Parameters ---------- sourcepattern : {str, spaghetti.PointPattern} The key of a point pattern snapped to the network or the full ``spaghetti.PointPattern`` object. destpattern : str (Optional) The key of a point pattern snapped to the network or the full ``spaghetti.PointPattern`` object. fill_diagonal : {float, int} (Optional) Fill the diagonal of the cost matrix. Default is ``None`` and will populate the diagonal with ``numpy.nan``. Do not declare a ``destpattern`` for a custom ``fill_diagonal``. n_processes : {int, str} Specify the number of cores to utilize. Default is 1 core. Use ``"all"`` to request all available cores. Specify the exact number of cores with an integer. gen_tree : bool Rebuild shortest path ``True``, or skip ``False``. Default is ``False``. snap_dist : bool Flag as ``True`` to include the distance from the original location to the snapped location along the network. Default is ``False``. Returns ------- nearest : numpy.ndarray An array of shape (n,m) storing distances between all source and destination points. tree_nearest : dict Nearest network node to point pattern vertex shortest path lookup. The values of the dictionary are a tuple of the nearest source vertex and the nearest destination vertex to query the lookup tree. If two observations are snapped to the same network arc a flag of -.1 is set for both the source and destination network vertex indicating the same arc is used while also raising an ``IndexError`` when rebuilding the path. Examples -------- Create a network instance. >>> import spaghetti >>> from libpysal import examples >>> import numpy >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations( ... examples.get_path("crimes.shp"), "crimes", attribute=True ... ) Calculate all distances between observations in the ``crimes`` dataset. >>> s2s_dist = ntw.allneighbordistances("crimes") If calculating a ``type-a`` to ``type-a`` distance matrix the distance between an observation and itself is ``nan`` and the distance between one observation and another will be positive value. >>> s2s_dist[0,0], s2s_dist[1,0] (nan, 3105.189475447081) If calculating a ``type-a`` to ``type-b`` distance matrix the distance between all observations will likely be positive values, may be zero (or approximately zero), but will never be negative. >>> ntw.snapobservations( ... examples.get_path("schools.shp"), "schools", attribute=False ... ) >>> s2d_dist = ntw.allneighbordistances("crimes", destpattern="schools") >>> numpy.round((s2d_dist[0,0], s2d_dist[1,0]), 5) array([4520.72354, 6340.42297]) Shortest paths can also be reconstructed when desired by setting the ``gen_tree`` keyword argument to ``True``. Here it is shown that the shortest path between school ``6`` and school ``7`` flows along network arcs through network vertices ``173`` and ``64``. The ``ntw.network_trees`` attribute may then be queried for the network elements comprising that path. 
>>> d2d_dist, tree = ntw.allneighbordistances("schools", gen_tree=True) >>> tree[(6, 7)] (173, 64) """<line_sep># calculate the network vertex to vertex distance matrix # if it is not already an attribute <if_stmt><not>hasattr(self "distance_matrix")<block_start>self.full_distance_matrix(n_processes gen_tree=gen_tree)<block_end># set the source and destination observation point patterns <if_stmt>type(sourcepattern)<is>str<block_start>sourcepattern=self.pointpatterns[sourcepattern]<if_stmt>destpattern<block_start>destpattern=self.pointpatterns[destpattern]<block_end><block_end># source pattern setup # set local copy of source pattern index src_indices=list(sourcepattern.points.keys())<line_sep># set local copy of source distance to vertex lookup src_d2v=copy.deepcopy(sourcepattern.dist_to_vertex)<line_sep># source point count nsource_pts=len(src_indices)<line_sep># create source point to network vertex lookup src_vertices={}<for_stmt>s src_indices<block_start>v1,v2=src_d2v[s].keys()<line_sep>src_vertices[s]=(v1 v2)<block_end># destination pattern setup # if only a source pattern is specified, also set it as # the destination pattern symmetric=<false><if_stmt>destpattern<is><none><block_start>symmetric=<true><line_sep>destpattern=sourcepattern<block_end># set local copy of destination pattern index dest_indices=list(destpattern.points.keys())<line_sep># set local copy of destination distance to vertex lookup dst_d2v=copy.deepcopy(destpattern.dist_to_vertex)<line_sep># destination point count ndest_pts=len(dest_indices)<line_sep># create `deepcopy` of destination points to # consider for searching dest_searchpts=copy.deepcopy(dest_indices)<line_sep># create destination point to network vertex lookup dest_vertices={}<for_stmt>s dest_indices<block_start>v1,v2=dst_d2v[s].keys()<line_sep>dest_vertices[s]=(v1 v2)<block_end># add snapping distance to each pointpattern <if_stmt>snap_dist# declare both point patterns and both # distance to vertex lookup in single lists <block_start>patterns=[sourcepattern destpattern]<line_sep>dist_copies=[src_d2v dst_d2v]<line_sep># iterate over each point pattern <for_stmt>elm,pp enumerate(patterns)# extract associated vertex distances <block_start><for_stmt>pidx,dists_dict dist_copies[elm].items()# add snapped distance to each point <block_start><for_stmt>vidx,vdist dists_dict.items()<block_start>dists_dict[vidx]=vdist+pp.dist_snapped[pidx]<block_end><block_end><block_end><block_end># output setup # create empty source x destination array # and fill with infinity values nearest=numpy.empty((nsource_pts ndest_pts))<line_sep>nearest[:]=numpy.inf<line_sep># create empty dictionary to store path trees tree_nearest={}<line_sep># iterate over each point in sources <for_stmt>p1 src_indices# get the source vertices and dist to source vertices <block_start>source1,source2=src_vertices[p1]<line_sep>set1=set(src_vertices[p1])<line_sep># distance from source vertex1 to point and # distance from source vertex2 to point sdist1,sdist2=src_d2v[p1].values()<if_stmt>symmetric# only compute the upper triangle if symmetric <block_start>dest_searchpts.remove(p1)<block_end># iterate over each point remaining in destinations <for_stmt>p2 dest_searchpts# get the destination vertices and # dist to destination vertices <block_start>dest1,dest2=dest_vertices[p2]<line_sep>set2=set(dest_vertices[p2])<line_sep># when the observations are snapped to the same arc <if_stmt>set1<eq>set2# calculate only the length between points along # that arc 
<block_start>x1,y1=sourcepattern.snapped_coordinates[p1]<line_sep>x2,y2=destpattern.snapped_coordinates[p2]<line_sep>computed_length=util.compute_length((x1 y1) (x2 y2))<line_sep>nearest[p1 p2]=computed_length<line_sep># set the nearest network vertices to a flag of -.1 # indicating the same arc is used while also raising # an indexing error when rebuilding the path tree_nearest[p1 p2]=SAME_SEGMENT<block_end># otherwise lookup distance between the source and # destination vertex <else_stmt># distance from destination vertex1 to point and # distance from destination vertex2 to point <block_start>ddist1,ddist2=dst_d2v[p2].values()<line_sep># set the four possible combinations of # source to destination shortest path traversal d11=self.distance_matrix[source1][dest1]<line_sep>d21=self.distance_matrix[source2][dest1]<line_sep>d12=self.distance_matrix[source1][dest2]<line_sep>d22=self.distance_matrix[source2][dest2]<line_sep># find the shortest distance from the path passing # through each of the two origin vertices to the # first destination vertex sd_1=d11+sdist1<line_sep>sd_21=d21+sdist2<line_sep>sp_combo1=source1 dest1<if_stmt>sd_1<g>sd_21<block_start>sd_1=sd_21<line_sep>sp_combo1=source2 dest1<block_end># now add the point to vertex1 distance on # the destination arc len_1=sd_1+ddist1<line_sep># repeat the prior but now for the paths entering # at the second vertex of the second arc sd_2=d12+sdist1<line_sep>sd_22=d22+sdist2<line_sep>sp_combo2=source1 dest2<if_stmt>sd_2<g>sd_22<block_start>sd_2=sd_22<line_sep>sp_combo2=source2 dest2<block_end>len_2=sd_2+ddist2<line_sep># now find the shortest distance path between point # 1 on arc 1 and point 2 on arc 2, and assign sp_12=len_1<line_sep>s_vertex,d_vertex=sp_combo1<if_stmt>len_1<g>len_2<block_start>sp_12=len_2<line_sep>s_vertex,d_vertex=sp_combo2<block_end># set distance and path tree nearest[p1 p2]=sp_12<line_sep>tree_nearest[p1 p2]=(s_vertex d_vertex)<block_end><if_stmt>symmetric# mirror the upper and lower triangle # when symmetric <block_start>nearest[p2 p1]=nearest[p1 p2]<block_end><block_end><block_end># populate the main diagonal when symmetric <if_stmt>symmetric# fill the matrix diagonal with NaN values if no fill # value is specified <block_start><if_stmt>fill_diagonal<is><none><block_start>numpy.fill_diagonal(nearest numpy.nan)<block_end># otherwise fill with specified value <else_stmt><block_start>numpy.fill_diagonal(nearest fill_diagonal)<block_end><block_end># if the nearest path tree is desired return it along # with the cost matrix <if_stmt>gen_tree<block_start><return>nearest tree_nearest<block_end><else_stmt><block_start><return>nearest<block_end><block_end><def_stmt>nearestneighbordistances self sourcepattern destpattern=<none> n_processes=1 gen_tree=<false> all_dists=<none> snap_dist=<false> keep_zero_dist=<true> <block_start>"""Compute the interpattern nearest neighbor distances or the intrapattern nearest neighbor distances between a source pattern and a destination pattern. Parameters ---------- sourcepattern : str The key of a point pattern snapped to the network. destpattern : str (Optional) The key of a point pattern snapped to the network. n_processes : {int, str} Specify the number of cores to utilize. Default is 1 core. Use ``"all"`` to request all available cores. Specify the exact number of cores with an integer. gen_tree : bool Rebuild shortest path ``True``, or skip ``False``. Default is ``False``. all_dists : numpy.ndarray An array of shape :math:`(n,n)` storing distances between all points. 
snap_dist : bool Flag as ``True`` to include the distance from the original location to the snapped location along the network. Default is ``False``. keep_zero_dist : bool Include zero values in minimum distance ``True`` or exclude ``False``. Default is ``True``. If the source pattern is the same as the destination pattern the diagonal is filled with ``numpy.nan``. Returns ------- nearest : dict Nearest neighbor distances keyed by the source point ID with the value as a tuple of lists containing nearest destination point ID(s) and distance. Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations(examples.get_path("crimes.shp"), "crimes") Fetch nearest neighbor distances while (potentially) keeping neighbors that have been geocoded directly on top of each other. Here it is demonstrated that observation ``11`` has two neighbors (``18`` and ``19``) at an exactly equal distance. However, observation ``18`` is shown to have only one neighbor (``19``) with no distance between them. >>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=True) >>> nn[11], nn[18] (([18, 19], 165.33982412719126), ([19], 0.0)) This may be remedied by setting the ``keep_zero_dist`` keyword argument to ``False``. With this parameter set, observation ``11`` still has the same neighbor/distance values, but observation ``18`` now has a single nearest neighbor (``11``) with a non-zero, positive distance. >>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=False) >>> nn[11], nn[18] (([18, 19], 165.33982412719126), ([11], 165.33982412719126)) There are valid reasons for both retaining and masking zero distance neighbors. When conducting analysis, thought must be given as to which model more accurately represents the specific scenario. """<line_sep># raise exception if the specified point pattern does not exist <if_stmt>sourcepattern<not><in>self.pointpatterns.keys()<block_start>err_msg="Available point patterns are {}"<line_sep><raise>KeyError(err_msg.format(self.pointpatterns.keys()))<block_end># calculate the network vertex to vertex distance matrix # if it is not already an attribute <if_stmt><not>hasattr(self "distance_matrix")<block_start>self.full_distance_matrix(n_processes gen_tree=gen_tree)<block_end># determine if the source and destination patterns are equal symmetric=sourcepattern<ne>destpattern<line_sep># (for source-to-source patterns) if zero-distance neighbors are # desired, keep the diagonal as NaN and take the minimum # distance neighbor(s), which may include zero distance # neighbors. fill_diagonal=<none><if_stmt><not>keep_zero_dist<and>symmetric# (for source-to-source patterns) if zero-distance neighbors # should be ignored, convert the diagonal to 0.0 and take # the minimum distance neighbor(s) that is/are not 0.0 # distance. 
<block_start>fill_diagonal=0.0<block_end># set the source and destination observation point patterns sourcepattern=self.pointpatterns[sourcepattern]<if_stmt>destpattern<block_start>destpattern=self.pointpatterns[destpattern]<block_end># if the full source to destination is not calculated, # do that at this time <if_stmt>all_dists<is><none><block_start>all_dists=self.allneighbordistances(sourcepattern destpattern=destpattern fill_diagonal=fill_diagonal n_processes=n_processes gen_tree=gen_tree snap_dist=snap_dist )<block_end># create empty nearest neighbors lookup nearest={}<line_sep># iterate over each source point <for_stmt>source_index sourcepattern.points.keys()# this considers all zero-distance neighbors <block_start><if_stmt>keep_zero_dist<and>symmetric<block_start>val=numpy.nanmin(all_dists[source_index :])<block_end># this does not consider zero-distance neighbors <else_stmt><block_start>val=numpy.min(all_dists[source_index :][numpy.nonzero(all_dists[source_index :])])<block_end># nearest destination (may be more than one if # observations are equal distances away) dest_idxs=numpy.where(all_dists[source_index :]<eq>val)[0].tolist()<line_sep># set nearest destination point(s) and distance nearest[source_index]=(dest_idxs val)<block_end><return>nearest<block_end><def_stmt>shortest_paths self tree pp_orig pp_dest=<none> n_processes=1<block_start>"""Return the shortest paths between observation points as ``libpysal.cg.Chain`` objects. Parameters ---------- tree : dict See ``tree_nearest`` in ``spaghetti.Network.allneighbordistances()``. pp_orig : str Origin point pattern for shortest paths. See ``name`` in ``spaghetti.Network.snapobservations()``. pp_dest : str Destination point pattern for shortest paths. See ``name`` in ``spaghetti.Network.snapobservations()``. Defaults to ``pp_orig`` if not declared. n_processes : int See ``n_processes`` in ``spaghetti.Network.full_distance_matrix()``. Returns ------- paths : list The shortest paths between observations as geometric objects. Each element of the list is a list where the first element is an origin-destination pair tuple and the second element is a ``libpysal.cg.Chain``. Raises ------ AttributeError This exception is raised when an attempt to extract shortest path geometries is being made but the ``network_trees`` attribute does not exist within the network object. Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations(examples.get_path("schools.shp"), "schools") Create shortest path trees between observations. >>> _, tree = ntw.allneighbordistances("schools", gen_tree=True) Generate geometric objects from trees. >>> paths = ntw.shortest_paths(tree, "schools") Extract the first path, which is between observations ``0`` and ``1``. >>> path = paths[0] >>> path[0] (0, 1) There are ``n`` vertices in the path between observations ``0`` and ``1``. >>> n = len(path[1].vertices) >>> n 10 """<line_sep># build the network trees object if it is not already an attribute <if_stmt><not>hasattr(self "network_trees")<block_start>msg="The 'network_trees' attribute has not been created. "<line_sep>msg<augadd>"Rerun 'spaghetti.Network.allneighbordistances()' "<line_sep>msg<augadd>"with the 'gen_tree' parameter set to 'True'."<line_sep><raise>AttributeError(msg)<block_end># isolate network attributes pp_orig=self.pointpatterns[pp_orig]<if_stmt>pp_dest<block_start>pp_dest=self.pointpatterns[pp_dest]<block_end><else_stmt><block_start>pp_dest=pp_orig<block_end>vtx_coords=self.vertex_coords<line_sep>net_trees=self.network_trees<line_sep># instantiate a list to store paths paths=[]<line_sep># iterate over each path in the tree <for_stmt>idx,((obs0 obs1) (v0 v1)) enumerate(tree.items())# if the observations share the same segment # create a partial segment path <block_start><if_stmt>(v0 v1)<eq>SAME_SEGMENT# isolate the snapped coordinates and put in a list <block_start>partial_segment_verts=[cg.Point(pp_orig.snapped_coordinates[obs0]) cg.Point(pp_dest.snapped_coordinates[obs1]) ]<line_sep>path=partial_segment_verts<block_end><else_stmt># source and destination network vertices <block_start>svtx,dvtx=tree[obs0 obs1]<line_sep># path passes through these nodes # (source and destination inclusive) thru_nodes=net_trees[svtx][dvtx][::-1]+[dvtx]<line_sep># full-length network segments along path full_segs_path=[]<line_sep>iter_limit=len(thru_nodes)-1<for_stmt>_idx,item enumerate(islice(thru_nodes iter_limit))<block_start>full_segs_path.append((item thru_nodes[_idx+1]))<block_end># construct the full arc segment geometries full_segments=[]<for_stmt>fsp full_segs_path<block_start>full_segments.append(util._chain_constr(vtx_coords fsp))<block_end># unpack the vertices containers segm_verts=[v<for>fs full_segments<for>v fs.vertices]<line_sep># remove duplicate vertices <for_stmt>idx,v enumerate(segm_verts)<block_start><try_stmt><block_start><if_stmt>v<eq>segm_verts[idx+1]<block_start>segm_verts.remove(v)<block_end><block_end><except_stmt>IndexError<as>e<block_start><if_stmt>e.args[0]<eq>"list index out of range"<block_start><continue><block_end><else_stmt><block_start><raise><block_end><block_end><block_end># partial-length network segments along path partial_segment_verts=[cg.Point(pp_orig.snapped_coordinates[obs0]) cg.Point(pp_dest.snapped_coordinates[obs1]) ]<line_sep># combine the full and partial segments into a single list first_vtx,last_vtx=partial_segment_verts<line_sep>path=[first_vtx]+segm_verts+[last_vtx]<block_end># populate the ``paths`` list paths.append([(obs0 obs1) util._chain_constr(<none> path)])<block_end><return>paths<block_end><def_stmt>split_arcs self split_param split_by="distance" w_components=<true><block_start>"""Split all network arcs at either a fixed distance or fixed count. Parameters ----------- split_param : {int, float} Either the number of desired resultant split arcs or the distance at which arcs are split. split_by : str Either ``'distance'`` or ``'count'``. Default is ``'distance'``. w_components : bool Set to ``False`` to not record connected components from a ``libpysal.weights.W`` object. Default is ``True``. Returns ------- split_network : spaghetti.Network A newly instantiated ``spaghetti.Network`` object. Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Split the network into segments of 200 distance units in length (`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.). 
This will include "remainder" segments unless the network is comprised of arcs with lengths exactly divisible by ``distance``. >>> n200 = ntw.split_arcs(200.0) >>> len(n200.arcs) 688 The number of arcs within the new object can be accessed via the weights object, as well. These counts will be equal. >>> len(n200.arcs) == n200.w_network.n True Neighboring arcs can also be queried through the weight object. >>> n200.w_network.neighbors[72,392] [(71, 72), (72, 252), (72, 391), (392, 393)] Network arcs can also be split by a specified number of divisions with the ``split_by`` keyword set to ``'count'``, which is ``'distance'`` by default. For example, each arc can be split into 2 equal parts. >>> n2 = ntw.split_arcs(2, split_by="count") >>> len(n2.arcs) 606 """<line_sep># catch invalid split types split_by=split_by.lower()<line_sep>valid_split_types=["distance" "count"]<if_stmt>split_by<not><in>valid_split_types<block_start>msg=f"'{split_by}' is not a valid value for 'split_by'. "<line_sep>msg<augadd>f"Valid arguments include: {valid_split_types}."<line_sep><raise>ValueError(msg)<block_end># catch invalid count params <if_stmt>split_by<eq>"count"<block_start><if_stmt>split_param<le>1<block_start>msg="Splitting arcs by 1 or less is not possible. "<line_sep>msg<augadd>f"Currently 'split_param' is set to {split_param}."<line_sep><raise>ValueError(msg)<block_end>split_integer=int(split_param)<if_stmt>split_param<ne>split_integer<block_start>msg="Network arcs must split by an integer. "<line_sep>msg<augadd>f"Currently 'split_param' is set to {split_param}."<line_sep><raise>TypeError(msg)<block_end><block_end># convert coordinates for integers if possible # e.g., (1.0, 0.5) --> (1, 0.5) int_coord=<lambda>c:int(c)<if>(type(c)<eq>float<and>c.is_integer())<else>c<line_sep># create new shell network instance split_network=Network()<line_sep># duplicate input network attributes split_network.adjacencylist=copy.deepcopy(self.adjacencylist)<line_sep>split_network.arc_lengths=copy.deepcopy(self.arc_lengths)<line_sep>split_network.arcs=copy.deepcopy(self.arcs)<line_sep>split_network.vertex_coords=copy.deepcopy(self.vertex_coords)<line_sep>split_network.vertex_list=copy.deepcopy(self.vertex_list)<line_sep>split_network.vertices=copy.deepcopy(self.vertices)<line_sep>split_network.pointpatterns=copy.deepcopy(self.pointpatterns)<line_sep>split_network.in_data=self.in_data<line_sep># set vertex ID to start iterations current_vertex_id=max(self.vertices.values())<line_sep># instantiate sets for newly created network arcs and # input network arcs to remove new_arcs=set()<line_sep>remove_arcs=set()<line_sep># iterate over all network arcs <for_stmt>arc split_network.arcs# fetch network arc length <block_start>length=split_network.arc_lengths[arc]<line_sep># set initial segmentation interval <if_stmt>split_by<eq>"distance"<block_start>interval=split_param<block_end><else_stmt><block_start>interval=length/float(split_param)<block_end># initialize arc new arc length at zero totallength=0<line_sep># initialize the current vertex and ending vertex currentstart,end_vertex=arc[0] arc[1]<line_sep># determine direction of arc vertices csx,csy=split_network.vertex_coords[currentstart]<line_sep>evx,evy=split_network.vertex_coords[end_vertex]<if_stmt>csy<g>evy<and>csx<eq>evx<block_start>currentstart,end_vertex=end_vertex currentstart<block_end># if the arc will be split remove the current # arc from the adjacency list <if_stmt>interval<l>length# remove old arc adjacency information 
<block_start>split_network.adjacencylist[currentstart].remove(end_vertex)<line_sep>split_network.adjacencylist[end_vertex].remove(currentstart)<line_sep># remove old arc length information split_network.arc_lengths.pop(arc <none>)<line_sep># add old arc to set of arcs to remove remove_arcs.add(arc)<block_end># if the arc will not be split, do nothing and continue <else_stmt><block_start><continue><block_end># traverse the length of the arc <while_stmt>totallength<l>length# once an arc can not be split further <block_start><if_stmt>totallength+interval<ge>length# record the ending vertex <block_start>currentstop=end_vertex<line_sep># set the length remainder interval=length-totallength<line_sep># full old length reached totallength=length<block_end><else_stmt># set the current vertex ID <block_start>current_vertex_id<augadd>1<line_sep># set the current stopping ID currentstop=current_vertex_id<line_sep># add the interval distance to the traversed length totallength<augadd>interval<line_sep># compute the new vertex coordinate newx,newy=self._newpoint_coords(arc totallength)<line_sep>new_vertex=(int_coord(newx) int_coord(newy))<line_sep># update the vertex and coordinate info if needed <if_stmt>new_vertex<not><in>split_network.vertices.keys()<block_start>split_network.vertices[new_vertex]=currentstop<line_sep>split_network.vertex_coords[currentstop]=new_vertex<line_sep>split_network.vertex_list.append(currentstop)<block_end><else_stmt># retrieve vertex ID if coordinate already exists <block_start>current_vertex_id<augsub>1<line_sep>currentstop=split_network.vertices[new_vertex]<block_end><block_end># update the new network adjacency list split_network.adjacencylist[currentstart].append(currentstop)<line_sep>split_network.adjacencylist[currentstop].append(currentstart)<line_sep># add the new arc to the arc dictionary # iterating over this so we need to add after iterating _new_arc=tuple(sorted([currentstart currentstop]))<line_sep>new_arcs.add(_new_arc)<line_sep># set the length of the arc split_network.arc_lengths[_new_arc]=interval<line_sep># increment the starting vertex to the stopping vertex currentstart=currentstop<block_end><block_end># add the newly created arcs to the network and remove the old arcs split_network.arcs=set(split_network.arcs)<line_sep>split_network.arcs.update(new_arcs)<line_sep>split_network.arcs.difference_update(remove_arcs)<line_sep>split_network.arcs=sorted(list(split_network.arcs))<line_sep># extract connected components <if_stmt>w_components# extract contiguity weights from libpysal <block_start>split_network.w_network=split_network.contiguityweights(graph=<false> from_split=<true>)<line_sep># identify connected components from the `w_network` split_network.identify_components(split_network.w_network graph=<false>)<block_end># update the snapped point pattern <for_stmt>instance split_network.pointpatterns.values()<block_start>split_network._snap_to_link(instance)<block_end><return>split_network<block_end><def_stmt>GlobalAutoK self pointpattern nsteps=10 permutations=99 threshold=0.5 distribution="uniform" upperbound=<none> <block_start>r"""Compute a global auto :math:`K`-function based on a network constrained cost matrix through `Monte Carlo simulation <https://en.wikipedia.org/wiki/Monte_Carlo_method>`_ according to the formulation adapted from :cite:`doi:10.1002/9780470549094.ch5`. See the **Notes** section for further description. Parameters ---------- pointpattern : spaghetti.PointPattern A ``spaghetti`` point pattern object. 
nsteps : int The number of steps at which the count of the nearest neighbors is computed. Default is ``10``. permutations : int The number of permutations to perform. Default is ``99``. threshold : float The level at which significance is computed. (0.5 would be 97.5% and 2.5%). Default is ``0.5``. distribution : str The distribution from which random points are sampled. Currently, the only supported distribution is ``'uniform'``. upperbound : float The upper bound at which the :math:`K`-function is computed. Defaults to the maximum observed nearest neighbor distance. Returns ------- GlobalAutoK : spaghetti.analysis.GlobalAutoK The global auto :math:`K`-function class instance. Notes ----- The :math:`K`-function can be formulated as: .. math:: \displaystyle K(r)=\frac{\sum^n_{i=1} \#[\hat{A} \in D(a_i, r)]}{n\lambda}, where $n$ is the set cardinality of :math:`A`, :math:`\hat{A}` is the subset of observations in :math:`A` that are within :math:`D` units of distance from :math:`a_i` (each single observation in :math:`A`), and :math:`r` is the range of distance values over which the :math:`K`-function is calculated. The :math:`\lambda` term is the intensity of observations along the network, calculated as: .. math:: \displaystyle \lambda = \frac{n}{\big|N_{arcs}\big|}, where :math:`\big|N_{arcs}\big|` is the summed length of network arcs. The global auto :math:`K`-function measures overall clustering in one set of observations by comparing all intra-set distances over a range of distance buffers :math:`D \in r`. The :math:`K`-function improves upon nearest-neighbor distance measures through the analysis of all neighbor distances. For an explanation on how to interpret the results of the :math:`K`-function see the `Network Spatial Dependence tutorial <https://pysal.org/spaghetti/notebooks/network-spatial-dependence.html>`_. For original implementation see :cite:`Ripley1976` and :cite:`Ripley1977`. For further Network-`K` formulations see :cite:`doi:10.1111/j.1538-4632.2001.tb00448.x`, :cite:`doi:10.1002/9781119967101.ch6`, and :cite:`Baddeley2020`. See also -------- pointpats.K Examples -------- Create a network instance. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp")) Snap observation points onto the network. >>> pt_str = "schools" >>> in_data = examples.get_path(pt_str+".shp") >>> ntw.snapobservations(in_data, pt_str, attribute=True) >>> schools = ntw.pointpatterns[pt_str] Compute a :math:`K`-function from school observations with ``99`` ``permutations`` at ``10`` intervals. >>> kres = ntw.GlobalAutoK(schools, permutations=99, nsteps=10) >>> kres.lowerenvelope.shape[0] 10 """<line_sep># call analysis.GlobalAutoK <return>GlobalAutoK(self pointpattern nsteps=nsteps permutations=permutations threshold=threshold distribution=distribution upperbound=upperbound )<block_end><def_stmt>Moran self pp_name permutations=999 graph=<false><block_start>"""Calculate a Moran's *I* statistic on a set of observations based on network arcs. The Moran’s *I* test statistic allows for the inference of how clustered (or dispersed) a dataset is while considering both attribute values and spatial relationships. A value of closer to +1 indicates absolute clustering while a value of closer to -1 indicates absolute dispersion. Complete spatial randomness takes the value of 0. See the `esda documentation <https://pysal.org/esda/generated/esda.Moran.html#esda.Moran>`_ for in-depth descriptions and tutorials. 
Parameters ---------- pp_name : str The name of the point pattern in question. permutations : int The number of permutations to perform. Default is ``999``. graph : bool Perform the Moran calculation on the graph `W` object (``True``). Default is ``False``, which performs the Moran calculation on the network `W` object. Returns ------- moran : esda.Moran A Moran's *I* statistic object results. y : list The y-axis (counts). Examples -------- Create a network instance. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp")) Snap observation points onto the network. >>> crimes = "crimes" >>> in_data = examples.get_path(crimes+".shp") >>> ntw.snapobservations(in_data, crimes, attribute=True) Compute a Moran's :math:`I` from crime observations. >>> moran_res, _ = ntw.Moran(crimes) >>> round(moran_res.I, 6) 0.005193 Notes ----- See :cite:`moran:_cliff81` and :cite:`esda:_2019` for more details. """<line_sep># set proper weights attribute <if_stmt>graph<block_start>w=self.w_graph<block_end><else_stmt><block_start>w=self.w_network<block_end># Compute the counts pointpat=self.pointpatterns[pp_name]<line_sep>counts=self.count_per_link(pointpat.obs_to_arc graph=graph)<line_sep># Build the y vector y=[counts[i]<if>i<in>counts<else>0.0<for>i w.neighbors]<line_sep># Moran's I moran=esda.moran.Moran(y w permutations=permutations)<line_sep><return>moran y<block_end><def_stmt>savenetwork self filename<block_start>"""Save a network to disk as a binary file. Parameters ---------- filename : str The filename where the network should be saved. This should be a full path or it will be saved in the current directory. Examples -------- Create a network instance. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Save out the network instance. >>> ntw.savenetwork("mynetwork.pkl") """<with_stmt>open(filename "wb")<as>networkout<block_start>pickle.dump(self networkout protocol=2)<block_end><block_end>@staticmethod<def_stmt>loadnetwork filename<block_start>"""Load a network from a binary file saved on disk. Parameters ---------- filename : str The filename where the network is saved. Returns ------- self : spaghetti.Network A pre-computed ``spaghetti`` network object. """<with_stmt>open(filename "rb")<as>networkin<block_start>self=pickle.load(networkin)<block_end><return>self<block_end><block_end><def_stmt>extract_component net component_id weightings=<none><block_start>"""Extract a single component from a network object. Parameters ---------- net : spaghetti.Network Full network object. component_id : int The ID of the desired network component. weightings : {dict, bool} See the ``weightings`` keyword argument in ``spaghetti.Network``. Returns ------- cnet : spaghetti.Network The pruned network containing the component specified in ``component_id``. Notes ----- Point patterns are not reassigned when extracting a component. Therefore, component extraction should be performed prior to snapping any point sets onto the network. Also, if the ``spaghetti.Network`` object has ``distance_matrix`` or ``network_trees`` attributes, they are deleted and must be computed again on the single component. Examples -------- Instantiate a network object. >>> from libpysal import examples >>> import spaghetti >>> snow_net = examples.get_path("Soho_Network.shp") >>> ntw = spaghetti.Network(in_data=snow_net, extractgraph=False) The network is not fully connected. 
>>> ntw.network_fully_connected False Examine the number of network components. >>> ntw.network_n_components 45 Extract the longest component. >>> longest = spaghetti.extract_component(ntw, ntw.network_longest_component) >>> longest.network_n_components 1 >>> longest.network_component_lengths {0: 13508.169276875526} """<def_stmt>_reassign attr cid<block_start>"""Helper for reassigning attributes."""<line_sep># set for each attribute(s) <if_stmt>attr<eq>"_fully_connected"<block_start>_val=[<true><for>objt obj_type]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<eq>"_n_components"<block_start>_val=[1<for>objt obj_type]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<in>["_longest_component" "_largest_component"]<block_start>_val=[cid<for>objt obj_type]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<eq>"vertex_list"# reassigns vertex list + network, graph component vertices <block_start>supp=[objt+"_component_vertices"<for>objt obj_type]<line_sep>_val=[getattr(cnet supp[0])[cid]]<line_sep>_val<augadd>[{cid:getattr(cnet s)[cid]}<for>s supp]<line_sep>attr=[attr]+supp<block_end><elif_stmt>attr<eq>"vertex_coords"# reassigns both vertex_coords and vertices <block_start>supp=getattr(cnet "vertex_list")<line_sep>_val=[{k:v<for>k,v getattr(cnet attr).items()<if>k<in>supp}]<line_sep>_val<augadd>[{v:k<for>k,v _val[0].items()}]<line_sep>attr=[attr "vertices"]<block_end><elif_stmt>attr<eq>"_component_vertex_count"# reassigns both network and graph _component_vertex_count <block_start>supp=len(getattr(cnet "vertex_list"))<line_sep>_val=[{cid:supp}<for>objt obj_type]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<eq>"adjacencylist"<block_start>supp_adj=copy.deepcopy(list(getattr(cnet attr).keys()))<line_sep>supp_vtx=getattr(cnet "vertex_list")<line_sep>supp_rmv=[v<for>v supp_adj<if>v<not><in>supp_vtx]<line_sep>[getattr(cnet attr).pop(s)<for>s supp_rmv]<line_sep><return><block_end><elif_stmt>attr<eq>"_component_is_ring"# reassigns both network and graph _component_is_ring <block_start>supp=[getattr(cnet objt+attr)<for>objt obj_type]<line_sep>_val=[{cid:s[cid]}<for>s supp]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<eq>"non_articulation_points"<block_start>supp_vtx=getattr(cnet "vertex_list")<line_sep>_val=[[s<for>s getattr(cnet attr)<if>s<in>supp_vtx]]<line_sep>attr=[attr]<block_end><elif_stmt>attr<eq>"_component2"# reassigns both network and graph _component2 attributes <block_start>supp=[_n+"_component2"+_a]<if_stmt>hasgraph<block_start>supp<augadd>[_g+"_component2"+_e]<block_end>_val=[{cid:getattr(cnet s)[cid]}<for>s supp]<line_sep>attr=supp<block_end><elif_stmt>attr<eq>"arcs"# reassigns both arcs and edges <block_start>c2="_component2"<line_sep>supp=[_n+c2+_a]<if_stmt>hasgraph<block_start>supp<augadd>[_g+c2+_e]<block_end>_val=[getattr(cnet s)[cid]<for>s supp]<line_sep>attr=[attr]<if_stmt>hasgraph<block_start>attr<augadd>["edges"]<block_end><block_end><elif_stmt>attr<eq>"_component_labels"# reassigns both network and graph _component_labels <block_start>supp=[len(getattr(cnet o+"s"))<for>o obj]<line_sep>_val=[numpy.array([cid]<times>s)<for>s supp]<line_sep>attr=[objt+attr<for>objt obj_type]<block_end><elif_stmt>attr<eq>"_component_lengths"# reassigns both network and graph _component_lengths <block_start>supp=[objt+attr<for>objt obj_type]<line_sep>_val=[{cid:getattr(cnet s)[cid]}<for>s supp]<line_sep>attr=supp<block_end><elif_stmt>attr<eq>"_lengths"# reassigns both arc and edge _lengths 
<block_start>supp_name=[o+attr<for>o obj]<line_sep>supp_lens=[getattr(cnet s)<for>s supp_name]<line_sep>supp_link=[getattr(cnet o+"s")<for>o obj]<line_sep>supp_ll=list(zip(supp_lens supp_link))<line_sep>_val=[{k:v<for>k,v l1.items()<if>k<in>l2}<for>l1,l2 supp_ll]<line_sep>attr=supp_name<block_end># reassign attributes <for_stmt>a,av zip(attr _val)<block_start>setattr(cnet a av)<block_end><block_end># provide warning (for now) if the network contains a point pattern <if_stmt>getattr(net "pointpatterns")<block_start>msg="There is a least one point pattern associated with the network."<line_sep>msg<augadd>" Component extraction should be performed prior to snapping"<line_sep>msg<augadd>" point patterns to the network object; failing to do so may"<line_sep>msg<augadd>" lead to unexpected results."<line_sep>warnings.warn(msg)<block_end># provide warning (for now) if the network contains a point pattern dm,nt="distance_matrix" "network_trees"<if_stmt>hasattr(net dm)<or>hasattr(net nt)<block_start>msg="Either one or both (%s, %s) attributes"%(dm nt)<line_sep>msg<augadd>" are present and will be deleted. These must be"<line_sep>msg<augadd>" recalculated following component extraction."<line_sep>warnings.warn(msg)<for_stmt>attr [dm nt]<block_start><if_stmt>hasattr(net attr)<block_start>_attr=getattr(net attr)<del_stmt>_attr<block_end><block_end><block_end># make initial copy of the network cnet=copy.deepcopy(net)<line_sep># set labels _n,_a,_g,_e="network" "arc" "graph" "edge"<line_sep>obj_type=[_n]<line_sep>obj=[_a]<line_sep>hasgraph=<false><if_stmt>hasattr(cnet "w_graph")<block_start>obj_type<augadd>[_g]<line_sep>obj<augadd>[_e]<line_sep>hasgraph=<true><block_end># attributes to reassign update_attributes=["_fully_connected" "_n_components" "_longest_component" "_largest_component" "vertex_list" "vertex_coords" "_component_vertex_count" "adjacencylist" "_component_is_ring" "_component2" "arcs" "_component_lengths" "_lengths" "_component_labels" ]<if_stmt>hasgraph<block_start>update_attributes.append("non_articulation_points")<block_end># reassign attributes <for_stmt>attribute update_attributes<block_start>_reassign(attribute component_id)<block_end># recreate spatial weights cnet.w_network=cnet.contiguityweights(graph=<false> weightings=weightings)<if_stmt>hasgraph<block_start>cnet.w_graph=cnet.contiguityweights(graph=<true> weightings=weightings)<block_end><return>cnet<block_end><def_stmt>spanning_tree net method="sort" maximum=<false> silence_warnings=<true><block_start>"""Extract a minimum or maximum spanning tree from a network. Parameters ---------- net : spaghetti.Network Instance of a network object. method : str Method for determining spanning tree. Currently, the only supported method is 'sort', which sorts the network arcs by length prior to building intermediary networks and checking for cycles within the tree/subtrees. Future methods may include linear programming approachs, etc. maximum : bool When ``True`` a maximum spanning tree is created. When ``False`` a minimum spanning tree is created. Default is ``False``. silence_warnings : bool Warn if there is more than one connected component. Default is ``False`` due to the nature of constructing a minimum spanning tree. Returns ------- net : spaghetti.Network Pruned instance of the network object. Notes ----- For in-depth background and details see :cite:`GrahamHell_1985`, :cite:`AhujaRavindraK`, and :cite:`Okabe2012`. 
See also -------- networkx.algorithms.tree.mst scipy.sparse.csgraph.minimum_spanning_tree Examples -------- Create a network instance. >>> from libpysal import cg >>> import spaghetti >>> p00 = cg.Point((0,0)) >>> lines = [cg.Chain([p00, cg.Point((0,3)), cg.Point((4,0)), p00])] >>> ntw = spaghetti.Network(in_data=lines) Extract the minimum spanning tree. >>> minst_net = spaghetti.spanning_tree(ntw) >>> min_len = sum(minst_net.arc_lengths.values()) >>> min_len 7.0 Extract the maximum spanning tree. >>> maxst_net = spaghetti.spanning_tree(ntw, maximum=True) >>> max_len = sum(maxst_net.arc_lengths.values()) >>> max_len 9.0 >>> max_len > min_len True """<line_sep># (un)silence warning weights_kws={"silence_warnings":silence_warnings}<line_sep># do not extract graph object while testing for cycles net_kws={"extractgraph":<false> "weights_kws":weights_kws}<line_sep># if the network has no cycles, it is already a spanning tree <if_stmt>util.network_has_cycle(net.adjacencylist)<block_start><if_stmt>method.lower()<eq>"sort"<block_start>spanning_tree=mst_weighted_sort(net maximum net_kws)<block_end><else_stmt><block_start>msg="'%s' not a valid method for minimum spanning tree creation"<line_sep><raise>ValueError(msg%method)<block_end># instantiate the spanning tree as a network object net=Network(in_data=spanning_tree weights_kws=weights_kws)<block_end><return>net<block_end><def_stmt>mst_weighted_sort net maximum net_kws<block_start>"""Extract a minimum or maximum spanning tree from a network used the length-weighted sort method. Parameters ---------- net : spaghetti.Network See ``spanning_tree()``. maximum : bool See ``spanning_tree()``. net_kws : dict Keywords arguments for instaniating a ``spaghetti.Network``. Returns ------- spanning_tree : list All networks arcs that are members of the spanning tree. Notes ----- This function is based on the method found in Chapter 3 Section 4.3 of :cite:`Okabe2012`. """<line_sep># network arcs dictionary sorted by arc length sort_kws={"key":net.arc_lengths.get "reverse":maximum}<line_sep>sorted_lengths=sorted(net.arc_lengths **sort_kws)<line_sep># the spanning tree is initially empty spanning_tree=[]<line_sep># iterate over each lengths of network arc <while_stmt>sorted_lengths<block_start>_arc=sorted_lengths.pop(0)<line_sep># make a spatial representation of an arc chain_rep=util.chain_constr(net.vertex_coords [_arc])<line_sep># current set of network arcs as libpysal.cg.Chain _chains=spanning_tree+chain_rep<line_sep># current network iteration _ntw=Network(in_data=_chains **net_kws)<line_sep># determine if the network contains a cycle <if_stmt><not>util.network_has_cycle(_ntw.adjacencylist)# If no cycle is present, add the arc to the spanning tree <block_start>spanning_tree.extend(chain_rep)<block_end><block_end><return>spanning_tree<block_end>@requires("geopandas" "shapely")<def_stmt>element_as_gdf net vertices=<false> arcs=<false> pp_name=<none> snapped=<false> routes=<none> id_col="id" geom_col="geometry" <block_start>"""Return a ``geopandas.GeoDataFrame`` of network elements. This can be (a) the vertices of a network; (b) the arcs of a network; (c) both the vertices and arcs of the network; (d) the raw point pattern associated with the network; (e) the snapped point pattern of (d); or (f) the shortest path routes between point observations. Parameters ---------- net : spaghetti.Network A `spaghetti` network object. vertices : bool Extract the network vertices (``True``). Default is ``False``. arcs : bool Extract the network arcs (``True``). 
Default is ``False``. pp_name : str Name of the ``network.PointPattern`` to extract. Default is ``None``. snapped : bool If extracting a ``network.PointPattern``, set to ``True`` for snapped point locations along the network. Default is ``False``. routes : dict See ``paths`` from ``spaghetti.Network.shortest_paths``. Default is ``None``. id_col : str ``geopandas.GeoDataFrame`` column name for IDs. Default is ``"id"``. When extracting routes this creates an (origin, destination) tuple. geom_col : str ``geopandas.GeoDataFrame`` column name for geometry. Default is ``"geometry"``. Raises ------ KeyError In order to extract a ``network.PointPattern`` it must already be a part of the network object. This exception is raised when a ``network.PointPattern`` is being extracted that does not exist within the network object. Returns ------- points : geopandas.GeoDataFrame Network point elements (either vertices or ``network.PointPattern`` points) as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.Point`` objects with an ``"id"`` column and ``"geometry""`` column. If the network object has a ``network_component_vertices`` attribute, then component labels are also added in a column. lines : geopandas.GeoDataFrame Network arc elements as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.LineString`` objects with an ``"id"`` column and ``"geometry"`` column. If the network object has a ``network_component_labels`` attribute, then component labels are also added in a column. paths : geopandas.GeoDataFrame Shortest path routes along network arc elements as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.LineString`` objects with an ``"id"`` (see ``spaghetti.Network.shortest_paths()``) column and ``"geometry"`` column. Notes ----- When both network vertices and arcs are desired, the variable declaration must be in the order: <vertices>, <arcs>. This function requires ``geopandas``. See also -------- geopandas.GeoDataFrame Examples -------- Instantiate a network object. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Extract the network elements (vertices and arcs) as ``geopandas.GeoDataFrame`` objects. >>> vertices_df, arcs_df = spaghetti.element_as_gdf( ... ntw, vertices=True, arcs=True ... ) Examine the first vertex. It is a member of the component labeled ``0``. >>> vertices_df.loc[0] id 0 geometry POINT (728368.04762 877125.89535) comp_label 0 Name: 0, dtype: object Calculate the total length of the network. >>> arcs_df.geometry.length.sum() 104414.09200823458 """<line_sep># shortest path routes between observations <if_stmt>routes<block_start>paths=util._routes_as_gdf(routes id_col geom_col)<line_sep><return>paths<block_end># need vertices place holder to create network segment LineStrings # even if only network edges are desired. 
vertices_for_arcs=<false><if_stmt>arcs<and><not>vertices<block_start>vertices_for_arcs=<true><block_end># vertices/nodes/points <if_stmt>vertices<or>vertices_for_arcs<or>pp_name<block_start>points=util._points_as_gdf(net vertices vertices_for_arcs pp_name snapped id_col=id_col geom_col=geom_col )<line_sep># return points geodataframe if arcs not specified or # if extracting `PointPattern` points <if_stmt><not>arcs<or>pp_name<block_start><return>points<block_end><block_end># arcs arcs=util._arcs_as_gdf(net points id_col=id_col geom_col=geom_col)<if_stmt>vertices_for_arcs<block_start><return>arcs<block_end><else_stmt><block_start><return>points arcs<block_end><block_end><def_stmt>regular_lattice bounds nh nv=<none> exterior=<false><block_start>"""Generate a regular lattice of line segments (`libpysal.cg.Chain objects <https://pysal.org/libpysal/generated/libpysal.cg.Chain.html#libpysal.cg.Chain>`_). Parameters ---------- bounds : {tuple, list} Area bounds in the form - <minx,miny,maxx,maxy>. nh : int The number of internal horizontal lines of the lattice. nv : int The number of internal vertical lines of the lattice. Defaults to ``nh`` if left as None. exterior : bool Flag for including the outer bounding box segments. Default is False. Returns ------- lattice : list The ``libpysal.cg.Chain`` objects forming a regular lattice. Notes ----- The ``nh`` and ``nv`` parameters do not include the external line segments. For example, setting ``nh=3, nv=2, exterior=True`` will result in 5 horizontal line sets and 4 vertical line sets. Examples -------- Create a 5x5 regular lattice with an exterior >>> import spaghetti >>> lattice = spaghetti.regular_lattice((0,0,4,4), 3, exterior=True) >>> lattice[0].vertices [(0.0, 0.0), (1.0, 0.0)] Create a 5x5 regular lattice without an exterior >>> lattice = spaghetti.regular_lattice((0,0,5,5), 3, exterior=False) >>> lattice[-1].vertices [(3.75, 3.75), (3.75, 5.0)] Create a 7x9 regular lattice with an exterior from the bounds of ``streets.shp``. 
>>> path = libpysal.examples.get_path("streets.shp") >>> shp = libpysal.io.open(path) >>> lattice = spaghetti.regular_lattice(shp.bbox, 5, nv=7, exterior=True) >>> lattice[0].vertices [(723414.3683108028, 875929.0396895551), (724286.1381211297, 875929.0396895551)] """<line_sep># check for bounds validity <if_stmt>len(bounds)<ne>4<block_start>bounds_len=len(bounds)<line_sep>msg="The 'bounds' parameter is %s elements "%bounds_len<line_sep>msg<augadd>"but should be exactly 4 - <minx,miny,maxx,maxy>."<line_sep><raise>RuntimeError(msg)<block_end># check for bounds validity <if_stmt><not>nv<block_start>nv=nh<block_end><try_stmt><block_start>nh,nv=int(nh) int(nv)<block_end><except_stmt>TypeError<block_start>nlines_types=type(nh) type(nv)<line_sep>msg="The 'nh' and 'nv' parameters (%s, %s) "%nlines_types<line_sep>msg<augadd>"could not be converted to integers."<line_sep><raise>TypeError(msg)<block_end># bounding box line lengths len_h,len_v=bounds[2]-bounds[0] bounds[3]-bounds[1]<line_sep># horizontal and vertical increments incr_h,incr_v=len_h/float(nh+1) len_v/float(nv+1)<line_sep># define the horizontal and vertical space space_h=[incr_h<times>slot<for>slot range(nv+2)]<line_sep>space_v=[incr_v<times>slot<for>slot range(nh+2)]<line_sep># create vertical and horizontal lines lines_h=util.build_chains(space_h space_v exterior bounds)<line_sep>lines_v=util.build_chains(space_h space_v exterior bounds h=<false>)<line_sep># combine into one list lattice=lines_h+lines_v<line_sep><return>lattice<block_end><class_stmt>PointPattern<block_start>"""A stub point pattern class used to store a point pattern. Note from the original author of ``pysal.network``: This class is monkey patched with network specific attributes when the points are snapped to a network. In the future this class may be replaced with a generic point pattern class. Parameters ---------- in_data : {str, list, tuple, libpysal.cg.Point, geopandas.GeoDataFrame} The input geographic data. Either (1) a path to a shapefile (str); (2) an iterable containing ``libpysal.cg.Point`` objects; (3) a single ``libpysal.cg.Point``; or (4) a ``geopandas.GeoDataFrame``. idvariable : str Field in the shapefile to use as an ID variable. attribute : bool A flag to indicate whether all attributes are tagged to this class (``True``) or excluded (``False``). Default is ``False``. Attributes ---------- points : dict Keys are the point IDs (int). Values are the :math:`(x,y)` coordinates (tuple). npoints : int The number of points. obs_to_arc : dict Keys are arc IDs (tuple). Values are snapped point information (``dict``). Within the snapped point information (``dict``) keys are observation IDs (``int``), and values are snapped coordinates. obs_to_vertex : list List of incident network vertices to snapped observation points converted from a ``default_dict``. Originally in the form of paired left/right nearest network vertices {netvtx1: obs_id1, netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then simplified to a list in the form [netvtx1, netvtx2, netvtx1, netvtx2, ...]. dist_to_vertex : dict Keys are observations IDs (``int``). Values are distance lookup (``dict``). Within distance lookup (``dict``) keys are the two incident vertices of the arc and values are distance to each of those arcs. snapped_coordinates : dict Keys are the point IDs (int). Values are the snapped :math:`(x,y)` coordinates (tuple). snap_dist : bool Flag as ``True`` to include the distance from the original location to the snapped location along the network. Default is ``False``. 
"""<def_stmt>__init__ self in_data=<none> idvariable=<none> attribute=<false># initialize points dictionary and counter <block_start>self.points={}<line_sep>self.npoints=0<line_sep># determine input point data type in_dtype=str(type(in_data)).split("'")[1]<line_sep># flag for points from a shapefile from_shp=<false><line_sep># flag for points as libpysal.cg.Point objects is_libpysal_points=<false><line_sep>supported_iterables=["list" "tuple"]<line_sep># type error message msg="'%s' not supported for point pattern instantiation."<line_sep># set appropriate geometries <if_stmt>in_dtype<eq>"str"<block_start>from_shp=<true><block_end><elif_stmt>in_dtype<in>supported_iterables<block_start>dtype=str(type(in_data[0])).split("'")[1]<if_stmt>dtype<eq>"libpysal.cg.shapes.Point"<block_start>is_libpysal_points=<true><block_end><else_stmt><block_start><raise>TypeError(msg%dtype)<block_end><block_end><elif_stmt>in_dtype<eq>"libpysal.cg.shapes.Point"<block_start>in_data=[in_data]<line_sep>is_libpysal_points=<true><block_end><elif_stmt>in_dtype<eq>"geopandas.geodataframe.GeoDataFrame"<block_start>from_shp=<false><block_end><else_stmt><block_start><raise>TypeError(msg%in_dtype)<block_end># either set native point ID from dataset or create new IDs <if_stmt>idvariable<and><not>is_libpysal_points<block_start>ids=weights.util.get_ids(in_data idvariable)<block_end><else_stmt><block_start>ids=<none><block_end># extract the point geometries <if_stmt><not>is_libpysal_points<block_start><if_stmt>from_shp<block_start>pts=open(in_data)<block_end><else_stmt><block_start>pts_objs=list(in_data.geometry)<line_sep>pts=[cg.shapes.Point((p.x p.y))<for>p pts_objs]<block_end><block_end><else_stmt><block_start>pts=in_data<block_end># fetch attributes if requested <if_stmt>attribute<and><not>is_libpysal_points# open the database file if data is from shapefile <block_start><if_stmt>from_shp<block_start>dbname=os.path.splitext(in_data)[0]+".dbf"<line_sep>db=open(dbname)<block_end># if data is from a GeoDataFrame, drop the geometry column # and declare attribute values as a list of lists <else_stmt><block_start>db=in_data.drop(in_data.geometry.name axis=1).values.tolist()<line_sep>db=[[d]<for>d db]<block_end><block_end><else_stmt><block_start>db=<none><block_end># iterate over all points <for_stmt>i,pt enumerate(pts)# IDs, attributes <block_start><if_stmt>ids<and>db<is><not><none><block_start>self.points[ids[i]]={"coordinates":pt "properties":db[i]}<block_end># IDs, no attributes <elif_stmt>ids<and>db<is><none><block_start>self.points[ids[i]]={"coordinates":pt "properties":<none>}<block_end># no IDs, attributes <elif_stmt><not>ids<and>db<is><not><none><block_start>self.points[i]={"coordinates":pt "properties":db[i]}<block_end># no IDs, no attributes <else_stmt><block_start>self.points[i]={"coordinates":pt "properties":<none>}<block_end><block_end># close the shapefile and database file # if the input data is a .shp <if_stmt>from_shp<block_start>pts.close()<if_stmt>db<block_start>db.close()<block_end><block_end># record number of points self.npoints=len(self.points.keys())<block_end><block_end><class_stmt>SimulatedPointPattern<block_start>"""Note from the original author of ``pysal.network``: Struct style class to mirror the ``PointPattern`` class. If the ``PointPattern`` class has methods, it might make sense to make this a child of that class. This class is not intended to be used by the external user. Attributes ---------- npoints : int The number of points. obs_to_arc : dict Keys are arc IDs (tuple). 
Values are snapped point information (dict). Within the snapped point information (dict) keys are observation IDs (int), and values are snapped coordinates. obs_to_vertex : list List of incident network vertices to snapped observation points converted from a default_dict. Originally in the form of paired left/right nearest network vertices {netvtx1: obs_id1, netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then simplified to a list in the form [netvtx1, netvtx2, netvtx1, netvtx2, ...]. dist_to_vertex : dict Keys are observations IDs (int). Values are distance lookup (dict). Within distance lookup (dict) keys are the two incident vertices of the arc and values are distance to each of those arcs. snapped_coordinates : dict Keys are the point IDs (int). Values are the snapped :math:`(x,y)` coordinates (tuple). snap_dist : bool Flag as ``True`` to include the distance from the original location to the snapped location along the network. Default is ``False``. """<def_stmt>__init__ self# duplicate post-snapping PointPattern class structure <block_start>self.npoints=0<line_sep>self.obs_to_arc={}<line_sep>self.obs_to_vertex=defaultdict(list)<line_sep>self.dist_to_vertex={}<line_sep>self.snapped_coordinates={}<block_end><block_end>
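# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the spaghetti source
# above): a minimal end-to-end run tying together the calls documented in the
# docstrings -- network construction, snapping, nearest-neighbor distances,
# shortest-path trees/geometries, GeoDataFrame export, and the K / Moran
# statistics. It assumes libpysal's bundled example data and the optional
# geopandas/shapely dependencies are installed; the output file name is
# arbitrary.

import spaghetti
from libpysal import examples

# build a network from the example streets shapefile and snap two patterns
ntw = spaghetti.Network(in_data=examples.get_path("streets.shp"))
ntw.snapobservations(examples.get_path("crimes.shp"), "crimes", attribute=True)
ntw.snapobservations(examples.get_path("schools.shp"), "schools", attribute=True)

# nearest-neighbor distances, ignoring co-located (zero-distance) neighbors
nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=False)

# all school-to-school distances plus the shortest-path trees ...
dists, tree = ntw.allneighbordistances("schools", gen_tree=True)
# ... and the corresponding path geometries
paths = ntw.shortest_paths(tree, "schools")

# network elements and routes as GeoDataFrames (requires geopandas)
vertices_df, arcs_df = spaghetti.element_as_gdf(ntw, vertices=True, arcs=True)
paths_df = spaghetti.element_as_gdf(ntw, routes=paths)

# global auto K-function and network-constrained Moran's I
kres = ntw.GlobalAutoK(ntw.pointpatterns["schools"], nsteps=10, permutations=99)
moran_res, counts = ntw.Moran("crimes")

# persist the network and read it back
ntw.savenetwork("mynetwork.pkl")
ntw_again = spaghetti.Network.loadnetwork("mynetwork.pkl")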
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convolutional Neural Network Estimator for MNIST, built with tf.layers."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>os<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>dataset<class_stmt>Model(object)<block_start>"""Class that defines a graph to recognize digits in the MNIST dataset."""<def_stmt>__init__ self data_format<block_start>"""Creates a model for classifying a hand-written digit. Args: data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is typically faster on GPUs while 'channels_last' is typically faster on CPUs. See https://www.tensorflow.org/performance/performance_guide#data_formats """<if_stmt>data_format<eq>'channels_first'<block_start>self._input_shape=[-1 1 28 28]<block_end><else_stmt><block_start><assert_stmt>data_format<eq>'channels_last'<line_sep>self._input_shape=[-1 28 28 1]<block_end>self.conv1=tf.layers.Conv2D(32 5 padding='same' data_format=data_format activation=tf.nn.relu)<line_sep>self.conv2=tf.layers.Conv2D(64 5 padding='same' data_format=data_format activation=tf.nn.relu)<line_sep>self.fc1=tf.layers.Dense(1024 activation=tf.nn.relu)<line_sep>self.fc2=tf.layers.Dense(10)<line_sep>self.dropout=tf.layers.Dropout(0.4)<line_sep>self.max_pool2d=tf.layers.MaxPooling2D((2 2) (2 2) padding='same' data_format=data_format)<block_end><def_stmt>__call__ self inputs training<block_start>"""Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, 10]. 
"""<line_sep>y=tf.reshape(inputs self._input_shape)<line_sep>y=self.conv1(y)<line_sep>y=self.max_pool2d(y)<line_sep>y=self.conv2(y)<line_sep>y=self.max_pool2d(y)<line_sep>y=tf.layers.flatten(y)<line_sep>y=self.fc1(y)<line_sep>y=self.dropout(y training=training)<line_sep><return>self.fc2(y)<block_end><block_end><def_stmt>model_fn features labels mode params<block_start>"""The model_fn argument for creating an Estimator."""<line_sep>model=Model(params['data_format'])<line_sep>image=features<if_stmt>isinstance(image dict)<block_start>image=features['image']<block_end><if_stmt>mode<eq>tf.estimator.ModeKeys.PREDICT<block_start>logits=model(image training=<false>)<line_sep>predictions={'classes':tf.argmax(logits axis=1) 'probabilities':tf.nn.softmax(logits) }<line_sep><return>tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT predictions=predictions export_outputs={'classify':tf.estimator.export.PredictOutput(predictions)})<block_end><if_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start>optimizer=tf.train.AdamOptimizer(learning_rate=1e-4)<line_sep>logits=model(image training=<true>)<line_sep>loss=tf.losses.softmax_cross_entropy(onehot_labels=labels logits=logits)<line_sep>accuracy=tf.metrics.accuracy(labels=tf.argmax(labels axis=1) predictions=tf.argmax(logits axis=1))<line_sep># Name the accuracy tensor 'train_accuracy' to demonstrate the # LoggingTensorHook. tf.identity(accuracy[1] name='train_accuracy')<line_sep>tf.summary.scalar('train_accuracy' accuracy[1])<line_sep><return>tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.TRAIN loss=loss train_op=optimizer.minimize(loss tf.train.get_or_create_global_step()))<block_end><if_stmt>mode<eq>tf.estimator.ModeKeys.EVAL<block_start>logits=model(image training=<false>)<line_sep>loss=tf.losses.softmax_cross_entropy(onehot_labels=labels logits=logits)<line_sep><return>tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL loss=loss eval_metric_ops={'accuracy':tf.metrics.accuracy(labels=tf.argmax(labels axis=1) predictions=tf.argmax(logits axis=1)) })<block_end><block_end><def_stmt>main unused_argv<block_start>data_format=FLAGS.data_format<if_stmt>data_format<is><none><block_start>data_format=('channels_first'<if>tf.test.is_built_with_cuda()<else>'channels_last')<block_end>mnist_classifier=tf.estimator.Estimator(model_fn=model_fn model_dir=FLAGS.model_dir params={'data_format':data_format})<line_sep># Train the model <def_stmt>train_input_fn # When choosing shuffle buffer sizes, larger sizes result in better # randomness, while smaller sizes use less memory. MNIST is a small # enough dataset that we can easily shuffle the full epoch. <block_start>ds=dataset.train(FLAGS.data_dir)<line_sep>ds=ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size).repeat(FLAGS.train_epochs)<line_sep>(images labels)=ds.make_one_shot_iterator().get_next()<line_sep><return>(images labels)<block_end># Set up training hook that logs the training accuracy every 100 steps. 
tensors_to_log={'train_accuracy':'train_accuracy'}<line_sep>logging_hook=tf.train.LoggingTensorHook(tensors=tensors_to_log every_n_iter=100)<line_sep>mnist_classifier.train(input_fn=train_input_fn hooks=[logging_hook])<line_sep># Evaluate the model and print results <def_stmt>eval_input_fn <block_start><return>dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size).make_one_shot_iterator().get_next()<block_end>eval_results=mnist_classifier.evaluate(input_fn=eval_input_fn)<line_sep>print()<line_sep>print('Evaluation results:\n\t%s'%eval_results)<line_sep># Export the model <if_stmt>FLAGS.export_dir<is><not><none><block_start>image=tf.placeholder(tf.float32 [<none> 28 28])<line_sep>input_fn=tf.estimator.export.build_raw_serving_input_receiver_fn({'image':image })<line_sep>mnist_classifier.export_savedmodel(FLAGS.export_dir input_fn)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--batch_size' type=int default=100 help='Number of images to process in a batch')<line_sep>parser.add_argument('--data_dir' type=str default='/tmp/mnist_data' help='Path to directory containing the MNIST dataset')<line_sep>parser.add_argument('--model_dir' type=str default='/tmp/mnist_model' help='The directory where the model will be stored.')<line_sep>parser.add_argument('--train_epochs' type=int default=40 help='Number of epochs to train.')<line_sep>parser.add_argument('--data_format' type=str default=<none> choices=['channels_first' 'channels_last'] help='A flag to override the data format used in the model. channels_first '<concat>'provides a performance boost on GPU but is not always compatible '<concat>'with CPU. If left unspecified, the data format will be chosen '<concat>'automatically based on whether TensorFlow was built for CPU or GPU.')<line_sep>parser.add_argument('--export_dir' type=str help='The directory where the exported SavedModel will be stored.')<line_sep>tf.logging.set_verbosity(tf.logging.INFO)<line_sep>FLAGS,unparsed=parser.parse_known_args()<line_sep>tf.app.run(main=main argv=[sys.argv[0]]+unparsed)<block_end>
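# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the script above).
# Typical invocation, assuming this file is saved as mnist_estimator.py
# (the filename is hypothetical):
#
#   python mnist_estimator.py --data_dir /tmp/mnist_data \
#       --model_dir /tmp/mnist_model --export_dir /tmp/mnist_saved_model
#
# After training, the checkpoints in --model_dir can be reused for ad-hoc
# prediction by rebuilding the Estimator with the same model_fn; the
# numpy_input_fn helper below is the TF 1.x convenience wrapper, and the
# input array is assumed to be [N, 28, 28] float32 like dataset.train().

import numpy as np


def predict_digits(images, model_dir='/tmp/mnist_model',
                   data_format='channels_last'):
  """Return predicted class ids for a [N, 28, 28] float32 array."""
  classifier = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=model_dir,
      params={'data_format': data_format})
  predict_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'image': images.astype(np.float32)}, shuffle=False)
  return [p['classes'] for p in classifier.predict(input_fn=predict_input_fn)]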
# encoding: utf-8 # module Autodesk.Revit.UI.Plumbing calls itself Plumbing # from RevitAPIUI,Version=172.16.58.3,Culture=neutral,PublicKeyToken=null # by generator 1.145 # no doc # no imports # no functions # classes <class_stmt>IPipeFittingAndAccessoryPressureDropUIServer(IExternalServer)<block_start>""" Interface for external servers providing optional UI for pipe fitting and pipe accessory coefficient calculation. """<def_stmt>GetDBServerId self<block_start>""" GetDBServerId(self: IPipeFittingAndAccessoryPressureDropUIServer) -> Guid Returns the Id of the corresponding DB server for which this server provides an optional UI. Returns: The Id of the DB server. """<line_sep><pass><block_end><def_stmt>ShowSettings self data<block_start>""" ShowSettings(self: IPipeFittingAndAccessoryPressureDropUIServer,data: PipeFittingAndAccessoryPressureDropUIData) -> bool Shows the settings UI. data: The input data of the calculation. Returns: True if the user makes any changes in the UI,false otherwise. """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><block_end><class_stmt>PipeFittingAndAccessoryPressureDropUIData(object IDisposable)<block_start>""" The input and output data used by external UI servers for storing UI settings. """<def_stmt>Dispose self<block_start>""" Dispose(self: PipeFittingAndAccessoryPressureDropUIData) """<line_sep><pass><block_end><def_stmt>GetUIDataItems self<block_start>""" GetUIDataItems(self: PipeFittingAndAccessoryPressureDropUIData) -> IList[PipeFittingAndAccessoryPressureDropUIDataItem] Gets all UI data items stored in the UI data. Returns: An array of UI data items. """<line_sep><pass><block_end><def_stmt>GetUnits self<block_start>""" GetUnits(self: PipeFittingAndAccessoryPressureDropUIData) -> Units Gets units. Returns: The Units object. """<line_sep><pass><block_end><def_stmt>ReleaseUnmanagedResources self *args<block_start>""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIData,disposing: bool) """<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>""" __enter__(self: IDisposable) -> object """<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><def_stmt>__repr__ self *args<block_start>""" __repr__(self: object) -> str """<line_sep><pass><block_end>IsValidObject=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIData) -> bool """<block_end><class_stmt>PipeFittingAndAccessoryPressureDropUIDataItem(object IDisposable)<block_start>""" The input and output data used by external UI servers for initializing and storing the UI settings. 
"""<def_stmt>Dispose self<block_start>""" Dispose(self: PipeFittingAndAccessoryPressureDropUIDataItem) """<line_sep><pass><block_end><def_stmt>GetEntity self<block_start>""" GetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> Entity Returns the entity set by UI server. or an invalid entity otherwise. Returns: The returned Entity. """<line_sep><pass><block_end><def_stmt>GetPipeFittingAndAccessoryData self<block_start>""" GetPipeFittingAndAccessoryData(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> PipeFittingAndAccessoryData Gets the fitting data stored in the UI data item. Returns: The fitting data stored in the UI data item. """<line_sep><pass><block_end><def_stmt>ReleaseUnmanagedResources self *args<block_start>""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """<line_sep><pass><block_end><def_stmt>SetEntity self entity<block_start>""" SetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem,entity: Entity) Stores the entity in the UI data item. entity: The Entity to be stored. """<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>""" __enter__(self: IDisposable) -> object """<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><def_stmt>__repr__ self *args<block_start>""" __repr__(self: object) -> str """<line_sep><pass><block_end>IsValidObject=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> bool """<block_end>
# Copyright 2021 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo # (either separately or in combination, "QuantumBlack Trademarks") are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. """`kedro_viz.services.layers` defines layers-related logic."""<import_stmt>logging<import_from_stmt>collections defaultdict<import_from_stmt>typing Dict List Set<import_from_stmt>toposort CircularDependencyError toposort_flatten<import_from_stmt>kedro_viz.models.graph GraphNode<line_sep>logger=logging.getLogger(__name__)<def_stmt>sort_layers nodes:Dict[str GraphNode] dependencies:Dict[str Set[str]]<arrow>List[str]<block_start>"""Given a DAG represented by a dictionary of nodes, some of which have a `layer` attribute, along with their dependencies, return the list of all layers sorted according to the nodes' topological order, i.e. a layer should appear before another layer in the list if its node is a dependency of the other layer's node, directly or indirectly. For example, given the following graph: node1(layer=a) -> node2 -> node4 -> node6(layer=d) | ^ v | node3(layer=b) -> node5(layer=c) The layers ordering should be: [a, b, c, d] In theory, this is a problem of finding the [transitive closure](https://en.wikipedia.org/wiki/Transitive_closure) in a graph of layers and then toposort them. The algorithm below follows a repeated depth-first search approach: * For every node, find all layers that depends on it in a depth-first search. * While traversing, build up a dictionary of {node_id -> layers} for the node that have already been visited. * Turn the final {node_id -> layers} into a {layer -> layers} to represent the layers' dependencies. Note: the key is a layer and the values are the parents of that layer, just because that's the format toposort requires. * Feed this layers dictionary to ``toposort`` and return the sorted values. * Raise CircularDependencyError if the layers cannot be sorted topologically, i.e. there are cycles among the layers. Args: nodes: A dictionary of {node_id -> node} represents the nodes in the graph. dependencies: A dictionary of {node_id -> set(child_ids)} represents the direct dependencies between nodes in the graph. Returns: The list of layers sorted based on topological order. Raises: CircularDependencyError: When the layers have cyclic dependencies. 
"""<line_sep>node_layers:Dict[str Set[str]]={}# map node_id to the layers that depend on it <def_stmt>find_child_layers node_id:str<arrow>Set[str]<block_start>"""For the given node_id, find all layers that depend on it in a depth-first manner. Build up the node_layers dependency dictionary while traversing so each node is visited only once. Note: Python's default recursive depth limit is 1000, which means this algorithm won't work for pipeline with more than 1000 nodes. However, we can rewrite this using stack if we run into this limit in practice. """<if_stmt>node_id<in>node_layers<block_start><return>node_layers[node_id]<block_end>node_layers[node_id]=set()<line_sep># The layer of the current node can also be considered as depending on that node. # This is to cater for the edge case where all nodes are completely disjoint from each other # and no dependency graph for layers can be constructed, # yet the layers still need to be displayed. node_layer=getattr(nodes[node_id] "layer" <none>)<if_stmt>node_layer<is><not><none><block_start>node_layers[node_id].add(node_layer)<block_end># for each child node of the given node_id, # mark its layer and all layers that depend on it as child layers of the given node_id. <for_stmt>child_node_id dependencies[node_id]<block_start>child_node=nodes[child_node_id]<line_sep>child_layer=getattr(child_node "layer" <none>)<if_stmt>child_layer<is><not><none><block_start>node_layers[node_id].add(child_layer)<block_end>node_layers[node_id].update(find_child_layers(child_node_id))<block_end><return>node_layers[node_id]<block_end># populate node_layers dependencies <for_stmt>node_id nodes<block_start>find_child_layers(node_id)<block_end># compute the layer dependencies dictionary based on the node_layers dependencies, # represented as {layer -> set(parent_layers)} layer_dependencies=defaultdict(set)<for_stmt>node_id,child_layers node_layers.items()<block_start>node_layer=getattr(nodes[node_id] "layer" <none>)<line_sep># add the node's layer as a parent layer for all child layers. # Even if a child layer is the same as the node's layer, i.e. a layer is marked # as its own parent, toposort still works so we don't need to check for that explicitly. <if_stmt>node_layer<is><not><none><block_start><for_stmt>layer child_layers<block_start>layer_dependencies[layer].add(node_layer)<block_end><block_end><block_end># toposort the layer_dependencies to find the layer order. # Note that for string, toposort_flatten will default to alphabetical order for tie-break. <try_stmt><block_start><return>toposort_flatten(layer_dependencies)<block_end><except_stmt>CircularDependencyError<block_start>logger.warning("Layers visualisation is disabled as circular dependency detected among layers.")<line_sep><return>[]<block_end><block_end>
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.nn Parameter<import_stmt>math<import_from_stmt>torchkit.util.utils l2_norm<import_from_stmt>torchkit.head.localfc.common calc_logits<class_stmt>CurricularFace(nn.Module)<block_start>""" Implement of CurricularFace (https://arxiv.org/abs/2004.00288) """<def_stmt>__init__ self in_features out_features scale=64.0 margin=0.5 alpha=0.1<block_start>""" Args: in_features: size of each input features out_features: size of each output features scale: norm of input feature margin: margin """<line_sep>super(CurricularFace self).__init__()<line_sep>self.in_features=in_features<line_sep>self.out_features=out_features<line_sep>self.margin=margin<line_sep>self.scale=scale<line_sep>self.alpha=alpha<line_sep>self.cos_m=math.cos(margin)<line_sep>self.sin_m=math.sin(margin)<line_sep>self.threshold=math.cos(math.pi-margin)<line_sep>self.mm=math.sin(math.pi-margin)<times>margin<line_sep>self.kernel=Parameter(torch.Tensor(in_features out_features))<line_sep>self.register_buffer('t' torch.zeros(1))<line_sep>nn.init.normal_(self.kernel std=0.01)<block_end><def_stmt>forward self embeddings labels<block_start>cos_theta,origin_cos=calc_logits(embeddings self.kernel)<line_sep>target_logit=cos_theta[torch.arange(0 embeddings.size(0)) labels].view(-1 1)<line_sep>sin_theta=torch.sqrt(1.0-torch.pow(target_logit 2))<line_sep>cos_theta_m=target_logit<times>self.cos_m-sin_theta<times>self.sin_m# cos(target+margin) mask=cos_theta<g>cos_theta_m<line_sep>final_target_logit=torch.where(target_logit<g>self.threshold cos_theta_m target_logit-self.mm)<line_sep>hard_example=cos_theta[mask]<with_stmt>torch.no_grad()<block_start>self.t=target_logit.mean()<times>self.alpha+(1-self.alpha)<times>self.t<block_end>cos_theta[mask]=hard_example<times>(self.t+hard_example)<line_sep>cos_theta.scatter_(1 labels.view(-1 1).long() final_target_logit)<line_sep>output=cos_theta<times>self.scale<line_sep><return>output origin_cos<times>self.scale<block_end><block_end>
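# Illustrative usage sketch, assuming the torchkit helpers imported above are available;
# the sizes below are placeholders, not values from the original code.
import torch

head = CurricularFace(in_features=512, out_features=1000)
embeddings = torch.randn(8, 512)                  # batch of 8 embedding vectors
labels = torch.randint(0, 1000, (8,))             # ground-truth class ids
logits, origin_logits = head(embeddings, labels)  # both shaped (8, 1000), scaled by `scale`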
<try_stmt><block_start><import_from_stmt>public_config *<block_end><except_stmt>ImportError<block_start><pass><block_end>HOST='0.0.0.0'<line_sep>PORT=9038<line_sep>SERVICE_NAME='jobs'<line_sep>SERVER_ENV='prod'<line_sep>SQLALCHEMY_POOL_SIZE=10<line_sep>SQLALCHEMY_POOL_RECYCLE=3600<line_sep>JOBS=[{# Task: daily credit-score check, runs Monday-Friday at 10:30 AM # Checks whether each device's loan period has expired: sends a reminder email and deducts 1 credit point 'id':'credit-check-daily' # job id, must be unique 'func':'apps.jobs.business.jobs:JobsBusiness.credit_check_daily' # import path 'args':<none> # arguments 'trigger':'cron' # trigger type: cron schedule 'day_of_week':'mon-fri' # Monday - Friday 'hour':11 # 11 AM 'minute':30 # minute of the hour # 'trigger': 'interval', # trigger type: fixed time interval # 'hours': 10 # 'seconds': 10 } {# cidata data refresh 'id':'cijob_update' # job id, must be unique 'func':'apps.extention.business.cidata:CiJobBusiness.update_jenkins_data' # import path 'args':<none> # arguments 'trigger':'interval' # trigger type: fixed time interval 'hours':10# 'seconds': 10 } {# Periodically flush API call statistics from Redis into the database 'id':'get_statistics_route_job' # job id, must be unique 'func':'apps.public.daos.public:get_statistics_route_job' # import path 'args':<none> # arguments 'trigger':'interval' # trigger type: time interval 'day_of_week':'mon-fri' # Monday - Friday 'hour':3 # 3 AM # 'minute': 5, # minute of the hour }]<line_sep>
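# Illustrative wiring sketch, assuming the JOBS list above is consumed by Flask-APScheduler
# (the config module path below is hypothetical).
from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object("apps.jobs.config")  # hypothetical path to this settings module
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()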
<import_from_stmt>sklearn.ensemble RandomForestRegressor<import_from_stmt>openbox.utils.config_space ConfigurationSpace<import_from_stmt>openbox.utils.config_space UniformFloatHyperparameter CategoricalHyperparameter Constant UniformIntegerHyperparameter<import_stmt>numpy<as>np<import_from_stmt>openbox.utils.config_space.util convert_configurations_to_array<import_stmt>threading<import_from_stmt>joblib Parallel delayed<import_from_stmt>sklearn.utils.fixes _joblib_parallel_args<import_from_stmt>sklearn.utils.validation check_is_fitted<import_from_stmt>sklearn.ensemble._base _partition_estimators<def_stmt>_accumulate_prediction predict X out lock<block_start>""" This is a utility function for joblib's Parallel. It can't go locally in ForestClassifier or ForestRegressor, because joblib complains that it cannot pickle it when placed there. """<line_sep>prediction=predict(X check_input=<false>)<with_stmt>lock<block_start><if_stmt>len(out)<eq>1<block_start>out[0]<augadd>prediction<block_end><else_stmt><block_start><for_stmt>i range(len(out))<block_start>out[i]<augadd>prediction[i]<block_end><block_end><block_end><block_end><def_stmt>_collect_prediction predict X out lock<block_start>""" This is a utility function for joblib's Parallel. It can't go locally in ForestClassifier or ForestRegressor, because joblib complains that it cannot pickle it when placed there. """<line_sep>prediction=predict(X check_input=<false>)<with_stmt>lock<block_start>out.append(prediction)<block_end><block_end><def_stmt>predictmv rf X<block_start>check_is_fitted(rf)<line_sep># Check data X=rf._validate_X_predict(X)<line_sep># Assign chunk of trees to jobs n_jobs,_,_=_partition_estimators(rf.n_estimators rf.n_jobs)<line_sep>print('n_jobs=' n_jobs)<line_sep># avoid storing the output of every estimator by summing them here <if_stmt>rf.n_outputs_<g>1<block_start>y_hat=np.zeros((X.shape[0] rf.n_outputs_) dtype=np.float64)<block_end><else_stmt><block_start>print('here, rf.n_outputs_=1')<line_sep>y_hat=np.zeros((X.shape[0]) dtype=np.float64)<block_end># Parallel loop lock=threading.Lock()<line_sep># Parallel(n_jobs=n_jobs, verbose=rf.verbose, # **_joblib_parallel_args(require="sharedmem"))( # delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock) # for e in rf.estimators_) # # y_hat /= len(rf.estimators_) # # return y_hat all_y_preds=list()<line_sep>Parallel(n_jobs=n_jobs verbose=rf.verbose **_joblib_parallel_args(require="sharedmem"))(delayed(_collect_prediction)(e.predict X all_y_preds lock)<for>e rf.estimators_)<line_sep>all_y_preds=np.asarray(all_y_preds dtype=np.float64)<line_sep><return>all_y_preds<block_end><def_stmt>get_cs <block_start>cs=ConfigurationSpace()<line_sep>n_estimators=UniformIntegerHyperparameter("n_estimators" 100 1000 default_value=500 q=50)<line_sep>num_leaves=UniformIntegerHyperparameter("num_leaves" 31 2047 default_value=128)<line_sep>max_depth=Constant('max_depth' 15)<line_sep>learning_rate=UniformFloatHyperparameter("learning_rate" 1e-3 0.3 default_value=0.1 log=<true>)<line_sep>min_child_samples=UniformIntegerHyperparameter("min_child_samples" 5 30 default_value=20)<line_sep>subsample=UniformFloatHyperparameter("subsample" 0.7 1 default_value=1 q=0.1)<line_sep>colsample_bytree=UniformFloatHyperparameter("colsample_bytree" 0.7 1 default_value=1 q=0.1)<line_sep>cs.add_hyperparameters([n_estimators num_leaves max_depth learning_rate min_child_samples subsample 
colsample_bytree])<line_sep><return>cs<block_end>n_obs=50<line_sep>n_new=5<line_sep>cs=get_cs()<line_sep>cs.seed(1)<line_sep>configs=cs.sample_configuration(n_obs)<line_sep>new_configs=cs.sample_configuration(n_new)<line_sep>X=convert_configurations_to_array(configs)<line_sep>Y=np.random.RandomState(47).random(size=(n_obs ))<line_sep>pX=convert_configurations_to_array(new_configs)<line_sep>print('shape of pX' pX.shape)<line_sep>rf=RandomForestRegressor(random_state=np.random.RandomState(47) n_estimators=3)<line_sep>rf.fit(X Y)<line_sep>preds=rf.predict(pX)<line_sep>print(preds)<line_sep>ppp=predictmv(rf pX)<line_sep>print('final predict' ppp)<line_sep>m=np.mean(ppp axis=0)<line_sep>v=np.var(ppp axis=0)<line_sep>print(m v)<line_sep>print(type(m) type(v))<import_from_stmt>joblib effective_n_jobs<line_sep>print(effective_n_jobs(<none>))<line_sep>
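# Note on the script above: keeping every tree's prediction separately (rather than the
# averaged rf.predict output) lets the caller compute an empirical mean and variance across
# trees, which is how random-forest surrogates typically estimate predictive uncertainty
# in Bayesian optimisation.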
<import_from_stmt>opt_utils *<import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-s" "--skip_compilation" action='store_true' help="skip compilation")<line_sep>args=parser.parse_args()<if_stmt><not>args.skip_compilation<block_start>compile_all_opt_examples()<block_end><for_stmt>example all_examples<block_start>args=[]<line_sep>output=run_example(example args <true>).decode('ascii')<with_stmt>open(example+".log" "w")<as>text_file<block_start>text_file.write(output)<block_end><block_end>
<import_from_stmt>nose.tools raises<import_from_stmt>blocks.bricks Bias Linear Logistic<import_from_stmt>blocks.bricks.parallel Merge<import_from_stmt>blocks.filter VariableFilter<import_from_stmt>blocks.graph ComputationGraph<import_from_stmt>blocks.roles BIAS FILTER PARAMETER OUTPUT<import_from_stmt>theano tensor<def_stmt>test_variable_filter # Creating computation graph <block_start>brick1=Linear(input_dim=2 output_dim=2 name='linear1')<line_sep>brick2=Bias(2 name='bias1')<line_sep>activation=Logistic(name='sigm')<line_sep>x=tensor.vector()<line_sep>h1=brick1.apply(x)<line_sep>h2=activation.apply(h1)<line_sep>h2.name="h2act"<line_sep>y=brick2.apply(h2)<line_sep>cg=ComputationGraph(y)<line_sep>parameters=[brick1.W brick1.b brick2.parameters[0]]<line_sep>bias=[brick1.b brick2.parameters[0]]<line_sep>brick1_bias=[brick1.b]<line_sep># Testing filtering by role role_filter=VariableFilter(roles=[PARAMETER])<assert_stmt>parameters<eq>role_filter(cg.variables)<line_sep>role_filter=VariableFilter(roles=[FILTER])<assert_stmt>[]<eq>role_filter(cg.variables)<line_sep># Testing filtering by role using each_role flag role_filter=VariableFilter(roles=[PARAMETER BIAS])<assert_stmt>parameters<eq>role_filter(cg.variables)<line_sep>role_filter=VariableFilter(roles=[PARAMETER BIAS] each_role=<true>)<assert_stmt><not>parameters<eq>role_filter(cg.variables)<assert_stmt>bias<eq>role_filter(cg.variables)<line_sep># Testing filtering by bricks classes brick_filter=VariableFilter(roles=[BIAS] bricks=[Linear])<assert_stmt>brick1_bias<eq>brick_filter(cg.variables)<line_sep># Testing filtering by bricks instances brick_filter=VariableFilter(roles=[BIAS] bricks=[brick1])<assert_stmt>brick1_bias<eq>brick_filter(cg.variables)<line_sep># Testing filtering by brick instance brick_filter=VariableFilter(roles=[BIAS] bricks=[brick1])<assert_stmt>brick1_bias<eq>brick_filter(cg.variables)<line_sep># Testing filtering by name name_filter=VariableFilter(name='W_norm')<assert_stmt>[cg.variables[2]]<eq>name_filter(cg.variables)<line_sep># Testing filtering by name regex name_filter_regex=VariableFilter(name_regex='W_no.?m')<assert_stmt>[cg.variables[2]]<eq>name_filter_regex(cg.variables)<line_sep># Testing filtering by theano name theano_name_filter=VariableFilter(theano_name='h2act')<assert_stmt>[cg.variables[11]]<eq>theano_name_filter(cg.variables)<line_sep># Testing filtering by theano name regex theano_name_filter_regex=VariableFilter(theano_name_regex='h2a.?t')<assert_stmt>[cg.variables[11]]<eq>theano_name_filter_regex(cg.variables)<line_sep># Testing filtering by application appli_filter=VariableFilter(applications=[brick1.apply])<line_sep>variables=[cg.variables[1] cg.variables[8]]<assert_stmt>variables<eq>appli_filter(cg.variables)<line_sep># Testing filtering by application appli_filter_list=VariableFilter(applications=[brick1.apply])<assert_stmt>variables<eq>appli_filter_list(cg.variables)<line_sep>input1=tensor.matrix('input1')<line_sep>input2=tensor.matrix('input2')<line_sep>merge=Merge(['input1' 'input2'] [5 6] 2)<line_sep>merged=merge.apply(input1 input2)<line_sep>merge_cg=ComputationGraph(merged)<line_sep>outputs=VariableFilter(roles=[OUTPUT] bricks=[merge])(merge_cg.variables)<assert_stmt>merged<in>outputs<assert_stmt>len(outputs)<eq>3<line_sep>outputs_application=VariableFilter(roles=[OUTPUT] applications=[merge.apply])(merge_cg.variables)<assert_stmt>outputs_application<eq>[merged]<block_end>@raises(TypeError)<def_stmt>test_variable_filter_roles_error # Creating computation graph 
<block_start>brick1=Linear(input_dim=2 output_dim=2 name='linear1')<line_sep>x=tensor.vector()<line_sep>h1=brick1.apply(x)<line_sep>cg=ComputationGraph(h1)<line_sep># testing role error VariableFilter(roles=PARAMETER)(cg.variables)<block_end>@raises(TypeError)<def_stmt>test_variable_filter_applications_error # Creating computation graph <block_start>brick1=Linear(input_dim=2 output_dim=2 name='linear1')<line_sep>x=tensor.vector()<line_sep>h1=brick1.apply(x)<line_sep>cg=ComputationGraph(h1)<line_sep>VariableFilter(applications=brick1.apply)(cg.variables)<block_end>
<import_from_stmt>tensorflow.keras.models Model<import_from_stmt>tensorflow.keras.layers Dense Flatten Dropout Input<import_from_stmt>tensorflow.keras.layers MaxPooling1D Conv1D<import_from_stmt>tensorflow.keras.layers LSTM Bidirectional<import_from_stmt>tensorflow.keras.layers BatchNormalization GlobalAveragePooling1D Permute concatenate Activation add<import_stmt>numpy<as>np<import_stmt>math<def_stmt>get_model model_name input_shape nb_class<block_start><if_stmt>model_name<eq>"vgg"<block_start>model=cnn_vgg(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"lstm1"<block_start>model=lstm1(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"lstm"<block_start>model=lstm1v0(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"lstm2"<block_start>model=lstm2(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"blstm1"<block_start>model=blstm1(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"blstm2"<block_start>model=blstm2(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"lstmfcn"<block_start>model=lstm_fcn(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"resnet"<block_start>model=cnn_resnet(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"mlp"<block_start>model=mlp4(input_shape nb_class)<block_end><elif_stmt>model_name<eq>"lenet"<block_start>model=cnn_lenet(input_shape nb_class)<block_end><else_stmt><block_start>print("model name missing")<block_end><return>model<block_end><def_stmt>mlp4 input_shape nb_class# <NAME>, <NAME>, <NAME>, "Time Series Classification from Scratch with Deep Neural Networks: A Strong Baseline," Int. Joint Conf. Neural Networks, 2017, pp. 1578-1585 <block_start>ip=Input(shape=input_shape)<line_sep>fc=Flatten()(ip)<line_sep>fc=Dropout(0.1)(fc)<line_sep>fc=Dense(500 activation='relu')(fc)<line_sep>fc=Dropout(0.2)(fc)<line_sep>fc=Dense(500 activation='relu')(fc)<line_sep>fc=Dropout(0.2)(fc)<line_sep>fc=Dense(500 activation='relu')(fc)<line_sep>fc=Dropout(0.3)(fc)<line_sep>out=Dense(nb_class activation='softmax')(fc)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>cnn_lenet input_shape nb_class# <NAME>, <NAME>, <NAME>, and <NAME>, “Gradient-based learning applied to document recognition,” Proceedings of the IEEE, vol. 86, no. 11, pp. 2278–2324, 1998. <block_start>ip=Input(shape=input_shape)<line_sep>conv=ip<line_sep>nb_cnn=int(round(math.log(input_shape[0] 2))-3)<line_sep>print("pooling layers: %d"%nb_cnn)<for_stmt>i range(nb_cnn)<block_start>conv=Conv1D(6+10<times>i 3 padding='same' activation="relu" kernel_initializer='he_uniform')(conv)<line_sep>conv=MaxPooling1D(pool_size=2)(conv)<block_end>flat=Flatten()(conv)<line_sep>fc=Dense(120 activation='relu')(flat)<line_sep>fc=Dropout(0.5)(fc)<line_sep>fc=Dense(84 activation='relu')(fc)<line_sep>fc=Dropout(0.5)(fc)<line_sep>out=Dense(nb_class activation='softmax')(fc)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>cnn_vgg input_shape nb_class# <NAME> and <NAME>, "Very deep convolutional networks for large-scale image recognition," arXiv preprint arXiv:1409.1556, 2014. 
<block_start>ip=Input(shape=input_shape)<line_sep>conv=ip<line_sep>nb_cnn=int(round(math.log(input_shape[0] 2))-3)<line_sep>print("pooling layers: %d"%nb_cnn)<for_stmt>i range(nb_cnn)<block_start>num_filters=min(64<times>2<power>i 512)<line_sep>conv=Conv1D(num_filters 3 padding='same' activation="relu" kernel_initializer='he_uniform')(conv)<line_sep>conv=Conv1D(num_filters 3 padding='same' activation="relu" kernel_initializer='he_uniform')(conv)<if_stmt>i<g>1<block_start>conv=Conv1D(num_filters 3 padding='same' activation="relu" kernel_initializer='he_uniform')(conv)<block_end>conv=MaxPooling1D(pool_size=2)(conv)<block_end>flat=Flatten()(conv)<line_sep>fc=Dense(4096 activation='relu')(flat)<line_sep>fc=Dropout(0.5)(fc)<line_sep>fc=Dense(4096 activation='relu')(fc)<line_sep>fc=Dropout(0.5)(fc)<line_sep>out=Dense(nb_class activation='softmax')(fc)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>lstm1v0 input_shape nb_class# Original proposal: # <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997. <block_start>ip=Input(shape=input_shape)<line_sep>l2=LSTM(512)(ip)<line_sep>out=Dense(nb_class activation='softmax')(l2)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>lstm1 input_shape nb_class# Original proposal: # <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997. # Hyperparameter choices: # <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017 <block_start>ip=Input(shape=input_shape)<line_sep>l2=LSTM(100)(ip)<line_sep>out=Dense(nb_class activation='softmax')(l2)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>lstm2 input_shape nb_class<block_start>ip=Input(shape=input_shape)<line_sep>l1=LSTM(100 return_sequences=<true>)(ip)<line_sep>l2=LSTM(100)(l1)<line_sep>out=Dense(nb_class activation='softmax')(l2)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>blstm1 input_shape nb_class# Original proposal: # <NAME> and <NAME>, “Bidirectional recurrent neural networks,” IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673–2681, 1997. # Hyperparameter choices: # <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017 <block_start>ip=Input(shape=input_shape)<line_sep>l2=Bidirectional(LSTM(100))(ip)<line_sep>out=Dense(nb_class activation='softmax')(l2)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>blstm2 input_shape nb_class<block_start>ip=Input(shape=input_shape)<line_sep>l1=Bidirectional(LSTM(100 return_sequences=<true>))(ip)<line_sep>l2=Bidirectional(LSTM(100))(l1)<line_sep>out=Dense(nb_class activation='softmax')(l2)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>lstm_fcn input_shape nb_class# <NAME>, <NAME>, <NAME>, and <NAME>, “LSTM Fully Convolutional Networks for Time Series Classification,” IEEE Access, vol. 6, pp. 1662–1669, 2018. <block_start>ip=Input(shape=input_shape)<line_sep># lstm part is a 1 time step multivariate as described in Karim et al. Seems strange, but works I guess. 
lstm=Permute((2 1))(ip)<line_sep>lstm=LSTM(128)(lstm)<line_sep>lstm=Dropout(0.8)(lstm)<line_sep>conv=Conv1D(128 8 padding='same' kernel_initializer='he_uniform')(ip)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<line_sep>conv=Conv1D(256 5 padding='same' kernel_initializer='he_uniform')(conv)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<line_sep>conv=Conv1D(128 3 padding='same' kernel_initializer='he_uniform')(conv)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<line_sep>flat=GlobalAveragePooling1D()(conv)<line_sep>flat=concatenate([lstm flat])<line_sep>out=Dense(nb_class activation='softmax')(flat)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end><def_stmt>cnn_resnet input_shape nb_class# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Data augmentation using synthetic data for time series classification with deep residual networks," International Workshop on Advanced Analytics and Learning on Temporal Data ECML/PKDD, 2018 <block_start>ip=Input(shape=input_shape)<line_sep>residual=ip<line_sep>conv=ip<for_stmt>i,nb_nodes enumerate([64 128 128])<block_start>conv=Conv1D(nb_nodes 8 padding='same' kernel_initializer="glorot_uniform")(conv)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<line_sep>conv=Conv1D(nb_nodes 5 padding='same' kernel_initializer="glorot_uniform")(conv)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<line_sep>conv=Conv1D(nb_nodes 3 padding='same' kernel_initializer="glorot_uniform")(conv)<line_sep>conv=BatchNormalization()(conv)<line_sep>conv=Activation('relu')(conv)<if_stmt>i<l>2# expands dimensions according to Fawaz et al. <block_start>residual=Conv1D(nb_nodes 1 padding='same' kernel_initializer="glorot_uniform")(residual)<block_end>residual=BatchNormalization()(residual)<line_sep>conv=add([residual conv])<line_sep>conv=Activation('relu')(conv)<line_sep>residual=conv<block_end>flat=GlobalAveragePooling1D()(conv)<line_sep>out=Dense(nb_class activation='softmax')(flat)<line_sep>model=Model([ip] [out])<line_sep>model.summary()<line_sep><return>model<block_end>
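# Illustrative usage sketch; the input shape and class count are placeholders.
model = get_model("lstmfcn", input_shape=(128, 9), nb_class=6)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])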
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>sys<line_sep>sys.path.append('..')<line_sep>sys.path.append('.')<import_from_stmt>auto_scan_test FusePassAutoScanTest IgnoreReasons<import_from_stmt>program_config TensorConfig ProgramConfig OpConfig CxxConfig TargetType PrecisionType DataLayoutType Place<import_stmt>numpy<as>np<import_from_stmt>functools partial<import_from_stmt>typing Optional List Callable Dict Any Set<import_from_stmt>test_conv_util UpdatePaddingAndDilation ConvOutputSize ConvTransposeOutputSize<import_stmt>unittest<import_stmt>hypothesis<import_from_stmt>hypothesis given settings seed example assume reproduce_failure<import_stmt>hypothesis.strategies<as>st<class_stmt>TestConvElementwiseFuse(FusePassAutoScanTest)<block_start><def_stmt>__init__ self *args **kwargs<block_start>FusePassAutoScanTest.__init__(self *args **kwargs)<line_sep>self.enable_testing_on_place(TargetType.ARM [PrecisionType.FP32] DataLayoutType.NCHW thread=[1 4])<line_sep>self.enable_testing_on_place(TargetType.X86 [PrecisionType.FP32] DataLayoutType.NCHW thread=[1 4])<line_sep>opencl_places=[Place(TargetType.OpenCL PrecisionType.FP16 DataLayoutType.ImageDefault) Place(TargetType.OpenCL PrecisionType.FP16 DataLayoutType.ImageFolder) Place(TargetType.OpenCL PrecisionType.FP32 DataLayoutType.NCHW) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.ImageDefault) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.ImageFolder) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.NCHW) Place(TargetType.Host PrecisionType.FP32)]<line_sep>self.enable_testing_on_place(places=opencl_places)<block_end><def_stmt>is_program_valid self program_config:ProgramConfig predictor_config:CxxConfig<arrow>bool<block_start><return><true><block_end><def_stmt>sample_program_configs self draw#conv or conv_transpose <block_start>Transpose=draw(st.sampled_from([<true> <false>]))<line_sep>#conv param or conv_transpose param in_shape=draw(st.lists(st.integers(min_value=3 max_value=128) min_size=3 max_size=3))<line_sep>in_shape=[draw(st.integers(min_value=1 max_value=4))]+in_shape<line_sep>weight_shape=draw(st.lists(st.integers(min_value=1 max_value=8) min_size=4 max_size=4))<line_sep>paddings=draw(st.lists(st.integers(min_value=0 max_value=2) min_size=2 max_size=2))<line_sep>dilations=draw(st.sampled_from([[2 2]]))<line_sep>groups=draw(st.sampled_from([1 2 in_shape[1]]))<line_sep>padding_algorithm=draw(st.sampled_from(["VALID" "SAME"]))<line_sep>strides=draw(st.sampled_from([[1 1] [2 2]]))<line_sep>output_padding=draw(st.sampled_from([[] [draw(st.integers(min_value=0 max_value=max(strides[0] dilations[0])-1)) draw(st.integers(min_value=0 max_value=max(strides[1] dilations[1])-1))]]))<line_sep>scale_in=draw(st.floats(min_value=0.001 max_value=0.1))<line_sep>scale_out=draw(st.floats(min_value=0.001 
max_value=0.1))<if_stmt>Transpose<block_start>bias_sample_shape=weight_shape[1]<times>groups<block_end><else_stmt><block_start>bias_sample_shape=weight_shape[0]<block_end>elementwise_bias_shape=[bias_sample_shape]<line_sep>conv_out_shape=[]<line_sep>paddings_,dilations_=UpdatePaddingAndDilation(in_shape weight_shape paddings dilations groups padding_algorithm strides)<if_stmt>Transpose<block_start>assume(in_shape[1]<eq>weight_shape[0])<line_sep>assume(in_shape[1]%groups<eq>0)#TODO <if_stmt>len(output_padding)<block_start>assume(output_padding[0]<l>max(strides[0] dilations_[0]))<line_sep>assume(output_padding[1]<l>max(strides[1] dilations_[1]))<block_end>conv_out_shape=[in_shape[0] weight_shape[1]<times>groups]<line_sep>oh,ow=ConvTransposeOutputSize(in_shape weight_shape dilations_ paddings_ strides)<if_stmt>len(output_padding)<block_start>oh=oh+output_padding[0]<line_sep>ow=ow+output_padding[1]<block_end>conv_out_shape=conv_out_shape+[int(oh) int(ow)]<line_sep>assume(oh<g>0<and>ow<g>0)<if_stmt>len(output_padding)<block_start>conv_output_h=(oh+output_padding[0]+paddings[0]+paddings[1]-(dilations[0]<times>(weight_shape[2]-1)+1))/strides[0]+1<line_sep>conv_output_w=(oh+output_padding[1]+paddings[0]+paddings[1]-(dilations[1]<times>(weight_shape[3]-1)+1))/strides[1]+1<line_sep>assume(in_shape[2]<eq>(int)(conv_output_h))<line_sep>assume(in_shape[3]<eq>(int)(conv_output_w))<block_end><block_end><else_stmt><block_start>assume(in_shape[1]<eq>weight_shape[1]<times>groups)<line_sep>assume(weight_shape[0]%groups<eq>0)<line_sep>conv_out_shape=[in_shape[0] weight_shape[0]]<line_sep>oh,ow=ConvOutputSize(in_shape weight_shape dilations_ paddings_ strides)<line_sep>conv_out_shape=conv_out_shape+[int(oh) int(ow)]<line_sep>assume(oh<g>0<and>ow<g>0)<block_end>conv_type=""<line_sep>conv_attrs={}<if_stmt>Transpose<block_start>conv_type="conv2d_transpose"<line_sep>conv_attrs={"data_format":'nchw' "dilations":dilations "padding_algorithm":padding_algorithm "groups":groups "paddings":paddings "strides":strides "Scale_in":scale_in "Scale_out":scale_out "output_size":[] "output_padding":output_padding}<block_end><else_stmt><block_start>conv_type="conv2d"<line_sep>conv_attrs={"data_format":'nchw' "dilations":dilations "padding_algorithm":padding_algorithm "groups":groups "paddings":paddings "strides":strides "Scale_in":scale_in "Scale_out":scale_out}<block_end>conv_op=OpConfig(type=conv_type inputs={"Input":["input_data"] "Filter":["filter_data"]} outputs={"Output":["conv_output_data"]} attrs=conv_attrs)<line_sep>elementwise_add_op=OpConfig(type="elementwise_add" inputs={"X":["conv_output_data"] "Y":["add_bias_data"]} outputs={"Out":["output_data"]} attrs={"axis":1})<line_sep>ops=[conv_op elementwise_add_op]<line_sep>self.ops=ops<line_sep>program_config=ProgramConfig(ops=ops weights={"filter_data":TensorConfig(shape=weight_shape) "add_bias_data":TensorConfig(shape=elementwise_bias_shape)} inputs={"input_data":TensorConfig(shape=in_shape)} outputs=["output_data"])<line_sep><return>program_config<block_end><def_stmt>sample_predictor_configs self<block_start>config=CxxConfig()<line_sep><return>self.get_predictor_configs() [self.ops[0].type] (1e-4 1e-5)<block_end><def_stmt>add_ignore_pass_case self<block_start><pass><block_end><def_stmt>test self *args **kwargs<block_start>self.run_and_statis(quant=<false> max_examples=500 passes=["lite_conv_elementwise_fuser_pass"])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main(argv=[''])<block_end>