<import_stmt>torch<import_stmt>numpy<as>np<import_stmt>json<import_stmt>os<import_stmt>pickle<import_stmt>sys<import_stmt>logging<import_stmt>shutil<import_from_stmt>tqdm tqdm<import_from_stmt>torch.autograd Variable<import_stmt>torch.optim<as>optim<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.utils.data.sampler RandomSampler<import_stmt>config<import_from_stmt>model utils data vector<import_from_stmt>model.retriever LSTMRetriever<import_from_stmt>multi_corpus MultiCorpus<import_from_stmt>torch.utils.data.sampler SequentialSampler RandomSampler<import_stmt>math<line_sep>logger=logging.getLogger()<line_sep>global_timer=utils.Timer()<line_sep>stats={'timer':global_timer 'epoch':0 'best_valid':0 'best_verified_valid':0 'best_acc':0 'best_verified_acc':0}<def_stmt>make_data_loader args corpus train_time=<false><block_start>dataset=data.MultiCorpusDataset(args corpus args.word_dict args.feature_dict single_answer=<false> para_mode=args.para_mode train_time=train_time)<line_sep>sampler=SequentialSampler(dataset)<if><not>train_time<else>RandomSampler(dataset)<line_sep>loader=torch.utils.data.DataLoader(dataset batch_size=args.batch_size sampler=sampler num_workers=args.data_workers collate_fn=vector.batchify(args args.para_mode train_time=train_time) pin_memory=<true>)<line_sep><return>loader<block_end><def_stmt>init_from_checkpoint args<block_start>logger.info('Loading model from saved checkpoint {}'.format(args.pretrained))<line_sep>model=torch.load(args.pretrained)<line_sep>word_dict=model['word_dict']<line_sep>feature_dict=model['feature_dict']<line_sep>args.vocab_size=len(word_dict)<line_sep>args.embedding_dim_orig=args.embedding_dim<line_sep>args.word_dict=word_dict<line_sep>args.feature_dict=feature_dict<line_sep>ret=LSTMRetriever(args word_dict feature_dict)<line_sep># load saved param values ret.model.load_state_dict(model['state_dict']['para_clf'])<line_sep>optimizer=<none><line_sep>parameters=ret.get_trainable_params()<if_stmt>args.optimizer<eq>'sgd'<block_start>optimizer=optim.SGD(parameters args.learning_rate momentum=args.momentum weight_decay=args.weight_decay)<block_end><elif_stmt>args.optimizer<eq>'adamax'<block_start>optimizer=optim.Adamax(parameters weight_decay=args.weight_decay)<block_end><elif_stmt>args.optimizer<eq>'nag'<block_start>optimizer=NAG(parameters args.learning_rate momentum=args.momentum weight_decay=args.weight_decay)<block_end><else_stmt><block_start><raise>RuntimeError('Unsupported optimizer: %s'%args.optimizer)<block_end>optimizer.load_state_dict(model['state_dict']['optimizer'])<line_sep>logger.info('Model loaded...')<line_sep><return>ret optimizer word_dict feature_dict<block_end><def_stmt>init_from_scratch args train_exs<block_start>logger.info('Initializing model from scratch')<line_sep>word_dict=feature_dict=<none><line_sep># create or get vocab word_dict=utils.build_word_dict(args train_exs)<if_stmt>word_dict<is><not><none><block_start>args.vocab_size=len(word_dict)<block_end>args.embedding_dim_orig=args.embedding_dim<line_sep>args.word_dict=word_dict<line_sep>args.feature_dict=feature_dict<line_sep>ret=LSTMRetriever(args word_dict feature_dict)<line_sep># -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters=ret.get_trainable_params()<line_sep>optimizer=<none><if_stmt>parameters<is><not><none><and>len(parameters)<g>0<block_start><if_stmt>args.optimizer<eq>'sgd'<block_start>optimizer=optim.SGD(parameters 
args.learning_rate momentum=args.momentum weight_decay=args.weight_decay)<block_end><elif_stmt>args.optimizer<eq>'adamax'<block_start>optimizer=optim.Adamax(parameters weight_decay=args.weight_decay)<block_end><elif_stmt>args.optimizer<eq>'nag'<block_start>optimizer=NAG(parameters args.learning_rate momentum=args.momentum weight_decay=args.weight_decay)<block_end><else_stmt><block_start><raise>RuntimeError('Unsupported optimizer: %s'%args.optimizer)<block_end><block_end><else_stmt><block_start><pass><block_end><return>ret optimizer word_dict feature_dict<block_end><def_stmt>train_binary_classification args ret_model optimizer train_loader verified_dev_loader=<none><block_start>args.train_time=<true><line_sep>para_loss=utils.AverageMeter()<line_sep>ret_model.model.train()<for_stmt>idx,ex enumerate(train_loader)<block_start><if_stmt>ex<is><none><block_start><continue><block_end>inputs=[e<if>e<is><none><or>type(e)<ne>type(ex[0])<else>Variable(e.cuda(async=<true>))<for>e ex[:]]<line_sep>ret_input=[*inputs[:4]]<line_sep>scores,_,_=ret_model.score_paras(*ret_input)<line_sep>y_num_occurrences=Variable(ex[-2])<line_sep>labels=(y_num_occurrences<g>0).float()<line_sep>labels=labels.cuda()<line_sep># BCE logits loss batch_para_loss=F.binary_cross_entropy_with_logits(scores.squeeze(1) labels)<line_sep>optimizer.zero_grad()<line_sep>batch_para_loss.backward()<line_sep>torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params() 2.0)<line_sep>optimizer.step()<line_sep>para_loss.update(batch_para_loss.data.item())<if_stmt>math.isnan(para_loss.avg)<block_start><import_stmt>pdb<line_sep>pdb.set_trace()<block_end><if_stmt>idx%25<eq>0<and>idx<g>0<block_start>logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format(stats['epoch'] idx len(train_loader) para_loss.avg))<line_sep>para_loss.reset()<block_end><block_end><block_end><def_stmt>eval_binary_classification args ret_model corpus dev_loader verified_dev_loader=<none> save_scores=<true><block_start>total_exs=0<line_sep>args.train_time=<false><line_sep>ret_model.model.eval()<line_sep>accuracy=0.0<for_stmt>idx,ex enumerate(tqdm(dev_loader))<block_start><if_stmt>ex<is><none><block_start><raise>BrokenPipeError<block_end>inputs=[e<if>e<is><none><or>type(e)<ne>type(ex[0])<else>Variable(e.cuda(async=<true>))<for>e ex[:]]<line_sep>ret_input=[*inputs[:4]]<line_sep>total_exs<augadd>ex[0].size(0)<line_sep>scores,_,_=ret_model.score_paras(*ret_input)<line_sep>scores=F.sigmoid(scores)<line_sep>y_num_occurrences=Variable(ex[-2])<line_sep>labels=(y_num_occurrences<g>0).float()<line_sep>labels=labels.data.numpy()<line_sep>scores=scores.cpu().data.numpy()<line_sep>scores=scores.reshape((-1))<if_stmt>save_scores<block_start><for_stmt>i,pid enumerate(ex[-1])<block_start>corpus.paragraphs[pid].model_score=scores[i]<block_end><block_end>scores=scores<g>0.5<line_sep>a=scores<eq>labels<line_sep>accuracy<augadd>a.sum()<block_end>logger.info('Eval accuracy = {} '.format(accuracy/total_exs))<line_sep>top1=get_topk(corpus)<line_sep><return>top1<block_end><def_stmt>print_vectors args para_vectors question_vectors corpus train=<false> test=<false><block_start>all_question_vectors=[]<line_sep>all_para_vectors=[]<line_sep>qid2idx={}<line_sep>cum_num_lens=[]<line_sep>all_correct_ans={}<line_sep>cum_num_len=0<for_stmt>question_i,qid 
enumerate(corpus.questions)<block_start>labels=[]<line_sep>all_question_vectors.append(question_vectors[qid])<line_sep>qid2idx[qid]=question_i<line_sep>cum_num_len<augadd>len(corpus.questions[qid].pids)<line_sep>cum_num_lens.append(cum_num_len)<for_stmt>para_i,pid enumerate(corpus.questions[qid].pids)<block_start><if_stmt>corpus.paragraphs[pid].ans_occurance<g>0<block_start>labels.append(para_i)<block_end>all_para_vectors.append(para_vectors[pid])<block_end>all_correct_ans[qid]=labels<block_end>all_para_vectors=np.stack(all_para_vectors)<line_sep>all_question_vectors=np.stack(all_question_vectors)<assert_stmt>all_para_vectors.shape[0]<eq>cum_num_lens[-1]<assert_stmt>all_question_vectors.shape[0]<eq>len(cum_num_lens)<assert_stmt>all_question_vectors.shape[0]<eq>len(qid2idx)<assert_stmt>all_question_vectors.shape[0]<eq>len(all_correct_ans)<line_sep>## saving code <if_stmt>train<block_start>OUT_DIR=os.path.join(args.save_dir args.src args.domain "train/")<block_end><else_stmt><block_start><if_stmt>args.is_test<eq>0<block_start>OUT_DIR=os.path.join(args.save_dir args.src args.domain "dev/")<block_end><else_stmt><block_start>OUT_DIR=os.path.join(args.save_dir args.src args.domain "test/")<block_end><block_end>logger.info("Printing vectors at {}".format(OUT_DIR))<if_stmt><not>os.path.exists(OUT_DIR)<block_start>os.makedirs(OUT_DIR)<block_end><else_stmt><block_start>shutil.rmtree(OUT_DIR ignore_errors=<true>)<line_sep>os.makedirs(OUT_DIR)<block_end>json.dump(qid2idx open(OUT_DIR+'map.json' 'w'))<line_sep>json.dump(all_correct_ans open(OUT_DIR+'correct_paras.json' 'w'))<line_sep>all_cumlen=np.array(cum_num_lens)<line_sep>np.save(OUT_DIR+"document" all_para_vectors)<line_sep>np.save(OUT_DIR+"question" all_question_vectors)<line_sep>np.save(OUT_DIR+"all_cumlen" cum_num_lens)<block_end><def_stmt>save_vectors args ret_model corpus data_loader verified_dev_loader=<none> save_scores=<true> train=<false> test=<false><block_start>total_exs=0<line_sep>args.train_time=<false><line_sep>ret_model.model.eval()<line_sep>para_vectors={}<line_sep>question_vectors={}<for_stmt>idx,ex enumerate(tqdm(data_loader))<block_start><if_stmt>ex<is><none><block_start><raise>BrokenPipeError<block_end>inputs=[e<if>e<is><none><or>type(e)<ne>type(ex[0])<else>Variable(e.cuda(async=<true>))<for>e ex[:]]<line_sep>ret_input=[*inputs[:4]]<line_sep>total_exs<augadd>ex[0].size(0)<line_sep>scores,doc,ques=ret_model.score_paras(*ret_input)<line_sep>scores=scores.cpu().data.numpy()<line_sep>scores=scores.reshape((-1))<if_stmt>save_scores<block_start><for_stmt>i,pid enumerate(ex[-1])<block_start>para_vectors[pid]=doc[i]<block_end><for_stmt>i,qid enumerate([corpus.paragraphs[pid].qid<for>pid ex[-1]])<block_start><if_stmt>qid<not><in>question_vectors<block_start>question_vectors[qid]=ques[i]<block_end><block_end><for_stmt>i,pid enumerate(ex[-1])<block_start>corpus.paragraphs[pid].model_score=scores[i]<block_end><block_end><block_end>get_topk(corpus)<line_sep>print_vectors(args para_vectors question_vectors corpus train test)<block_end><def_stmt>get_topk corpus<block_start>top1=0<line_sep>top3=0<line_sep>top5=0<for_stmt>qid corpus.questions<block_start>para_scores=[(corpus.paragraphs[pid].model_score corpus.paragraphs[pid].ans_occurance)<for>pid corpus.questions[qid].pids]<line_sep>sorted_para_scores=sorted(para_scores key=<lambda>x:x[0] reverse=<true>)<if_stmt>sorted_para_scores[0][1]<g>0<block_start>top1<augadd>1<block_end><if_stmt>sum([ans[1]<for>ans 
sorted_para_scores[:3]])<g>0<block_start>top3<augadd>1<block_end><if_stmt>sum([ans[1]<for>ans sorted_para_scores[:5]])<g>0<block_start>top5<augadd>1<block_end><block_end>top1=top1/len(corpus.questions)<line_sep>top3=top3/len(corpus.questions)<line_sep>top5=top5/len(corpus.questions)<line_sep>logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1 top3 top5))<line_sep><return>top1<block_end><def_stmt>get_topk_tfidf corpus<block_start>top1=0<line_sep>top3=0<line_sep>top5=0<for_stmt>qid corpus.questions<block_start>para_scores=[(corpus.paragraphs[pid].tfidf_score corpus.paragraphs[pid].ans_occurance)<for>pid corpus.questions[qid].pids]<line_sep>sorted_para_scores=sorted(para_scores key=<lambda>x:x[0])<line_sep># import pdb # pdb.set_trace() <if_stmt>sorted_para_scores[0][1]<g>0<block_start>top1<augadd>1<block_end><if_stmt>sum([ans[1]<for>ans sorted_para_scores[:3]])<g>0<block_start>top3<augadd>1<block_end><if_stmt>sum([ans[1]<for>ans sorted_para_scores[:5]])<g>0<block_start>top5<augadd>1<block_end><block_end>logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1/len(corpus.questions) top3/len(corpus.questions) top5/len(corpus.questions)))<block_end><def_stmt>run_predictions args data_loader model eval_on_train_set=<false><block_start>args.train_time=<false><line_sep>top_1=0<line_sep>top_3=0<line_sep>top_5=0<line_sep>total_num_questions=0<line_sep>map_counter=0<line_sep>cum_num_lens=[]<line_sep>qid2idx={}<line_sep>sum_num_paras=0<line_sep>all_correct_answers={}<for_stmt>ex_counter,ex tqdm(enumerate(data_loader))<block_start>ret_input=[*ex]<line_sep>y_num_occurrences=ex[3]<line_sep>labels=(y_num_occurrences<g>0)<try_stmt><block_start>topk_paras,docs,ques=model.return_topk(5 *ret_input)<block_end><except_stmt>RuntimeError<block_start><import_stmt>pdb<line_sep>pdb.set_trace()<block_end>num_paras=ex[1]<line_sep>qids=ex[-1]<if_stmt>args.save_para_clf_output<block_start>docs=docs.cpu().data.numpy()<line_sep>ques=ques.cpu().data.numpy()<if_stmt>ex_counter<eq>0<block_start>documents=docs<line_sep>questions=ques<block_end><else_stmt><block_start>documents=np.concatenate([documents docs])<line_sep>questions=np.concatenate([questions ques])<block_end>### create map and cum_num_lens <for_stmt>i,qid enumerate(qids)<block_start>qid2idx[qid]=map_counter<line_sep>sum_num_paras<augadd>num_paras[i]<line_sep>cum_num_lens.append(sum_num_paras)<line_sep>all_correct_answers[map_counter]=[]<line_sep>st=sum(num_paras[:i])<for_stmt>j range(num_paras[i])<block_start><if_stmt>labels[st+j]<eq>1<block_start>all_correct_answers[map_counter].append(j)<block_end><block_end>### Test case: <assert_stmt>len(all_correct_answers[map_counter])<eq>sum(labels.data.numpy()[st:st+num_paras[i]])<line_sep>map_counter<augadd>1<block_end><block_end>counter=0<for_stmt>q_counter,ranked_para_ids enumerate(topk_paras)<block_start>total_num_questions<augadd>1<for_stmt>i,no_paras enumerate(ranked_para_ids)<block_start><if_stmt>labels[counter+no_paras]<eq>1<block_start><if_stmt>i<le>4<block_start>top_5<augadd>1<block_end><if_stmt>i<le>2<block_start>top_3<augadd>1<block_end><if_stmt>i<le>0<block_start>top_1<augadd>1<block_end><break><block_end><block_end>counter<augadd>num_paras[q_counter]<block_end><block_end>logger.info('Accuracy of para classifier when evaluated on the annotated dev set.')<line_sep>logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format((top_1<times>1.0/total_num_questions) (top_3<times>1.0/total_num_questions) (top_5<times>1.0/total_num_questions)))<line_sep>## saving code 
<if_stmt>args.save_para_clf_output<block_start><if_stmt>eval_on_train_set<block_start>OUT_DIR="/iesl/canvas/sdhuliawala/vectors_web/train/"<block_end><else_stmt><block_start>OUT_DIR="/iesl/canvas/sdhuliawala/vectors_web/dev/"<block_end><if_stmt><not>os.path.exists(OUT_DIR)<block_start>os.mkdir(OUT_DIR)<block_end><else_stmt><block_start>shutil.rmtree(OUT_DIR ignore_errors=<true>)<line_sep>os.mkdir(OUT_DIR)<block_end>#Test cases <assert_stmt>cum_num_lens[-1]<eq>documents.shape[0]<assert_stmt>questions.shape[0]<eq>documents.shape[0]<assert_stmt>len(cum_num_lens)<eq>len(qid2idx)<assert_stmt>len(cum_num_lens)<eq>len(all_correct_answers)<line_sep>json.dump(qid2idx open(OUT_DIR+'map.json' 'w'))<line_sep>json.dump(all_correct_answers open(OUT_DIR+'correct_paras.json' 'w'))<line_sep>all_cumlen=np.array(cum_num_lens)<line_sep>np.save(OUT_DIR+"document" documents)<line_sep>np.save(OUT_DIR+"question" questions)<line_sep>np.save(OUT_DIR+"all_cumlen" all_cumlen)<block_end><return>(top_1<times>1.0/total_num_questions) (top_3<times>1.0/total_num_questions) (top_5<times>1.0/total_num_questions)<block_end><def_stmt>save args model optimizer filename epoch=<none><block_start>params={'state_dict':{'para_clf':model.state_dict() 'optimizer':optimizer.state_dict()} 'word_dict':args.word_dict 'feature_dict':args.feature_dict}<line_sep>args.word_dict=<none><line_sep>args.feature_dict=<none><line_sep>params['config']=vars(args)<if_stmt>epoch<block_start>params['epoch']=epoch<block_end><try_stmt><block_start>torch.save(params filename)<line_sep># bad hack for not saving dictionary twice args.word_dict=params['word_dict']<line_sep>args.feature_dict=params['feature_dict']<block_end><except_stmt>BaseException<block_start>logger.warn('[ WARN: Saving failed... continuing anyway. ]')<block_end><block_end># ------------------------------------------------------------------------------ # Main. 
# ------------------------------------------------------------------------------ <def_stmt>main args# PRINT CONFIG <block_start>logger.info('-'<times>100)<line_sep>logger.info('CONFIG:\n%s'%json.dumps(vars(args) indent=4 sort_keys=<true>))<line_sep># small can't test <if_stmt>args.small<eq>1<block_start>args.test=0<block_end><if_stmt>args.small<eq>1<block_start>args.train_file_name=args.train_file_name+"_small"<line_sep>args.dev_file_name=args.dev_file_name+"_small"<if_stmt>args.test<eq>1<block_start>args.test_file_name=args.test_file_name+"_small"<block_end><block_end>args.train_file_name=args.train_file_name+".pkl"<line_sep>args.dev_file_name=args.dev_file_name+".pkl"<if_stmt>args.test<eq>1<block_start>args.test_file_name=args.test_file_name+".pkl"<block_end>logger.info("Loading pickle files")<line_sep>fin=open(os.path.join(args.data_dir args.src "data" args.domain args.train_file_name) "rb")<line_sep>all_train_exs=pickle.load(fin)<line_sep>fin.close()<line_sep>fin=open(os.path.join(args.data_dir args.src "data" args.domain args.dev_file_name) "rb")<line_sep>all_dev_exs=pickle.load(fin)<line_sep>fin.close()<if_stmt>args.test<eq>1<block_start>fin=open(os.path.join(args.data_dir args.src "data" args.domain args.test_file_name) "rb")<line_sep>all_test_exs=pickle.load(fin)<line_sep>fin.close()<block_end>logger.info("Loading done!")<line_sep>logger.info("Num train examples {}".format(len(all_train_exs.paragraphs)))<line_sep>logger.info("Num dev examples {}".format(len(all_dev_exs.paragraphs)))<if_stmt>args.test<eq>1<block_start>logger.info("Num test examples {}".format(len(all_test_exs.paragraphs)))<block_end><if_stmt>args.pretrained<is><none><block_start>ret_model,optimizer,word_dict,feature_dict=init_from_scratch(args all_train_exs)<block_end><else_stmt><block_start>ret_model,optimizer,word_dict,feature_dict=init_from_checkpoint(args)<block_end># make data loader logger.info("Making data loaders...")<if_stmt>word_dict<eq><none><block_start>args.word_dict=utils.build_word_dict(args (all_train_exs all_dev_exs))<line_sep>word_dict=args.word_dict<block_end>train_loader=make_data_loader(args all_train_exs train_time=<false>)<if>args.eval_only<else>make_data_loader(args all_train_exs train_time=<true>)<line_sep>dev_loader=make_data_loader(args all_dev_exs)<if_stmt>args.test<block_start>test_loader=make_data_loader(args all_test_exs)<block_end><if_stmt>args.eval_only<block_start>logger.info("Saving dev paragraph vectors")<line_sep>save_vectors(args ret_model all_dev_exs dev_loader verified_dev_loader=<none>)<line_sep>logger.info("Saving train paragraph vectors")<line_sep>save_vectors(args ret_model all_train_exs train_loader verified_dev_loader=<none> train=<true>)<if_stmt>args.test<block_start>args.is_test=1<line_sep>logger.info("Saving test paragraph vectors")<line_sep>save_vectors(args ret_model all_test_exs test_loader verified_dev_loader=<none>)<block_end><block_end><else_stmt><block_start>get_topk_tfidf(all_dev_exs)<for_stmt>epoch range(args.num_epochs)<block_start>stats['epoch']=epoch<line_sep>train_binary_classification(args ret_model optimizer train_loader verified_dev_loader=<none>)<line_sep>logger.info('checkpointing model at {}'.format(args.model_file))<line_sep>## check pointing## save(args ret_model.model optimizer args.model_file+".ckpt" epoch=stats['epoch'])<line_sep>logger.info("Evaluating on the full dev set....")<line_sep>top1=eval_binary_classification(args ret_model all_dev_exs dev_loader 
verified_dev_loader=<none>)<if_stmt>stats['best_acc']<l>top1<block_start>stats['best_acc']=top1<line_sep>logger.info('Best accuracy {}'.format(stats['best_acc']))<line_sep>logger.info('Saving model at {}'.format(args.model_file))<line_sep>logger.info("Logs saved at {}".format(args.log_file))<line_sep>save(args ret_model.model optimizer args.model_file epoch=stats['epoch'])<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# MODEL <block_start>logger.info('-'<times>100)<line_sep># Parse cmdline args and setup environment args=config.get_args()<line_sep># Set cuda args.cuda=<not>args.no_cuda<and>torch.cuda.is_available()<if_stmt>args.cuda<block_start>torch.cuda.set_device(args.gpu)<block_end># Set random state np.random.seed(args.random_seed)<line_sep>torch.manual_seed(args.random_seed)<if_stmt>args.cuda<block_start>torch.cuda.manual_seed(args.random_seed)<block_end># Set logging logger.setLevel(logging.INFO)<line_sep>fmt=logging.Formatter('%(asctime)s: %(message)s' '%m/%d/%Y %I:%M:%S %p')<line_sep>console=logging.StreamHandler()<line_sep>console.setFormatter(fmt)<line_sep>logger.addHandler(console)<if_stmt>args.log_file<block_start><if_stmt>args.checkpoint<block_start>logfile=logging.FileHandler(args.log_file 'a')<block_end><else_stmt><block_start>logfile=logging.FileHandler(args.log_file 'w')<block_end>logfile.setFormatter(fmt)<line_sep>logger.addHandler(logfile)<block_end>logger.info('[ COMMAND: %s ]'%' '.join(sys.argv))<line_sep># Run! main(args)<block_end>
def load(workflow_version):
    """ Dummy loading function. """
    pass
import pytest
from align.schema.types import BaseModel, Optional, List, Dict
from align.schema.visitor import Visitor, Transformer, cache


@pytest.fixture
def dummy():
    class DummyModel(BaseModel):
        arg1: str
        arg2: Optional[str]
        arg3: List[str]
        arg4: List[Optional[str]]
        arg5: Dict[str, str]
        arg6: Dict[str, Optional[str]]
        arg7: "Optional[DummyModel]"
        arg8: "Optional[List[DummyModel]]"
    DummyModel.update_forward_refs()
    base = DummyModel(
        arg1='arg1',
        arg3=['arg3_1', 'arg3_2'],
        arg4=[],
        arg5={'arg5_k': 'arg5_v'},
        arg6={'arg6_k': None})
    dummy = DummyModel(
        arg1='arg1',
        arg3=['arg3_1', 'arg3_2'],
        arg4=[],
        arg5={'arg5_k': 'arg5_v'},
        arg6={'arg6_k': None},
        arg7=base,
        arg8=[base, base])
    return dummy


def test_visitor_no_output(dummy):
    assert Visitor().visit(dummy) == []


def test_visitor_raw_output(dummy):
    class StrValVisitor(Visitor):
        def visit_str(self, node):
            return node
    assert StrValVisitor().visit(dummy) == [
        'arg1', 'arg3_1', 'arg3_2', 'arg5_v',
        'arg1', 'arg3_1', 'arg3_2', 'arg5_v',
        'arg1', 'arg3_1', 'arg3_2', 'arg5_v',
        'arg1', 'arg3_1', 'arg3_2', 'arg5_v',
    ]


def test_visitor_processed_output(dummy):
    class DummyCounter(Visitor):
        '''Simply counts the number of times the dummy class is encountered'''
        def visit_DummyModel(self, node):
            return sum(self.generic_visit(node)) + 1
    assert DummyCounter().visit(dummy) == 4


def test_transformer_no_visitor(dummy):
    assert Transformer().visit(dummy.arg1) is dummy.arg1
    assert Transformer().visit(dummy.arg2) is dummy.arg2
    assert Transformer().visit(dummy.arg3) is dummy.arg3
    assert Transformer().visit(dummy.arg4) is dummy.arg4
    assert Transformer().visit(dummy.arg5) is dummy.arg5
    assert Transformer().visit(dummy.arg6) is dummy.arg6
    assert Transformer().visit(dummy.arg7) is dummy.arg7
    assert Transformer().visit(dummy.arg8) is dummy.arg8
    assert Transformer().visit(dummy) is dummy


def test_transformer_string_visitor(dummy):
    class AddStringPrefix(Transformer):
        def visit_str(self, node):
            return 'prefix_' + node
    transformed = AddStringPrefix().visit(dummy)
    assert isinstance(transformed, dummy.__class__)
    # String in subtree
    assert transformed.arg1 == 'prefix_arg1'
    assert transformed.arg1 is not dummy.arg1
    # No string in subtree
    assert transformed.arg2 == None
    assert transformed.arg2 is dummy.arg2
    # String in subtree
    assert transformed.arg3 == ['prefix_arg3_1', 'prefix_arg3_2']
    assert transformed.arg3 is not dummy.arg3
    # No string in subtree
    assert transformed.arg4 == []
    assert transformed.arg4 is dummy.arg4, f'old:({id(dummy.arg4)}, {dummy.arg4}), new:({id(transformed.arg4)}, {transformed.arg4})'
    # String in subtree
    assert transformed.arg5 == {'arg5_k': 'prefix_arg5_v'}
    assert transformed.arg5 is not dummy.arg5
    # No string in subtree
    assert transformed.arg6 == {'arg6_k': None}
    assert transformed.arg6 is dummy.arg6
    # Expected result for arg7 and arg8
    basedict = {
        'arg1': 'prefix_arg1',
        'arg2': None,
        'arg3': ['prefix_arg3_1', 'prefix_arg3_2'],
        'arg4': [],
        'arg5': {'arg5_k': 'prefix_arg5_v'},
        'arg6': {'arg6_k': None},
        'arg7': None,
        'arg8': None,
    }
    # String in subtree
    assert transformed.arg7 == basedict
    assert transformed.arg7 is not dummy.arg7
    # String in subtree
    assert transformed.arg8 == [basedict, basedict]
    assert transformed.arg8 is not dummy.arg8
    # Ensure cache is working for generic_visitor
    assert transformed.arg7 is transformed.arg8[0]
    assert transformed.arg8[0] is transformed.arg8[1]


def test_cache(dummy):
    class UncachedTransformer(Transformer):
        def visit_DummyModel(self, node):
            if not hasattr(self, 'top'):
                self.top = node
                return self.generic_visit(node)
            else:
                return node.copy()
    control = UncachedTransformer().visit(dummy)
    assert control.arg7 is not control.arg8[0]
    assert control.arg8[0] is not control.arg8[1]

    class CachedTransformer(Transformer):
        @cache  # DO THIS FOR MOST VISITORS
        def visit_DummyModel(self, node):
            if not hasattr(self, 'top'):
                self.top = node
                return self.generic_visit(node)
            else:
                return node.copy()
    transformed = CachedTransformer().visit(dummy)
    assert transformed.arg7 is transformed.arg8[0]
    assert transformed.arg8[0] is transformed.arg8[1]
from mpunet.logging import ScreenLogger


def warn_sparse_param(logger):
    logger = logger or ScreenLogger
    sparse_err = ("mpunet 0.1.3 or higher requires integer targets"
                  " as opposed to one-hot encoded targets. Setting the 'sparse'"
                  " parameter no longer has any effect and may not be allowed"
                  " in future versions.")
    logger.warn(sparse_err)
# -*- coding: utf-8 -*-
from house_renting.spider_settings import lianjia, a58

BOT_NAME = 'house_renting'

COMMANDS_MODULE = 'house_renting.commands'

SPIDER_MODULES = ['house_renting.spiders']
NEWSPIDER_MODULE = 'house_renting.spiders'

USER_AGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 '
              'Safari/605.1.15 ')
USER_AGENTS = (
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; '
    '.NET CLR 3.0.04506)',
    'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR '
    '2.0.50727)',
    'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR '
    '3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
    'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; '
    '.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR '
    '3.0.04506.30)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 ('
    'Change: 287 c9dfb30)',
    'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0',
    'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 '
    'Safari/535.20',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 '
    'Safari/605.1.15',
    'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',
)

ROBOTSTXT_OBEY = False

DOWNLOAD_DELAY = 10
CONCURRENT_REQUESTS_PER_DOMAIN = 1

COOKIES_ENABLED = False

TELNETCONSOLE_ENABLED = False

DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

SPIDER_MIDDLEWARES = {}

DOWNLOADER_MIDDLEWARES = {
    'house_renting.middlewares.HouseRentingAgentMiddleware': 100,
    'house_renting.middlewares.HouseRentingProxyMiddleware': 200,
    'house_renting.middlewares.HouseRentingRetryMiddleware': 300,
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}

ITEM_PIPELINES = {
    'house_renting.pipelines.HouseRentingPipeline': 100,
    'house_renting.pipelines.DuplicatesPipeline': 200,
    'scrapy.pipelines.images.ImagesPipeline': 300,
    'house_renting.pipelines.ESPipeline': 400,
}

IMAGES_STORE = '/house-renting/data/images'
MEDIA_ALLOW_REDIRECTS = True

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 10
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True

DOWNLOAD_TIMEOUT = 30

RETRY_TIMES = 3

LOG_LEVEL = 'INFO'

SPIDER_SETTINGS = {
    'lianjia': {
        'cities': lianjia.cities,
        'available_cities': lianjia.available_cities,
        'available_cities_map': lianjia.available_cities_map,
    },
    '58': {
        'cities': a58.cities,
        'available_cities': a58.available_cities,
        'available_cities_map': a58.available_cities_map,
    },
}

# Elasticsearch nodes; one or more nodes (a cluster) may be configured.
# Defaults to None, in which case results are not stored to ES.
ELASTIC_HOSTS = [
    {'host': 'elastic', 'port': 9200},
]

REDIS_HOST = 'redis'  # defaults to None, in which case no deduplication is done
REDIS_PORT = 6379  # defaults to 6379
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import (
    url_contains,
    url_matches,
)

url = 'https://selenium.dunossauro.live/aula_10_c.html'

browser = Firefox()
browser.get(url)

wdw = WebDriverWait(browser, 10)

links = browser.find_elements_by_css_selector('.body_b a')
links[1].click()

wdw.until(url_contains('selenium'))

wdw.until(url_matches('http.*live'))
# -*- coding: utf-8 -*- ''' >>> from opem.Dynamic.Padulles_Amphlett import * >>> import shutil >>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":343,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":0.1,"i-stop":4,"i-step":0.1,"Name":"Test"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I : 0.1 E : 6.0684154992732005 V Eta Activation : 0.18557231242539243 V Eta Concentration : 1.948431634418616e-05 V Eta Ohmic : 0.00017548304819292376 V FC Efficiency : 0.6589203974773784 FC Power : 0.5139579100323552 W FC Voltage : 5.1395791003235525 V Loss : 0.18576727978992955 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 0.1010420899676448 W ########### I : 0.2 E : 6.068413961701556 V Eta Activation : 0.23146009851376736 V Eta Concentration : 3.899435456560147e-05 V Eta Ohmic : 0.0003510800160998837 V FC Efficiency : 0.6293798842665886 FC Power : 0.9818326194558784 W FC Voltage : 4.909163097279392 V Loss : 0.23185017288443285 V PH2 : 0.1971566919511875 atm PH2O : 0.24266586776736396 atm PO2 : 0.1906184358000996 atm Power-Thermal : 0.24816738054412169 W ########### I : 0.3 E : 6.068412424065923 V Eta Activation : 0.2583036192079603 V Eta Concentration : 5.853018266659147e-05 V Eta Ohmic : 0.0005267910327125488 V FC Efficiency : 0.6120471438396443 FC Power : 1.4321903165847678 W FC Voltage : 4.7739677219492265 V Loss : 0.25888894042333943 V PH2 : 0.19714264156957312 atm PH2O : 0.24264857417203542 atm PO2 : 0.1906105029619013 atm Power-Thermal : 0.41280968341523216 W ########### I : 0.4 E : 6.068410886366294 V Eta Activation : 0.27735002084480426 V Eta Concentration : 7.809186891953766e-05 V Eta Ohmic : 0.0007026162388380664 V FC Efficiency : 0.5997124668722417 FC Power : 1.8711028966413943 W FC Voltage : 4.677757241603485 V Loss : 0.27813072895256186 V PH2 : 0.19712859118795872 atm PH2O : 0.24263128057670688 atm PO2 : 0.19060257012370302 atm Power-Thermal : 0.588897103358606 W ########### I : 0.5 E : 6.068409348602667 V Eta Activation : 0.2921240370409447 V Eta Concentration : 9.76794818682758e-05 V Eta Ohmic : 0.0008785557847524419 V FC Efficiency : 0.5901164085980564 FC Power : 2.30145399353242 W FC Voltage : 4.60290798706484 V Loss : 0.2931002723075654 V PH2 : 0.19711454080634436 atm PH2O : 0.24261398698137834 atm PO2 : 0.1905946372855047 atm Power-Thermal : 0.7735460064675803 W ########### I : 0.6 E : 6.0684078107750326 V Eta Activation : 0.3041956781419353 V Eta Concentration : 0.00011729309032954864 V Eta Ohmic : 0.0010546098289093816 V FC Efficiency : 0.5822525519832258 FC Power : 2.724941943281497 W FC Voltage : 4.541569905469162 V Loss : 0.30536758106117423 V PH2 : 0.19710049042472996 atm PH2O : 0.2425966933860498 atm PO2 : 0.1905867044473064 atm Power-Thermal : 0.9650580567185031 W ########### I : 0.7 E : 6.068406272883388 V Eta Activation : 0.31440243547871893 V Eta Concentration : 0.00013693276339445145 V Eta Ohmic : 0.0012307785370829418 V FC Efficiency : 0.5755840434599239 FC Power : 3.1426888772911847 W FC Voltage : 4.489555538987407 V Loss : 0.3157701467791963 V PH2 : 0.19708644004311557 atm PH2O : 0.24257939979072127 atm PO2 : 0.19057877160910808 atm Power-Thermal : 1.1623111227088154 W ########### I : 0.8 E : 6.068404734927729 V Eta Activation : 0.3232442167420945 V Eta Concentration : 
0.00015659857042988755 V Eta Ohmic : 0.0014070620817435461 V FC Efficiency : 0.569790429225178 FC Power : 3.555492278365111 W FC Voltage : 4.4443653479563885 V Loss : 0.324807877394268 V PH2 : 0.19707238966150117 atm PH2O : 0.24256210619539273 atm PO2 : 0.1905708387709098 atm Power-Thermal : 1.3645077216348895 W ########### I : 0.9 E : 6.068403196908046 V Eta Activation : 0.3310434726426763 V Eta Concentration : 0.0001762905810800498 V Eta Ohmic : 0.0015834606415773538 V FC Efficiency : 0.5646650099463304 FC Power : 3.96394836982324 W FC Voltage : 4.404387077581378 V Loss : 0.3328032238653337 V PH2 : 0.19705833927988675 atm PH2O : 0.24254481260006414 atm PO2 : 0.19056290593271147 atm Power-Thermal : 1.5710516301767605 W ########### I : 1.0 E : 6.068401658824337 V Eta Activation : 0.33802037026202836 V Eta Concentration : 0.0001960088652678871 V Eta Ohmic : 0.0017599744011013664 V FC Efficiency : 0.5600666527156857 FC Power : 4.368519891182348 W FC Voltage : 4.368519891182348 V Loss : 0.3399763535283976 V PH2 : 0.19704428889827239 atm PH2O : 0.2425275190047356 atm PO2 : 0.1905549730945132 atm Power-Thermal : 1.781480108817652 W ########### I : 1.1 E : 6.068400120676597 V Eta Activation : 0.3443319458183834 V Eta Concentration : 0.00021575349319660598 V Eta Ohmic : 0.0019366035503462617 V FC Efficiency : 0.55589469312397 FC Power : 4.769576467003663 W FC Voltage : 4.335978606366966 V Loss : 0.3464843028619262 V PH2 : 0.197030238516658 atm PH2O : 0.24251022540940706 atm PO2 : 0.19054704025631486 atm Power-Thermal : 1.9954235329963377 W ########### I : 1.2 E : 6.068398582464819 V Eta Activation : 0.35009414904739194 V Eta Concentration : 0.00023552453535116493 V Eta Ohmic : 0.002113348284589288 V FC Efficiency : 0.5520748042471996 FC Power : 5.1674201677537885 W FC Voltage : 4.306183473128157 V Loss : 0.3524430218673324 V PH2 : 0.1970161881350436 atm PH2O : 0.24249293181407852 atm PO2 : 0.19053910741811658 atm Power-Thermal : 2.212579832246212 W ########### I : 1.3 E : 6.068397044188998 V Eta Activation : 0.35539503345654255 V Eta Concentration : 0.0002553220624997795 V Eta Ohmic : 0.0022902088041253615 V FC Efficiency : 0.5485505413555333 FC Power : 5.562302489345107 W FC Voltage : 4.27869422257316 V Loss : 0.3579405643231677 V PH2 : 0.19700213775342923 atm PH2O : 0.24247563821874998 atm PO2 : 0.19053117457991825 atm Power-Thermal : 2.432697510654893 W ########### I : 1.4 E : 6.06839550584913 V Eta Activation : 0.36030304442922906 V Eta Concentration : 0.00027514614569545357 V Eta Ohmic : 0.0024671853140681515 V FC Efficiency : 0.5452780290261753 FC Power : 5.954436076965834 W FC Voltage : 4.253168626404167 V Loss : 0.36304537588899266 V PH2 : 0.19698808737181484 atm PH2O : 0.24245834462342142 atm PO2 : 0.19052324174171997 atm Power-Thermal : 2.6555639230341663 W ########### I : 1.5 E : 6.068393967445208 V Eta Activation : 0.3648724409731032 V Eta Concentration : 0.0002949968562774962 V Eta Ohmic : 0.002644278024175193 V FC Efficiency : 0.5422224856637728 FC Power : 6.344003082266143 W FC Voltage : 4.229335388177429 V Loss : 0.3678117158535559 V PH2 : 0.19697403699020044 atm PH2O : 0.24244105102809288 atm PO2 : 0.19051530890352164 atm Power-Thermal : 2.8809969177338575 W ########### I : 1.6 E : 6.068392428977227 V Eta Activation : 0.36914696409844006 V Eta Concentration : 0.0003148742658730733 V Eta Ohmic : 0.0028214871486926026 V FC Efficiency : 0.5393558719759229 FC Power : 6.731161282259518 W FC Voltage : 4.206975801412199 V Loss : 0.37228332551300575 V PH2 : 0.19695998660858605 atm PH2O : 
0.24242375743276434 atm PO2 : 0.19050737606532336 atm Power-Thermal : 3.1088387177404826 W ########### I : 1.7 E : 6.068390890445182 V Eta Activation : 0.3731623911228729 V Eta Concentration : 0.0003347784463987542 V Eta Ohmic : 0.0029988129062160497 V FC Efficiency : 0.5366552535984287 FC Power : 7.116048662715165 W FC Voltage : 4.185910978067744 V Loss : 0.3764959824754877 V PH2 : 0.19694593622697168 atm PH2O : 0.2424064638374358 atm PO2 : 0.19049944322712503 atm Power-Thermal : 3.338951337284836 W ########### I : 1.8 E : 6.068389351849069 V Eta Activation : 0.3769483587657406 V Eta Concentration : 0.0003547094700620668 V Eta Ohmic : 0.003176255519565377 V FC Efficiency : 0.5341016324451575 FC Power : 7.498786919530012 W FC Voltage : 4.165992733072229 V Loss : 0.380479323755368 V PH2 : 0.19693188584535729 atm PH2O : 0.24238917024210727 atm PO2 : 0.19049151038892673 atm Power-Thermal : 3.5712130804699886 W ########### I : 1.9 E : 6.068387813188879 V Eta Activation : 0.38052969267197334 V Eta Concentration : 0.0003746674093630815 V Eta Ohmic : 0.0033538152156708046 V FC Efficiency : 0.5316790944492106 FC Power : 7.879484179737301 W FC Voltage : 4.147096936703843 V Loss : 0.38425817529700723 V PH2 : 0.1969178354637429 atm PH2O : 0.24237187664677873 atm PO2 : 0.19048357755072845 atm Power-Thermal : 3.8055158202626993 W ########### I : 2.0 E : 6.0683862744646095 V Eta Activation : 0.3839273955127959 V Eta Concentration : 0.00039465233709598025 V Eta Ohmic : 0.003531492225469087 V FC Efficiency : 0.5293741761651032 FC Power : 8.25823714817561 W FC Voltage : 4.129118574087805 V Loss : 0.387853540075361 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 4.041762851824391 W ########### I : 2.1 E : 6.068384735676256 V Eta Activation : 0.38715939375662295 V Eta Concentration : 0.00041466432635066115 V Eta Ohmic : 0.0037092867838082735 V FC Efficiency : 0.5271753860695316 FC Power : 8.635132823818928 W FC Voltage : 4.111968011342347 V Loss : 0.3912833448667819 V PH2 : 0.19688973470051413 atm PH2O : 0.24233728945612165 atm PO2 : 0.19046771187433184 atm Power-Thermal : 4.279867176181073 W ########### I : 2.2 E : 6.068383196823811 V Eta Activation : 0.39024111055794025 V Eta Concentration : 0.0004347034505143372 V Eta Ohmic : 0.0038871991293599716 V FC Efficiency : 0.5250728373249665 FC Power : 9.010249888496427 W FC Voltage : 4.095568131134739 V Loss : 0.39456301313781456 V PH2 : 0.19687568431889974 atm PH2O : 0.2423199958607931 atm PO2 : 0.1904597790361335 atm Power-Thermal : 4.519750111503576 W ########### I : 2.3 E : 6.068381657907269 V Eta Activation : 0.39318591119501267 V Eta Concentration : 0.00045476978327314626 V Eta Ohmic : 0.004065229504538212 V FC Efficiency : 0.5230579622427114 FC Power : 9.383659842634243 W FC Voltage : 4.079852105493149 V Loss : 0.397705910482824 V PH2 : 0.19686163393728537 atm PH2O : 0.24230270226546458 atm PO2 : 0.19045184619793523 atm Power-Thermal : 4.761340157365757 W ########### I : 2.4 E : 6.068380118926627 V Eta Activation : 0.3960054536369255 V Eta Concentration : 0.00047486339861378836 V Eta Ohmic : 0.004243378155424144 V FC Efficiency : 0.5211232875604884 FC Power : 9.755427943132343 W FC Voltage : 4.06476164297181 V Loss : 0.40072369519096346 V PH2 : 0.19684758355567097 atm PH2O : 0.242285408670136 atm PO2 : 0.1904439133597369 atm Power-Thermal : 5.004572056867657 W ########### I : 2.5 E : 6.068378579881878 V Eta Activation : 0.39870996749954657 V Eta Concentration : 0.000494984370825149 V Eta Ohmic : 
0.00442164533169592 V FC Efficiency : 0.5192622556245563 FC Power : 10.12561398467885 W FC Voltage : 4.05024559387154 V Loss : 0.4036265972020676 V PH2 : 0.19683353317405658 atm PH2O : 0.24226811507480747 atm PO2 : 0.19043598052153862 atm Power-Thermal : 5.249386015321152 W ########### I : 2.6 E : 6.068377040773017 V Eta Activation : 0.40130847825734167 V Eta Concentration : 0.0005151327744999589 V Eta Ohmic : 0.004600031286563196 V FC Efficiency : 0.5174690806642298 FC Power : 10.494272955870581 W FC Voltage : 4.036258829180992 V Loss : 0.40642364231840483 V PH2 : 0.19681948279244216 atm PH2O : 0.2422508214794789 atm PO2 : 0.1904280476833403 atm Power-Thermal : 5.495727044129421 W ########### I : 2.7 E : 6.068375501600038 V Eta Activation : 0.4038089891176398 V Eta Concentration : 0.0005353086845364485 V Eta Ohmic : 0.004778536276705824 V FC Efficiency : 0.5157386322058496 FC Power : 10.861455594255196 W FC Voltage : 4.022761331205627 V Loss : 0.40912283407888206 V PH2 : 0.19680543241082776 atm PH2O : 0.24223352788415034 atm PO2 : 0.190420114845142 atm Power-Thermal : 5.7435444057448075 W ########### I : 2.8 E : 6.068373962362936 V Eta Activation : 0.40621862980268425 V Eta Concentration : 0.000555512176140013 V Eta Ohmic : 0.004957160562216277 V FC Efficiency : 0.5140663396997094 FC Power : 11.227208859041653 W FC Voltage : 4.0097174496577335 V Loss : 0.41173130254104057 V PH2 : 0.1967913820292134 atm PH2O : 0.2422162342888218 atm PO2 : 0.19041218200694368 atm Power-Thermal : 5.992791140958347 W ########### I : 2.9 E : 6.068372423061707 V Eta Activation : 0.4085437792118771 V Eta Concentration : 0.0005757433248249061 V Eta Ohmic : 0.005135904406545483 V FC Efficiency : 0.5124481138904448 FC Power : 11.591576336201861 W FC Voltage : 3.997095288345469 V Loss : 0.4142554269432475 V PH2 : 0.196777331647599 atm PH2O : 0.24219894069349326 atm PO2 : 0.1904042491687454 atm Power-Thermal : 6.24342366379814 W ########### I : 3.0 E : 6.0683708836963435 V Eta Activation : 0.4107901672807063 V Eta Concentration : 0.0005960022064159204 V Eta Ohmic : 0.005314768076451755 V FC Efficiency : 0.5108802815228812 FC Power : 11.95459858763542 W FC Voltage : 3.9848661958784737 V Loss : 0.41670093756357396 V PH2 : 0.1967632812659846 atm PH2O : 0.24218164709816473 atm PO2 : 0.19039631633054707 atm Power-Thermal : 6.49540141236458 W ########### I : 3.1 E : 6.068369344266841 V Eta Activation : 0.4129629601316751 V Eta Concentration : 0.0006162888970501038 V Eta Ohmic : 0.0054937518419525275 V FC Efficiency : 0.5093595307581349 FC Power : 12.316313453731704 W FC Voltage : 3.9730043399134525 V Loss : 0.4190730008706778 V PH2 : 0.19674923088437024 atm PH2O : 0.2421643535028362 atm PO2 : 0.1903883834923488 atm Power-Thermal : 6.748686546268298 W ########### I : 3.2 E : 6.068367804773196 V Eta Activation : 0.41506683170178466 V Eta Concentration : 0.0006366034731784721 V Eta Ohmic : 0.005672855976278701 V FC Efficiency : 0.507882865258588 FC Power : 12.676756316854359 W FC Voltage : 3.9614863490169867 V Loss : 0.4213762911512418 V PH2 : 0.19673518050275585 atm PH2O : 0.24214705990750765 atm PO2 : 0.19038045065415046 atm Power-Thermal : 7.003243683145644 W ########### I : 3.3 E : 6.0683662652154 V Eta Activation : 0.417106024344736 V Eta Concentration : 0.0006569460115677318 V Eta Ohmic : 0.005852080755831333 V FC Efficiency : 0.5064475653403494 FC Power : 13.035960331860592 W FC Voltage : 3.950291009654725 V Loss : 0.42361505111213504 V PH2 : 0.19672113012114145 atm PH2O : 0.2421297663121791 atm PO2 : 
0.19037251781595219 atm Power-Thermal : 7.259039668139408 W ########### I : 3.4 E : 6.06836472559345 V Eta Activation : 0.4190844003836543 V Eta Concentration : 0.0006773165893020328 V Eta Ohmic : 0.0060314264601405215 V FC Efficiency : 0.5050511549266622 FC Power : 13.393956628655083 W FC Voltage : 3.9393990084279658 V Loss : 0.4257931434330969 V PH2 : 0.19670707973952706 atm PH2O : 0.24211247271685057 atm PO2 : 0.19036458497775385 atm Power-Thermal : 7.516043371344917 W ########### I : 3.5 E : 6.068363185907339 V Eta Activation : 0.42100548618901656 V Eta Concentration : 0.0006977152837847073 V Eta Ohmic : 0.006210893371826288 V FC Efficiency : 0.5036913732928463 FC Power : 13.750774490894704 W FC Voltage : 3.9287927116842014 V Loss : 0.42791409484462756 V PH2 : 0.1966930293579127 atm PH2O : 0.24209517912152204 atm PO2 : 0.19035665213955555 atm Power-Thermal : 7.774225509105296 W ########### I : 3.6 E : 6.068361646157063 V Eta Activation : 0.4228725100457559 V Eta Concentration : 0.0007181421727400468 V Eta Ohmic : 0.006390481776561363 V FC Efficiency : 0.5023661507925354 FC Power : 14.106441514254398 W FC Voltage : 3.918455976181777 V Loss : 0.4299811339950573 V PH2 : 0.1966789789762983 atm PH2O : 0.2420778855261935 atm PO2 : 0.19034871930135727 atm Power-Thermal : 8.033558485745605 W ########### I : 3.7 E : 6.068360106342617 V Eta Activation : 0.4246884348310017 V Eta Concentration : 0.0007385973342150736 V Eta Ohmic : 0.00657019196303564 V FC Efficiency : 0.50107358791043 FC Power : 14.460983747095012 W FC Voltage : 3.9083739857013544 V Loss : 0.4319972241282524 V PH2 : 0.1966649285946839 atm PH2O : 0.24206059193086493 atm PO2 : 0.19034078646315894 atm Power-Thermal : 8.29401625290499 W ########### I : 3.8 E : 6.068358566463993 V Eta Activation : 0.4264559863331208 V Eta Concentration : 0.0007590808465813247 V Eta Ohmic : 0.006750024222922298 V FC Efficiency : 0.49981193710908595 FC Power : 14.814425815913308 W FC Voltage : 3.8985331094508706 V Loss : 0.43396509140262446 V PH2 : 0.19665087821306954 atm PH2O : 0.2420432983355364 atm PO2 : 0.19033285362496066 atm Power-Thermal : 8.555574184086693 W ########### I : 3.9 E : 6.068357026521189 V Eta Activation : 0.42817767789163225 V Eta Concentration : 0.0007795927885366656 V Eta Ohmic : 0.006929978850845375 V FC Efficiency : 0.49857958703411753 FC Power : 15.166791037577857 W FC Voltage : 3.888920778866117 V Loss : 0.4358872495310143 V PH2 : 0.19663682783145514 atm PH2O : 0.24202600474020786 atm PO2 : 0.19032492078676233 atm Power-Thermal : 8.818208962422144 W ########### Report is generating ... Done! 
>>> Padulles_Amphlett_Data["Status"] True >>> Padulles_Amphlett_Data["P"][5] 2.724941943281497 >>> Padulles_Amphlett_Data["I"][5] 0.6 >>> Padulles_Amphlett_Data["V"][5] 4.541569905469162 >>> Padulles_Amphlett_Data["EFF"][5] 0.5822525519832258 >>> Padulles_Amphlett_Data["PO2"][5] 0.1905867044473064 >>> Padulles_Amphlett_Data["PH2"][5] 0.19710049042472996 >>> Padulles_Amphlett_Data["PH2O"][5] 0.2425966933860498 >>> Padulles_Amphlett_Data["Ph"][5] 0.9650580567185031 >>> Padulles_Amphlett_Data["VE"][5] 4.553525621759973 >>> Padulles_Amphlett_Data["V0"] 4.698326931114575 >>> Padulles_Amphlett_Data["K"] -0.24133551559100302 >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False) >>> Padulles_Amphlett_Data["Status"] False >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4) 2.9 >>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None) [Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None) >>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":2,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":5,"i-stop":0.1,"i-step":-2,"Name":"Test"} >>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True) ########### Padulles-Amphlett-Model Simulation ########### Analyzing . . . I : 0.1 E : 6.14455344314445 V Eta Activation : 0.9092187394310518 V Eta Concentration : 1.1361117401857817e-07 V Eta Ohmic : 4.63717533307516e+269 V FC Efficiency : -2.9725482904327946e+269 FC Power : -2.3185876665375803e+269 W FC Voltage : -2.3185876665375803e+270 V Loss : 4.63717533307516e+269 V PH2 : 0.19717074233280188 atm PH2O : 0.2426831613626925 atm PO2 : 0.1906263686382979 atm Power-Thermal : 2.3185876665375803e+269 W ########### I : 2.0 E : 6.144553272737403 V Eta Activation : 0.9103753288368093 V Eta Concentration : 2.301179808139826e-06 V Eta Ohmic : 9.331810347802308e+270 V FC Efficiency : -5.981929710129684e+270 FC Power : -9.331810347802308e+271 W FC Voltage : -4.665905173901154e+271 V Loss : 9.331810347802308e+270 V PH2 : 0.19690378508212852 atm PH2O : 0.2423545830514502 atm PO2 : 0.19047564471253012 atm Power-Thermal : 9.331810347802308e+271 W ########### I : 4.0 E : 6.144553093215826 V Eta Activation : 0.9106431331307118 V Eta Concentration : 4.6654999364844955e-06 V Eta Ohmic : 1.8785852500552963e+271 V FC Efficiency : -1.2042213141380103e+271 FC Power : -3.757170500110593e+272 W FC Voltage : -9.392926250276482e+271 V Loss : 1.8785852500552963e+271 V PH2 : 0.19662277744984075 atm PH2O : 0.24200871114487932 atm PO2 : 0.19031698794856405 atm Power-Thermal : 3.757170500110593e+272 W ########### Report is generating ... Warning : The value of I(>0.1) leads to minus amount of V, please check your inputs Done! >>> shutil.rmtree("Padulles-Amphlett") '''<line_sep>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil
import sys
import tempfile

from observations.r.rep_vict import rep_vict


def test_rep_vict():
    """Test module rep_vict.py by downloading
    rep_vict.csv and testing shape of
    extracted data has 8 rows and 8 columns
    """
    test_path = tempfile.mkdtemp()
    x_train, metadata = rep_vict(test_path)
    try:
        assert x_train.shape == (8, 8)
    except:
        shutil.rmtree(test_path)
        raise
# coding: utf8
"""
@Author : <NAME>
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product


def eval_cls(Preds, GTs):
    acc = torch.mean((Preds == GTs).float())
    return acc.item()


class Cross_Entropy_Loss_Handler:

    def __init__(self):
        self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()

    # interface function
    def compute_loss(self, tgts, Pred, GT):
        """
        tgts: list of target names
        GT  : dict of ground truth for each target  BxHxW
        Pred: dict of prediction for each target    BxHxWx4
        """
        mask = GT['mask']
        Loss = edict()
        for tgt in tgts:
            gt = GT[tgt][mask].view(-1)                # as (BxK,)
            pr = Pred[tgt][mask].view(gt.size(0), -1)  # Pred[tgt][mask]  (BxK, 4)
            Loss[tgt] = self.cross_entropy_loss(pr, gt).double()
        return Loss


class Neg_Dot_Loss_Handler:

    def __init__(self):
        pass

    def compute_loss(self, tgts, Pred, GT):
        Loss = edict()
        for tgt in tgts:
            """ Bug fixed on 22 Aug 2018
                torch.dot can only be applied to 1-dim tensor
                Don't know why there's no error.
            """
            # Loss[tgt] = torch.mean( -torch.dot(GT[tgt], Pred[tgt]) )  # In fact here only does -GT[tgt]*Pred[tgt]
            Loss[tgt] = torch.mean(-torch.sum(GT[tgt] * Pred[tgt], dim=1))
        return Loss


class Cos_Proximity_Loss_Handler:

    def __init__(self):
        self.cos_sim = nn.CosineSimilarity(dim=1).cuda()

    def compute_loss(self, tgts, Pred, GT):
        """
        tgts: list of target names. In this case has to be tgts=['norm']
        GT  : dict of ground truth for each target  BxHxWx3
        Pred: dict of prediction for each target    BxHxWx3
        """
        mask = GT['mask']
        Loss = edict()
        Errs = edict()
        for tgt in tgts:
            cos_sim = self.cos_sim(Pred[tgt][mask], GT[tgt][mask])
            Loss[tgt] = torch.mean(1 - cos_sim)  # use 1-cos(theta) to make loss positive.
            Errs[tgt] = torch.acos(cos_sim.clamp(-1, 1)) * 180. / np.pi  # .clip(-1,1)
        return Loss, Errs


class Smooth_L1_Loss_Handler:

    def __init__(self):
        self.smooth_l1_loss = nn.SmoothL1Loss().cuda()

    def compute_loss(self, tgts, Pred, GT):
        """
        tgts: list of target names  e.g. tgts=['a', 'e', 't']
        GT  : dict of ground truth for each target
        Pred: dict of prediction for each target
        """
        Loss = edict()
        for tgt in tgts:
            Loss[tgt] = self.smooth_l1_loss(Pred[tgt], GT[tgt])  # [warning] pred first, gt second
        return Loss
# -*- coding: utf-8 -*-
""" The UCB policy for bounded bandits, with UCB indexes computed with Julia.
Reference: [Lai & Robbins, 1985].

.. warning:: Using a Julia function *from* Python will not speed up anything, as there is a lot of overhead in the "bridge" protocol used by pyjulia.
    The idea of naively using a tiny Julia function to speed up computations is basically useless.
    A naive benchmark showed that in this approach, :class:`UCBjulia` (used within Python) is about 125 times slower (!) than :class:`UCB`.

.. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
"""
from __future__ import division, print_function  # Python 2 compatibility

__author__ = "<NAME>"
__version__ = "0.9"

# WARNING: this is a HUGE hack to fix a mystery bug on importing this policy
from sys import path
from os.path import dirname
path.insert(0, '/'.join(dirname(__file__).split('/')[:-1]))

try:
    from .IndexPolicy import IndexPolicy
except ImportError:
    from IndexPolicy import IndexPolicy


class UCBjulia(IndexPolicy):
    """ The UCB policy for bounded bandits, with UCB indexes computed with Julia.
    Reference: [Lai & Robbins, 1985].

    .. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
    """

    def __init__(self, nbArms, lower=0., amplitude=1.):
        """ Will fail directly if the bridge with julia is unavailable or buggy."""
        super(UCBjulia, self).__init__(nbArms, lower=lower, amplitude=amplitude)
        self.t = 0
        # Importing the julia module and creating the bridge
        try:
            import julia
        except ImportError as e:
            print("Error: unable to load the 'julia' Python module. Install with 'pip install julia', or see https://github.com/JuliaPy/pyjulia/")  # DEBUG
            raise e
        _j = julia.Julia()
        try:
            self._index_function = _j.evalfile("Policies/UCBjulia.jl")
        except RuntimeError:
            try:
                self._index_function = _j.evalfile("UCBjulia.jl")
            except RuntimeError:
                raise ValueError("Error: Unable to load 'UCBjulia.jl' julia file.")  # WARNING
        try:
            self._index_function([1], [1], 1, 1)
        except (RuntimeError, ValueError):
            raise ValueError("Error: the index function loaded from 'UCBjulia.jl' is bugged or unavailable.")  # WARNING

    def computeIndex(self, arm):
        r""" Compute the current index, at time t and after :math:`N_k(t)` pulls of arm k:

        .. math:: I_k(t) = \frac{X_k(t)}{N_k(t)} + \sqrt{\frac{2 \log(t)}{N_k(t)}}.
        """
        # WARNING: the 'arm + 1' part comes from the difference between 0-based indexes
        # for Python and the 1-based indexes in Julia. The rest works pretty well!
        return self._index_function(self.rewards, self.pulls, self.t, arm + 1)
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument """STFT operator"""<import_from_stmt>math pi<import_stmt>tvm<import_from_stmt>tvm te tir<import_from_stmt>..utils ceil_div<def_stmt>_get_max_threads batch_row<block_start>max_threads=tvm.target.Target.current(allow_none=<false>).max_num_threads<line_sep><return>tir.min(batch_row max_threads)<block_end><def_stmt>stft data n_fft hop_length win_length window normalized onesided output_shape <block_start>""" The STFT computes the Fourier transform of short overlapping windows of the input. This gives frequency components of the signal as they change over time. Parameters ---------- data : relay.Expr Either a 1-D tensor or a 2-D batch tensor. n_fft : int The size of Fourier transform hop_length : int The distance between neighboring sliding window frames win_length : int The size of window frame and STFT filter window : relay.Expr A 1-D tensor window frame normalized : bool Whether to return the normalized STFT results onesided : bool Whether to return onesided result or fill with conjugate symmetry Returns ------- output : relay.Expr Tensor containing the STFT result Examples -------- .. 
code-block:: python data = [1, 2, 3, 4, 5, 6] window = [4, 3, 2] [n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True] relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided) -> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]] """<def_stmt>gen_ir data_ptr n_fft hop_length win_length window_ptr normalized onesided output_ptr <block_start>ib=tir.ir_builder.create()<line_sep>data=ib.buffer_ptr(data_ptr)<line_sep>window=ib.buffer_ptr(window_ptr)<line_sep>output=ib.buffer_ptr(output_ptr)<line_sep>max_threads=_get_max_threads(output_ptr.shape[0]<times>output_ptr.shape[1])<line_sep>output_size=output_ptr.shape[0]<times>output_ptr.shape[1]<times>output_ptr.shape[2]<with_stmt>ib.new_scope()<block_start>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(output_size max_threads)<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>output_size)<block_start>matrix_size=output_ptr.shape[1]<times>output_ptr.shape[2]<line_sep>batch=tir.floordiv(tid matrix_size)<line_sep>row=tir.floordiv(tir.indexmod(tid matrix_size) output_ptr.shape[2])<line_sep>col=tir.indexmod(tir.indexmod(tid matrix_size) output_ptr.shape[2])<line_sep>output[batch row col 0]=tir.Cast(data_ptr.dtype 0)<line_sep>output[batch row col 1]=tir.Cast(data_ptr.dtype 0)<with_stmt>ib.for_range(0 win_length)<as>wlen<block_start>output[batch row col 0]<augadd>(window[wlen]<times>data[batch col<times>hop_length+wlen]<times>tir.cos(2<times>pi<times>row<times>wlen/win_length))<line_sep>output[batch row col 1]<augsub>(window[wlen]<times>data[batch col<times>hop_length+wlen]<times>tir.sin(2<times>pi<times>row<times>wlen/win_length))<block_end><with_stmt>ib.if_scope(normalized)<block_start>output[batch row col 0]<augdiv>tir.sqrt(tir.const(n_fft "float32"))<line_sep>output[batch row col 1]<augdiv>tir.sqrt(tir.const(n_fft "float32"))<block_end><block_end><block_end><return>ib.get()<block_end>output_buf=tir.decl_buffer(output_shape data.dtype "output_buf")<line_sep><return>te.extern(output_shape [data window] <lambda>ins outs:gen_ir(ins[0] n_fft hop_length win_length ins[1] normalized onesided outs[0]) dtype=[data.dtype] out_buffers=[output_buf] name="stft_cuda" tag="stft_cuda" )<block_end>
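The generated IR above is a windowed naive DFT; a NumPy sketch of the same arithmetic, useful for checking small inputs (the output-shape derivation assumes the usual onesided convention and is not taken from the code above):

import numpy as np

def stft_ref(data, n_fft, hop_length, win_length, window, normalized, onesided):
    # data: (batch, samples); returns (batch, rows, cols, 2) real/imag parts,
    # accumulating exactly as the loop nest in gen_ir does.
    n_rows = n_fft // 2 + 1 if onesided else n_fft
    n_cols = (data.shape[1] - n_fft) // hop_length + 1
    out = np.zeros((data.shape[0], n_rows, n_cols, 2))
    for b in range(data.shape[0]):
        for r in range(n_rows):
            for c in range(n_cols):
                for w in range(win_length):
                    x = window[w] * data[b, c * hop_length + w]
                    out[b, r, c, 0] += x * np.cos(2 * np.pi * r * w / win_length)
                    out[b, r, c, 1] -= x * np.sin(2 * np.pi * r * w / win_length)
    return out / np.sqrt(n_fft) if normalized else out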
<import_from_stmt>nose.tools *<import_stmt>title_cleaner<line_sep>TRUTH=[(<true> 'Manhattan: 1st Ave. - 34th St. E.') (<true> 'Queens: Hoyt Avenue - 24th Street') (<false> "Queens: Flushing Meadow Park - New York World's Fair of 1939-40 - [Industrial exhibits.]") (<false> 'Fifth Avenue - 90th Street, southeast corner') (<false> 'Recreation and hobbies - Miscellaneous - Children.') (<true> 'Manhattan: 59th Street - 6th Avenue') (<true> 'Queens: Queens Boulevard - Junction Boulevard') (<true> 'Manhattan: 50th Street (West) - 5th Avenue') (<true> 'Manhattan: 5th Avenue - 78th Street') (<true> 'Manhattan: 5th Avenue - 33rd Street') (<true> 'Queens: Queens Boulevard - 62nd Avenue') (<false> 'Manhattan: Battery Park.') (<false> 'Manhattan: Central Park - The Sailboat Pool') (<true> 'Queens: Colonial Avenue - 62nd Drive') (<true> 'Queens: Woodhaven Blvd - Fleet Street') (<true> 'Richmond: New Dorp Lane - Cedar Grove Avenue')]<def_stmt>test_clean_title <block_start><for_stmt>correct,title TRUTH<block_start><assert_stmt>correct<eq>title_cleaner.is_pure_location(title) '%s %s'%(correct title)<block_end><block_end>
<import_from_stmt>.environment *<import_from_stmt>.brain *<import_from_stmt>.exception *<line_sep>
<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>math<import_stmt>operator<import_stmt>csv<import_stmt>glob os<import_stmt>xlrd<import_stmt>cv2<import_stmt>pandas<as>pd<import_from_stmt>sklearn.svm SVC<import_from_stmt>collections Counter<import_from_stmt>sklearn.metrics confusion_matrix<import_stmt>scipy.io<as>sio<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers LSTM Dense TimeDistributed<import_from_stmt>keras.utils np_utils<import_from_stmt>keras metrics<import_from_stmt>keras backend<as>K<import_from_stmt>labelling collectinglabel<import_from_stmt>reordering readinput<import_from_stmt>evaluationmatrix fpr<line_sep>workplace='/media/ice/OS/Datasets/CASME2_TIM/'<line_sep>dB="CASME2_TIM"<line_sep>rootpath='/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'<if_stmt>dB<eq>"CASME2_raw"<block_start>inputDir='/media/ice/OS/Datasets/CASME2-RAW/'<line_sep>resizedFlag=1<block_end><elif_stmt>dB<eq>"CASME2_large"<block_start>inputDir='/media/ice/OS/Datasets/CASME 2/'<line_sep>wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME 2/CASME2_label_Ver_2.xls')<line_sep>ws=wb.sheet_by_index(0)<line_sep>colm=ws.col_slice(colx=0 start_rowx=1 end_rowx=<none>)<line_sep>iD=[str(x.value)<for>x colm]<line_sep>colm=ws.col_slice(colx=1 start_rowx=1 end_rowx=<none>)<line_sep>vidName=[str(x.value)<for>x colm]<line_sep>colm=ws.col_slice(colx=6 start_rowx=1 end_rowx=<none>)<line_sep>expression=[str(x.value)<for>x colm]<line_sep>table=np.transpose(np.array([np.array(iD) np.array(vidName) np.array(expression)] dtype=str))<line_sep>subjects=26<line_sep>samples=246<line_sep>n_exp=5<line_sep>resizedFlag=1<line_sep>r=68<line_sep>w=56<line_sep>VidPerSubject=[9 13 7 5 19 5 9 3 13 13 10 12 8 4 3 4 34 3 15 11 2 2 12 7 7 16]<line_sep>IgnoredSamples=['sub09/EP13_02' 'sub09/EP02_02f' 'sub10/EP13_01' 'sub17/EP15_01' 'sub17/EP15_03' 'sub19/EP19_04' 'sub24/EP10_03' 'sub24/EP07_01' 'sub24/EP07_04f' 'sub24/EP02_07' 'sub26/EP15_01']<line_sep>listOfIgnoredSamples=[]<for_stmt>s range(len(IgnoredSamples))<block_start><if_stmt>s<eq>0<block_start>listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]<block_end><else_stmt><block_start>listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])<block_end><block_end><block_end><elif_stmt>dB<eq>"CASME2_TIM"<block_start>inputDir='/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'#replace with croppoed for testing wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME2_label_Ver_2.xls')<line_sep>ws=wb.sheet_by_index(0)<line_sep>colm=ws.col_slice(colx=0 start_rowx=1 end_rowx=<none>)<line_sep>iD=[str(x.value)<for>x colm]<line_sep>colm=ws.col_slice(colx=1 start_rowx=1 end_rowx=<none>)<line_sep>vidName=[str(x.value)<for>x colm]<line_sep>colm=ws.col_slice(colx=6 start_rowx=1 end_rowx=<none>)<line_sep>expression=[str(x.value)<for>x colm]<line_sep>table=np.transpose(np.array([np.array(iD) np.array(vidName) np.array(expression)] dtype=str))<line_sep># print(type(table)) r=50<line_sep>w=50<line_sep>resizedFlag=1<line_sep>subjects=26<line_sep>samples=246<line_sep>n_exp=5<line_sep>VidPerSubject=[9 13 7 5 19 5 9 3 13 13 10 12 8 4 3 4 34 3 15 11 2 2 12 7 7 16]<line_sep>IgnoredSamples=['sub09/EP13_02/' 'sub09/EP02_02f/' 'sub10/EP13_01/' 'sub17/EP15_01/' 'sub17/EP15_03/' 'sub19/EP19_04/' 'sub24/EP10_03/' 'sub24/EP07_01/' 'sub24/EP07_04f/' 'sub24/EP02_07/' 'sub26/EP15_01/']<line_sep>listOfIgnoredSamples=[]<for_stmt>s 
range(len(IgnoredSamples))<block_start><if_stmt>s<eq>0<block_start>listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]<block_end><else_stmt><block_start>listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])<block_end><block_end><block_end><elif_stmt>dB<eq>"SMIC"<block_start>inputDir="/srv/oyh/DataBase/SMIC/HS_naming_modified/"<line_sep>wb=xlrd.open_workbook('/srv/oyh/DataBase/SMIC_label.xlsx')<line_sep>ws=wb.sheet_by_index(0)<line_sep>colm=ws.col_slice(colx=1 start_rowx=1 end_rowx=<none>)<line_sep>vidName=[str(x.value)<for>x colm]<line_sep>colm=ws.col_slice(colx=2 start_rowx=1 end_rowx=<none>)<line_sep>expression=[int(x.value)<for>x colm]<line_sep>table=np.transpose(np.array([np.array(vidName) np.array(expression)] dtype=str))<line_sep>samples=164<line_sep>#6 samples are excluded subjects=16<line_sep>n_exp=3<line_sep>r=170<line_sep>w=140<line_sep>VidPerSubject=[6 6 39 19 2 4 13 4 7 9 10 10 4 7 2 22]<line_sep>listOfIgnoredSamples=[]<line_sep>resizedFlag=1<block_end><else_stmt><block_start>print("NOT in the selection.")<block_end>######### Reading in the input images ######## SubperdB=[]<for_stmt>sub sorted([infile<for>infile os.listdir(inputDir)])<block_start>VidperSub=[]<for_stmt>vid sorted([inrfile<for>inrfile os.listdir(inputDir+sub)])<block_start>path=inputDir+sub+'/'+vid+'/'<if_stmt>path<in>listOfIgnoredSamples<block_start><continue><block_end># print(dB) # print(path) imgList=readinput(path dB)<line_sep>numFrame=len(imgList)<if_stmt>resizedFlag<eq>1<block_start>col=w<line_sep>row=r<block_end><else_stmt><block_start>img=cv2.imread(imgList[0])<line_sep>[row col _l]=img.shape<block_end>## ##read the label for each input video collectinglabel(table sub[3:] vid workplace+'Classification/' dB)<for_stmt>var range(numFrame)<block_start>img=cv2.imread(imgList[var])<line_sep>[_ _ dim]=img.shape<if_stmt>dim<eq>3<block_start>img=cv2.cvtColor(img cv2.COLOR_BGR2GRAY)<block_end><if_stmt>resizedFlag<eq>1#in resize function, [col,row] <block_start>img=cv2.resize(img (col row))<block_end><if_stmt>var<eq>0<block_start>FrameperVid=img.flatten()<block_end><else_stmt><block_start>FrameperVid=np.vstack((FrameperVid img.flatten()))<block_end><block_end>VidperSub.append(FrameperVid)<block_end>SubperdB.append(VidperSub)<block_end>##### Setting up the LSTM model ######## data_dim=r<times>w# 2500 print(data_dim)<line_sep>timesteps=10<line_sep># LSTM1 = LSTM(2500, return_sequences=True, input_shape=(timesteps, data_dim)) model=Sequential()<line_sep># model.add(TimeDistributed(Dense(data_dim), input_shape=(timesteps, data_dim))) model.add(LSTM(2500 return_sequences=<true> input_shape=(timesteps data_dim)))<line_sep>model.add(LSTM(500 return_sequences=<false>))<line_sep>##model.add(LSTM(500,return_sequences=True)) ##model.add(LSTM(50,return_sequences=False)) model.add(Dense(50 activation='sigmoid'))<line_sep>model.add(Dense(5 activation='sigmoid'))<line_sep>model.compile(loss='categorical_crossentropy' optimizer='Adam' metrics=[metrics.categorical_accuracy])<line_sep>#### generate the label based on subjects ######### label=np.loadtxt(workplace+'Classification/'+dB+'_label.txt')<line_sep>labelperSub=[]<line_sep>counter=0<for_stmt>sub range(subjects)<block_start>numVid=VidPerSubject[sub]<line_sep>labelperSub.append(label[counter:counter+numVid])<line_sep>counter=counter+numVid<block_end>##print(np.shape(labelperSub[1])) ##print(labelperSub[1]) ######## Seperating the input files into LOSO CV ######## tot_mat=np.zeros((n_exp n_exp))<for_stmt>sub 
range(subjects)<block_start>Train_X=[]<line_sep>Train_Y=[]<line_sep>Test_X=SubperdB[sub]<line_sep>Test_X=np.array(Test_X)<line_sep>Test_Y=labelperSub[sub]<line_sep>Test_Yy=np_utils.to_categorical(Test_Y 5)<line_sep>print(Test_Y)<line_sep>## print(np.shape(Test_Y)) 
<if_stmt>sub<eq>0<block_start><for_stmt>i range(1 subjects)<block_start>Train_X.append(SubperdB[i])<line_sep>Train_Y.append(labelperSub[i])<block_end><block_end><elif_stmt>sub<eq>subjects-1<block_start><for_stmt>i range(subjects-1)<block_start>Train_X.append(SubperdB[i])<line_sep>Train_Y.append(labelperSub[i])<block_end><block_end><else_stmt><block_start><for_stmt>i range(subjects)<block_start><if_stmt>sub<eq>i<block_start><continue><block_end><else_stmt><block_start>Train_X.append(SubperdB[i])<line_sep>Train_Y.append(labelperSub[i])<block_end><block_end><block_end># print(Train_X) # Train_X=np.hstack(Train_X) # print(Train_X.shape) 
Train_X=np.vstack(Train_X)# changed to hstack from vstack 
# print(Train_X.shape) # Train_X = Train_X.shape[1:] # print(Train_X.shape) # Train_X = np.expand_dims(Train_X, axis=2) # Train_X = np.reshape(Train_X, Train_X.shape + (1, 1,) ) # Train_X = np.reshape( Train_X, Train_X.shape ) # Train_X = np.reshape(2500, 16077) 
print(Train_X.shape)<line_sep>Train_Y=np.hstack(Train_Y)<line_sep>Train_Y=np_utils.to_categorical(Train_Y 5)<line_sep>print(np.shape(Train_Y))<line_sep>print(np.shape(Train_X))<line_sep>print(np.shape(Test_Y))<line_sep>print(np.shape(Test_X))<line_sep>model.fit(Train_X Train_Y validation_split=0.05 epochs=1 batch_size=20)<line_sep>model.summary()<line_sep>predict=model.predict_classes(Test_X)<line_sep>## predict[predict>= 0.5] = 1 ## predict[predict<0.5] = 0 
print(predict)<line_sep>print(Test_Y)<line_sep>#compute the ConfusionMat 
ct=confusion_matrix(Test_Y predict)<line_sep>#check the order of the CT 
order=np.unique(np.concatenate((predict Test_Y)))<line_sep>#create an array to hold the CT for each CV 
mat=np.zeros((n_exp n_exp))<line_sep>#put the order accordingly, in order to form the overall ConfusionMat 
<for_stmt>m range(len(order))<block_start><for_stmt>n range(len(order))<block_start>mat[int(order[m]) int(order[n])]=ct[m n]<block_end><block_end>tot_mat=mat+tot_mat<line_sep># write each CT of each CV into .txt file 
<if_stmt><not>os.path.exists(workplace+'Classification/'+'Result/'+dB+'/')<block_start>os.mkdir(workplace+'Classification/'+'Result/'+dB+'/')<block_end><with_stmt>open(workplace+'Classification/'+'Result/'+dB+'/sub_CT.txt' 'a')<as>csvfile<block_start>thewriter=csv.writer(csvfile delimiter=' ')<line_sep>thewriter.writerow('Sub '+str(sub+1))<line_sep>thewriter=csv.writer(csvfile dialect=csv.excel_tab)<for_stmt>row ct<block_start>thewriter.writerow(row)<block_end>thewriter.writerow(order)<line_sep>thewriter.writerow('\n')<block_end><if_stmt>sub<eq>subjects-1# compute the accuracy, F1, P and R from the overall CT 
<block_start>microAcc=np.trace(tot_mat)/np.sum(tot_mat)<line_sep>[f1 p r]=fpr(tot_mat n_exp)<line_sep># save into a .txt file; keep the summary rows inside the with-block so the file is still open 
<with_stmt>open(workplace+'Classification/'+'Result/'+dB+'/final_CT.txt' 'w')<as>csvfile<block_start>thewriter=csv.writer(csvfile dialect=csv.excel_tab)<for_stmt>row tot_mat<block_start>thewriter.writerow(row)<block_end>thewriter=csv.writer(csvfile delimiter=' ')<line_sep>thewriter.writerow('micro:'+str(microAcc))<line_sep>thewriter.writerow('F1:'+str(f1))<line_sep>thewriter.writerow('Precision:'+str(p))<line_sep>thewriter.writerow('Recall:'+str(r))<block_end><block_end><block_end>
# Django Library <import_from_stmt>django.urls path<line_sep># Localfolder Library <import_from_stmt>.views.paypal_config UpdatePaypalConfigView<line_sep># http://www.secnot.com/django-shop-paypal-rest-1.html app_name='paypal'<line_sep>urlpatterns=[path('paypal-config/<int:pk>' UpdatePaypalConfigView.as_view() name='paypal-config') ]<line_sep>
<import_from_stmt>.base *<line_sep>DEBUG=<false><line_sep>TEMPLATE_DEBUG=DEBUG<line_sep># Use the cached template loader so template is compiled once and read from # memory instead of reading from disk on each load. TEMPLATE_LOADERS=(('django.template.loaders.cached.Loader' ('django.template.loaders.filesystem.Loader' 'django.template.loaders.app_directories.Loader' )) )<line_sep># Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS=['.glucosetracker.net']<line_sep># Make this unique, and don't share it with anybody. SECRET_KEY=os.environ['DJANGO_SECRET_KEY']<line_sep>DATABASES={'default':{'ENGINE':'django.db.backends.postgresql_psycopg2' 'NAME':'glucosetracker' 'USER':os.environ['DATABASE_USER'] 'PASSWORD':os.environ['DATABASE_PASSWORD'] 'HOST':'localhost' 'PORT':'' }}<line_sep># 3rd-party apps tracking IDs. INTERCOM_APP_ID='a6d0326564469dfd7f7d9b1bfc909ee3815a85a8'<line_sep>GOOGLE_ANALYTICS_TRACKING_ID='UA-45698014-1'<line_sep>ADDTHIS_PUBLISHER_ID='ra-52fffdf9456ec7d2'<line_sep># The 'From:' header of admin-related emails. DEFAULT_FROM_EMAIL='<EMAIL>'<line_sep>ADMINS=(('Local Admin' '<EMAIL>') )<line_sep>MANAGERS=ADMINS<line_sep>CONTACTS={'support_email':'GlucoseTracker.net <<EMAIL>>' 'admin_email':'<EMAIL>' 'info_email':'GlucoseTracker.net <<EMAIL>>' }<line_sep># Subscribers app settings SEND_SUBSCRIBERS_EMAIL_CONFIRMATION=<true><line_sep># Django-storages settings DEFAULT_FILE_STORAGE='core.s3utils.MediaRootS3BotoStorage'<line_sep>AWS_ACCESS_KEY_ID=os.environ['AWS_ACCESS_KEY_ID']<line_sep>AWS_SECRET_ACCESS_KEY=os.environ['AWS_SECRET_ACCESS_KEY']<line_sep>AWS_STORAGE_BUCKET_NAME='glucosetracker-assets'<line_sep>AWS_QUERYSTRING_AUTH=<false><line_sep>MEDIA_URL='//%s.s3.amazonaws.com/%s/'%(AWS_STORAGE_BUCKET_NAME MEDIA_ROOT)<line_sep>
"""OpHandler for OutputNonPassthrough ops. OutputNonPassthrough ops take their regularizer from the output and do not passthrough the regularizer to their input. This is the default OpHandler for ops like Conv2D and MatMul when L1-gamma regularization is used. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>morph_net.framework op_handler<import_from_stmt>morph_net.framework op_handler_util<class_stmt>OutputNonPassthroughOpHandler(op_handler.OpHandler)<block_start>"""OpHandler implementation for OutputNonPassthrough operations. These ops take their regularizer from the output and do not passthrough the regularizer to their input. """<line_sep>@property<def_stmt>is_source_op self<block_start><return><false><block_end>@property<def_stmt>is_passthrough self<block_start><return><false><block_end><def_stmt>assign_grouping self op op_reg_manager<block_start>"""Assign grouping to the given op and updates the manager. Args: op: tf.Operation to assign grouping to. op_reg_manager: OpRegularizerManager to keep track of the grouping. """<line_sep># Check if all input ops have groups, or tell the manager to process them. input_ops=op_handler_util.get_input_ops(op op_reg_manager)<line_sep>input_ops_without_group=op_handler_util.get_ops_without_groups(input_ops op_reg_manager)<line_sep># Check if all output ops have groups, or tell the manager to process them. output_ops=op_handler_util.get_output_ops(op op_reg_manager)<line_sep>output_ops_without_group=op_handler_util.get_ops_without_groups(output_ops op_reg_manager)<line_sep># Remove non-passthrough ops from outputs ops to group with. output_ops=op_handler_util.remove_non_passthrough_ops(output_ops op_reg_manager)<line_sep># Only group with ops that have the same size. Process the ops that have # mismatched size. output_ops_to_group,output_ops_to_process=(op_handler_util.separate_same_size_ops(op output_ops))<line_sep># Also process ungrouped ops. input_ops_to_process=input_ops_without_group<line_sep>output_ops_to_process.extend(output_ops_without_group)<line_sep># Align op slice sizes if needed. op_slices=op_reg_manager.get_op_slices(op)<line_sep>output_op_slices=op_handler_util.get_op_slices(output_ops_to_group op_reg_manager)<line_sep>aligned_op_slice_sizes=op_handler_util.get_aligned_op_slice_sizes(op_slices [] output_op_slices)<line_sep>op_handler_util.reslice_ops([op]+output_ops_to_group aligned_op_slice_sizes op_reg_manager)<line_sep># TODO(a1): Consider refactoring this method. # Repopulate OpSlice data, as ops may have been resliced. output_op_slices=self._get_output_op_slices(output_ops_to_group op_reg_manager)<line_sep># Group with inputs and outputs. op_handler_util.group_op_with_inputs_and_outputs(op [] output_op_slices aligned_op_slice_sizes op_reg_manager)<line_sep># Reprocess ops. op_reg_manager.process_ops(output_ops_to_process+input_ops_to_process)<block_end><def_stmt>_group_with_output_slices self op output_op_slices op_slices op_reg_manager<block_start>"""Groups OpSlice of current op with output ops. Assuming OpSlice of op have been aligned with output, groups the corresponding OpSlice. Args: op: tf.Operation to determine grouping for. output_op_slices: List of list of OpSlice, with a list per output op. op_slices: List of OpSlice for current op. op_reg_manager: OpRegularizerManager to keep track of grouping. Raises: ValueError: If sizes for current and output op slices are not the same. 
"""<line_sep># Assert that op slices for output and current op are aligned. output_op_slices_sizes=op_handler_util.get_op_slice_sizes(output_op_slices)<line_sep>op_slice_sizes=op_handler_util.get_op_slice_sizes([op_slices])<if_stmt>op_slice_sizes<ne>output_op_slices_sizes<block_start><raise>ValueError('Current op and output op have differing slice '<concat>'sizes: {}, {}'.format(op_slice_sizes output_op_slices_sizes))<block_end>op_handler_util.group_op_with_inputs_and_outputs(op [] output_op_slices op_slice_sizes op_reg_manager)<block_end><def_stmt>_get_output_op_slices self output_ops op_reg_manager<block_start>"""Returns op slices for outputs. Args: output_ops: List of tf.Operation. op_reg_manager: OpRegularizerManager to keep track of the grouping. Returns: A list of list of OpSlice with a list per output op. """<line_sep><return>op_handler_util.get_op_slices(output_ops op_reg_manager)<block_end><def_stmt>create_regularizer self _<block_start><raise>NotImplementedError('Not a source op.')<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>edward<as>ed<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>collections namedtuple<import_from_stmt>edward.models Beta Dirichlet DirichletProcess Gamma MultivariateNormalDiag Normal Poisson TransformedDistribution <import_from_stmt>tensorflow.contrib.distributions bijectors<class_stmt>test_transform_class(tf.test.TestCase)<block_start><def_stmt>assertSamplePosNeg self sample<block_start>num_pos=np.sum((sample<g>0.0) axis=0 keepdims=<true>)<line_sep>num_neg=np.sum((sample<l>0.0) axis=0 keepdims=<true>)<line_sep>self.assertTrue((num_pos<g>0).all())<line_sep>self.assertTrue((num_neg<g>0).all())<block_end><def_stmt>test_args self<block_start><with_stmt>self.test_session()<block_start>x=Normal(-100.0 1.0)<line_sep>y=ed.transform(x bijectors.Softplus())<line_sep>sample=y.sample(10).eval()<line_sep>self.assertTrue((sample<ge>0.0).all())<block_end><block_end><def_stmt>test_kwargs self<block_start><with_stmt>self.test_session()<block_start>x=Normal(-100.0 1.0)<line_sep>y=ed.transform(x bijector=bijectors.Softplus())<line_sep>sample=y.sample(10).eval()<line_sep>self.assertTrue((sample<ge>0.0).all())<block_end><block_end><def_stmt>test_01 self<block_start><with_stmt>self.test_session()<block_start>x=Beta(1.0 1.0)<line_sep>y=ed.transform(x)<line_sep>self.assertIsInstance(y TransformedDistribution)<line_sep>sample=y.sample(10 seed=1).eval()<line_sep>self.assertSamplePosNeg(sample)<block_end><block_end><def_stmt>test_nonnegative self<block_start><with_stmt>self.test_session()<block_start>x=Gamma(1.0 1.0)<line_sep>y=ed.transform(x)<line_sep>self.assertIsInstance(y TransformedDistribution)<line_sep>sample=y.sample(10 seed=1).eval()<line_sep>self.assertSamplePosNeg(sample)<block_end><block_end><def_stmt>test_simplex self<block_start><with_stmt>self.test_session()<block_start>x=Dirichlet([1.1 1.2 1.3 1.4])<line_sep>y=ed.transform(x)<line_sep>self.assertIsInstance(y TransformedDistribution)<line_sep>sample=y.sample(10 seed=1).eval()<line_sep>self.assertSamplePosNeg(sample)<block_end><block_end><def_stmt>test_real self<block_start><with_stmt>self.test_session()<block_start>x=Normal(0.0 1.0)<line_sep>y=ed.transform(x)<line_sep>self.assertIsInstance(y Normal)<line_sep>sample=y.sample(10 seed=1).eval()<line_sep>self.assertSamplePosNeg(sample)<block_end><block_end><def_stmt>test_multivariate_real self<block_start><with_stmt>self.test_session()<block_start>x=MultivariateNormalDiag(tf.zeros(2) tf.ones(2))<line_sep>y=ed.transform(x)<line_sep>sample=y.sample(10 seed=1).eval()<line_sep>self.assertSamplePosNeg(sample)<block_end><block_end><def_stmt>test_no_support self<block_start><with_stmt>self.test_session()<block_start>x=DirichletProcess(1.0 Normal(0.0 1.0))<with_stmt>self.assertRaises(AttributeError)<block_start>y=ed.transform(x)<block_end><block_end><block_end><def_stmt>test_unhandled_support self<block_start><with_stmt>self.test_session()<block_start>FakeRV=namedtuple('FakeRV' ['support'])<line_sep>x=FakeRV(support='rational')<with_stmt>self.assertRaises(ValueError)<block_start>y=ed.transform(x)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
<import_from_stmt>typing Callable Tuple Any TypeVar Generic<import_from_stmt>.util Unit<import_from_stmt>.typing Functor<import_from_stmt>.typing Monad<line_sep>TState=TypeVar("TState")<line_sep>TSource=TypeVar("TSource")<line_sep>TResult=TypeVar("TResult")<class_stmt>State(Generic[TSource TState])<block_start>"""The state monad. Wraps stateful computations. A stateful computation is a function that takes a state and returns a result and new state: state -> (result, state') """<def_stmt>__init__ self fn:Callable[[TState] Tuple[TSource TState]]<arrow><none><block_start>"""Initialize a new state. Keyword arguments: fn -- State processor. """<line_sep>self._fn=fn<block_end>@classmethod<def_stmt>unit cls value:TSource<arrow>"State[TSource, TState]"<block_start>r"""Create new State. The unit function creates a new State object wrapping a stateful computation. State $ \s -> (x, s) """<line_sep><return>cls(<lambda>state:(value state))<block_end><def_stmt>map self mapper:Callable[[TSource] TResult]<arrow>"State[TResult, TState]"<block_start><def_stmt>_ a:Any state:Any<arrow>Tuple[Any Any]<block_start><return>mapper(a) state<block_end><return>State(<lambda>state:_(*self.run(state)))<block_end><def_stmt>bind self fn:Callable[[TSource] "State[TState, TResult]"]<arrow>"State[TResult, TState]"<block_start>r"""m >>= k = State $ \s -> let (a, s') = runState m s in runState (k a) s' """<def_stmt>_ result:Any state:Any<arrow>Tuple[Any Any]<block_start><return>fn(result).run(state)<block_end><return>State(<lambda>state:_(*self.run(state)))<block_end>@classmethod<def_stmt>get cls<arrow>"State[TState, TState]"<block_start>r"""get = state $ \s -> (s, s)"""<line_sep><return>State(<lambda>state:(state state))<block_end>@classmethod<def_stmt>put cls new_state:TState<arrow>"State[Tuple, TState]"<block_start>r"""put newState = state $ \s -> ((), newState)"""<line_sep><return>State(<lambda>state:(Unit new_state))<block_end><def_stmt>run self state:TState<arrow>Tuple[TSource TState]<block_start>"""Return wrapped state computation. This is the inverse of unit and returns the wrapped function. """<line_sep><return>self._fn(state)<block_end><def_stmt>__call__ self state:Any<arrow>Tuple<block_start><return>self.run(state)<block_end><block_end><assert_stmt>issubclass(State Functor)<assert_stmt>issubclass(State Monad)<line_sep>
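A short usage sketch of the monad above (the counter example is illustrative, not part of the library):

# Read the current state, store state + 1, and return the old state as the result.
tick = State.get().bind(lambda n: State.put(n + 1).bind(lambda _: State.unit(n)))

value, new_state = tick.run(41)
assert (value, new_state) == (41, 42)

# map transforms the result while threading the state through unchanged.
doubled, final_state = tick.map(lambda n: n * 2).run(10)
assert (doubled, final_state) == (20, 11)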
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. """ """<class_stmt>ErrorNLU<block_start>"""Base model for generating NLU error."""<def_stmt>__init__ self act_type_rate=0.0 slot_rate=0.0<block_start>""" Args: act_type_rate (float): The error rate applied on dialog act type. slot_rate (float): Error rate applied on slots. """<line_sep>self.set_error_rate(act_type_rate slot_rate)<block_end><def_stmt>set_error_rate self act_type_rate slot_rate<block_start>""" Set error rate parameter for error model. Args: act_type_rate (float): The error rate applied on dialog act type. slot_rate (float): Error rate applied on slots. """<line_sep>self.act_type_rate=act_type_rate<line_sep>self.slot_rate=slot_rate<block_end><def_stmt>apply self dialog_act<block_start>""" Apply the error model on dialog act. Args: dialog_act (tuple): Dialog act. Returns: dialog_act (tuple): Dialog act with noise. """<line_sep>#TODO <return><block_end><block_end>
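The apply method above is left as a TODO; purely as an illustration of the intended contract, a noise model driven by the two rates could look like this (the dialog-act layout is an assumption, not the project's format):

import random

def apply_sketch(self, dialog_act):
    # Illustrative only: dialog_act assumed to be (act_type, [(slot, value), ...]).
    act_type, slots = dialog_act
    if random.random() < self.act_type_rate:
        act_type = 'unknown'  # corrupt the act type
    slots = [sv for sv in slots if random.random() >= self.slot_rate]  # drop slots
    return act_type, slots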
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>mmcv.utils build_from_cfg<import_from_stmt>mmdet.core.bbox.iou_calculators.builder IOU_CALCULATORS<line_sep>ROTATED_IOU_CALCULATORS=IOU_CALCULATORS<def_stmt>build_iou_calculator cfg default_args=<none><block_start>"""Builder of IoU calculator."""<line_sep><return>build_from_cfg(cfg ROTATED_IOU_CALCULATORS default_args)<block_end>
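Typical usage mirrors the other mmdet builders; the calculator name below is assumed to be registered with ROTATED_IOU_CALCULATORS elsewhere in the package:

import torch

iou_calculator = build_iou_calculator(dict(type='RBboxOverlaps2D'))
rbboxes1 = torch.rand(4, 5)  # (cx, cy, w, h, angle)
rbboxes2 = torch.rand(6, 5)
overlaps = iou_calculator(rbboxes1, rbboxes2)  # expected shape: (4, 6)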
""" The tool to check the availability or syntax of domain, IP or URL. :: ██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗ ██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝ ██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗ ██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝ ██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝ Tests of URL 2 Network Location converter. Author: <NAME>, @funilrys, contactTATAfunilrysTODTODcom Special thanks: https://pyfunceble.github.io/special-thanks.html Contributors: https://pyfunceble.github.io/contributors.html Project link: https://github.com/funilrys/PyFunceble Project documentation: https://pyfunceble.readthedocs.io/en/dev/ Project homepage: https://pyfunceble.github.io/ License: :: Copyright 2017, 2018, 2019, 2020, 2021 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>unittest<import_stmt>unittest.mock<import_from_stmt>PyFunceble.converter.url2netloc Url2Netloc<class_stmt>TestUrl2Netloc(unittest.TestCase)<block_start>""" Tests our internal URL converter. """<def_stmt>setUp self<arrow><none><block_start>""" Setups everything needed for the tests. """<line_sep>self.converter=Url2Netloc()<block_end><def_stmt>tearDown self<arrow><none><block_start>""" Destroys everything previously created for the tests. """<del_stmt>self.converter<block_end><def_stmt>test_set_data_to_convert_no_string self<arrow><none><block_start>""" Tests the method which let us set the data to work with for the case that a non-string value is given. """<line_sep>given=["Hello" "World"]<line_sep>self.assertRaises(TypeError <lambda>:self.converter.set_data_to_convert(given))<block_end><def_stmt>test_set_data_to_convert_empty_string self<arrow><none><block_start>""" Tests the method which let us set the data to work with for the case that an empty-string value is given. """<line_sep>given=""<line_sep>self.assertRaises(ValueError <lambda>:self.converter.set_data_to_convert(given))<block_end><def_stmt>test_get_converted_nothing_to_decode self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no conversion is needed. """<line_sep>given="example.org"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_full_url self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that a full URL is given. 
"""<line_sep>given="https://example.org/hello/world/this/is/a/test"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_full_url_with_port self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that a full URL (with explicit port) is given. """<line_sep>given="https://example.org:8080/hello/world/this/is/a/test"<line_sep>expected="example.org:8080"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_full_url_with_params self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that a full URL (with params) is given. """<line_sep>given="https://example.org/?is_admin=true"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_without_scheme self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no scheme is given. """<line_sep>given="example.org/hello/world/this/is/a/test"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_without_scheme_and_with_params self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no scheme (but with params) is given. """<line_sep>given="example.org/?is_admin=true"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_without_protocol self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no protocol is given. """<line_sep>given="://example.org/hello/world/this/is/a/test"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_without_protocol_and_with_params self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no protocol (but params) is given. """<line_sep>given="://example.org/?is_admin=true"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_without_protocol_and_path self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that no protocol and path is given. """<line_sep>given="://example.org/"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_startswith_2_slashes self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that the given url starts with 2 slashes. 
"""<line_sep>given="//example.org/hello/world/this/is/a/test"<line_sep>expected="example.org"<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><def_stmt>test_get_converted_url_startswith_1_slash self<arrow><none><block_start>""" Tests the method which let us extracts the netloc from a given URL for the case that the given url starts with 1 slash. """<line_sep>given="/example.org/hello/world/this/is/a/test"<line_sep>expected=""<line_sep>self.converter.data_to_convert=given<line_sep>actual=self.converter.get_converted()<line_sep>self.assertEqual(expected actual)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>typing List<import_stmt>torch<import_from_stmt>nemo.core.classes Loss typecheck<import_from_stmt>nemo.core.neural_types LossType NeuralType<line_sep>__all__=['AggregatorLoss']<class_stmt>AggregatorLoss(Loss)<block_start>""" Sums several losses into one. Args: num_inputs: number of input losses weights: a list of coefficient for merging losses """<line_sep>@property<def_stmt>input_types self<block_start>"""Returns definitions of module input ports. """<line_sep>input_types={}<for_stmt>i range(self._num_losses)<block_start>input_types["loss_"+str(i+1)]=NeuralType(elements_type=LossType())<block_end><return>input_types<block_end>@property<def_stmt>output_types self<block_start>"""Returns definitions of module output ports. """<line_sep><return>{"loss":NeuralType(elements_type=LossType())}<block_end><def_stmt>__init__ self num_inputs:int=2 weights:List[float]=<none><block_start>super().__init__()<line_sep>self._num_losses=num_inputs<if_stmt>weights<is><not><none><and>len(weights)<ne>num_inputs<block_start><raise>ValueError("Length of weights should be equal to the number of inputs (num_inputs)")<block_end>self._weights=weights<block_end>@typecheck()<def_stmt>forward self **kwargs<block_start>values=[kwargs[x]<for>x sorted(kwargs.keys())]<line_sep>loss=torch.zeros_like(values[0])<for_stmt>loss_idx,loss_value enumerate(values)<block_start><if_stmt>self._weights<is><not><none><block_start>loss=loss.add(loss_value alpha=self._weights[loss_idx])<block_end><else_stmt><block_start>loss=loss.add(loss_value)<block_end><block_end><return>loss<block_end><block_end>
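A small sketch of driving the aggregator (the scalar tensors here are placeholders; in a NeMo model the inputs come from other Loss modules):

import torch

agg = AggregatorLoss(num_inputs=2, weights=[1.0, 0.5])
loss_1 = torch.tensor(2.0)  # e.g. an intent-classification loss
loss_2 = torch.tensor(4.0)  # e.g. a slot-filling loss
total = agg(loss_1=loss_1, loss_2=loss_2)  # 1.0 * 2.0 + 0.5 * 4.0 = 4.0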
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for estimator.py."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>six<import_from_stmt>tensorflow.contrib.distributions.python.ops estimator<as>estimator_lib<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators constants<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators head<as>head_lib<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators model_fn<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators.head_test _assert_metrics<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators.head_test _assert_no_variables<import_from_stmt>tensorflow.contrib.learn.python.learn.estimators.head_test _assert_summary_tags<import_from_stmt>tensorflow.python.client session<import_from_stmt>tensorflow.python.framework ops<import_from_stmt>tensorflow.python.ops nn_ops<import_from_stmt>tensorflow.python.ops.distributions normal<as>normal_lib<import_from_stmt>tensorflow.python.platform test<class_stmt>EstimatorHeadDistributionRegressionTest(test.TestCase)<block_start><def_stmt>_assert_output_alternatives self model_fn_ops<block_start>self.assertEquals({<none>:constants.ProblemType.LINEAR_REGRESSION} {k:v[0]<for>k,v six.iteritems(model_fn_ops.output_alternatives)})<block_end><def_stmt>testNormalLocScaleLogits self# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1. <block_start>scale_bias=np.log(np.expm1(1.))<def_stmt>softplus x<block_start><return>np.log1p(np.exp(x))<block_end><def_stmt>actual_loss logits labels<block_start>mu=actual_mean(logits)<line_sep>sigma=actual_stddev(logits)<line_sep>labels=np.squeeze(labels -1)<line_sep>z=(labels-mu)/sigma<line_sep>loss=0.5<times>(z<power>2.+np.log(2.<times>np.pi))+np.log(sigma)<line_sep><return>loss.mean()<block_end><def_stmt>actual_mean logits<block_start><return>logits[<ellipsis> 0]<block_end><def_stmt>actual_stddev logits<block_start><return>softplus(logits[<ellipsis> 1]+scale_bias)<block_end><def_stmt>make_distribution_fn logits<block_start><return>normal_lib.Normal(loc=logits[<ellipsis> 0] scale=nn_ops.softplus(logits[<ellipsis> 1]+scale_bias))<block_end>head=estimator_lib.estimator_head_distribution_regression(make_distribution_fn logits_dimension=2)<line_sep>labels=np.float32([[-1.] [0.] [1.]])<line_sep>logits=np.float32([[0. -1] [1 0.5] [-1 1]])<with_stmt>ops.Graph().as_default() session.Session()# Convert to tensor so we can index into head.distributions. 
<block_start>tflogits=ops.convert_to_tensor(logits name="logits")<line_sep>model_fn_ops=head.create_model_fn_ops({} labels=labels mode=model_fn.ModeKeys.TRAIN train_op_fn=head_lib.no_op_train_fn logits=tflogits)<line_sep>self._assert_output_alternatives(model_fn_ops)<line_sep>_assert_summary_tags(self ["loss"])<line_sep>_assert_no_variables(self)<line_sep>loss=actual_loss(logits labels)<line_sep>_assert_metrics(self loss {"loss":loss} model_fn_ops)<line_sep># Now we verify the underlying distribution was correctly constructed. expected_mean=logits[<ellipsis> 0]<line_sep>self.assertAllClose(expected_mean head.distribution(tflogits).mean().eval() rtol=1e-6 atol=0.)<line_sep>expected_stddev=softplus(logits[<ellipsis> 1]+scale_bias)<line_sep>self.assertAllClose(expected_stddev head.distribution(tflogits).stddev().eval() rtol=1e-6 atol=0.)<line_sep># Should have created only one distribution. self.assertEqual(1 len(head.distributions))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test.main()<block_end>
# Copyright (c) 2012-2016 Seafile Ltd. <import_stmt>logging<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>seaserv ccnet_api<import_from_stmt>seahub.api2.permissions IsProVersion<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.api2.endpoints.utils is_org_user<import_from_stmt>seahub.utils is_valid_email<import_from_stmt>seahub.base.accounts User<import_from_stmt>seahub.base.templatetags.seahub_tags email2nickname<import_from_stmt>seahub.profile.models Profile<line_sep>logger=logging.getLogger(__name__)<def_stmt>get_user_info email<block_start>profile=Profile.objects.get_profile_by_user(email)<line_sep>info={}<line_sep>info['email']=email<line_sep>info['name']=email2nickname(email)<line_sep>info['contact_email']=profile.contact_email<if>profile<and>profile.contact_email<else>''<line_sep><return>info<block_end><class_stmt>OrgAdminUser(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>throttle_classes=(UserRateThrottle )<line_sep>permission_classes=(IsProVersion )<def_stmt>put self request org_id email<block_start>""" update name of an org user. Permission checking: 1. only admin can perform this action. """<line_sep># resource check org_id=int(org_id)<if_stmt><not>ccnet_api.get_org_by_id(org_id)<block_start>error_msg='Organization %s not found.'%org_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><try_stmt><block_start>user=User.objects.get(email=email)<block_end><except_stmt>User.DoesNotExist<block_start>error_msg='User %s not found.'%email<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end># permission check <if_stmt><not>request.user.org.is_staff<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end><if_stmt>request.user.org.org_id<ne>org_id<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end><if_stmt><not>is_org_user(email org_id)<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end># update user's name name=request.data.get("name" <none>)<if_stmt>name<is><not><none><block_start>name=name.strip()<if_stmt>len(name)<g>64<block_start>error_msg='Name is too long (maximum is 64 characters).'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>"/"<in>name<block_start>error_msg="Name should not include '/'."<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><try_stmt><block_start>Profile.objects.add_or_update(email nickname=name)<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep>error_msg='Internal Server Error'<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR error_msg)<block_end><block_end># update user's contact email contact_email=request.data.get("contact_email" <none>)<if_stmt>contact_email<is><not><none><block_start>contact_email=contact_email.strip()<if_stmt>contact_email<ne>''<and><not>is_valid_email(contact_email)<block_start>error_msg='contact_email invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST 
error_msg)<block_end><try_stmt><block_start>Profile.objects.add_or_update(email contact_email=contact_email)<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep>error_msg='Internal Server Error'<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR error_msg)<block_end><block_end>info=get_user_info(email)<line_sep>info['is_active']=user.is_active<line_sep><return>Response(info)<block_end><block_end>
<import_stmt>torch<import_stmt>pytest<import_from_stmt>allennlp.common Params<import_from_stmt>allennlp.modules.transformer BiModalAttention<line_sep>@pytest.fixture<def_stmt>params_dict <block_start><return>{"hidden_size1":6 "hidden_size2":4 "combined_hidden_size":16 "num_attention_heads":2 "dropout1":0.1 "dropout2":0.2 }<block_end>@pytest.fixture<def_stmt>params params_dict<block_start><return>Params(params_dict)<block_end>@pytest.fixture<def_stmt>biattention params<block_start><return>BiModalAttention.from_params(params.duplicate())<block_end><def_stmt>test_can_construct_from_params biattention params_dict<block_start><assert_stmt>biattention.num_attention_heads<eq>params_dict["num_attention_heads"]<assert_stmt>biattention.attention_head_size<eq>int(params_dict["combined_hidden_size"]/params_dict["num_attention_heads"])<assert_stmt>(biattention.all_head_size<eq>params_dict["num_attention_heads"]<times>biattention.attention_head_size)<assert_stmt>biattention.query1.in_features<eq>params_dict["hidden_size1"]<assert_stmt>biattention.key1.in_features<eq>params_dict["hidden_size1"]<assert_stmt>biattention.value1.in_features<eq>params_dict["hidden_size1"]<assert_stmt>biattention.dropout1.p<eq>params_dict["dropout1"]<assert_stmt>biattention.query2.in_features<eq>params_dict["hidden_size2"]<assert_stmt>biattention.key2.in_features<eq>params_dict["hidden_size2"]<assert_stmt>biattention.value2.in_features<eq>params_dict["hidden_size2"]<assert_stmt>biattention.dropout2.p<eq>params_dict["dropout2"]<block_end><def_stmt>test_forward_runs biattention<block_start>biattention(torch.randn(2 3 6) torch.randn(2 3 4) torch.randint(0 2 (2 2 3 3))<eq>1 # creating boolean tensors torch.randint(0 2 (2 2 3 3))<eq>1 )<block_end>
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file <import_stmt>sys<import_from_stmt>waflib.Tools ar d<import_from_stmt>waflib.Configure conf<line_sep>@conf<def_stmt>find_dmd conf<block_start>conf.find_program(['dmd' 'dmd2' 'ldc'] var='D')<line_sep>out=conf.cmd_and_log(conf.env.D+['--help'])<if_stmt>out.find("D Compiler v")<eq>-1<block_start>out=conf.cmd_and_log(conf.env.D+['-version'])<if_stmt>out.find("based on DMD v1.")<eq>-1<block_start>conf.fatal("detected compiler is not dmd/ldc")<block_end><block_end><block_end>@conf<def_stmt>common_flags_ldc conf<block_start>v=conf.env<line_sep>v.DFLAGS=['-d-version=Posix']<line_sep>v.LINKFLAGS=[]<line_sep>v.DFLAGS_dshlib=['-relocation-model=pic']<block_end>@conf<def_stmt>common_flags_dmd conf<block_start>v=conf.env<line_sep>v.D_SRC_F=['-c']<line_sep>v.D_TGT_F='-of%s'<line_sep>v.D_LINKER=v.D<line_sep>v.DLNK_SRC_F=''<line_sep>v.DLNK_TGT_F='-of%s'<line_sep>v.DINC_ST='-I%s'<line_sep>v.DSHLIB_MARKER=v.DSTLIB_MARKER=''<line_sep>v.DSTLIB_ST=v.DSHLIB_ST='-L-l%s'<line_sep>v.DSTLIBPATH_ST=v.DLIBPATH_ST='-L-L%s'<line_sep>v.LINKFLAGS_dprogram=['-quiet']<line_sep>v.DFLAGS_dshlib=['-fPIC']<line_sep>v.LINKFLAGS_dshlib=['-L-shared']<line_sep>v.DHEADER_ext='.di'<line_sep>v.DFLAGS_d_with_header=['-H' '-Hf']<line_sep>v.D_HDR_F='%s'<block_end><def_stmt>configure conf<block_start>conf.find_dmd()<if_stmt>sys.platform<eq>'win32'<block_start>out=conf.cmd_and_log(conf.env.D+['--help'])<if_stmt>out.find('D Compiler v2.')<g>-1<block_start>conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')<block_end><block_end>conf.load('ar')<line_sep>conf.load('d')<line_sep>conf.common_flags_dmd()<line_sep>conf.d_platform_flags()<if_stmt>str(conf.env.D).find('ldc')<g>-1<block_start>conf.common_flags_ldc()<block_end><block_end>
<def_stmt>dummy_config <block_start><return>{'uuid':'TEST-UUID' 'main':{'server':'https://test.forge.io/api/'}}<block_end>
""" Tests for non-ascii-name checker. """<line_sep>áéíóú=4444# [non-ascii-name] <def_stmt>úóíéá # [non-ascii-name] <block_start>"""yo"""<block_end>
<import_from_stmt>django.contrib admin<import_from_stmt>django.apps apps<line_sep>questions=apps.get_app_config('questions')<for_stmt>model_name,model questions.models.items()<block_start>admin.site.register(model)<block_end>
<import_from_stmt>typing Callable Union<import_stmt>rx<import_from_stmt>rx.core Observable typing<import_from_stmt>rx.disposable SingleAssignmentDisposable SerialDisposable<import_from_stmt>rx.internal.utils is_future<def_stmt>catch_handler source:Observable handler:Callable[[Exception Observable] Observable]<arrow>Observable<block_start><def_stmt>subscribe observer scheduler=<none><block_start>d1=SingleAssignmentDisposable()<line_sep>subscription=SerialDisposable()<line_sep>subscription.disposable=d1<def_stmt>on_error exception<block_start><try_stmt><block_start>result=handler(exception source)<block_end><except_stmt>Exception<as>ex# By design. pylint: disable=W0703 <block_start>observer.on_error(ex)<line_sep><return><block_end>result=rx.from_future(result)<if>is_future(result)<else>result<line_sep>d=SingleAssignmentDisposable()<line_sep>subscription.disposable=d<line_sep>d.disposable=result.subscribe(observer scheduler=scheduler)<block_end>d1.disposable=source.subscribe_(observer.on_next on_error observer.on_completed scheduler)<line_sep><return>subscription<block_end><return>Observable(subscribe)<block_end><def_stmt>_catch handler:Union[Observable Callable[[Exception Observable] Observable]]<arrow>Callable[[Observable] Observable]<block_start><def_stmt>catch source:Observable<arrow>Observable<block_start>"""Continues an observable sequence that is terminated by an exception with the next observable sequence. Examples: >>> op = catch(ys) >>> op = catch(lambda ex, src: ys(ex)) Args: handler: Second observable sequence used to produce results when an error occurred in the first sequence, or an exception handler function that returns an observable sequence given the error and source observable that occurred in the first sequence. Returns: An observable sequence containing the first sequence's elements, followed by the elements of the handler sequence in case an exception occurred. """<if_stmt>callable(handler)<block_start><return>catch_handler(source handler)<block_end><elif_stmt>isinstance(handler typing.Observable)<block_start><return>rx.catch(source handler)<block_end><else_stmt><block_start><raise>TypeError('catch operator takes whether an Observable or a callable handler as argument.')<block_end><block_end><return>catch<block_end>
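In application code this operator is normally reached through rx.operators.catch rather than called directly; a small RxPY 3 style sketch:

import rx
import rx.operators as ops

# Either a fallback observable or a handler callable may be supplied to catch.
rx.of(1, 2, 0).pipe(
    ops.map(lambda x: 10 // x),            # raises ZeroDivisionError on the last element
    ops.catch(lambda ex, src: rx.of(-1)),  # continue with a fallback sequence on error
).subscribe(print)                          # prints 10, 5, -1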
<import_stmt>logging<import_from_stmt>btb.selection.ucb1 UCB1<line_sep># the minimum number of scores that each choice must have in order to use # best-K optimizations. If not all choices meet this threshold, default UCB1 # selection will be used. 
K_MIN=2<line_sep>logger=logging.getLogger('btb')<class_stmt>RecentKReward(UCB1)<block_start>"""Recent K reward selector Args: k (int): number of best scores to consider """<def_stmt>__init__ self choices k=K_MIN<block_start>super(RecentKReward self).__init__(choices)<line_sep>self.k=k<block_end><def_stmt>compute_rewards self scores<block_start>"""Retain the K most recent scores, and replace the rest with zeros"""<for_stmt>i range(len(scores))<block_start><if_stmt>i<ge>self.k<block_start>scores[i]=0.<block_end><block_end><return>scores<block_end><def_stmt>select self choice_scores<block_start>"""Use the top k learner's scores for usage in rewards for the bandit calculation"""<line_sep># if we don't have enough scores to do K-selection, fall back to UCB1 
min_num_scores=min([len(s)<for>s choice_scores.values()])<if_stmt>min_num_scores<ge>K_MIN<block_start>logger.info('{klass}: using Best K bandit selection'.format(klass=type(self).__name__))<line_sep>reward_func=self.compute_rewards<block_end><else_stmt><block_start>logger.warning('{klass}: Not enough choices to do K-selection; using plain UCB1'.format(klass=type(self).__name__))<line_sep>reward_func=super(RecentKReward self).compute_rewards<block_end>choice_rewards={}<for_stmt>choice,scores choice_scores.items()<block_start><if_stmt>choice<not><in>self.choices<block_start><continue><block_end>choice_rewards[choice]=reward_func(scores)<block_end><return>self.bandit(choice_rewards)<block_end><block_end><class_stmt>RecentKVelocity(RecentKReward)<block_start>"""Recent K velocity selector"""<def_stmt>compute_rewards self scores<block_start>"""Compute the velocity of the k+1 most recent scores. Each velocity is the difference between two consecutive scores. Return a list with those k velocities padded out with zeros so that the count remains the same. """<line_sep># take the k + 1 most recent scores so we can get k velocities 
recent_scores=scores[:-self.k-2:-1]<line_sep>velocities=[recent_scores[i]-recent_scores[i+1]<for>i range(len(recent_scores)-1)]<line_sep># pad the list out with zeros, so the length of the list is # maintained 
zeros=(len(scores)-self.k)<times>[0]<line_sep><return>velocities+zeros<block_end><block_end>
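A worked example of the two reward transforms above, with k = 2 and scores = [0.5, 0.6, 0.8, 0.9] as stored by the selector:

# RecentKReward.compute_rewards zeroes every entry from index k onward:
#   [0.5, 0.6, 0.8, 0.9] -> [0.5, 0.6, 0.0, 0.0]
# RecentKVelocity.compute_rewards reverses the last k + 1 scores ([0.9, 0.8, 0.6]),
# takes successive differences ([0.1, 0.2]) and pads with zeros:
#   [0.5, 0.6, 0.8, 0.9] -> [0.1, 0.2, 0.0, 0.0]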
# -*- coding: utf-8 -*- <import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>django.core.urlresolvers reverse<import_from_stmt>ralph.accounts.tests.factories RegionFactory UserFactory<import_from_stmt>ralph.back_office.tests.factories BackOfficeAssetFactory<import_from_stmt>ralph.lib.transitions.tests TransitionTestCase<import_from_stmt>ralph.licences.models BaseObjectLicence Licence LicenceUser<import_from_stmt>ralph.licences.tests.factories LicenceFactory<import_from_stmt>ralph.tests RalphTestCase<import_from_stmt>ralph.tests.mixins ClientMixin<class_stmt>BaseObjectLicenceCleanTest(RalphTestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.region_pl=RegionFactory(name='pl')<line_sep>self.region_de=RegionFactory(name='de')<line_sep>self.licence_de=LicenceFactory(region=self.region_de)<line_sep>self.bo_asset=BackOfficeAssetFactory(region=self.region_pl)<block_end><def_stmt>test_region_validate self<block_start>base_object_licence=BaseObjectLicence()<line_sep>base_object_licence.licence=self.licence_de<line_sep>base_object_licence.base_object=self.bo_asset<with_stmt>self.assertRaisesRegex(ValidationError ('Asset region is in a different region than licence.'))<block_start>base_object_licence.clean()<block_end><block_end><block_end><class_stmt>LicenceTest(RalphTestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.licence_1=LicenceFactory(number_bought=3)<line_sep>self.licence_2=LicenceFactory(number_bought=1)<line_sep>self.user_1=UserFactory()<line_sep>self.bo_asset=BackOfficeAssetFactory()<block_end><def_stmt>test_get_autocomplete_queryset self<block_start><with_stmt>self.assertNumQueries(2)<block_start>self.assertCountEqual(Licence.get_autocomplete_queryset().values_list('pk' flat=<true>) [self.licence_1.pk self.licence_2.pk])<block_end><block_end><def_stmt>test_get_autocomplete_queryset_all_used self<block_start>BaseObjectLicence.objects.create(base_object=self.bo_asset licence=self.licence_1 quantity=1 )<line_sep>LicenceUser.objects.create(user=self.user_1 licence=self.licence_1 quantity=2)<with_stmt>self.assertNumQueries(2)<block_start>self.assertCountEqual(Licence.get_autocomplete_queryset().values_list('pk' flat=<true>) [self.licence_2.pk])<block_end><block_end><block_end><class_stmt>LicenceFormTest(TransitionTestCase ClientMixin)<block_start><def_stmt>test_service_env_not_required self<block_start>self.assertTrue(self.login_as_user())<line_sep>licence=LicenceFactory()<line_sep>url=reverse('admin:licences_licence_change' args=(licence.pk ))<line_sep>resp=self.client.get(url follow=<true>)<line_sep>self.assertEqual(resp.status_code 200)<line_sep>form=resp.context['adminform'].form<line_sep>self.assertIn('service_env' form.fields)<line_sep>self.assertFalse(form.fields['service_env'].required)<block_end><def_stmt>test_depreciation_rate_not_required self<block_start>self.assertTrue(self.login_as_user())<line_sep>licence=LicenceFactory()<line_sep>url=reverse('admin:licences_licence_change' args=(licence.pk ))<line_sep>resp=self.client.get(url follow=<true>)<line_sep>self.assertEqual(resp.status_code 200)<line_sep>form=resp.context['adminform'].form<line_sep>self.assertIn('depreciation_rate' form.fields)<line_sep>self.assertFalse(form.fields['depreciation_rate'].required)<block_end><block_end>
"""The laundrify integration."""<import_from_future_stmt> annotations<import_from_stmt>laundrify_aio LaundrifyAPI<import_from_stmt>laundrify_aio.exceptions ApiConnectionException UnauthorizedException<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.const CONF_ACCESS_TOKEN Platform<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.exceptions ConfigEntryAuthFailed ConfigEntryNotReady<import_from_stmt>homeassistant.helpers.aiohttp_client async_get_clientsession<import_from_stmt>.const DEFAULT_POLL_INTERVAL DOMAIN<import_from_stmt>.coordinator LaundrifyUpdateCoordinator<line_sep>PLATFORMS=[Platform.BINARY_SENSOR]<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant entry:ConfigEntry<arrow>bool<block_start>"""Set up laundrify from a config entry."""<line_sep>session=async_get_clientsession(hass)<line_sep>api_client=LaundrifyAPI(entry.data[CONF_ACCESS_TOKEN] session)<try_stmt><block_start><await>api_client.validate_token()<block_end><except_stmt>UnauthorizedException<as>err<block_start><raise>ConfigEntryAuthFailed("Invalid authentication")<from>err<block_end><except_stmt>ApiConnectionException<as>err<block_start><raise>ConfigEntryNotReady("Cannot reach laundrify API")<from>err<block_end>coordinator=LaundrifyUpdateCoordinator(hass api_client DEFAULT_POLL_INTERVAL)<line_sep><await>coordinator.async_config_entry_first_refresh()<line_sep>hass.data.setdefault(DOMAIN {})[entry.entry_id]={"api":api_client "coordinator":coordinator }<line_sep>hass.config_entries.async_setup_platforms(entry PLATFORMS)<line_sep><return><true><block_end><async_keyword><def_stmt>async_unload_entry hass:HomeAssistant entry:ConfigEntry<arrow>bool<block_start>"""Unload a config entry."""<if_stmt>unload_ok:=<await>hass.config_entries.async_unload_platforms(entry PLATFORMS)<block_start>hass.data[DOMAIN].pop(entry.entry_id)<block_end><return>unload_ok<block_end>
# # Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file in the root directory # of this source tree for licensing information. # <import_stmt>os<import_from_stmt>pathlib Path<import_from_stmt>clai.tools.colorize_console Colorize<import_from_stmt>clai.server.searchlib.data Datastore<import_from_stmt>clai.server.agent Agent<import_from_stmt>clai.server.command_message State Action NOOP_COMMAND<import_from_stmt>clai.server.logger current_logger<as>logger<class_stmt>HelpMeAgent(Agent)<block_start><def_stmt>__init__ self<block_start>super(HelpMeAgent self).__init__()<line_sep>inifile_path=os.path.join(str(Path(__file__).parent.absolute()) 'config.ini')<line_sep>self.store=Datastore(inifile_path)<block_end><def_stmt>compute_simple_token_similarity self src_sequence tgt_sequence<block_start>src_tokens=set([x.lower().strip()<for>x src_sequence.split()])<line_sep>tgt_tokens=set([x.lower().strip()<for>x tgt_sequence.split()])<line_sep><return>len(src_tokens&tgt_tokens)/len(src_tokens)<block_end><def_stmt>compute_confidence self query forum manpage<block_start>""" Computes the confidence based on query, stack-exchange post answer and manpage Algorithm: 1. Compute token-wise similarity b/w query and forum text 2. Compute token-wise similarity b/w forum text and manpage description 3. Return product of two similarities Args: query (str): standard error captured in state variable forum (str): answer text from most relevant stack exchange post w.r.t query manpage (str): manpage description for most relevant manpage w.r.t. forum Returns: confidence (float): confidence on the returned manpage w.r.t. query """<line_sep>query_forum_similarity=self.compute_simple_token_similarity(query forum[0]['Content'])<line_sep>forum_manpage_similarity=self.compute_simple_token_similarity(forum[0]['Answer'] manpage)<line_sep>confidence=query_forum_similarity<times>forum_manpage_similarity<line_sep><return>confidence<block_end><def_stmt>get_next_action self state:State<arrow>Action<block_start><return>Action(suggested_command=state.command)<block_end><def_stmt>post_execute self state:State<arrow>Action<block_start>logger.info("==================== In Helpme Bot:post_execute ============================")<line_sep>logger.info("State:\n\tCommand: {}\n\tError Code: {}\n\tStderr: {}".format(state.command state.result_code state.stderr))<line_sep>logger.info("============================================================================")<if_stmt>state.result_code<eq>'0'<block_start><return>Action(suggested_command=state.command)<block_end>apis:OrderedDict=self.store.get_apis()<line_sep>helpWasFound=<false><for_stmt>provider apis# We don't want to process the manpages provider... 
thats the provider # that we use to clarify results from other providers <block_start><if_stmt>provider<eq>"manpages"<block_start>logger.info(f"Skipping search provider 'manpages'")<line_sep><continue><block_end>thisAPI:Provider=apis[provider]<line_sep># Skip this provider if it isn't supported on the target OS <if_stmt><not>thisAPI.can_run_on_this_os()<block_start>logger.info(f"Skipping search provider '{provider}'")<line_sep>logger.info(f"==> Excluded on platforms: {str(thisAPI.get_excludes())}")<line_sep><continue><block_end># Move to next provider in list logger.info(f"Processing search provider '{provider}'")<if_stmt>thisAPI.has_variants()<block_start>logger.info(f"==> Has search variants: {str(thisAPI.get_variants())}")<line_sep>variants:List=thisAPI.get_variants()<block_end><else_stmt><block_start>logger.info(f"==> Has no search variants")<line_sep>variants:List=[<none>]<block_end># For each search variant supported by the current API, query # the data store to find the closest matching data. If there are # no search variants (ie: the singleton variant case), the variants # list will only contain a single, Nonetype value. <for_stmt>variant variants<block_start><if_stmt>variant<is><not><none><block_start>logger.info(f"==> Searching variant '{variant}'")<line_sep>data=self.store.search(state.stderr service=provider size=1 searchType=variant)<block_end><else_stmt><block_start>data=self.store.search(state.stderr service=provider size=1)<block_end><if_stmt>data<block_start>apiString=str(thisAPI)<if_stmt>variant<is><not><none><block_start>apiString=f"{apiString} '{variant}' variant"<block_end>logger.info(f"==> Success!!! Found a result in the {apiString}")<line_sep># Find closest match b/w relevant data and manpages for unix searchResult=thisAPI.extract_search_result(data)<line_sep>manpages=self.store.search(searchResult service='manpages' size=5)<if_stmt>manpages<block_start>logger.info("==> Success!!! found relevant manpages.")<line_sep>command=manpages['commands'][-1]<line_sep>confidence=manpages['dists'][-1]<line_sep># FIXME: Artificially boosted confidence confidence=1.0<line_sep>logger.info("==> Command: {} \t Confidence:{}".format(command confidence))<line_sep># Set return data suggested_command="man {}".format(command)<line_sep>description=Colorize().emoji(Colorize.EMOJI_ROBOT).append(f"I did little bit of Internet searching for you, ").append(f"and found this in the {thisAPI}:\n").info().append(thisAPI.get_printable_output(data)).warning().append("Do you want to try: man {}".format(command)).to_console()<line_sep># Mark that help was indeed found helpWasFound=<true><line_sep># We've found help; no need to keep searching <break><block_end><block_end><block_end># If we found help, then break out of the outer loop as well <if_stmt>helpWasFound<block_start><break><block_end><block_end><if_stmt><not>helpWasFound<block_start>logger.info("Failure: Unable to be helpful")<line_sep>logger.info("============================================================================")<line_sep>suggested_command=NOOP_COMMAND<line_sep>description=Colorize().emoji(Colorize.EMOJI_ROBOT).append(f"Sorry. It looks like you have stumbled across a problem that even the Internet doesn't have answer to.\n").info().append(f"Have you tried turning it OFF and ON again. ;)").to_console()<line_sep>confidence=0.0<block_end><return>Action(suggested_command=suggested_command description=description confidence=confidence)<block_end><block_end>
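A rough sketch of the confidence computation described in the compute_confidence docstring above: the token-overlap similarity is reproduced as a standalone function, and the stderr, forum-answer, and manpage strings are invented placeholders rather than real search results.

def token_similarity(src_sequence, tgt_sequence):
    # same token-overlap measure as HelpMeAgent.compute_simple_token_similarity
    src_tokens = {x.lower().strip() for x in src_sequence.split()}
    tgt_tokens = {x.lower().strip() for x in tgt_sequence.split()}
    return len(src_tokens & tgt_tokens) / len(src_tokens)


stderr_text = "tar: invalid option -- 'j'"                          # made-up captured stderr
forum_answer = "use the -j option of tar to filter through bzip2"   # made-up forum answer
manpage_text = "tar - an archiving utility; -j filters the archive through bzip2"

confidence = token_similarity(stderr_text, forum_answer) * token_similarity(forum_answer, manpage_text)
print(confidence)   # a value in [0, 1]; higher means the manpage more likely addresses the error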
""" Test the ability to ignore undriven inputs (useful for formal verification tools that use undriven inputs to mark wires that can take on any value) """<import_stmt>pytest<import_stmt>magma<as>m<import_from_stmt>magma.testing check_files_equal<def_stmt>test_ignore_unused_undriven_basic <block_start><class_stmt>Main(m.Circuit)<block_start>_ignore_undriven_=<true><line_sep>io=m.IO(I=m.In(m.Bit) O=m.Out(m.Bit))<line_sep>temp=~io.I<block_end>m.compile("build/test_ignore_unused_undriven_basic" Main inline=<true> drive_undriven=<true> terminate_unused=<true>)<assert_stmt>check_files_equal(__file__ "build/test_ignore_unused_undriven_basic.v" "gold/test_ignore_unused_undriven_basic.v")<block_end><def_stmt>test_ignore_unused_undriven_hierarchy # For backwards compatability test <block_start><with_stmt>pytest.warns(DeprecationWarning)<block_start>Bar=m.DeclareCircuit("Bar" "I" m.In(m.Bit))<block_end><class_stmt>Foo(m.Circuit)<block_start>io=m.IO(I0=m.In(m.Bit) I1=m.In(m.Bit) O0=m.Out(m.Bit) O1=m.Out(m.Bit))<line_sep>io.O1<augmatmul>io.I0<line_sep>Bar()(io.I1)<block_end><class_stmt>Main(m.Circuit)<block_start>_ignore_undriven_=<true><line_sep>io=m.IO(I0=m.In(m.Bit) I1=m.In(m.Bit) O0=m.Out(m.Bit) O1=m.Out(m.Bit) O2=m.Out(m.Tuple[m.Bit m.Bit]) O3=m.Out(m.Array[2 m.Bit]))<line_sep>foo=Foo()<line_sep>foo.I0<augmatmul>io.I0<line_sep>io.O0<augmatmul>foo.O0<line_sep># partially undriven io.O2[0]<augmatmul>1<line_sep>io.O3[0]<augmatmul>1<block_end>m.compile("build/test_ignore_unused_undriven_hierarchy" Main inline=<true> drive_undriven=<true> terminate_unused=<true>)<assert_stmt>check_files_equal(__file__ "build/test_ignore_unused_undriven_hierarchy.v" "gold/test_ignore_unused_undriven_hierarchy.v")<block_end><def_stmt>test_ignore_undriven_coreir <block_start><class_stmt>Foo(m.Circuit)<block_start>_ignore_undriven_=<true><line_sep>io=m.IO(I0=m.In(m.Bit) O0=m.Out(m.Bit) O1=m.Out(m.Bit))<line_sep>io<augadd>m.ClockIO()<line_sep>io.O1<augmatmul>io.I0<block_end><class_stmt>Main(m.Circuit)<block_start>_ignore_undriven_=<true><line_sep>io=m.IO(I0=m.In(m.Bits[2]) I1=m.In(m.Bits[2]) O0=m.Out(m.Bit) O1=m.Out(m.Bit))+m.ClockIO()<line_sep>foo=Foo()<line_sep>foo.I0<augmatmul>io.I0<eq>io.I1<line_sep>io.O0<augmatmul>foo.O0<block_end>m.compile("build/test_ignore_undriven_coreir" Main output="coreir" drive_undriven=<true> terminate_unused=<true>)<assert_stmt>check_files_equal(__file__ "build/test_ignore_undriven_coreir.json" "gold/test_ignore_undriven_coreir.json")<block_end>
<import_from_stmt>functools reduce<import_from_stmt>itertools chain takewhile<import_stmt>os<import_stmt>pkg_resources<import_stmt>re<class_stmt>MetaDataNotFound(Exception)<block_start><pass><block_end><def_stmt>get_local_dist package_name<block_start>working_set=dict((dist.project_name dist)<for>dist pkg_resources.WorkingSet())<line_sep><return>working_set[package_name]<block_end><def_stmt>get_dist_metadata dist<block_start>metadata_path=get_local_dist_metadata_filepath(dist)<with_stmt>open(metadata_path)<as>fh<block_start>metadata=parse_metadata(fh.read())<block_end><return>metadata<block_end><def_stmt>get_funding_data metadata<block_start><return>metadata.get('funding_url')<block_end><def_stmt>get_local_dist_metadata_filepath dist# Dist filename syntax # name ["-" version ["-py" pyver ["-" required_platform]]] "." ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata <block_start><def_stmt>valid_component component<block_start><return>component[1]<block_end># Stop taking filename components at the first missing/invalid component filename_component=takewhile(valid_component (('' pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))) ('-' pkg_resources.to_filename(pkg_resources.safe_version(dist.version))) ('-py' dist.py_version) ('-' dist.platform) ))<line_sep>filename=''.join(chain(*filename_component))<if_stmt>isinstance(dist pkg_resources.EggInfoDistribution)<block_start>ext='egg-info'<line_sep>metadata_file='PKG-INFO'<block_end><elif_stmt>isinstance(dist pkg_resources.DistInfoDistribution)<block_start>ext='dist-info'<line_sep>metadata_file='METADATA'<block_end><elif_stmt>isinstance(dist pkg_resources.Distribution)<block_start>ext=os.path.join('egg' 'EGG-INFO')<line_sep>metadata_file='PKG-INFO'<block_end><else_stmt><block_start>ext=<none><line_sep>metadata_file=<none><block_end>filename='{}.{}'.format(filename ext)<line_sep>path=os.path.join(dist.location filename metadata_file)<if_stmt>ext<block_start><return>path<block_end><else_stmt><block_start><return><none><block_end><block_end>metadata_patterns=re.compile(r""" (\s*Author:\s+(?P<author>.*)\s*)? # Author (\s*Maintainer:\s+(?P<maintainer>.+)\s*)? # Maintainer (\s*Project-URL:\sFunding,\s+(?P<funding_url>.+)\s*)? # Funding URL """ re.VERBOSE)<def_stmt>get_line_metadata line<block_start><return>metadata_patterns.search(line).groupdict()<block_end><def_stmt>filter_empty_metadata metadata<block_start><return>dict((k v)<for>k,v metadata.items()<if>v)<block_end><def_stmt>parse_metadata metadata<block_start>metadata=(filter_empty_metadata(get_line_metadata(line))<for>line metadata.splitlines())<line_sep>metadata=[m<for>m metadata<if>m]<line_sep>metadata=reduce(<lambda>x y:dict((k v)<for>k,v chain(x.items() y.items())) metadata {} )<line_sep><return>metadata<block_end><def_stmt>get_local_metadata package_name<block_start><try_stmt><block_start>dist=get_local_dist(package_name)<line_sep>metadata=get_dist_metadata(dist)<block_end><except_stmt>FileNotFoundError# No metadata.json file locally <block_start><raise>MetaDataNotFound()<block_end><return>metadata<block_end><def_stmt>get_local_funding_metadata package_name<block_start><try_stmt><block_start>metadata=get_local_metadata(package_name)<line_sep>funding_url=get_funding_data(metadata)<block_end><except_stmt>KeyError# Package not available locally, # or there isn't a 'Funding' entry in the project_urls <block_start><raise>MetaDataNotFound()<block_end><return>funding_url<block_end>
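A small sketch of what the metadata parser above extracts from a PKG-INFO/METADATA header; the regular expression is the one defined above, while the header text and funding URL are made-up examples.

import re

pattern = re.compile(r"""
    (\s*Author:\s+(?P<author>.*)\s*)?                        # Author
    (\s*Maintainer:\s+(?P<maintainer>.+)\s*)?                # Maintainer
    (\s*Project-URL:\sFunding,\s+(?P<funding_url>.+)\s*)?    # Funding URL
    """, re.VERBOSE)

header = """\
Metadata-Version: 2.1
Name: example-package
Author: Jane Example
Project-URL: Funding, https://example.org/donate
"""

found = {}
for line in header.splitlines():
    found.update({k: v for k, v in pattern.search(line).groupdict().items() if v})
print(found)   # {'author': 'Jane Example', 'funding_url': 'https://example.org/donate'}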
# encoding: utf-8 # # Copyright 2016 Cluster Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>logging<import_from_stmt>datetime datetime<import_from_stmt>django.conf settings<import_from_stmt>django.db transaction<import_from_stmt>backend.lk.logic appstore_fetch<import_from_stmt>backend.lk.models AppWebsiteScreenshot<import_from_stmt>backend.lk.models AppWebsitePage<import_from_stmt>backend.util dnsutil<import_from_stmt>backend.util text<def_stmt>check_domain_for_cname_record domain<block_start>cname,error_message=dnsutil.get_cname_for_domain(domain)<if_stmt>error_message<block_start><return><false> error_message<block_end><if_stmt>cname<ne>'%s.'%settings.HOSTED_WEBSITE_CNAME<block_start><return><false> 'The CNAME value is set but incorrect'<block_end><return><true> <none><block_end><def_stmt>_short_description long_description<block_start><if_stmt><not>long_description<block_start><return>long_description<block_end><return>'%s...'%long_description[:180]<block_end><def_stmt>example_from_itunes_id itunes_id country<block_start>info=appstore_fetch.app_info_with_id(itunes_id country)<line_sep>app_name,app_tagline=text.app_name_tagline(info.name)<line_sep>example_website={'id':'example' 'appName':app_name 'tagline':app_tagline 'longDescription':info.description 'shortDescription':_short_description(info.description) 'itunesId':info.itunes_id 'images':{'screenshots':{'iPhone':[{'url':screenshot}<for>screenshot info.screenshots]} 'icon':{'url':info.icon_512} }}<line_sep><return>example_website<block_end><def_stmt>get_fancy_cluster_example <block_start><return>{'id':'example' 'domain':'cluster.co' 'template':'' 'appName':'Cluster' 'tagline':'Privately share special moments with friends and family' 'shortDescription':'Cluster gives you a private space to share photos and memories with the people you choose, away from social media. Make your own groups and share pics, videos, comments, and chat!' 'longDescription':u'Cluster makes it possible to create private groups where you share moments through photos and videos with the people you care about. Create a group with family, a group of friends, coworkers, people from your home town, or anyone else!\r\n\r\nGreat for:\r\n\u2022 New Moms! Share photos of a new baby with close friends and family without spamming everyone on other social networks\r\n\u2022 College Students! Share memories with friends not appropriate for Facebook\r\n\u2022 Families! Keep in touch even if you\u2019re not in the same place.\r\n\r\nTons of people already trust Cluster. Here\u2019s why:\r\n\r\n\u2022 Private & secure: Only invited members of the group can see what you post.\r\n\u2022 An app for everyone: Access Cluster through gorgeous mobile apps and the web.\r\n\u2022 Relevant notifications: Know when people you invited post new things to the group.' 
'keywords':'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby' 'itunesId':'596595032' 'playStoreId':'com.getcluster.android' 'supportLink':'http://cluster.co/help' 'termsLink':'http://cluster.co/terms' 'privacyLink':'http://cluster.co/privacy' 'primaryColor':'#0092F2' 'font':'Lato' 'frameScreenshots':'white' 'images':{'logo':{'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'} 'background':{'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'} 'icon':{'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'} 'screenshots':{'iPhone':[{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'} {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'} {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'} {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'} {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'} ] }} }<block_end>@transaction.atomic<def_stmt>update_website_screenshots website screenshot_images platform<block_start>existing_screenshots=list(AppWebsiteScreenshot.objects.filter(website_id=website.id platform=platform).order_by('order'))<line_sep>screenshot_image_ids=set([i.id<for>i screenshot_images])<line_sep>screenshots_to_delete=[s<for>s existing_screenshots<if>s.image_id<not><in>screenshot_image_ids]<for_stmt>screenshot screenshots_to_delete<block_start>screenshot.image.decrement_ref_count()<line_sep>screenshot.delete()<block_end>existing_by_image_id={i.image_id:i<for>i existing_screenshots}<for_stmt>i,image enumerate(screenshot_images)<block_start>order=i+1<if_stmt>image.id<in>existing_by_image_id<block_start>screenshot=existing_by_image_id[image.id]<if_stmt>screenshot.order<ne>order<block_start>screenshot.order=order<line_sep>screenshot.save()<block_end><block_end><else_stmt><block_start>image.increment_ref_count()<line_sep>screenshot=AppWebsiteScreenshot(website=website image=image platform=platform order=order)<line_sep>screenshot.save()<block_end><block_end><block_end>@transaction.atomic<def_stmt>create_or_update_hosted_page website slug body<block_start>hosted_page_titles={'terms':'Terms and Conditions' 'privacy':'Privacy Policy' 'support':'Support' }<line_sep>page=AppWebsitePage.objects.filter(website=website slug=slug).first()<if_stmt>page<and>body<block_start>page.body=body<line_sep>page.save()<block_end><elif_stmt><not>page<and>body<block_start>AppWebsitePage.objects.create(website=website slug=slug body=body title=hosted_page_titles[slug])<block_end><elif_stmt>page<and><not>body<block_start>page.delete()<block_end><block_end>@transaction.atomic<def_stmt>delete_website website<block_start>screenshots=list(website.screenshots.all())<for_stmt>screenshot 
screenshots<block_start>screenshot.image.decrement_ref_count()<line_sep>screenshot.delete()<block_end><if_stmt>website.icon<block_start>website.icon.decrement_ref_count()<line_sep>website.icon=<none><block_end><if_stmt>website.logo<block_start>website.logo.decrement_ref_count()<line_sep>website.logo=<none><block_end><if_stmt>website.background<block_start>website.background.decrement_ref_count()<line_sep>website.background=<none><block_end># TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain=<none><line_sep>website.delete_time=datetime.now()<line_sep>website.save()<block_end>
<import_stmt>os<import_stmt>gensim<import_stmt>pytest<import_stmt>compress_fasttext<import_from_stmt>sklearn.pipeline make_pipeline<import_from_stmt>sklearn.linear_model LogisticRegression<import_from_stmt>compress_fasttext.feature_extraction FastTextTransformer<line_sep>BIG_MODEL_FILE=os.path.join(os.path.dirname(os.path.dirname(__file__)) 'data/test_data/ft_leipzig_ru_mini.bin')<line_sep>BASE_MODEL_URL='https://github.com/avidale/compress-fasttext/releases/download/'<def_stmt>cosine_sim x y<block_start><return>sum(x<times>y)/(sum(x<power>2)<times>sum(y<power>2))<power>0.5<block_end>@pytest.mark.parametrize('method, params' [(compress_fasttext.quantize_ft dict(qdim=32)) (compress_fasttext.prune_ft_freq dict(pq=<false> new_ngrams_size=10_000 new_vocab_size=10_000)) (compress_fasttext.prune_ft_freq dict(pq=<true> new_ngrams_size=10_000 new_vocab_size=10_000 qdim=16)) (compress_fasttext.prune_ft dict(new_ngrams_size=10_000 new_vocab_size=10_000)) (compress_fasttext.svd_ft dict(n_components=32)) ])<def_stmt>test_prune_save_load method params<block_start>word1='синий'<line_sep>word2='белый'<line_sep>big_ft=gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE)<line_sep>vec0=big_ft[word1]<line_sep>small_model=method(big_ft **params)<assert_stmt>cosine_sim(vec0 small_model[word1])<g>0.75<line_sep>out1=small_model.most_similar(word1)<assert_stmt>word2<in>{w<for>w,sim out1}<line_sep>small_model.save('tmp_small.bin')<line_sep>small_model2=compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin')<assert_stmt>cosine_sim(vec0 small_model2[word1])<g>0.75<line_sep>out2=small_model2.most_similar(word1)<assert_stmt>word2<in>{w<for>w,sim out2}<assert_stmt>out1[0][1]<eq>pytest.approx(out2[0][1])<block_end>@pytest.mark.parametrize('word1, word2, model_name' [('белый' 'черный' 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin') ('white' 'black' 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin') ('white' 'black' 'v0.0.4/cc.en.300.compressed.bin') ])<def_stmt>test_loading_existing_models word1 word2 model_name<block_start>ft=compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL+model_name)<line_sep>out=ft.most_similar(word1)<assert_stmt>word2<in>{w<for>w,sim out}<block_end><def_stmt>test_sklearn_wrapper <block_start>small_model=compress_fasttext.models.CompressedFastTextKeyedVectors.load('https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin')<line_sep>classifier=make_pipeline(FastTextTransformer(model=small_model) LogisticRegression()).fit(['banana' 'soup' 'burger' 'car' 'tree' 'city'] [1 1 1 0 0 0])<assert_stmt>(classifier.predict(['jet' 'train' 'cake' 'apple'])<eq>[0 0 1 1]).all()<block_end>
# tests/test_provider_hashicorp_hashicups.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:18:02 UTC) <def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.hashicorp.hashicups<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.hashicorp.hashicups hashicups_order<block_end><def_stmt>test_datasource_import <block_start><import_from_stmt>terrascript.data.hashicorp.hashicups hashicups_coffees<import_from_stmt>terrascript.data.hashicorp.hashicups hashicups_ingredients<import_from_stmt>terrascript.data.hashicorp.hashicups hashicups_order<block_end># TODO: Shortcut imports without namespace for official and supported providers. # TODO: This has to be moved into a required_providers block. # def test_version_source(): # # import terrascript.provider.hashicorp.hashicups # # t = terrascript.provider.hashicorp.hashicups.hashicups() # s = str(t) # # assert 'https://github.com/hashicorp/terraform-provider-hashicups' in s # assert '0.3.1' in s
<import_stmt>pandas<as>pd<import_from_stmt>src.config Config<line_sep>config=Config()<line_sep>dfs=[]<for_stmt>cloth ['blouse' 'skirt' 'outwear' 'dress' 'trousers']<block_start>df=pd.read_csv(config.proj_path+'kp_predictions/'+cloth+'.csv')<line_sep>dfs.append(df)<block_end>res_df=pd.concat(dfs)<line_sep>res_df.to_csv(config.proj_path+'kp_predictions/result.csv' index=<false>)<line_sep>
<import_from_stmt>.base BaseHandler DefaultHandler# noqa: F401 <import_from_stmt>.cast caster<as>callback_caster# noqa: F401
# encoding: utf-8 """ od.py Created by <NAME> on 2009-09-06. Copyright (c) 2009-2017 Exa Networks. All rights reserved. License: 3-clause BSD. (See the COPYRIGHT file) """<def_stmt>od value<block_start><def_stmt>spaced value<block_start>even=<none><for_stmt>v value<block_start><if_stmt>even<is><false><block_start><yield>' '<block_end><yield>'%02X'%v<line_sep>even=<not>even<block_end><block_end><return>''.join(spaced(value))<block_end>
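A quick self-contained check of the od() helper above (re-pasted so it runs on its own): on a Python 3 byte string it emits uppercase hex grouped two bytes at a time.

def od(value):
    def spaced(value):
        even = None
        for v in value:
            if even is False:
                yield ' '
            yield '%02X' % v
            even = not even
    return ''.join(spaced(value))


print(od(b'\x00\x01\x02\x03\x04'))   # -> '0001 0203 04'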
<import_from_stmt>django.conf settings<import_stmt>time<import_stmt>requests<import_stmt>hashlib<import_stmt>traceback<import_stmt>json<class_stmt>WorkFlowAPiRequest(object)<block_start><def_stmt>__init__ self token=settings.WORKFLOW_TOKEN appname=settings.WORKFLOW_APP username='admin' workflowurl=settings.WORKFLOW_URL<block_start>self.token=token<line_sep>self.appname=appname<line_sep>self.username=username<line_sep>self.workflowurl=workflowurl<block_end><def_stmt>getrequestheader self<block_start>timestamp=str(time.time())[:10]<line_sep>ori_str=timestamp+self.token<line_sep>signature=hashlib.md5(ori_str.encode(encoding='utf-8')).hexdigest()<line_sep>headers=dict(signature=signature timestamp=timestamp appname=self.appname username=self.username)<line_sep><return>headers<block_end><def_stmt>getdata self parameters=dict() method='get' url='/api/v1.0/workflows/' timeout=300 data=dict()<block_start><if_stmt>method<not><in>['get' 'post' 'put' 'delete' 'patch']<block_start><return><false> 'method must be one of get post put delete or patch'<block_end><if_stmt><not>isinstance(parameters dict)<block_start><return><false> 'Parameters must be dict'<block_end>headers=self.getrequestheader()<try_stmt><block_start>r=getattr(requests method)('{0}{1}'.format(self.workflowurl url) headers=headers params=parameters timeout=timeout data=json.dumps(data))<line_sep>result=r.json()<line_sep><return><true> result<block_end><except_stmt><block_start><return><false> traceback.format_exc()<block_end><block_end><block_end># ins = WorkFlowAPiRequest() # print (ins.getdata(parameters=dict(username='admin', per_page=20, name=''),method='get',url='/api/v1.0/workflows'))
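A sketch of the signature scheme that getrequestheader() above implements; the token and app name below are invented placeholders for the corresponding Django settings.

import hashlib
import time

workflow_token = 'example-token'    # placeholder for settings.WORKFLOW_TOKEN
timestamp = str(time.time())[:10]
signature = hashlib.md5((timestamp + workflow_token).encode('utf-8')).hexdigest()
headers = dict(signature=signature, timestamp=timestamp,
               appname='example-app', username='admin')
print(headers)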
# -*- coding: utf-8 -*- __author__='PatchLion'<import_from_stmt>PIL Image ImageDraw ImageFont<def_stmt>drawNumberOnIcon imgpath number<block_start>img=Image.open(imgpath)<if_stmt>(<none><eq>img)<block_start>print('Failed to open image')<line_sep><return><block_end>img=img.resize((160 160))<line_sep>print(imgpath "->" img.format img.size img.mode)<line_sep>draw=ImageDraw.Draw(img)<line_sep>img_size=img.size<line_sep>font=ImageFont.truetype("Varela-Regular.otf" size=int(img_size[1]/4))<line_sep>text_size=font.getsize(str(number))<line_sep>draw.text((img_size[0]-text_size[0] 0) str(number) font=font fill=(255 0 0))<line_sep>img.save('icon_withnumber.jpg')<line_sep>print('Image generated successfully')<block_end>drawNumberOnIcon("icon.jpg" 21)<line_sep>
<import_stmt>argparse<import_stmt>os<import_from_stmt>collections Counter<import_from_stmt>sklearn.metrics confusion_matrix classification_report<def_stmt>read_labels filename<block_start>labels=[]<with_stmt>open(filename)<as>f<block_start><for_stmt>line f<block_start>line=line.strip()<if_stmt>len(line)<eq>0<block_start><continue><block_end>_,label=line.split('\t')<line_sep>labels.append(label)<block_end><block_end><return>labels<block_end><def_stmt>compare_labels true_labels pred_labels<block_start>true_set=set(true_labels)<line_sep>pred_set=set(pred_labels)<line_sep>print('\n▶ Label usage:')<line_sep>print(' ~ Used in both: {}'.format(true_set|pred_set))<line_sep>print(' ~ Extra in true: {}'.format(true_set-pred_set))<line_sep>print(' ~ Extra in pred: {}'.format(pred_set-true_set))<line_sep>print('\n▶ Raw counts:')<line_sep>true_counts=Counter(true_labels)<line_sep>pred_counts=Counter(pred_labels)<line_sep>sorted_labels=sorted(true_counts key=true_counts.get reverse=<true>)+sorted(pred_set-true_set)<line_sep>print('\tTrue\tPred\tDiff')<for_stmt>label sorted_labels<block_start>diff=pred_counts[label]-true_counts[label]<line_sep>direction='+'<if>diff<g>0<else>'-'<if>diff<l>0<else>' '<if_stmt>diff<l>0<block_start>diff=-diff<block_end>print('{}\t{}\t{}\t{}{:4}'.format(label true_counts[label] pred_counts[label] direction diff))<block_end>print('\n▶ Confusion matrix:')<line_sep>sorted_labels=sorted(true_set|pred_set)<line_sep>padded_labels=[lab+' '<times>(4-len(lab))<if>len(lab)<l>8<else>lab<for>lab sorted_labels]<line_sep>cm=confusion_matrix(true_labels pred_labels labels=sorted_labels)<line_sep>print(' \tpredicted:')<line_sep>print(' \t'+'\t'.join(padded_labels))<for_stmt>i range(len(cm))<block_start>prefix='true: '<if>i<eq>0<else>' '<times>6<line_sep>prefix<augadd>padded_labels[i]<line_sep>print(prefix+'\t'+'\t'.join([str(n)<for>n cm[i]]))<block_end>print('\n▶ Classification report:')<line_sep>print(classification_report(true_labels pred_labels digits=3))<line_sep>print('\n▶ Classification report w/o O label:')<line_sep>print(classification_report(true_labels pred_labels labels=list(true_set-{'O'}) digits=3))<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--path" default=<none> type=str required=<true> help="Base path")<line_sep>parser.add_argument("--name" default=<none> type=str required=<true> help="File name [train,dev,test]")<line_sep>args=parser.parse_args()<line_sep>true_path=os.path.join(args.path args.name+'.true.tsv')<line_sep>pred_path=os.path.join(args.path args.name+'.pred.tsv')<line_sep>true_labels=read_labels(true_path)<line_sep>print('▶ Read true labels from {}'.format(true_path))<line_sep>pred_labels=read_labels(pred_path)<line_sep>print('▶ Read pred labels from {}'.format(pred_path))<if_stmt>len(true_labels)<ne>len(pred_labels)<block_start>print('True and pred file do not have the same amount of labels ({} and {})'.format(len(true_labels) len(pred_labels)))<line_sep>exit(-1)<block_end>print('\nFull label comparison:')<line_sep>compare_labels(true_labels pred_labels)<if_stmt>set([lab[0]<for>lab true_labels])<eq>{'B' 'I' 'O'}<block_start>true_label_cats=[lab<if>lab<eq>'O'<else>lab[2:]<for>lab true_labels]<line_sep>pred_label_cats=[lab<if>lab<eq>'O'<else>lab[2:]<for>lab pred_labels]<line_sep>print('\nBIO category comparison:')<line_sep>compare_labels(true_label_cats pred_label_cats)<block_end><if_stmt>'O'<in>true_labels<block_start>true_label_binary=['O'<if>lab<eq>'O'<else>'X'<for>lab 
true_labels]<line_sep>pred_label_binary=['O'<if>lab<eq>'O'<else>'X'<for>lab pred_labels]<line_sep>print('\nBinary comparison:')<line_sep>compare_labels(true_label_binary pred_label_binary)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
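A sketch of the label sequences compare_labels() above operates on, together with the two derived views (BIO category and binary) that main() builds before re-running the comparison; the label lists are made up.

true_labels = ['O', 'B-PER', 'I-PER', 'O', 'B-LOC']   # made-up gold labels
pred_labels = ['O', 'B-PER', 'O', 'O', 'B-LOC']       # made-up predictions

# BIO category view: strip the B-/I- prefix, keep 'O'
true_cats = [lab if lab == 'O' else lab[2:] for lab in true_labels]
pred_cats = [lab if lab == 'O' else lab[2:] for lab in pred_labels]

# binary view: entity token ('X') vs. outside ('O')
true_bin = ['O' if lab == 'O' else 'X' for lab in true_labels]
pred_bin = ['O' if lab == 'O' else 'X' for lab in pred_labels]

print(true_cats, pred_cats)   # ['O', 'PER', 'PER', 'O', 'LOC'] ['O', 'PER', 'O', 'O', 'LOC']
print(true_bin, pred_bin)     # ['O', 'X', 'X', 'O', 'X'] ['O', 'X', 'O', 'O', 'X']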
# Copyright 2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>urllib.request<as>request<import_from_stmt>html.parser HTMLParser<import_stmt>re<import_from_stmt>mako.template Template<import_stmt>os<import_from_stmt>gpu_info incompatible_arcs gpu_compute_capability_to_arc<line_sep>basedir=os.path.dirname(os.path.abspath(__file__))<line_sep>r=request.urlopen('https://developer.nvidia.com/cuda-gpus')<class_stmt>GetGpuListFromNvidiaSite(HTMLParser)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.td=<false><line_sep>self.last_value=<none><line_sep>self.last_data=''<line_sep>self.gpu_data={}<block_end><def_stmt>handle_starttag self tag attrs<block_start><if_stmt>tag<eq>'td'<block_start>self.td=<true><block_end><block_end><def_stmt>handle_endtag self tag<block_start><if_stmt>tag<eq>'td'<block_start><if_stmt>self.td<block_start>m=re.match(r'((\d+)\.(\d+))' self.last_data.strip())<if_stmt>m<block_start>cap=m.group(1)<line_sep>cap_major=int(m.group(2))<line_sep>cap_minor=int(m.group(3))<line_sep>arch=gpu_compute_capability_to_arc.get(cap_major)<if_stmt>arch<is><none><block_start>arch=gpu_compute_capability_to_arc.get((cap_major cap_minor))<if_stmt>arch<is><none><block_start>print(f'Error: unknown capability [{cap}]')<line_sep>arch=''<block_end><block_end>name=self.last_value.lower().replace('nvidia ' '').replace('tesla ' '')<line_sep># remove prefix self.gpu_data[name]=(arch cap)<block_end>self.last_value=self.last_data.strip()<line_sep>self.last_data=''<line_sep>self.td=<false><block_end><block_end><block_end><def_stmt>handle_data self data<block_start><if_stmt>self.td<block_start>self.last_data<augadd>data<block_end><block_end><block_end>parser=GetGpuListFromNvidiaSite()<line_sep>parser.feed(r.read().decode())<line_sep>gpus_info=parser.gpu_data<line_sep>incompatible_gpus={}<for_stmt>k incompatible_arcs<block_start><if_stmt><not>incompatible_gpus.get(k)<block_start>incompatible_gpus[k]=[]<block_end>iarc=incompatible_arcs[k]<for_stmt>gpu_name gpus_info.keys()<block_start><if_stmt>gpus_info[gpu_name][0]<in>iarc<block_start>incompatible_gpus[k].append(gpu_name)<block_end><block_end><block_end>fname=os.path.join(basedir 'skel' 'incompatibale_gpu_list.py.tmpl')<line_sep>tmpl=Template(filename=fname)<line_sep>lines=tmpl.render(args=incompatible_gpus)<with_stmt>open("./python/src/nnabla_ext/cuda/incompatible_gpu_list.py" 'w')<as>f<block_start><for_stmt>l lines<block_start>f.write(l)<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_stmt>ipaddress<import_from_stmt>itertools chain<import_from_stmt>django.db migrations models<line_sep>IPADDRESS_STATUS_RESERVED=2<def_stmt>_reserve_margin_addresses network bottom_count top_count IPAddress<block_start>ips=[]<line_sep>ips_query=IPAddress.objects.filter(models.Q(number__gte=network.min_ip+1 number__lte=network.min_ip+bottom_count+1)|models.Q(number__gte=network.max_ip-top_count number__lte=network.max_ip))<line_sep>existing_ips=set(ips_query.values_list('number' flat=<true>))<line_sep>to_create=set(chain.from_iterable([range(int(network.min_ip+1) int(network.min_ip+bottom_count+1)) # noqa range(int(network.max_ip-top_count) int(network.max_ip))]))<line_sep>to_create=to_create-existing_ips<for_stmt>ip_as_int to_create<block_start>ips.append(IPAddress(address=str(ipaddress.ip_address(ip_as_int)) number=ip_as_int network=network status=IPADDRESS_STATUS_RESERVED))<block_end>print('Creating {} ips for {}'.format(len(ips) network))<line_sep>IPAddress.objects.bulk_create(ips)<line_sep>ips_query.update(status=IPADDRESS_STATUS_RESERVED)<block_end><def_stmt>create_reserved_ips apps schema_editor<block_start>IPAddress=apps.get_model('networks' 'IPAddress')<line_sep>Network=apps.get_model('networks' 'Network')<for_stmt>network Network.objects.all()<block_start>_reserve_margin_addresses(network network.reserved_from_beginning network.reserved_from_end IPAddress)<block_end><block_end><def_stmt>remove_reserved_ips apps schema_editor<block_start>IPAddress=apps.get_model('networks' 'IPAddress')<line_sep>ips=IPAddress.objects.filter(models.Q(ethernet__isnull=<true>)|(models.Q(ethernet__base_object__isnull=<true>)&models.Q(ethernet__mac__isnull=<false>)) status=IPADDRESS_STATUS_RESERVED gateway_network__isnull=<true> )<line_sep>print('Removing {} reserved IPs'.format(ips.count()))<line_sep>ips.delete()<block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('networks' '0007_auto_20160804_1409') ]<line_sep>operations=[migrations.AddField(model_name='network' name='reserved_from_beginning' field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignmentcounted from the first IP in range (excluding network address)' default=10) ) migrations.AddField(model_name='network' name='reserved_from_end' field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignmentcounted from the last IP in range (excluding broadcast address)' default=0) ) migrations.RunPython(remove_reserved_ips reverse_code=create_reserved_ips) ]<block_end>
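A sketch of the margin arithmetic in _reserve_margin_addresses() above, using only the standard-library ipaddress module and a made-up 10.0.0.0/24 network in place of the Django Network model.

import ipaddress

net = ipaddress.ip_network('10.0.0.0/24')   # made-up network
min_ip, max_ip = int(net.network_address), int(net.broadcast_address)
reserved_from_beginning, reserved_from_end = 10, 2

to_create = set(range(min_ip + 1, min_ip + reserved_from_beginning + 1)) | \
            set(range(max_ip - reserved_from_end, max_ip))
reserved = [str(ipaddress.ip_address(i)) for i in sorted(to_create)]
print(reserved[:3], reserved[-2:])   # ['10.0.0.1', '10.0.0.2', '10.0.0.3'] ['10.0.0.253', '10.0.0.254']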
<import_stmt>os<import_stmt>torch<import_stmt>constants<import_from_stmt>utils.misc get_learning_rate<import_from_stmt>utils.summary TensorboardSummary<import_from_stmt>utils.loss SegmentationLosses<import_from_stmt>utils.calculate_weights calculate_weights_labels<import_from_stmt>torch.utils.data DataLoader<import_stmt>numpy<as>np<import_from_stmt>utils.metrics Evaluator<import_from_stmt>tqdm tqdm<import_stmt>random<class_stmt>Trainer<block_start><def_stmt>__init__ self args model train_set val_set test_set class_weights saver<block_start>self.args=args<line_sep>self.saver=saver<line_sep>self.saver.save_experiment_config()<line_sep>self.train_dataloader=DataLoader(train_set batch_size=args.batch_size shuffle=<true> num_workers=args.workers)<line_sep>self.val_dataloader=DataLoader(val_set batch_size=args.batch_size shuffle=<false> num_workers=args.workers)<line_sep>self.test_dataloader=DataLoader(test_set batch_size=args.batch_size shuffle=<false> num_workers=args.workers)<line_sep>self.train_summary=TensorboardSummary(os.path.join(self.saver.experiment_dir "train"))<line_sep>self.train_writer=self.train_summary.create_summary()<line_sep>self.val_summary=TensorboardSummary(os.path.join(self.saver.experiment_dir "validation"))<line_sep>self.val_writer=self.val_summary.create_summary()<line_sep>self.model=model<line_sep>self.dataset_size={'train':len(train_set) 'val':len(val_set) 'test':len(test_set)}<line_sep>train_params=[{'params':model.get_1x_lr_params() 'lr':args.lr} {'params':model.get_10x_lr_params() 'lr':args.lr<times>10}]<if_stmt>args.use_balanced_weights<block_start>weight=torch.from_numpy(class_weights.astype(np.float32))<block_end><else_stmt><block_start>weight=<none><block_end><if_stmt>args.optimizer<eq>'SGD'<block_start>print('Using SGD')<line_sep>self.optimizer=torch.optim.SGD(train_params momentum=args.momentum weight_decay=args.weight_decay nesterov=args.nesterov)<block_end><elif_stmt>args.optimizer<eq>'Adam'<block_start>print('Using Adam')<line_sep>self.optimizer=torch.optim.Adam(train_params weight_decay=args.weight_decay)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>self.lr_scheduler=<none><if_stmt>args.use_lr_scheduler<block_start><if_stmt>args.lr_scheduler<eq>'step'<block_start>print('Using step lr scheduler')<line_sep>self.lr_scheduler=torch.optim.lr_scheduler.MultiStepLR(self.optimizer milestones=[int(x)<for>x args.step_size.split(",")] gamma=0.1)<block_end><block_end>self.criterion=SegmentationLosses(weight=weight ignore_index=255 cuda=args.cuda).build_loss(mode=args.loss_type)<line_sep>self.evaluator=Evaluator(train_set.num_classes)<line_sep>self.best_pred=0.0<block_end><def_stmt>training self epoch<block_start>train_loss=0.0<line_sep>self.model.train()<line_sep>num_img_tr=len(self.train_dataloader)<line_sep>tbar=tqdm(self.train_dataloader desc='\r')<line_sep>visualization_index=int(random.random()<times>len(self.train_dataloader))<line_sep>vis_img,vis_tgt,vis_out=<none> <none> <none><line_sep>self.train_writer.add_scalar('learning_rate' get_learning_rate(self.optimizer) epoch)<for_stmt>i,sample enumerate(tbar)<block_start>image,target=sample['image'] sample['label']<line_sep>image,target=image.cuda() target.cuda()<line_sep>self.optimizer.zero_grad()<line_sep>output=self.model(image)<line_sep>loss=self.criterion(output target)<line_sep>loss.backward()<line_sep>self.optimizer.step()<line_sep>train_loss<augadd>loss.item()<line_sep>tbar.set_description('Train loss: 
%.3f'%(train_loss/(i+1)))<line_sep>self.train_writer.add_scalar('total_loss_iter' loss.item() i+num_img_tr<times>epoch)<if_stmt>i<eq>visualization_index<block_start>vis_img,vis_tgt,vis_out=image target output<block_end><block_end>self.train_writer.add_scalar('total_loss_epoch' train_loss/self.dataset_size['train'] epoch)<if_stmt>constants.VISUALIZATION<block_start>self.train_summary.visualize_state(self.train_writer self.args.dataset vis_img vis_tgt vis_out epoch)<block_end>print('[Epoch: %d, numImages: %5d]'%(epoch i<times>self.args.batch_size+image.data.shape[0]))<line_sep>print('Loss: %.3f'%train_loss)<line_sep>print('BestPred: %.3f'%self.best_pred)<block_end><def_stmt>validation self epoch test=<false><block_start>self.model.eval()<line_sep>self.evaluator.reset()<line_sep>ret_list=[]<if_stmt>test<block_start>tbar=tqdm(self.test_dataloader desc='\r')<block_end><else_stmt><block_start>tbar=tqdm(self.val_dataloader desc='\r')<block_end>test_loss=0.0<line_sep>visualization_index=int(random.random()<times>len(self.val_dataloader))<line_sep>vis_img,vis_tgt,vis_out=<none> <none> <none><for_stmt>i,sample enumerate(tbar)<block_start>image,target=sample['image'] sample['label']<line_sep>image,target=image.cuda() target.cuda()<with_stmt>torch.no_grad()<block_start>output=self.model(image)<block_end><if_stmt>i<eq>visualization_index<block_start>vis_img,vis_tgt,vis_out=image target output<block_end>loss=self.criterion(output target)<line_sep>test_loss<augadd>loss.item()<line_sep>tbar.set_description('Test loss: %.3f'%(test_loss/(i+1)))<line_sep>pred=torch.argmax(output dim=1).data.cpu().numpy()<line_sep>target=target.cpu().numpy()<line_sep>self.evaluator.add_batch(target pred)<block_end>Acc=self.evaluator.Pixel_Accuracy()<line_sep>Acc_class=self.evaluator.Pixel_Accuracy_Class()<line_sep>mIoU=self.evaluator.Mean_Intersection_over_Union()<line_sep>mIoU_20=self.evaluator.Mean_Intersection_over_Union_20()<line_sep>FWIoU=self.evaluator.Frequency_Weighted_Intersection_over_Union()<if_stmt><not>test<block_start>self.val_writer.add_scalar('total_loss_epoch' test_loss/self.dataset_size['val'] epoch)<line_sep>self.val_writer.add_scalar('mIoU' mIoU epoch)<line_sep>self.val_writer.add_scalar('mIoU_20' mIoU_20 epoch)<line_sep>self.val_writer.add_scalar('Acc' Acc epoch)<line_sep>self.val_writer.add_scalar('Acc_class' Acc_class epoch)<line_sep>self.val_writer.add_scalar('fwIoU' FWIoU epoch)<if_stmt>constants.VISUALIZATION<block_start>self.val_summary.visualize_state(self.val_writer self.args.dataset vis_img vis_tgt vis_out epoch)<block_end><block_end>print("Test: "<if>test<else>"Validation:")<line_sep>print('[Epoch: %d, numImages: %5d]'%(epoch i<times>self.args.batch_size+image.data.shape[0]))<line_sep>print("Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}".format(Acc Acc_class mIoU mIoU_20 FWIoU))<line_sep>print('Loss: %.3f'%test_loss)<if_stmt><not>test<block_start>new_pred=mIoU<if_stmt>new_pred<g>self.best_pred<block_start>self.best_pred=new_pred<line_sep>self.saver.save_checkpoint({'epoch':epoch+1 'state_dict':self.model.state_dict() 'optimizer':self.optimizer.state_dict() 'best_pred':self.best_pred })<block_end><block_end><return>test_loss mIoU mIoU_20 Acc Acc_class FWIoU<block_end>#, ret_list <def_stmt>load_best_checkpoint self<block_start>checkpoint=self.saver.load_checkpoint()<line_sep>self.model.load_state_dict(checkpoint['state_dict'])<line_sep>self.optimizer.load_state_dict(checkpoint['optimizer'])<line_sep>print(f'=> loaded checkpoint - epoch 
{checkpoint["epoch"]})')<line_sep><return>checkpoint["epoch"]<block_end><block_end>
<import_from_stmt>.WebGuard WebGuard<line_sep>
# encoding: utf-8 # http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\ Pro\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\Program Files (x86)\IDA X\plugins # to make it autoexec on openfile # add this to plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for help :) <import_from_future_stmt> print_function<import_from_stmt>idautils Heads<import_from_stmt>idc get_segm_start get_segm_end print_insn_mnem get_screen_ea print_operand set_color CIC_ITEM<import_stmt>idaapi<line_sep>#idaapi.auto_wait() PLUGIN_TEST=1<class_stmt>FullColor_t(idaapi.plugin_t)<block_start>flags=idaapi.PLUGIN_UNL<line_sep>comment="Set colors :)"<line_sep>help="No help needed"<line_sep>wanted_name="FullColor"<line_sep>wanted_hotkey=""<def_stmt>init self#idaapi.msg("init() called!\n") #self.run(0) <block_start><return>idaapi.PLUGIN_OK<block_end><def_stmt>run self arg=0<block_start>print("hell2")<line_sep>idaapi.msg("run() called with %d!\n"%arg)<line_sep>heads=Heads(get_segm_start(get_screen_ea()) get_segm_end(get_screen_ea()))<line_sep>funcCalls=[]<line_sep>xor=[]<line_sep>antiVM=[]<for_stmt>i heads# Color the Calls off-white <block_start><if_stmt>print_insn_mnem(i)<eq>"call"<block_start>funcCalls.append(i)<block_end># Color Anti-VM instructions Red and print their location <elif_stmt>print_insn_mnem(i)<in>("sidt" "sgdt" "sldt" "smsw" "str" "in" "cpuid")<block_start>antiVM.append(i)<block_end># Color non-zeroing out xor instructions Orange <elif_stmt>print_insn_mnem(i)<eq>"xor"<and>(print_operand(i 0)<ne>print_operand(i 1))<block_start>xor.append(i)<block_end><block_end>print("Number of calls: %d"%(len(funcCalls)))<for_stmt>i funcCalls<block_start>set_color(i CIC_ITEM 0xc7fdff)<block_end>print("Number of potential Anti-VM instructions: %d"%(len(antiVM)))<for_stmt>i antiVM<block_start>print("Anti-VM potential at %x"%i)<line_sep>set_color(i CIC_ITEM 0x0000ff)<block_end>print("Number of xor: %d"%(len(xor)))<for_stmt>i xor<block_start>set_color(i CIC_ITEM 0x00a5ff)<block_end><block_end><def_stmt>term self<block_start>idaapi.msg("term() called!\n")<block_end><block_end><def_stmt>PLUGIN_ENTRY <block_start><return>FullColor_t()<block_end><if_stmt>PLUGIN_TEST# Create form <block_start>f=PLUGIN_ENTRY()<line_sep>f.init()<line_sep>f.run()<line_sep>f.term()<block_end>
<import_stmt>numpy<as>np<import_stmt>time<import_stmt>os<import_stmt>argparse<import_stmt>torch<import_from_stmt>torch.backends cudnn<import_from_stmt>torch optim<import_stmt>torch.nn.functional<as>tfunc<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torch.optim.lr_scheduler StepLR<import_from_stmt>.utils.dataloader RSNADataSet<import_from_stmt>.utils.score compute_auroc<import_from_stmt>.utils.model DenseNet121 DenseNet121Eff<import_from_stmt>math sqrt<import_stmt>json<import_from_stmt>tqdm tqdm<as>tq<class_stmt>RSNATrainer()<block_start><def_stmt>__init__ self model data_loader_train data_loader_valid data_loader_test class_count checkpoint device class_names lr<block_start>self.gepoch_id=0<line_sep>self.device=device<line_sep>self.model=model.to(self.device)<line_sep>self.data_loader_train=data_loader_train<line_sep>self.data_loader_valid=data_loader_valid<line_sep>self.data_loader_test=data_loader_test<line_sep>self.class_names=class_names<line_sep>self.class_count=class_count<line_sep>self.auroc_max=0.0# Setting maximum AUROC value as zero self.optimizer=optim.Adam(self.model.parameters() lr=lr)<if_stmt>checkpoint<is><not><none><block_start>model_checkpoint=torch.load(checkpoint)<line_sep>self.optimizer.load_state_dict(model_checkpoint['optimizer'])<block_end><else_stmt><block_start>model_checkpoint=<none><block_end>self.loss_fn=torch.nn.BCELoss()<block_end><def_stmt>train self max_epoch savepath<block_start>train_loss_min=1e+5# A random very high number valid_loss_min=1e+5<for_stmt>epoch_id range(max_epoch)<block_start>print(f"Epoch {epoch_id+1}/{max_epoch}")<line_sep>self.gepoch_id=epoch_id<line_sep>train_loss,valid_loss,auroc_max=self.epoch_train()<line_sep>self.current_train_loss=train_loss<line_sep>self.current_valid_loss=valid_loss<line_sep>timestamp_end=time.strftime("%H%M%S-%d%m%Y")<if_stmt>train_loss<l>train_loss_min<block_start>train_loss_min=train_loss<block_end><if_stmt>valid_loss<l>valid_loss_min<block_start>valid_loss_min=valid_loss<block_end>torch.save({'epoch':epoch_id+1 'state_dict':self.model.state_dict() 'best_loss':valid_loss_min 'optimizer':self.optimizer.state_dict()} os.path.join(savepath f'm-epoch-{epoch_id}.pth'))<line_sep>test_auroc=self.test()<line_sep>print(f"Epoch:{epoch_id+1}| EndTime:{timestamp_end}| TestAUROC: {test_auroc}| ValidAUROC: {auroc_max}")<block_end><block_end><def_stmt>valid self<block_start>self.model.eval()<line_sep>loss_valid_r=0<line_sep>valid_batches=0# Counter for valid batches out_gt=torch.FloatTensor().to(self.device)<line_sep>out_pred=torch.FloatTensor().to(self.device)<with_stmt>torch.no_grad()<block_start><for_stmt>(var_input var_target) tq(self.data_loader_valid)<block_start>var_target=var_target.to(self.device)<line_sep>out_gt=torch.cat((out_gt var_target) 0).to(self.device)<line_sep>_,c,h,w=var_input.size()<line_sep>var_input=var_input.view(-1 c h w)<line_sep>var_output=self.model(var_input.to(self.device))<line_sep>out_pred=torch.cat((out_pred var_output) 0)<line_sep>lossvalue=self.loss_fn(var_output tfunc.one_hot(var_target.squeeze(1).long() num_classes=self.class_count).float())<line_sep>loss_valid_r<augadd>lossvalue.item()<line_sep>valid_batches<augadd>1<block_end>valid_loss=loss_valid_r/valid_batches<line_sep>auroc_individual=compute_auroc(tfunc.one_hot(out_gt.squeeze(1).long()).float() out_pred self.class_count)<line_sep>print(len(auroc_individual))<line_sep>auroc_mean=np.array(auroc_individual).mean()<block_end><return>valid_loss auroc_mean<block_end><def_stmt>epoch_train 
self<block_start>loss_train_list=[]<line_sep>loss_valid_list=[]<line_sep>self.model.train()<line_sep>scheduler=StepLR(self.optimizer step_size=6 gamma=0.002)<for_stmt>batch_id,(var_input var_target) tq(enumerate(self.data_loader_train))<block_start>var_target=var_target.to(self.device)<line_sep>var_input=var_input.to(self.device)<line_sep>var_output=self.model(var_input)<line_sep>trainloss_value=self.loss_fn(var_output tfunc.one_hot(var_target.squeeze(1).long() num_classes=self.class_count).float())<line_sep>self.optimizer.zero_grad()<line_sep>trainloss_value.backward()<line_sep>self.optimizer.step()<line_sep>train_loss_value=trainloss_value.item()<line_sep>loss_train_list.append(train_loss_value)<if_stmt>batch_id%(len(self.data_loader_train)-1)<eq>0<and>batch_id<ne>0<block_start>validloss_value,auroc_mean=self.valid()<line_sep>loss_valid_list.append(validloss_value)<if_stmt>auroc_mean<g>self.auroc_max<block_start>print('Better auroc obtained')<line_sep>self.auroc_max=auroc_mean<block_end>scheduler.step()<block_end><block_end>train_loss_mean=np.mean(loss_train_list)<line_sep>valid_loss_mean=np.mean(loss_valid_list)<line_sep><return>train_loss_mean valid_loss_mean auroc_mean<block_end><def_stmt>test self<block_start>cudnn.benchmark=<true><line_sep>out_gt=torch.FloatTensor().to(self.device)<line_sep>out_pred=torch.FloatTensor().to(self.device)<line_sep>self.model.eval()<with_stmt>torch.no_grad()<block_start><for_stmt>i,(var_input var_target) enumerate(self.data_loader_test)<block_start>var_target=var_target.to(self.device)<line_sep>var_input=var_input.to(self.device)<line_sep>out_gt=torch.cat((out_gt var_target) 0).to(self.device)<line_sep>_,c,h,w=var_input.size()<line_sep>var_input=var_input.view(-1 c h w)<line_sep>out=self.model(var_input)<line_sep>out_pred=torch.cat((out_pred out) 0)<block_end><block_end>auroc_individual=compute_auroc(tfunc.one_hot(out_gt.squeeze(1).long()).float() out_pred self.class_count)<line_sep>auroc_mean=np.array(auroc_individual).mean()<line_sep>print(f'AUROC mean:{auroc_mean}')<for_stmt>i,auroc_val enumerate(auroc_individual)<block_start>print(f"{self.class_names[i]}:{auroc_val}")<block_end><return>auroc_mean<block_end><block_end><def_stmt>main args<block_start>lr=args.lr<line_sep>checkpoint=args.checkpoint<line_sep>batch_size=args.bs<line_sep>max_epoch=args.epochs<line_sep>class_count=args.clscount#The objective is to classify the image into 3 classes device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")# use gpu if available class_names=['Lung Opacity' 'Normal' 'No Lung Opacity / Not Normal']<line_sep># Data Loader dpath=args.dpath<line_sep>img_pth=os.path.join(args.dpath 'processed_data/')<line_sep>numpy_path=os.path.join(args.dpath 'data_split/')<with_stmt>open(os.path.join(dpath 'rsna_annotation.json'))<as>lab_file<block_start>labels=json.load(lab_file)<block_end># Place numpy file containing train-valid-test split on tools folder tr_list=np.load(os.path.join(numpy_path 'train_list.npy')).tolist()<line_sep>val_list=np.load(os.path.join(numpy_path 'valid_list.npy')).tolist()<line_sep>test_list=np.load(os.path.join(numpy_path 'test_list.npy')).tolist()<line_sep>dataset_train=RSNADataSet(tr_list labels img_pth transform=<true>)<line_sep>dataset_valid=RSNADataSet(val_list labels img_pth transform=<true>)<line_sep>data_loader_train=DataLoader(dataset=dataset_train batch_size=batch_size shuffle=<true> num_workers=4 pin_memory=<false>)<line_sep>data_loader_valid=DataLoader(dataset=dataset_valid batch_size=batch_size shuffle=<false> num_workers=4 
pin_memory=<false>)<line_sep>dataset_test=RSNADataSet(test_list labels img_pth transform=<true>)<line_sep>data_loader_test=DataLoader(dataset=dataset_test batch_size=1 shuffle=<false> num_workers=4 pin_memory=<false>)<line_sep># Construct Model <if_stmt>args.optimised<block_start>alpha=args.alpha<line_sep>phi=args.phi<line_sep>beta=args.beta<if_stmt>beta<is><none><block_start>beta=round(sqrt(2/alpha) 3)<block_end>alpha=alpha<power>phi<line_sep>beta=beta<power>phi<line_sep>model=DenseNet121Eff(alpha beta class_count)<block_end><else_stmt><block_start>model=DenseNet121(class_count)<block_end># Train the Model savepath=args.spath<line_sep>rsna_trainer=RSNATrainer(model data_loader_train data_loader_valid data_loader_test class_count checkpoint device class_names lr)<line_sep>rsna_trainer.train(max_epoch savepath)<line_sep>print("Model trained !")<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--lr" required=<false> help="Learning rate" default=1e-4 type=float)<line_sep>parser.add_argument("--checkpoint" required=<false> help="Checkpoint model weight" default=<none> type=str)<line_sep>parser.add_argument("--bs" required=<false> default=16 help="Batchsize" type=int)<line_sep>parser.add_argument("--dpath" required=<true> help="Path to folder containing all data" type=str)<line_sep>parser.add_argument("--epochs" required=<false> default=15 help="Number of epochs" type=int)<line_sep>parser.add_argument("--clscount" required=<false> default=3 help="Number of classes" type=int)<line_sep>parser.add_argument("--spath" required=<true> help="Path to folder in which models should be saved" type=str)<line_sep>parser.add_argument("--optimised" required=<false> default=<false> help="enable flag->eff model" action='store_true')<line_sep>parser.add_argument("--alpha" required=<false> help="alpha for the model" default=(11/6) type=float)<line_sep>parser.add_argument("--phi" required=<false> help="Phi for the model." default=1.0 type=float)<line_sep>parser.add_argument("--beta" required=<false> help="Beta for the model." default=<none> type=float)<line_sep>custom_args=parser.parse_args()<line_sep>main(custom_args)<block_end>
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>yfinance<as>yf<import_stmt>matplotlib.pyplot<as>plt<import_stmt>datetime<import_from_stmt>yahoo_fin stock_info<as>si<line_sep>plt.rcParams['figure.figsize']=(15 10)<line_sep>tickers=si.tickers_dow()<line_sep>individual_stock=input(f"Which of the following stocks would you like to backtest \n{tickers}\n:")<line_sep>num_of_years=1<line_sep>start=datetime.date.today()-datetime.timedelta(days=int(365.25<times>num_of_years))<line_sep>yf_prices=yf.download(tickers start=start)<line_sep># Individual Stock Strategy prices=yf_prices['Adj Close'][individual_stock]<line_sep>rs=prices.apply(np.log).diff(1).fillna(0)<line_sep>w1=5<line_sep>w2=22<line_sep>ma_x=prices.rolling(w1).mean()-prices.rolling(w2).mean()<line_sep>pos=ma_x.apply(np.sign)<line_sep>fig,ax=plt.subplots(2 1)<line_sep>ma_x.plot(ax=ax[0] title=f'{individual_stock} Moving Average Crossovers and Positions')<line_sep>pos.plot(ax=ax[1])<line_sep>plt.show()<line_sep>my_rs=pos.shift(1)<times>rs<line_sep>plt.subplots()<line_sep>my_rs.cumsum().apply(np.exp).plot(title=f'{individual_stock} MA Strategy Performance')<line_sep>rs.cumsum().apply(np.exp).plot()<line_sep>plt.legend([f'{individual_stock} MA Performace' f'{individual_stock} Buy and Hold Performnace'])<line_sep>plt.show()<line_sep>print(f'Performance Statistics for {individual_stock} ({num_of_years} years):')<line_sep>print('Moving Average Return: '+str(100<times>round(my_rs.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep>print('Buy and Hold Return: '+str(100<times>round(rs.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep># Full Portfolio Strategy prices=yf_prices['Adj Close']<line_sep>rs=prices.apply(np.log).diff(1).fillna(0)<line_sep>w1=5<line_sep>w2=22<line_sep>ma_x=prices.rolling(w1).mean()-prices.rolling(w2).mean()<line_sep>pos=ma_x.apply(np.sign)<line_sep>pos<augdiv>pos.abs().sum(1).values.reshape(-1 1)<line_sep>fig,ax=plt.subplots(2 1)<line_sep>ma_x.plot(ax=ax[0] title='Individual Moving Average Crossovers and Positions')<line_sep>ax[0].legend(bbox_to_anchor=(1.1 1.05))<line_sep>pos.plot(ax=ax[1])<line_sep>ax[1].legend(bbox_to_anchor=(1.1 1.05))<line_sep>plt.show()<line_sep>my_rs=(pos.shift(1)<times>rs)<line_sep>my_rs.cumsum().apply(np.exp).plot(title='Individual Stocks Strategy Performance')<line_sep>plt.show()<line_sep>print('-'<times>60)<line_sep>print(f'Performance Statistics for {num_of_years} years:')<for_stmt>i range(len(tickers))<block_start>print(f'Moving Average Return for {tickers[i]}: '+str(100<times>round(my_rs.cumsum().apply(np.exp)[tickers[i]].tolist()[-1] 4))+'%')<line_sep>i=i+1<block_end>plt.subplots()<line_sep>my_rs=(pos.shift(1)<times>rs).sum(1)<line_sep>my_rs.cumsum().apply(np.exp).plot(title='Full Portfolio Strategy Performance')<line_sep>rs.mean(1).cumsum().apply(np.exp).plot()<line_sep>plt.legend(['Portfolio MA Performace' 'Buy and Hold Performnace'])<line_sep>plt.show()<line_sep>print('-'<times>60)<line_sep>print(f'Performance Statistics for {tickers} ({num_of_years} years):')<line_sep>print('Moving Average Return: '+str(100<times>round(my_rs.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep>print('Buy and Hold Return: '+str(100<times>round(rs.mean(1).cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep># Portfolio Tests # Look-Ahead Bias my_rs1=(pos<times>rs).sum(1)<line_sep>my_rs2=(pos.shift(1)<times>rs).sum(1)<line_sep>plt.subplots()<line_sep>my_rs1.cumsum().apply(np.exp).plot(title='Full Portfolio 
Performance')<line_sep>my_rs2.cumsum().apply(np.exp).plot()<line_sep>plt.legend(['With Look-Ahead Bias' 'Without Look-Ahead Bias'])<line_sep>plt.show()<line_sep>print('-'<times>60)<line_sep>print(f'Performance Statistics for {tickers} ({num_of_years} years):')<line_sep>print('With Look-Ahead Bias: '+str(100<times>round(my_rs1.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep>print('Without Look-Ahead Bias: '+str(100<times>round(my_rs2.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep># Signal Lags lags=range(1 11)<line_sep>lagged_rs=pd.Series(dtype=float index=lags)<line_sep>print('-'<times>60)<line_sep>print(f'Lag Performance Statistics for {tickers} ({num_of_years} years):')<for_stmt>lag lags<block_start>my_rs=(pos.shift(lag)<times>rs).sum(1)<line_sep>my_rs.cumsum().apply(np.exp).plot()<line_sep>lagged_rs[lag]=my_rs.sum()<line_sep>print(f'Lag {lag} Return: '+str(100<times>round(my_rs.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<block_end>plt.title('Full Portfolio Strategy Performance with Lags')<line_sep>plt.legend(lags bbox_to_anchor=(1.1 0.95))<line_sep>plt.show()<line_sep># Transaction Costs tc_pct=0.01<line_sep>delta_pos=pos.diff(1).abs().sum(1)<line_sep>my_tcs=tc_pct<times>delta_pos<line_sep>my_rs1=(pos.shift(1)<times>rs).sum(1)<line_sep>my_rs2=(pos.shift(1)<times>rs).sum(1)-my_tcs<line_sep>plt.subplots()<line_sep>my_rs1.cumsum().apply(np.exp).plot()<line_sep>my_rs2.cumsum().apply(np.exp).plot()<line_sep>plt.title('Full Portfolio Performance')<line_sep>plt.legend(['Without Transaction Costs' 'With Transaction Costs'])<line_sep>plt.show()<line_sep>print('-'<times>60)<line_sep>print(f'Performance Statistics for {tickers} ({num_of_years} years):')<line_sep>print('Without Transaction Costs: '+str(100<times>round(my_rs1.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep>print('With Transaction Costs: '+str(100<times>round(my_rs2.cumsum().apply(np.exp).tolist()[-1] 4))+'%')<line_sep>
<import_stmt>sys<import_stmt>platform<import_stmt>time<import_stmt>subprocess<import_stmt>threading<import_stmt>Queue<import_stmt>re<import_stmt>logging<import_stmt>yaml<import_from_stmt>alertaclient.api Client<line_sep>__version__='3.3.0'<line_sep>LOG=logging.getLogger('alerta.pinger')<line_sep>LOG.setLevel(logging.DEBUG)<line_sep>LOG.addHandler(logging.StreamHandler())<line_sep>PING_FILE='alert-pinger.targets'<line_sep>PING_MAX_TIMEOUT=15# seconds PING_MAX_RETRIES=2<line_sep>PING_SLOW_WARNING=200# ms PING_SLOW_CRITICAL=500# ms SERVER_THREAD_COUNT=20<line_sep>LOOP_EVERY=30<line_sep>_PING_ALERTS=['PingFailed' 'PingSlow' 'PingOK' 'PingError' ]<line_sep>PING_OK=0# all ping replies received within timeout PING_FAILED=1# some or all ping replies not received or did not respond within timeout PING_ERROR=2# unspecified error with ping # Initialise Rules <def_stmt>init_targets <block_start>targets=list()<line_sep>LOG.info('Loading Ping targets...')<try_stmt><block_start>targets=yaml.load(open(PING_FILE))<block_end><except_stmt>Exception<as>e<block_start>LOG.error('Failed to load Ping targets: %s' e)<block_end>LOG.info('Loaded %d Ping targets OK' len(targets))<line_sep><return>targets<block_end><class_stmt>WorkerThread(threading.Thread)<block_start><def_stmt>__init__ self api queue<block_start>threading.Thread.__init__(self)<line_sep>LOG.debug('Initialising %s...' self.getName())<line_sep>self.last_event={}<line_sep>self.queue=queue# internal queue self.api=api# message broker <block_end><def_stmt>run self<block_start><while_stmt><true><block_start>LOG.debug('Waiting on input queue...')<line_sep>item=self.queue.get()<if_stmt><not>item<block_start>LOG.info('%s is shutting down.' self.getName())<line_sep><break><block_end>environment,service,resource,retries,queue_time=item<if_stmt>time.time()-queue_time<g>LOOP_EVERY<block_start>LOG.warning('Ping request to %s expired after %d seconds.' resource int(time.time()-queue_time))<line_sep>self.queue.task_done()<line_sep><continue><block_end>LOG.info('%s pinging %s...' 
self.getName() resource)<if_stmt>retries<g>1<block_start>rc,rtt,loss,stdout=self.pinger(resource count=2 timeout=5)<block_end><else_stmt><block_start>rc,rtt,loss,stdout=self.pinger(resource count=5 timeout=PING_MAX_TIMEOUT)<block_end><if_stmt>rc<ne>PING_OK<and>retries<block_start>LOG.info('Retrying ping %s %s more times' resource retries)<line_sep>self.queue.put((environment service resource retries-1 time.time()))<line_sep>self.queue.task_done()<line_sep><continue><block_end><if_stmt>rc<eq>PING_OK<block_start>avg,max=rtt<if_stmt>avg<g>PING_SLOW_CRITICAL<block_start>event='PingSlow'<line_sep>severity='critical'<line_sep>text='Node responded to ping in %s ms avg (> %s ms)'%(avg PING_SLOW_CRITICAL)<block_end><elif_stmt>avg<g>PING_SLOW_WARNING<block_start>event='PingSlow'<line_sep>severity='warning'<line_sep>text='Node responded to ping in %s ms avg (> %s ms)'%(avg PING_SLOW_WARNING)<block_end><else_stmt><block_start>event='PingOK'<line_sep>severity='normal'<line_sep>text='Node responding to ping avg/max %s/%s ms.'%tuple(rtt)<block_end>value='%s/%s ms'%tuple(rtt)<block_end><elif_stmt>rc<eq>PING_FAILED<block_start>event='PingFailed'<line_sep>severity='major'<line_sep>text='Node did not respond to ping or timed out within %s seconds'%PING_MAX_TIMEOUT<line_sep>value='%s%% packet loss'%loss<block_end><elif_stmt>rc<eq>PING_ERROR<block_start>event='PingError'<line_sep>severity='warning'<line_sep>text='Could not ping node %s.'%resource<line_sep>value=stdout<block_end><else_stmt><block_start>LOG.warning('Unknown ping return code: %s' rc)<line_sep><continue><block_end># Defaults resource<augadd>':icmp'<line_sep>group='Ping'<line_sep>correlate=_PING_ALERTS<line_sep>raw_data=stdout<try_stmt><block_start>self.api.send_alert(resource=resource event=event correlate=correlate group=group value=value severity=severity environment=environment service=service text=text event_type='serviceAlert' raw_data=raw_data )<block_end><except_stmt>Exception<as>e<block_start>LOG.warning('Failed to send alert: %s' e)<block_end>self.queue.task_done()<line_sep>LOG.info('%s ping %s complete.' 
self.getName() resource)<block_end>self.queue.task_done()<block_end>@staticmethod<def_stmt>pinger node count=1 interval=1 timeout=5<block_start><if_stmt>timeout<le>count<times>interval<block_start>timeout=count<times>interval+1<block_end><if_stmt>timeout<g>PING_MAX_TIMEOUT<block_start>timeout=PING_MAX_TIMEOUT<block_end><if_stmt>sys.platform<eq>"darwin"<block_start>cmd="ping -q -c %s -i %s -t %s %s"%(count interval timeout node)<block_end><else_stmt><block_start>cmd="ping -q -c %s -i %s -w %s %s"%(count interval timeout node)<block_end>ping=subprocess.Popen(cmd.split() stdout=subprocess.PIPE stderr=subprocess.STDOUT)<line_sep>stdout=ping.communicate()[0].rstrip('\n')<line_sep>rc=ping.returncode<line_sep>LOG.debug('Ping %s => %s (rc=%d)' cmd stdout rc)<line_sep>m=re.search('(?P<loss>\d+(\.\d+)?)% packet loss' stdout)<if_stmt>m<block_start>loss=m.group('loss')<block_end><else_stmt><block_start>loss='n/a'<block_end>m=re.search('(?P<min>\d+\.\d+)/(?P<avg>\d+\.\d+)/(?P<max>\d+\.\d+)/(?P<mdev>\d+\.\d+)\s+ms' stdout)<if_stmt>m<block_start>rtt=(float(m.group('avg')) float(m.group('max')))<block_end><else_stmt><block_start>rtt=(0 0)<block_end><if_stmt>rc<eq>0<block_start>LOG.info('%s: is alive %s' node rtt)<block_end><else_stmt><block_start>LOG.info('%s: not responding' node)<block_end><return>rc rtt loss stdout<block_end><block_end><class_stmt>PingerDaemon(object)<block_start><def_stmt>__init__ self<block_start>self.shuttingdown=<false><block_end><def_stmt>run self<block_start>self.running=<true><line_sep># Create internal queue self.queue=Queue.Queue()<line_sep>self.api=Client()<line_sep># Initialiase ping targets ping_list=init_targets()<line_sep># Start worker threads LOG.debug('Starting %s worker threads...' SERVER_THREAD_COUNT)<for_stmt>i range(SERVER_THREAD_COUNT)<block_start>w=WorkerThread(self.api self.queue)<try_stmt><block_start>w.start()<block_end><except_stmt>Exception<as>e<block_start>LOG.error('Worker thread #%s did not start: %s' i e)<line_sep><continue><block_end>LOG.info('Started worker thread: %s' w.getName())<block_end><while_stmt><not>self.shuttingdown<block_start><try_stmt><block_start><for_stmt>p ping_list<block_start><if_stmt>'targets'<in>p<and>p['targets']<block_start><for_stmt>target p['targets']<block_start>environment=p['environment']<line_sep>service=p['service']<line_sep>retries=p.get('retries' PING_MAX_RETRIES)<line_sep>self.queue.put((environment service target retries time.time()))<block_end><block_end><block_end>LOG.debug('Send heartbeat...')<try_stmt><block_start>origin='{}/{}'.format('pinger' platform.uname()[1])<line_sep>self.api.heartbeat(origin tags=[__version__])<block_end><except_stmt>Exception<as>e<block_start>LOG.warning('Failed to send heartbeat: %s' e)<block_end>time.sleep(LOOP_EVERY)<line_sep>LOG.info('Ping queue length is %d' self.queue.qsize())<block_end><except_stmt>(KeyboardInterrupt SystemExit)<block_start>self.shuttingdown=<true><block_end><block_end>LOG.info('Shutdown request received...')<line_sep>self.running=<false><for_stmt>i range(SERVER_THREAD_COUNT)<block_start>self.queue.put(<none>)<block_end>w.join()<block_end><block_end><def_stmt>main <block_start>pinger=PingerDaemon()<line_sep>pinger.run()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>tensorwatch<as>tw<import_stmt>time<line_sep>w=tw.Watcher(filename='test.log')<line_sep>s=w.create_stream(name='my_metric')<line_sep>#w.make_notebook() <for_stmt>i range(1000)<block_start>s.write((i i<times>i))<line_sep>time.sleep(1)<block_end>
# Copyright 2019 The DMLab2D Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for dmlab2d.dmlab2d."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>absl.testing absltest<import_from_stmt>dm_env test_utils<import_stmt>numpy<as>np<import_stmt>dmlab2d<import_from_stmt>dmlab2d runfiles_helper<class_stmt>Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin absltest.TestCase)<block_start><def_stmt>make_object_under_test self<block_start>lab2d=dmlab2d.Lab2d(runfiles_helper.find() {'levelName':'examples/level_api'})<line_sep><return>dmlab2d.Environment(lab2d lab2d.observation_names() 0)<block_end><block_end><class_stmt>Dmlab2DTest(absltest.TestCase)<block_start><def_stmt>_create_env self extra_settings=<none><block_start>settings=extra_settings.copy()<if>extra_settings<else>{}<line_sep>settings['levelName']='examples/level_api'<line_sep><return>dmlab2d.Lab2d(runfiles_helper.find() settings)<block_end><def_stmt>test_lab2d_environment_name self<block_start>self.assertEqual(self._create_env().name() 'dmlab2d')<block_end><def_stmt>test_lab2d_observation_names self<block_start>env=self._create_env()<line_sep>self.assertEqual(env.observation_names() ['VIEW'+str(i)<for>i range(1 6)])<block_end><def_stmt>test_lab2d_observation_spec self<block_start>env=self._create_env()<line_sep>self.assertEqual(env.observation_spec('VIEW1') {'dtype':np.dtype('uint8') 'shape':(1 )})<line_sep>self.assertEqual(env.observation_spec('VIEW2') {'dtype':np.dtype('double') 'shape':(2 )})<line_sep>self.assertEqual(env.observation_spec('VIEW3') {'dtype':np.dtype('int32') 'shape':(3 )})<line_sep>self.assertEqual(env.observation_spec('VIEW4') {'dtype':np.dtype('int64') 'shape':(4 )})<line_sep># Text is stored in objects. 
self.assertEqual(env.observation_spec('VIEW5') {'dtype':np.dtype('O') 'shape':()})<block_end><def_stmt>test_lab2d_action_spec self<block_start>env=self._create_env()<line_sep>self.assertEqual(env.action_discrete_names() ['REWARD_ACT'])<line_sep>self.assertEqual(env.action_discrete_spec('REWARD_ACT') {'min':0 'max':4})<line_sep>self.assertEqual(env.action_continuous_names() ['OBSERVATION_ACT'])<line_sep>self.assertEqual(env.action_continuous_spec('OBSERVATION_ACT') {'min':-5 'max':5})<line_sep>self.assertEqual(env.action_text_names() ['LOG_EVENT'])<block_end><def_stmt>test_lab2d_start_environment self<block_start>env=self._create_env()<line_sep>env.start(episode=0 seed=0)<block_end><def_stmt>test_lab2d_events_start self<block_start>env=self._create_env()<line_sep>env.start(episode=0 seed=0)<line_sep>events=env.events()<line_sep>self.assertLen(events 1)<line_sep>event_name,observations=events[0]<line_sep>self.assertEqual(event_name 'start')<line_sep>self.assertLen(observations 1)<line_sep>np.testing.assert_array_equal(observations[0] [1 2 3])<block_end><def_stmt>test_lab2d_events_cleared_after_advance_not_read self<block_start>env=self._create_env()<line_sep>env.start(episode=0 seed=0)<line_sep>self.assertLen(env.events() 1)<line_sep>self.assertLen(env.events() 1)<line_sep>env.advance()<line_sep>self.assertEmpty(env.events())<block_end><def_stmt>test_lab2d_observe self<block_start>env=self._create_env()<line_sep>env.start(episode=0 seed=0)<line_sep>np.testing.assert_array_equal(env.observation('VIEW1') [1])<line_sep>np.testing.assert_array_equal(env.observation('VIEW2') [1 2])<line_sep>np.testing.assert_array_equal(env.observation('VIEW3') [1 2 3])<line_sep>np.testing.assert_array_equal(env.observation('VIEW4') [1 2 3 4])<line_sep>self.assertEqual(env.observation('VIEW5') b'')<block_end><def_stmt>test_lab2d_ten_steps_terminate_environment self<block_start>env=self._create_env()<line_sep>env.start(episode=0 seed=0)<for_stmt>_ range(9)<block_start>self.assertEqual(env.advance()[0] dmlab2d.RUNNING)<block_end>self.assertEqual(env.advance()[0] dmlab2d.TERMINATED)<block_end><def_stmt>test_lab2d_settings_environment self<block_start>env=self._create_env({'steps':'5'})<line_sep>env.start(episode=0 seed=0)<for_stmt>_ range(4)<block_start>self.assertEqual(env.advance()[0] dmlab2d.RUNNING)<block_end>self.assertEqual(env.advance()[0] dmlab2d.TERMINATED)<block_end><def_stmt>test_lab2d_properties_environment self<block_start>env=self._create_env({'steps':'5'})<line_sep>properties=env.list_property('')<line_sep>self.assertLen(properties 1)<line_sep>self.assertEqual(properties[0] ('steps' dmlab2d.PropertyAttribute.READABLE_WRITABLE))<line_sep>self.assertEqual(env.read_property('steps') '5')<line_sep>env.write_property('steps' '3')<line_sep>self.assertEqual(env.read_property('steps') '3')<line_sep>env.start(episode=0 seed=0)<for_stmt>_ range(2)<block_start>self.assertEqual(env.advance()[0] dmlab2d.RUNNING)<block_end>self.assertEqual(env.advance()[0] dmlab2d.TERMINATED)<block_end><def_stmt>test_lab2d_act_discrete self<block_start>env=self._create_env({'steps':'5'})<line_sep>env.start(episode=0 seed=0)<line_sep>env.act_discrete(np.array([2] np.dtype('int32')))<line_sep>_,reward=env.advance()<line_sep>self.assertEqual(reward 2)<block_end><def_stmt>test_lab2d_act_continuous self<block_start>env=self._create_env({'steps':'5'})<line_sep>env.start(episode=0 seed=0)<line_sep>np.testing.assert_array_equal(env.observation('VIEW3') [1 2 
3])<line_sep>env.act_continuous([10])<line_sep>env.advance()<line_sep>np.testing.assert_array_equal(env.observation('VIEW3') [11 12 13])<block_end><def_stmt>test_lab2d_act_text self<block_start>env=self._create_env({'steps':'5'})<line_sep>env.start(episode=0 seed=0)<line_sep>view=env.observation('VIEW5')<line_sep>self.assertEqual(view b'')<line_sep>env.act_text(['Hello'])<line_sep>env.advance()<line_sep>view=env.observation('VIEW5')<line_sep>self.assertEqual(view b'Hello')<block_end><def_stmt>test_lab2d_invalid_setting self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>self._create_env({'missing':'5'})<block_end><block_end><def_stmt>test_lab2d_bad_action_spec_name self<block_start>env=self._create_env()<with_stmt>self.assertRaises(KeyError)<block_start>env.action_discrete_spec('bad_key')<block_end><with_stmt>self.assertRaises(KeyError)<block_start>env.action_continuous_spec('bad_key')<block_end><block_end><def_stmt>test_lab2d_bad_observation_spec_name self<block_start>env=self._create_env()<with_stmt>self.assertRaises(KeyError)<block_start>env.observation_spec('bad_key')<block_end><block_end><def_stmt>test_lab2d_observe_before_start self<block_start>env=self._create_env()<with_stmt>self.assertRaises(RuntimeError)<block_start>env.observation('VIEW1')<block_end><block_end><def_stmt>test_lab2d_act_before_start self<block_start>env=self._create_env()<with_stmt>self.assertRaises(RuntimeError)<block_start>env.act_discrete([0])<block_end><with_stmt>self.assertRaises(RuntimeError)<block_start>env.act_continuous([0])<block_end><with_stmt>self.assertRaises(RuntimeError)<block_start>env.act_text([''])<block_end><block_end><def_stmt>test_lab2d_act_bad_shape self<block_start>env=self._create_env()<line_sep>env.start(0 0)<with_stmt>self.assertRaises(ValueError)<block_start>env.act_discrete([0 1])<block_end><with_stmt>self.assertRaises(ValueError)<block_start>env.act_continuous([0 1])<block_end><block_end><def_stmt>test_lab2d_advance_after_episode_ends self<block_start>env=self._create_env({'steps':'2'})<line_sep>env.start(0 0)<line_sep>self.assertEqual(env.advance()[0] dmlab2d.RUNNING)<line_sep>self.assertEqual(env.advance()[0] dmlab2d.TERMINATED)<with_stmt>self.assertRaises(RuntimeError)<block_start>env.advance()<block_end><block_end><def_stmt>test_lab2d_missing_properties self<block_start>env=self._create_env({'steps':'5'})<with_stmt>self.assertRaises(KeyError)<block_start>env.list_property('missing')<block_end><with_stmt>self.assertRaises(KeyError)<block_start>env.read_property('missing')<block_end><with_stmt>self.assertRaises(KeyError)<block_start>env.write_property('missing' '10')<block_end><block_end><def_stmt>test_lab2d_invalid_ops_properties self<block_start>env=self._create_env({'steps':'5'})<with_stmt>self.assertRaises(ValueError)<block_start>env.list_property('steps')<block_end><with_stmt>self.assertRaises(ValueError)<block_start>env.write_property('steps' 'mouse')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_from_stmt>mozi.layers.template Template<import_from_stmt>mozi.utils.theano_utils shared_zeros sharedX shared_ones<import_from_stmt>mozi.weight_init UniformWeight<import_stmt>theano.tensor<as>T<import_stmt>theano<class_stmt>BatchNormalization(Template)<block_start><def_stmt>__init__ self dim layer_type gamma_init=UniformWeight() short_memory=0.01<block_start>''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift PARAMS: short_memory: short term memory y_t is the latest value, the moving average x_tp1 is calculated as x_tp1 = memory * y_t + (1-memory) * x_t, the larger the short term memory, the more weight is put on contempory. layer_type: fc or conv epsilon: denominator min value for preventing division by zero in computing std dim: for fc layers, shape is the layer dimension, for conv layers, shape is the number of feature maps '''<assert_stmt>layer_type<in>['fc' 'conv']<line_sep>self.layer_type=layer_type<line_sep>self.epsilon=1e-6<line_sep>self.dim=dim<line_sep>self.mem=short_memory<if_stmt>self.layer_type<eq>'fc'<block_start>input_shape=(1 dim)<line_sep>self.broadcastable=(<true> <false>)<block_end><elif_stmt>self.layer_type<eq>'conv'<block_start>input_shape=(1 dim 1 1)<line_sep>self.broadcastable=(<true> <false> <true> <true>)<block_end>self.gamma=gamma_init(input_shape name='gamma')<line_sep>self.beta=shared_zeros(input_shape name='beta')<line_sep>self.params=[self.gamma self.beta]<line_sep>self.moving_mean=0<line_sep>self.moving_var=1<block_end><def_stmt>_train_fprop self state_below<block_start><if_stmt>self.layer_type<eq>'fc'<block_start>miu=state_below.mean(axis=0)<line_sep>var=T.mean((state_below-miu)<power>2 axis=0)<block_end><elif_stmt>self.layer_type<eq>'conv'<block_start>miu=state_below.mean(axis=(0 2 3) keepdims=<true>)<line_sep>var=T.mean((state_below-miu)<power>2 axis=(0 2 3) keepdims=<true>)<block_end>self.moving_mean=self.mem<times>miu+(1-self.mem)<times>self.moving_mean<line_sep>self.moving_var=self.mem<times>var+(1-self.mem)<times>self.moving_var<line_sep>Z=(state_below-self.moving_mean)/T.sqrt(self.moving_var+self.epsilon)<line_sep>gamma=T.patternbroadcast(self.gamma self.broadcastable)<line_sep>beta=T.patternbroadcast(self.beta self.broadcastable)<line_sep><return>gamma<times>Z+beta<block_end><def_stmt>_test_fprop self state_below<block_start>Z=(state_below-self.moving_mean)/T.sqrt(self.moving_var+self.epsilon)<line_sep>gamma=T.patternbroadcast(self.gamma self.broadcastable)<line_sep>beta=T.patternbroadcast(self.beta self.broadcastable)<line_sep><return>gamma<times>Z+beta<block_end><def_stmt>_layer_stats self state_below layer_output<block_start><return>[('moving_mean' T.mean(self.moving_mean)) ('moving_std' T.mean(self.moving_var)) ('gamma_mean' T.mean(self.gamma)) ('beta_mean' T.mean(self.beta)) ('gamma_max' T.max(self.gamma))]<block_end><block_end># class LRN(Template): # """ # Adapted from pylearn2 # Local Response Normalization # """ # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n # self.alpha = alpha # self.beta = beta # self.k = k # assert self.n % 2 == 1, 'only odd n is supported' # # def _train_fprop(self, state_below): # half = self.n / 2 # sq = T.sqr(state_below) # b, ch, r, c = state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # # scale = scale ** self.beta # return 
state_below / scale # # def _test_fprop(self, state_below): # return self._train_fprop(state_below)
<import_stmt>clr<line_sep>clr.AddReference('RevitAPI')<import_from_stmt>Autodesk.Revit.DB *<line_sep>mats=UnwrapElement(IN[0])<line_sep>colorlist=list()<line_sep>glowlist=list()<line_sep>classlist=list()<line_sep>shinylist=list()<line_sep>smoothlist=list()<line_sep>translist=list()<for_stmt>mat mats<block_start>colorlist.append(mat.Color)<if_stmt>mat.Glow<block_start>glowlist.append(<true>)<block_end><else_stmt><block_start>glowlist.append(<false>)<block_end>classlist.append(mat.MaterialClass)<line_sep>shinylist.append(mat.Shininess)<line_sep>smoothlist.append(mat.Smoothness)<line_sep>translist.append(mat.Transparency)<block_end>OUT=(classlist colorlist glowlist shinylist smoothlist translist)<line_sep>
# # Copyright (C) 2013 - 2021 <NAME> <<EMAIL>> # License: MIT # # pylint: disable=missing-docstring """test cases for anyconfig.cli module. """<import_stmt>contextlib<import_stmt>io<import_stmt>pathlib<import_stmt>sys<import_stmt>tempfile<import_stmt>unittest<import_stmt>anyconfig.api<import_stmt>anyconfig.cli<as>TT<import_from_stmt>.. base<import_from_stmt>. collectors datatypes<def_stmt>make_args _self tdata<block_start>"""Make arguments to run cli.main. """<line_sep><return>['anyconfig_cli']+tdata.opts+[str(tdata.inp_path)]<block_end><class_stmt>BaseTestCase(unittest.TestCase)<block_start>"""Base Test case. """<line_sep>collector=collectors.Collector()<line_sep>make_args=make_args<def_stmt>setUp self<block_start><if_stmt>self.collector<block_start>self.collector.init()<block_end><block_end><def_stmt>post_checks self tdata *args **kwargs<block_start>"""Placeholder to do more post checks. """<line_sep><pass><block_end><def_stmt>_run_main self tdata<block_start>"""Wrapper for cli.main."""<line_sep>args=self.make_args(tdata)<if_stmt>tdata.outname# Running cli.main will output files. <block_start>self.assertTrue(tdata.ref<is><not><none> f'No reference data was given, {tdata!r}')<with_stmt>tempfile.TemporaryDirectory()<as>tdir<block_start>opath=pathlib.Path(tdir)/tdata.outname<line_sep># Run anyconfig.cli.main with arguments. TT.main(args+['-o' str(opath)])<if_stmt>tdata.exp.exit_code_matches<and>tdata.exp.exit_code<eq>0<block_start>self.assertTrue(opath.exists() str(opath))<try_stmt><block_start>odata=anyconfig.api.load(opath **tdata.oo_opts)<block_end><except_stmt>anyconfig.api.UnknownFileTypeError<block_start>odata=anyconfig.api.load(opath ac_parser='json')<block_end>self.assertEqual(odata tdata.ref repr(tdata))<line_sep>self.post_checks(tdata opath)<block_end><block_end><block_end><else_stmt># Likewise but without -o <output_path> option. <block_start>TT.main(args)<line_sep>self.post_checks(tdata)<block_end>sys.exit(0)<block_end><def_stmt>run_main self tdata<arrow><none><block_start>""" Run anyconfig.cli.main and check if the exit code was the expected one. """<line_sep>expected:datatypes.Expected=tdata.exp<with_stmt>self.assertRaises(expected.exception msg=repr(tdata))<as>ctx<block_start><with_stmt>contextlib.redirect_stdout(io.StringIO())<as>stdout<block_start><with_stmt>contextlib.redirect_stderr(io.StringIO())<as>stderr<block_start>self._run_main(tdata)<block_end><block_end><block_end>exc=ctx.exception<line_sep>self.assertTrue(isinstance(exc expected.exception))<line_sep>ecode=getattr(exc 'error_code' getattr(exc 'code' 1))<if_stmt>expected.exit_code_matches<block_start>self.assertEqual(ecode expected.exit_code f'{tdata!r}')<block_end><else_stmt><block_start>self.assertNotEqual(ecode expected.exit_code f'{tdata!r}')<block_end><if_stmt>expected.words_in_stdout<block_start>msg=stdout.getvalue()<line_sep>self.assertTrue(expected.words_in_stdout<in>msg msg)<block_end><if_stmt>expected.words_in_stderr<block_start>err=stderr.getvalue()<line_sep>self.assertTrue(expected.words_in_stderr<in>err err)<block_end><block_end><def_stmt>test_runs_for_datasets self<arrow><none><block_start><if_stmt>self.collector<and>self.collector.initialized<block_start><if_stmt>self.collector.kind<eq>base.TDataCollector.kind<block_start><return><block_end><for_stmt>tdata self.collector.each_data()<block_start>self.run_main(tdata)<block_end><block_end><block_end><block_end><class_stmt>NoInputTestCase(BaseTestCase)<block_start>"""Test cases which do not require inputs. 
"""<def_stmt>make_args self tdata# pylint: disable=no-self-use <block_start>"""Make arguments to run cli.main. """<line_sep><return>['anyconfig_cli']+tdata.opts<block_end><block_end># vim:sw=4:ts=4:et:
<import_from_stmt>abc abstractmethod<import_from_stmt>typing Any Dict Iterable List Optional Type TypeVar<import_from_stmt>pyjackson dumps loads<import_from_stmt>sqlalchemy Column DateTime ForeignKey Integer String Text UniqueConstraint<import_from_stmt>sqlalchemy.ext.declarative declarative_base<import_from_stmt>sqlalchemy.orm relationship<import_from_stmt>ebonite.core.objects DatasetType<import_from_stmt>ebonite.core.objects.artifacts ArtifactCollection<import_from_stmt>ebonite.core.objects.core Buildable EvaluationResults EvaluationSet Image Model Pipeline PipelineStep Project RuntimeEnvironment RuntimeInstance Task <import_from_stmt>ebonite.core.objects.dataset_source DatasetSource<import_from_stmt>ebonite.core.objects.metric Metric<import_from_stmt>ebonite.core.objects.requirements Requirements<line_sep>SQL_OBJECT_FIELD='_sqlalchemy_object'<def_stmt>json_column <block_start><return>Column(Text)<block_end><def_stmt>safe_loads payload as_class<block_start><return>loads(payload Optional[as_class])<block_end><def_stmt>sqlobject obj<block_start><return>getattr(obj SQL_OBJECT_FIELD <none>)<block_end><def_stmt>update_attrs obj **attrs<block_start><for_stmt>name,value attrs.items()<block_start>setattr(obj name value)<block_end><block_end>T=TypeVar('T')<line_sep>S=TypeVar('S' bound='Attaching')<class_stmt>Attaching<block_start>id=<ellipsis><line_sep>name=<ellipsis><def_stmt>attach self obj<block_start>setattr(obj SQL_OBJECT_FIELD self)<line_sep><return>obj<block_end>@classmethod<def_stmt>from_obj cls:Type[S] obj:T new=<false><arrow>S<block_start>kwargs=cls.get_kwargs(obj)<line_sep>existing=sqlobject(obj)<if_stmt><not>new<and>existing<is><not><none><block_start>update_attrs(existing **kwargs)<line_sep><return>existing<block_end><return>cls(**kwargs)<block_end>@classmethod@abstractmethod<def_stmt>get_kwargs cls obj:T<arrow>dict<block_start><pass><block_end># pragma: no cover @abstractmethod<def_stmt>to_obj self<arrow>T<block_start><pass><block_end><block_end># pragma: no cover Base=declarative_base()<class_stmt>SProject(Base Attaching)<block_start>__tablename__='projects'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<true> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>tasks:Iterable['STask']=relationship("STask" back_populates="project")<def_stmt>to_obj self<arrow>Project<block_start>p=Project(self.name id=self.id author=self.author creation_date=self.creation_date)<for_stmt>task self.tasks<block_start>p._tasks.add(task.to_obj())<block_end><return>self.attach(p)<block_end>@classmethod<def_stmt>get_kwargs cls project:Project<arrow>dict<block_start><return>dict(id=project.id name=project.name author=project.author creation_date=project.creation_date tasks=[STask.from_obj(t)<for>t project.tasks.values()])<block_end><block_end><class_stmt>STask(Base Attaching)<block_start>__tablename__='tasks'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<false> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>project_id=Column(Integer ForeignKey('projects.id') nullable=<false>)<line_sep>project=relationship("SProject" back_populates="tasks")<line_sep>models:Iterable['SModel']=relationship("SModel" 
back_populates="task")<line_sep>pipelines:Iterable['SPipeline']=relationship("SPipeline" back_populates='task')<line_sep>images:Iterable['SImage']=relationship("SImage" back_populates='task')<line_sep>datasets=Column(Text)<line_sep>metrics=Column(Text)<line_sep>evaluation_sets=Column(Text)<line_sep>__table_args__=(UniqueConstraint('name' 'project_id' name='tasks_name_and_ref') )<def_stmt>to_obj self<arrow>Task<block_start>task=Task(id=self.id name=self.name author=self.author creation_date=self.creation_date project_id=self.project_id datasets=safe_loads(self.datasets Dict[str DatasetSource]) metrics=safe_loads(self.metrics Dict[str Metric]) evaluation_sets=safe_loads(self.evaluation_sets Dict[str EvaluationSet]))<for_stmt>model self.models<block_start>task._models.add(model.to_obj())<block_end><for_stmt>pipeline self.pipelines<block_start>task._pipelines.add(pipeline.to_obj())<block_end><for_stmt>image self.images<block_start>task._images.add(image.to_obj())<block_end><return>self.attach(task)<block_end>@classmethod<def_stmt>get_kwargs cls task:Task<arrow>dict<block_start><return>dict(id=task.id name=task.name author=task.author creation_date=task.creation_date project_id=task.project_id models=[SModel.from_obj(m)<for>m task.models.values()] images=[SImage.from_obj(i)<for>i task.images.values()] pipelines=[SPipeline.from_obj(p)<for>p task.pipelines.values()] datasets=dumps(task.datasets) metrics=dumps(task.metrics) evaluation_sets=dumps(task.evaluation_sets))<block_end><block_end><class_stmt>SModel(Base Attaching)<block_start>__tablename__='models'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<false> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>wrapper=Column(Text)<line_sep>artifact=Column(Text)<line_sep>requirements=Column(Text)<line_sep>description=Column(Text)<line_sep>params=Column(Text)<line_sep>task_id=Column(Integer ForeignKey('tasks.id') nullable=<false>)<line_sep>task=relationship("STask" back_populates="models")<line_sep>evaluations=Column(Text)<line_sep>__table_args__=(UniqueConstraint('name' 'task_id' name='models_name_and_ref') )<def_stmt>to_obj self<arrow>Model<block_start>model=Model(name=self.name wrapper_meta=safe_loads(self.wrapper dict) author=self.author creation_date=self.creation_date artifact=safe_loads(self.artifact ArtifactCollection) requirements=safe_loads(self.requirements Requirements) description=self.description params=safe_loads(self.params Dict[str Any]) id=self.id task_id=self.task_id evaluations=safe_loads(self.evaluations Dict[str EvaluationResults]))<line_sep><return>self.attach(model)<block_end>@classmethod<def_stmt>get_kwargs cls model:Model<arrow>dict<block_start><return>dict(id=model.id name=model.name author=model.author creation_date=model.creation_date wrapper=dumps(model.wrapper_meta) artifact=dumps(model.artifact) requirements=dumps(model.requirements) description=model.description params=dumps(model.params) task_id=model.task_id evaluations=dumps(model.evaluations))<block_end><block_end><class_stmt>SPipeline(Base Attaching)<block_start>__tablename__='pipelines'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<false> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> 
nullable=<false>)<line_sep>steps=Column(Text)<line_sep>input_data=Column(Text)<line_sep>output_data=Column(Text)<line_sep>task_id=Column(Integer ForeignKey('tasks.id') nullable=<false>)<line_sep>task=relationship("STask" back_populates="pipelines")<line_sep>evaluations=Column(Text)<line_sep>__table_args__=(UniqueConstraint('name' 'task_id' name='pipelines_name_and_ref') )<def_stmt>to_obj self<arrow>Pipeline<block_start>pipeline=Pipeline(name=self.name steps=safe_loads(self.steps List[PipelineStep]) input_data=safe_loads(self.input_data DatasetType) output_data=safe_loads(self.output_data DatasetType) author=self.author creation_date=self.creation_date id=self.id task_id=self.task_id evaluations=safe_loads(self.evaluations EvaluationResults))<line_sep><return>self.attach(pipeline)<block_end>@classmethod<def_stmt>get_kwargs cls pipeline:Pipeline<arrow>dict<block_start><return>dict(id=pipeline.id name=pipeline.name author=pipeline.author creation_date=pipeline.creation_date steps=dumps(pipeline.steps) input_data=dumps(pipeline.input_data) output_data=dumps(pipeline.output_data) task_id=pipeline.task_id evaluations=dumps(pipeline.evaluations))<block_end><block_end><class_stmt>SImage(Base Attaching)<block_start>__tablename__='images'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<false> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>task_id=Column(Integer ForeignKey('tasks.id') nullable=<false>)<line_sep>task=relationship("STask" back_populates="images")<line_sep>environment_id=Column(Integer ForeignKey('environments.id') nullable=<false>)<line_sep>params=Column(Text)<line_sep>source=Column(Text)<line_sep>__table_args__=(UniqueConstraint('name' 'task_id' name='image_name_and_ref') )<def_stmt>to_obj self<arrow>Image<block_start>image=Image(name=self.name author=self.author creation_date=self.creation_date id=self.id task_id=self.task_id params=safe_loads(self.params Image.Params) source=safe_loads(self.source Buildable) environment_id=self.environment_id)<line_sep><return>self.attach(image)<block_end>@classmethod<def_stmt>get_kwargs cls image:Image<arrow>dict<block_start><return>dict(id=image.id name=image.name author=image.author creation_date=image.creation_date task_id=image.task_id params=dumps(image.params) source=dumps(image.source) environment_id=image.environment_id)<block_end><block_end><class_stmt>SRuntimeEnvironment(Base Attaching)<block_start>__tablename__='environments'<line_sep>id=Column(Integer primary_key=<true> autoincrement=<true>)<line_sep>name=Column(String unique=<true> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>params=Column(Text)<def_stmt>to_obj self<arrow>RuntimeEnvironment<block_start>environment=RuntimeEnvironment(name=self.name author=self.author creation_date=self.creation_date id=self.id params=safe_loads(self.params RuntimeEnvironment.Params))<line_sep><return>self.attach(environment)<block_end>@classmethod<def_stmt>get_kwargs cls environment:RuntimeEnvironment<arrow>dict<block_start><return>dict(id=environment.id name=environment.name author=environment.author creation_date=environment.creation_date params=dumps(environment.params))<block_end><block_end><class_stmt>SRuntimeInstance(Base Attaching)<block_start>__tablename__='instances'<line_sep>id=Column(Integer primary_key=<true> 
autoincrement=<true>)<line_sep>name=Column(String unique=<false> nullable=<false>)<line_sep>author=Column(String unique=<false> nullable=<false>)<line_sep>creation_date=Column(DateTime unique=<false> nullable=<false>)<line_sep>image_id=Column(Integer ForeignKey('images.id') nullable=<false>)<line_sep>environment_id=Column(Integer ForeignKey('environments.id') nullable=<false>)<line_sep>params=Column(Text)<line_sep>__table_args__=(UniqueConstraint('name' 'image_id' 'environment_id' name='instance_name_and_ref') )<def_stmt>to_obj self<arrow>RuntimeInstance<block_start>instance=RuntimeInstance(name=self.name author=self.author creation_date=self.creation_date id=self.id image_id=self.image_id environment_id=self.environment_id params=safe_loads(self.params RuntimeInstance.Params))<line_sep><return>self.attach(instance)<block_end>@classmethod<def_stmt>get_kwargs cls instance:RuntimeInstance<arrow>dict<block_start><return>dict(id=instance.id name=instance.name author=instance.author creation_date=instance.creation_date image_id=instance.image_id environment_id=instance.environment_id params=dumps(instance.params))<block_end><block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>enum Enum EnumMeta<import_from_stmt>six with_metaclass<class_stmt>_CaseInsensitiveEnumMeta(EnumMeta)<block_start><def_stmt>__getitem__ self name<block_start><return>super().__getitem__(name.upper())<block_end><def_stmt>__getattr__ cls name<block_start>"""Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """<try_stmt><block_start><return>cls._member_map_[name.upper()]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><block_end><class_stmt>AllocationState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. There are no changes to the number of compute nodes in the compute in progress. A compute enters this state when it is created and when no operations are being performed on the compute to change the number of compute nodes. resizing - Indicates that the compute is resizing; that is, compute nodes are being added to or removed from the compute. """<line_sep>STEADY="Steady"<line_sep>RESIZING="Resizing"<block_end><class_stmt>ApplicationSharingPolicy(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role. """<line_sep>PERSONAL="Personal"<line_sep>SHARED="Shared"<block_end><class_stmt>BillingCurrency(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Three lettered code specifying the currency of the VM price. Example: USD """<line_sep>USD="USD"<block_end><class_stmt>ComputeInstanceState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Current state of a ComputeInstance. 
"""<line_sep>CREATING="Creating"<line_sep>CREATE_FAILED="CreateFailed"<line_sep>DELETING="Deleting"<line_sep>RUNNING="Running"<line_sep>RESTARTING="Restarting"<line_sep>JOB_RUNNING="JobRunning"<line_sep>SETTING_UP="SettingUp"<line_sep>SETUP_FAILED="SetupFailed"<line_sep>STARTING="Starting"<line_sep>STOPPED="Stopped"<line_sep>STOPPING="Stopping"<line_sep>USER_SETTING_UP="UserSettingUp"<line_sep>USER_SETUP_FAILED="UserSetupFailed"<line_sep>UNKNOWN="Unknown"<line_sep>UNUSABLE="Unusable"<block_end><class_stmt>ComputeType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The type of compute """<line_sep>AKS="AKS"<line_sep>AML_COMPUTE="AmlCompute"<line_sep>COMPUTE_INSTANCE="ComputeInstance"<line_sep>DATA_FACTORY="DataFactory"<line_sep>VIRTUAL_MACHINE="VirtualMachine"<line_sep>HD_INSIGHT="HDInsight"<line_sep>DATABRICKS="Databricks"<line_sep>DATA_LAKE_ANALYTICS="DataLakeAnalytics"<block_end><class_stmt>EncryptionStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Indicates whether or not the encryption is enabled for the workspace. """<line_sep>ENABLED="Enabled"<line_sep>DISABLED="Disabled"<block_end><class_stmt>NodeState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted. """<line_sep>IDLE="idle"<line_sep>RUNNING="running"<line_sep>PREPARING="preparing"<line_sep>UNUSABLE="unusable"<line_sep>LEAVING="leaving"<line_sep>PREEMPTED="preempted"<block_end><class_stmt>OperationName(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Name of the last operation. """<line_sep>CREATE="Create"<line_sep>START="Start"<line_sep>STOP="Stop"<line_sep>RESTART="Restart"<line_sep>REIMAGE="Reimage"<line_sep>DELETE="Delete"<block_end><class_stmt>OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Operation status. """<line_sep>IN_PROGRESS="InProgress"<line_sep>SUCCEEDED="Succeeded"<line_sep>CREATE_FAILED="CreateFailed"<line_sep>START_FAILED="StartFailed"<line_sep>STOP_FAILED="StopFailed"<line_sep>RESTART_FAILED="RestartFailed"<line_sep>REIMAGE_FAILED="ReimageFailed"<line_sep>DELETE_FAILED="DeleteFailed"<block_end><class_stmt>PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The current provisioning state. """<line_sep>SUCCEEDED="Succeeded"<line_sep>CREATING="Creating"<line_sep>DELETING="Deleting"<line_sep>FAILED="Failed"<block_end><class_stmt>PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The private endpoint connection status. """<line_sep>PENDING="Pending"<line_sep>APPROVED="Approved"<line_sep>REJECTED="Rejected"<line_sep>DISCONNECTED="Disconnected"<line_sep>TIMEOUT="Timeout"<block_end><class_stmt>ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning. """<line_sep>UNKNOWN="Unknown"<line_sep>UPDATING="Updating"<line_sep>CREATING="Creating"<line_sep>DELETING="Deleting"<line_sep>SUCCEEDED="Succeeded"<line_sep>FAILED="Failed"<line_sep>CANCELED="Canceled"<block_end><class_stmt>QuotaUnit(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""An enum describing the unit of quota measurement. """<line_sep>COUNT="Count"<block_end><class_stmt>ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The reason for the restriction. 
"""<line_sep>NOT_SPECIFIED="NotSpecified"<line_sep>NOT_AVAILABLE_FOR_REGION="NotAvailableForRegion"<line_sep>NOT_AVAILABLE_FOR_SUBSCRIPTION="NotAvailableForSubscription"<block_end><class_stmt>RemoteLoginPortPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled. """<line_sep>ENABLED="Enabled"<line_sep>DISABLED="Disabled"<line_sep>NOT_SPECIFIED="NotSpecified"<block_end><class_stmt>ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The identity type. """<line_sep>SYSTEM_ASSIGNED="SystemAssigned"<line_sep>USER_ASSIGNED="UserAssigned"<line_sep>SYSTEM_ASSIGNED_USER_ASSIGNED="SystemAssigned,UserAssigned"<line_sep>NONE="None"<block_end><class_stmt>SshPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable. """<line_sep>ENABLED="Enabled"<line_sep>DISABLED="Disabled"<block_end><class_stmt>SslConfigurationStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Enable or disable ssl for scoring """<line_sep>DISABLED="Disabled"<line_sep>ENABLED="Enabled"<block_end><class_stmt>Status(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Status of update workspace quota. """<line_sep>UNDEFINED="Undefined"<line_sep>SUCCESS="Success"<line_sep>FAILURE="Failure"<line_sep>INVALID_QUOTA_BELOW_CLUSTER_MINIMUM="InvalidQuotaBelowClusterMinimum"<line_sep>INVALID_QUOTA_EXCEEDS_SUBSCRIPTION_LIMIT="InvalidQuotaExceedsSubscriptionLimit"<line_sep>INVALID_VM_FAMILY_NAME="InvalidVMFamilyName"<line_sep>OPERATION_NOT_SUPPORTED_FOR_SKU="OperationNotSupportedForSku"<line_sep>OPERATION_NOT_ENABLED_FOR_REGION="OperationNotEnabledForRegion"<block_end><class_stmt>UnderlyingResourceAction(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>DELETE="Delete"<line_sep>DETACH="Detach"<block_end><class_stmt>UnitOfMeasure(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The unit of time measurement for the specified VM price. Example: OneHour """<line_sep>ONE_HOUR="OneHour"<block_end><class_stmt>UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""An enum describing the unit of usage measurement. """<line_sep>COUNT="Count"<block_end><class_stmt>VMPriceOSType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Operating system type used by the VM. """<line_sep>LINUX="Linux"<line_sep>WINDOWS="Windows"<block_end><class_stmt>VmPriority(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Virtual Machine priority """<line_sep>DEDICATED="Dedicated"<line_sep>LOW_PRIORITY="LowPriority"<block_end><class_stmt>VMTier(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The type of the VM. """<line_sep>STANDARD="Standard"<line_sep>LOW_PRIORITY="LowPriority"<line_sep>SPOT="Spot"<block_end>
<import_stmt>SecurityAdvisor<line_sep>URL_SUFFIX='apis/coachuser/'<line_sep>BASE_URL='https://www.securityadvisor.io/'<line_sep>CONTEXT_JSON={"SecurityAdvisor.CoachUser":{"coaching_date":"2019-10-04T21:04:19.480425" "coaching_status":"Pending" "coaching_score":"" "user":"<EMAIL>" "context":"phishing" "message":"Coaching Sent"}}<line_sep>RESPONSE_JSON={"coaching_date":"2019-10-04T21:04:19.480425" "coaching_status":"Pending" "coaching_score":"" "user":"<EMAIL>" "context":"phishing" "message":"Coaching Sent"}<line_sep>HEADERS={'Content-Type':'application/json' 'Accept':'application/json' 'Authorization':'Token '+'<PASSWORD>'}<line_sep><def_stmt>test_coach_end_user_command requests_mock<block_start>"""Unit test for coach-end-user command Args: requests_mock ([type]): [description] """<line_sep>mock_response=RESPONSE_JSON<line_sep>requests_mock.post(BASE_URL+URL_SUFFIX json=mock_response)<line_sep>client=SecurityAdvisor.Client(base_url=BASE_URL verify=<false> proxy=<false> headers=HEADERS)<line_sep>args={"user":"<EMAIL>" "context":"phishing"}<line_sep>_,_,result=SecurityAdvisor.coach_end_user_command(client args)<assert_stmt>result<eq>RESPONSE_JSON<block_end><def_stmt>test_module_command requests_mock<block_start>"""Unit test for test-module command Args: requests_mock ([type]): [description] """<line_sep>mock_response=RESPONSE_JSON<line_sep>requests_mock.post(BASE_URL+URL_SUFFIX json=mock_response)<line_sep>client=SecurityAdvisor.Client(base_url=BASE_URL verify=<false> proxy=<false> headers=HEADERS)<line_sep>response=SecurityAdvisor.test_module(client)<assert_stmt>response<eq>"ok"<block_end>
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load("@bazel_tools//tools/build_defs/repo:http.bzl" "http_archive")<line_sep>_BLACK_PY_BUILD_FILE=""" py_binary( name = "black", srcs = glob(["**/*.py"]), visibility = ["//visibility:public"], ) """<def_stmt>py_gapic_repositories <block_start>_maybe(http_archive name="pypi_black" strip_prefix="black-19.3b0" urls=["https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz"] build_file_content=_BLACK_PY_BUILD_FILE )<block_end><def_stmt>_maybe repo_rule name strip_repo_prefix="" **kwargs<block_start><if_stmt><not>name.startswith(strip_repo_prefix)<block_start><return><block_end>repo_name=name[len(strip_repo_prefix):]<if_stmt>repo_name<in>native.existing_rules()<block_start><return><block_end>repo_rule(name=repo_name **kwargs)<block_end>
# SPDX-License-Identifier: MIT <import_stmt>glob<import_stmt>os<import_stmt>pathlib<import_stmt>re<import_stmt>shutil<import_stmt>colorama<import_stmt>uefi_firmware<import_from_stmt>.guid_db UEFI_GUIDS<line_sep>DIR_NAME="all"<line_sep>PE_DIR="modules"<line_sep>g_re_guid=re.compile(r"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")<class_stmt>Dumper<block_start><def_stmt>__init__ self fw_name dir_name pe_dir<block_start>self.fw_name=fw_name<line_sep>self.dir_name=dir_name<line_sep>self.pe_dir=pe_dir<line_sep>self.modules=list()<if_stmt><not>os.path.isdir(self.dir_name)<block_start>os.mkdir(self.dir_name)<block_end><if_stmt><not>os.path.isdir(self.pe_dir)<block_start>os.mkdir(self.pe_dir)<block_end><block_end>@staticmethod<def_stmt>_unsupported <arrow>bool<block_start>print("[-] This type of binary is not supported")<line_sep><return><false><block_end><def_stmt>get_unique_name self module_name:str<arrow>str# Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11 <block_start>index=1<line_sep>unique_name=module_name<while_stmt><true><block_start><if_stmt>unique_name<in>self.modules<block_start>unique_name=f"{module_name}_{index:#d}"<line_sep>index<augadd>1<line_sep><continue><block_end><return>unique_name<block_end><block_end><def_stmt>get_module_name self module_path:str<arrow>str<block_start>module_name=str()<line_sep>dir_name,_=os.path.split(module_path)<line_sep>template=os.path.join(dir_name "*.ui")<if_stmt>len(glob.glob(template))<eq>1# try to get a friendly name from the *.ui file <block_start>ui_path=glob.glob(template)[0]<with_stmt>open(ui_path "rb")<as>f<block_start>module_name=f.read()<block_end>module_name=module_name.decode("utf-16le")<line_sep>module_name=self.get_unique_name(module_name[:-1])<line_sep>self.modules.append(module_name)<line_sep><return>module_name<block_end># no UI section, try to get a friendly name from the GUID database file_guids=g_re_guid.findall(dir_name)<if_stmt><not>file_guids<block_start><return>str()<block_end>module_guid=file_guids[-1].replace("file-" "")<line_sep>module_name=UEFI_GUIDS.get(module_guid.upper())<if_stmt><not>module_name<block_start>module_name=module_guid<block_end>module_name=self.get_unique_name(module_name)<line_sep>self.modules.append(module_name)<line_sep><return>module_name<block_end>@staticmethod<def_stmt>search_pe d:str<arrow>list<block_start><return>list(map(str pathlib.Path(d).rglob("*.pe")))<block_end>@staticmethod<def_stmt>search_te d:str<arrow>list<block_start><return>list(map(str pathlib.Path(d).rglob("*.te")))<block_end><def_stmt>get_pe_files self<block_start>pe_files=self.search_pe(self.dir_name)<line_sep>te_files=self.search_te(self.dir_name)<for_stmt>module_path te_files+pe_files<block_start>module_name=self.get_module_name(module_path)<if_stmt><not>module_name<block_start>print(f"Current module: unknown")<line_sep><continue><block_end>print(f"Current module: {module_name}")<line_sep>dst=os.path.join(self.pe_dir module_name)<line_sep>shutil.copy(module_path dst)<block_end><block_end><def_stmt>dump_all self<arrow>bool<block_start><if_stmt><not>os.path.isfile(self.fw_name)<block_start>print(f"[-] Check {self.fw_name} file")<line_sep><return><false><block_end><with_stmt>open(self.fw_name 
"rb")<as>fw<block_start>file_content=fw.read()<block_end>parser=uefi_firmware.AutoParser(file_content)<if_stmt>parser.type()<is>"unknown"<block_start>fvh_index=file_content.find(b"_FVH")<if_stmt>fvh_index<l>0<block_start><return>self._unsupported()<block_end>parser=uefi_firmware.AutoParser(file_content[fvh_index-40:])<if_stmt>parser.type()<is>"unknown"<block_start><return>self._unsupported()<block_end><block_end>firmware=parser.parse()<line_sep>firmware.dump(self.dir_name)<line_sep><return><true><block_end><block_end><def_stmt>get_efi_images fw_name<arrow>bool<block_start>"""get images from firmware"""<line_sep>colorama.init(autoreset=<true>)# for correct color display in uefi_firmware module dumper=Dumper(fw_name DIR_NAME PE_DIR)<if_stmt><not>dumper.dump_all()<block_start>exit()<block_end>dumper.get_pe_files()<line_sep><return><true><block_end>
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>os<import_stmt>utils.tfrecord_voc_utils<as>voc_utils<import_stmt>YOLOv3<as>yolov3<line_sep># import matplotlib.pyplot as plt # import matplotlib.patches as patches # from skimage import io, transform <import_from_stmt>utils.voc_classname_encoder classname_to_ids<line_sep>os.environ['CUDA_VISIBLE_DEVICES']='0'<line_sep>lr=0.001<line_sep>batch_size=12<line_sep>buffer_size=256<line_sep>epochs=160<line_sep>reduce_lr_epoch=[]<line_sep>config={'mode':'train' # 'train', 'test' 'data_shape':[448 448 3] 'num_classes':20 'weight_decay':5e-4 'keep_prob':0.5 # not used 'data_format':'channels_last' # 'channels_last' 'channels_first' 'batch_size':batch_size 'coord_scale':1 'noobj_scale':1 'obj_scale':5. 'class_scale':1. 'num_priors':3 'nms_score_threshold':0.5 'nms_max_boxes':10 'nms_iou_threshold':0.5 'priors':[[[10. 13.] [16 30.] [33. 23.]] [[30. 61.] [62. 45.] [59. 119.]] [[116. 90.] [156. 198.] [373. 326.]]]}<line_sep>image_augmentor_config={'data_format':'channels_last' 'output_shape':[448 448] # 'zoom_size': [520, 520], # 'crop_method': 'random', 'flip_prob':[0. 0.5] 'fill_mode':'BILINEAR' 'keep_aspect_ratios':<false> 'constant_values':0. # 'color_jitter_prob': 0.5, # 'rotate': [0.5, -10., 10.], 'pad_truth_to':60 }<line_sep>data=os.listdir('./voc2007/')<line_sep>data=[os.path.join('./voc2007/' name)<for>name data]<line_sep>train_gen=voc_utils.get_generator(data batch_size buffer_size image_augmentor_config)<line_sep>trainset_provider={'data_shape':[448 448 3] 'num_train':5011 'num_val':0 # not used 'train_generator':train_gen 'val_generator':<none># not used }<line_sep>testnet=yolov3.YOLOv3(config trainset_provider)<line_sep>testnet.load_weight('./weight/test-40449')<for_stmt>i range(epochs)<block_start>print('-'<times>25 'epoch' i '-'<times>25)<if_stmt>i<in>reduce_lr_epoch<block_start>lr=lr/10.<line_sep>print('reduce lr, lr=' lr 'now')<block_end>mean_loss=testnet.train_one_epoch(lr)<line_sep>print('>> mean loss' mean_loss)<line_sep>testnet.save_weight('latest' './weight/test')# 'latest', 'best' <block_end># img = io.imread() # img = transform.resize(img, [448,448]) # img = np.expand_dims(img, 0) # result = testnet.test_one_image(img) # id_to_clasname = {k:v for (v,k) in classname_to_ids.items()} # scores = result[0] # bbox = result[1] # class_id = result[2] # print(scores, bbox, class_id) # plt.figure(1) # plt.imshow(np.squeeze(img)) # axis = plt.gca() # for i in range(len(scores)): # rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none') # axis.add_patch(rect) # plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12) # plt.show()
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>mmocr.utils<as>utils<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>mmdeploy.core FUNCTION_REWRITER<line_sep>@FUNCTION_REWRITER.register_rewriter(func_name='mmocr.models.textrecog.encoders.SAREncoder.forward' backend='default')<def_stmt>sar_encoder__forward ctx self feat img_metas=<none><block_start>"""Rewrite `forward` of SAREncoder for default backend. Rewrite this function to: 1. convert tuple value of feat.size to int, making model exportable. 2. use torch.ceil to replace original math.ceil and if else in mmocr. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class SAREncoder. feat (Tensor): Encoded feature map of shape (N, C, H, W). img_metas (Optional[list[dict]]): A list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N, M]. """<if_stmt>img_metas<is><not><none><block_start><assert_stmt>utils.is_type_list(img_metas dict)<assert_stmt>len(img_metas)<eq>feat.size(0)<block_end>valid_ratios=<none><if_stmt>img_metas<is><not><none><block_start>valid_ratios=[img_meta.get('valid_ratio' 1.0)<for>img_meta img_metas]<if>self.mask<else><none><block_end>h_feat=int(feat.size(2))<line_sep>feat_v=F.max_pool2d(feat kernel_size=(h_feat 1) stride=1 padding=0)<line_sep>feat_v=feat_v.squeeze(2)# bsz * C * W feat_v=feat_v.permute(0 2 1).contiguous()# bsz * W * C holistic_feat=self.rnn_encoder(feat_v)[0]# bsz * T * C <if_stmt>valid_ratios<is><not><none><block_start>valid_hf=[]<line_sep>T=holistic_feat.size(1)<for_stmt>i,valid_ratio enumerate(valid_ratios)# use torch.ceil to replace original math.ceil and if else in mmocr <block_start>valid_step=torch.ceil(T<times>valid_ratio).long()-1<line_sep>valid_hf.append(holistic_feat[i valid_step :])<block_end>valid_hf=torch.stack(valid_hf dim=0)<block_end><else_stmt><block_start>valid_hf=holistic_feat[: -1 :]<block_end># bsz * C holistic_feat=self.linear(valid_hf)# bsz * C <return>holistic_feat<block_end>
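A standalone sketch of the math.ceil to torch.ceil substitution the rewriter above relies on; the lengths and ratio below are invented for illustration, and the Python scalar is wrapped in torch.tensor because torch.ceil only accepts tensor inputs.

import math

import torch

T = 24                # sequence length, e.g. holistic_feat.size(1)
valid_ratio = 0.63    # per-image ratio as read from img_metas

# math.ceil produces a plain Python int, which a tracer bakes in as a constant
step_py = math.ceil(T * valid_ratio) - 1

# keeping the computation in torch ops: torch.ceil only accepts tensors,
# so the Python scalar is wrapped with torch.tensor first
step_t = torch.ceil(torch.tensor(T * valid_ratio)).long() - 1

assert step_py == int(step_t)  # both select index 15 for these values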
<import_stmt>json<import_stmt>os<import_from_stmt>jinja2 Template<import_from_stmt>chronologer.config config<def_stmt>write_html <block_start>html_file=os.path.join(os.path.dirname(__file__) "templates" "index.html")<with_stmt>open(html_file)<as>fp<block_start>html_template=Template(fp.read())<block_end><if_stmt><not>config.dry_run<block_start>boxplot_spec=json.dumps(_get_boxplot_spec() indent=2)<with_stmt>open(config.html_output_file "w")<as>fp<block_start>fp.write(html_template.render(boxplot_spec=boxplot_spec))<block_end><block_end><block_end><def_stmt>_get_boxplot_spec <block_start><with_stmt>open(config.combined_benchmark_file)<as>fp<block_start>values=json.load(fp)<block_end><return>{"$schema":"https://vega.github.io/schema/vega-lite/v3.json" "data":{"values":values} "mark":{"type":"boxplot" "extent":"min-max" "size":5} "width":1400 "height":500 "encoding":{"y":{"field":"time" "type":"quantitative" "axis":{"title":"Time"}} "x":{"field":"commit" "type":"ordinal" "axis":{"title":"Commit" "labels":<false> "ticks":<false>} } "tooltip":{"field":"message" "type":"ordinal" "aggregate":"min"} } }<block_end>
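For reference, a sketch of the shape the combined benchmark file is assumed to have, based only on the fields the Vega-Lite encoding above uses (time, commit, message); the rows are illustrative, not real measurements.

# Illustrative rows matching the fields the encoding above expects
# (an assumption about the combined benchmark file, not real data).
example_values = [
    {"commit": "a1b2c3d", "message": "a1b2c3d speed up dependency resolver", "time": 12.4},
    {"commit": "a1b2c3d", "message": "a1b2c3d speed up dependency resolver", "time": 12.9},
    {"commit": "e4f5a6b", "message": "e4f5a6b fix cache invalidation", "time": 11.7},
]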
""" Simple program to demonstrate how to use muniverse on a game that takes mouse events. """<import_stmt>sys<import_stmt>numpy<as>np<line_sep>sys.path.insert(0 '..')<import_stmt>muniverse# noqa: E402 <def_stmt>main <block_start>print('Looking up environment...')<line_sep>spec=muniverse.spec_for_name('TowerMania-v1')<line_sep>print('Creating environment...')<line_sep>env=muniverse.Env(spec)<try_stmt><block_start>print('Resetting environment...')<line_sep>env.reset()<line_sep>print('Getting observation...')<line_sep>obs=env.observe()<line_sep>print(ascii_art(obs))<line_sep>print('Playing game...')<line_sep>step_idx=0<line_sep>action=muniverse.MouseAction('mousePressed' x=100 y=100 click_count=1)<line_sep>actions=[action action.with_event('mouseReleased')]<while_stmt><true><block_start>reward,done=env.step(0.1 actions[step_idx%2])<line_sep>step_idx<augadd>1<line_sep>print('reward: '+str(reward))<if_stmt>done<block_start><break><block_end><block_end><block_end><finally_stmt><block_start>env.close()<block_end><block_end><def_stmt>ascii_art img<block_start>brightness=np.sum(img axis=2)/3<line_sep>downsampled=brightness[::14 ::7]<line_sep>binary=downsampled<g>128<line_sep>height,width=binary.shape<line_sep>res=''<for_stmt>y range(0 height)<block_start><if_stmt>res<ne>''<block_start>res<augadd>'\n'<block_end><for_stmt>x range(0 width)<block_start><if_stmt>binary[y x]<block_start>res<augadd>'X'<block_end><else_stmt><block_start>res<augadd>' '<block_end><block_end><block_end><return>res<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# -*- coding: utf-8 -*- # (c) University of Strathclyde 2021 # Author: <NAME> # # Contact: <EMAIL> # # <NAME>, # Strathclyde Institute for Pharmacy and Biomedical Sciences, # Cathedral Street, # Glasgow, # G4 0RE # Scotland, # UK # # The MIT License # # Copyright (c) 2021 University of Strathclyde # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Code to implement the ANIblastall average nucleotide identity method."""<import_stmt>logging<import_stmt>os<import_stmt>platform<import_stmt>re<import_stmt>shutil<import_stmt>subprocess<import_from_stmt>pathlib Path<import_from_stmt>. pyani_config<import_from_stmt>. PyaniException<class_stmt>PyaniblastallException(PyaniException)<block_start>"""ANIblastall-specific exception for pyani."""<block_end><def_stmt>get_version blast_exe:Path=pyani_config.BLASTALL_DEFAULT<arrow>str<block_start>r"""Return BLAST blastall version as a string. :param blast_exe: path to blastall executable We expect blastall to return a string as, for example .. code-block:: bash $ blastall -version [blastall 2.2.26] ERROR: Number of database sequences to show \ one-line descriptions for (V) [ersion] is bad or out of range [? to ?] This is concatenated with the OS name. 
The following circumstances are explicitly reported as strings - no executable at passed path - non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file) - no version info returned - executable cannot be run on this OS """<line_sep>logger=logging.getLogger(__name__)<try_stmt><block_start>blastall_path=Path(shutil.which(blast_exe))# type:ignore <block_end><except_stmt>TypeError<block_start><return>f"{blast_exe} is not found in $PATH"<block_end><if_stmt><not>blastall_path.is_file()# no executable <block_start><return>f"No blastall at {blastall_path}"<block_end># This should catch cases when the file can't be executed by the user <if_stmt><not>os.access(blastall_path os.X_OK)# file exists but not executable <block_start><return>f"blastall exists at {blastall_path} but not executable"<block_end><if_stmt>platform.system()<eq>"Darwin"<block_start>cmdline=[blast_exe "-version"]<block_end><else_stmt><block_start>cmdline=[blast_exe]<block_end><try_stmt><block_start>result=subprocess.run(cmdline # type: ignore shell=<false> stdout=subprocess.PIPE # type: ignore stderr=subprocess.PIPE check=<false> # blastall doesn't return 0 )<block_end><except_stmt>OSError<block_start>logger.warning("blastall executable will not run" exc_info=<true>)<line_sep><return>f"blastall exists at {blastall_path} but could not be executed"<block_end>version=re.search(# type: ignore r"(?<=blastall\s)[0-9\.]*" str(result.stderr "utf-8")).group()<if_stmt>0<eq>len(version.strip())<block_start><return>f"blastall exists at {blastall_path} but could not retrieve version"<block_end><return>f"{platform.system()}_{version} ({blastall_path})"<block_end>
# Generated by Django 2.1.2 on 2018-12-16 13:01 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('vespene' '0008_auto_20181106_2233') ]<line_sep>operations=[migrations.RemoveField(model_name='workerpool' name='sudo_password' ) ]<block_end>
<import_stmt>autosar<line_sep>ws=autosar.workspace("4.2.2")<line_sep>components=ws.createPackage("ComponentTypes")<line_sep>swc=components.createCompositionComponent("MyComposition")<line_sep>print(swc.name)<line_sep>
<import_stmt>pytest<import_from_stmt>onegram.exceptions NotSupportedError<import_from_stmt>onegram follow unfollow<import_from_stmt>onegram like unlike<import_from_stmt>onegram comment uncomment<import_from_stmt>onegram save unsave<def_stmt>test_follow logged user cassette<block_start><if_stmt>logged<block_start>response=follow(user)<assert_stmt>response<eq>{'result':'following' 'status':'ok' 'user_id':user['id']}<line_sep>response=unfollow(user)<assert_stmt>response<eq>{'status':'ok' 'user_id':user['id']}<block_end><else_stmt><block_start><with_stmt>pytest.raises(NotSupportedError)<block_start>follow(user)<block_end><with_stmt>pytest.raises(NotSupportedError)<block_start>unfollow(user)<block_end><block_end><block_end><def_stmt>test_like logged post cassette<block_start><if_stmt>logged<block_start>response=like(post)<assert_stmt>response<eq>{'status':'ok' 'post_id':post['id']}<line_sep>response=unlike(post)<assert_stmt>response<eq>{'status':'ok' 'post_id':post['id']}<block_end><else_stmt><block_start><with_stmt>pytest.raises(NotSupportedError)<block_start>like(post)<block_end><with_stmt>pytest.raises(NotSupportedError)<block_start>unlike(post)<block_end><block_end><block_end><def_stmt>test_comment logged post cassette<block_start>text='awesome!'<if_stmt>logged<block_start>commentary=comment(text post)<assert_stmt>commentary['id']<assert_stmt>commentary['text']<eq>text<assert_stmt>commentary['status']<eq>'ok'<assert_stmt>commentary['post_id']<eq>post['id']<line_sep>response=uncomment(commentary)<assert_stmt>response<eq>{'status':'ok' 'post_id':post['id']}<block_end><else_stmt><block_start><with_stmt>pytest.raises(NotSupportedError)<block_start>comment(text post)<block_end><with_stmt>pytest.raises(NotSupportedError)<block_start>fake_comment={'id':'1' 'post_id':'2'}<line_sep>uncomment(fake_comment)<block_end><block_end><block_end><def_stmt>test_save logged post cassette<block_start><if_stmt>logged<block_start>response=save(post)<assert_stmt>response<eq>{'status':'ok' 'post_id':post['id']}<line_sep>response=unsave(post)<assert_stmt>response<eq>{'status':'ok' 'post_id':post['id']}<block_end><else_stmt><block_start><with_stmt>pytest.raises(NotSupportedError)<block_start>save(post)<block_end><with_stmt>pytest.raises(NotSupportedError)<block_start>unsave(post)<block_end><block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>minpy.numpy<as>mp<import_stmt>numpy<as>np<import_stmt>minpy.dispatch.policy<as>policy<import_from_stmt>minpy.core convert_args return_numpy grad_and_loss grad minpy_to_numpy<as>mn numpy_to_minpy<as>nm<import_stmt>time<line_sep># mp.set_policy(policy.OnlyNumPyPolicy()) <def_stmt>test_autograd <block_start>@convert_args<def_stmt>minpy_rnn_step_forward x prev_h Wx Wh b<block_start>next_h=mp.tanh(x.dot(Wx)+prev_h.dot(Wh)+b)<line_sep><return>next_h<block_end><def_stmt>rel_error x y<block_start>""" returns relative error """<line_sep><return>np.max(np.abs(x-y)/(np.maximum(1e-8 np.abs(x)+np.abs(y))))<block_end><def_stmt>rnn_step_forward x prev_h Wx Wh b<block_start>next_h=np.tanh(prev_h.dot(Wh)+x.dot(Wx)+b)<line_sep>cache=next_h prev_h x Wx Wh<line_sep><return>next_h cache<block_end><def_stmt>rnn_step_backward dnext_h cache<block_start>dx,dprev_h,dWx,dWh,db=<none> <none> <none> <none> <none><line_sep># Load values from rnn_step_forward next_h,prev_h,x,Wx,Wh=cache<line_sep># Gradients of loss wrt tanh dtanh=dnext_h<times>(1-next_h<times>next_h)# (N, H) # Gradients of loss wrt x dx=dtanh.dot(Wx.T)<line_sep># Gradients of loss wrt prev_h dprev_h=dtanh.dot(Wh.T)<line_sep># Gradients of loss wrt Wx dWx=x.T.dot(dtanh)# (D, H) # Gradients of loss wrt Wh dWh=prev_h.T.dot(dtanh)<line_sep># Gradients of loss wrt b. Note we broadcast b in practice. Thus result of # matrix ops are just sum over columns db=dtanh.sum(axis=0)# == np.ones([N, 1]).T.dot(dtanh)[0, :] <return>dx dprev_h dWx dWh db<block_end># preparation N,D,H=4 5 6<line_sep>x=np.random.randn(N D)<line_sep>h=np.random.randn(N H)<line_sep>Wx=np.random.randn(D H)<line_sep>Wh=np.random.randn(H H)<line_sep>b=np.random.randn(H)<line_sep>out,cache=rnn_step_forward(x h Wx Wh b)<line_sep>dnext_h=np.random.randn(*out.shape)<line_sep># test MinPy start=time.time()<line_sep>rnn_step_forward_loss=<lambda>x h Wx Wh b dnext_h:minpy_rnn_step_forward(x h Wx Wh b)<times>nm(dnext_h)<line_sep>grad_loss_function=return_numpy(grad_and_loss(rnn_step_forward_loss range(5)))<line_sep>grad_arrays=grad_loss_function(x h Wx Wh b dnext_h)[0]<line_sep>end=time.time()<line_sep>print("MinPy total time elapsed:" end-start)<line_sep># test NumPy start=time.time()<line_sep>out,cache=rnn_step_forward(x h Wx Wh b)<line_sep>dx,dprev_h,dWx,dWh,db=rnn_step_backward(dnext_h cache)<line_sep>out<augmul>dnext_h# to agree with MinPy calculation end=time.time()<line_sep>print("NumPy total time elapsed:" end-start)<line_sep>print()<line_sep>print("Result Check:")<line_sep>print('dx error: ' rel_error(dx grad_arrays[0]))<line_sep>print('dprev_h error: ' rel_error(dprev_h grad_arrays[1]))<line_sep>print('dWx error: ' rel_error(dWx grad_arrays[2]))<line_sep>print('dWh error: ' rel_error(dWh grad_arrays[3]))<line_sep>print('db error: ' rel_error(db grad_arrays[4]))<block_end><def_stmt>test_zero_input_grad <block_start><def_stmt>foo1 x<block_start><return>1<block_end>bar1=grad(foo1)<assert_stmt>bar1(0)<eq>0.0<block_end><def_stmt>test_reduction <block_start><def_stmt>test_sum <block_start>x_np=np.array([[1 2] [3 4] [5 6]])<line_sep>x_grad=np.array([[1 1] [1 1] [1 1]])<def_stmt>red1 x<block_start><return>mp.sum(x)<block_end><def_stmt>red2 x<block_start><return>mp.sum(x axis=0)<block_end><def_stmt>red3 x<block_start><return>mp.sum(x axis=0 
keepdims=<true>)<block_end>grad1=grad(red1)<assert_stmt>np.all(grad1(x_np).asnumpy()<eq>x_grad)<line_sep>grad2=grad(red2)<assert_stmt>np.all(grad2(x_np).asnumpy()<eq>x_grad)<line_sep>grad3=grad(red3)<assert_stmt>np.all(grad3(x_np).asnumpy()<eq>x_grad)<block_end><def_stmt>test_max <block_start>x_np=np.array([[1 2] [2 1] [0 0]])<line_sep>x_grad1=np.array([[0 1] [1 0] [0 0]])<line_sep>x_grad2=np.array([[0 1] [1 0] [1 1]])<line_sep>x_grad3=np.array([[0 1] [1 0] [0 0]])<def_stmt>red1 x<block_start><return>mp.max(x)<block_end><def_stmt>red2 x<block_start><return>mp.max(x axis=1)<block_end><def_stmt>red3 x<block_start><return>mp.max(x axis=1 keepdims=<true>)<block_end><def_stmt>red4 x<block_start><return>mp.max(x axis=0)<block_end><def_stmt>red5 x<block_start><return>mp.max(x axis=0 keepdims=<true>)<block_end>grad1=grad(red1)<assert_stmt>np.all(grad1(x_np).asnumpy()<eq>x_grad1)<line_sep>grad2=grad(red2)<assert_stmt>np.all(grad2(x_np).asnumpy()<eq>x_grad2)<line_sep>grad3=grad(red3)<assert_stmt>np.all(grad3(x_np).asnumpy()<eq>x_grad2)<line_sep>grad4=grad(red4)<assert_stmt>np.all(grad4(x_np).asnumpy()<eq>x_grad3)<line_sep>grad5=grad(red5)<assert_stmt>np.all(grad5(x_np).asnumpy()<eq>x_grad3)<block_end><def_stmt>test_min <block_start>x_np=np.array([[1 2] [2 1] [0 0]])<line_sep>x_grad1=np.array([[0 0] [0 0] [1 1]])<line_sep>x_grad2=np.array([[1 0] [0 1] [1 1]])<line_sep>x_grad3=np.array([[0 0] [0 0] [1 1]])<def_stmt>red1 x<block_start><return>mp.min(x)<block_end><def_stmt>red2 x<block_start><return>mp.min(x axis=1)<block_end><def_stmt>red3 x<block_start><return>mp.min(x axis=1 keepdims=<true>)<block_end><def_stmt>red4 x<block_start><return>mp.min(x axis=0)<block_end><def_stmt>red5 x<block_start><return>mp.min(x axis=0 keepdims=<true>)<block_end>grad1=grad(red1)<assert_stmt>np.all(grad1(x_np).asnumpy()<eq>x_grad1)<line_sep>grad2=grad(red2)<assert_stmt>np.all(grad2(x_np).asnumpy()<eq>x_grad2)<line_sep>grad3=grad(red3)<assert_stmt>np.all(grad3(x_np).asnumpy()<eq>x_grad2)<line_sep>grad4=grad(red4)<assert_stmt>np.all(grad4(x_np).asnumpy()<eq>x_grad3)<line_sep>grad5=grad(red5)<assert_stmt>np.all(grad5(x_np).asnumpy()<eq>x_grad3)<block_end>test_sum()<line_sep>test_max()<line_sep>test_min()<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_autograd()<line_sep>test_zero_input_grad()<line_sep>test_reduction()<block_end>
<import_from_stmt>abc abstractmethod<class_stmt>Repair<block_start>""" This class allows individuals to be repaired after crossover if necessary. """<def_stmt>do self problem pop **kwargs<block_start><return>self._do(problem pop **kwargs)<block_end>@abstractmethod<def_stmt>_do self problem pop **kwargs<block_start><pass><block_end><block_end><class_stmt>NoRepair(Repair)<block_start>""" A dummy class which can be used to simply do no repair. """<def_stmt>do self problem pop **kwargs<block_start><return>pop<block_end><block_end>
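A hedged example of a concrete repair operator in the spirit of the base class above; it assumes a pymoo-style Population exposing get("X")/set("X") and a problem with array-valued xl/xu bounds, which may differ in other setups.

import numpy as np


class ClipToBoundsRepair(Repair):
    """Clip decision variables back into [xl, xu] after crossover/mutation."""

    def _do(self, problem, pop, **kwargs):
        X = pop.get("X")                        # assumes a pymoo-style Population API
        X = np.clip(X, problem.xl, problem.xu)  # assumes array-valued bounds on the problem
        pop.set("X", X)
        return pop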
# # Code by <NAME> and under the MIT license # <import_from_stmt>mineturtle *<import_stmt>lsystem<line_sep>t=Turtle()<line_sep>t.pendelay(0)<line_sep>t.turtle(<none>)<line_sep>t.penblock(block.BRICK_BLOCK)<line_sep># ensure angles are always integral multiples of 90 degrees t.gridalign()<line_sep>rules={'X':'X+YF+' 'Y':'-FX-Y'}<def_stmt>go # draw a wall segment with a door <block_start>t.pendown()<line_sep>t.penblock(block.BRICK_BLOCK)<line_sep>t.startface()<for_stmt>i range(4)<block_start>t.go(4)<line_sep>t.pitch(90)<block_end>t.endface()<line_sep>t.penup()<line_sep>t.go(2)<line_sep>t.pendown()<line_sep>t.penblock(block.AIR)<line_sep>t.pitch(90)<line_sep>t.go(1)<line_sep>t.penup()<line_sep>t.pitch(180)<line_sep>t.go(1)<line_sep>t.pitch(90)<line_sep>t.go(2)<block_end>dictionary={'+':<lambda>:t.yaw(90) '-':<lambda>:t.yaw(-90) 'F':<lambda>:go()}<line_sep>lsystem.lsystem('FX' rules dictionary 14)<line_sep>
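For intuition, a small self-contained sketch of the string-rewriting step the lsystem module presumably performs with the same rules as above (the classic dragon-curve system); the turtle drawing itself is left to the module.

rules = {'X': 'X+YF+', 'Y': '-FX-Y'}


def expand(axiom, rules, iterations):
    s = axiom
    for _ in range(iterations):
        # symbols without a rule ('F', '+', '-') are copied through unchanged
        s = ''.join(rules.get(ch, ch) for ch in s)
    return s


print(expand('FX', rules, 2))  # FX+YF++-FX-YF+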
<import_from_stmt>distutils.core setup<import_stmt>os<import_from_stmt>setuptools.command.install install<class_stmt>InstallWrapper(install)<block_start><def_stmt>run self# compile the relevant protobufs <block_start>self.compile_proto()<line_sep># Run the standard PyPi copy install.run(self)<line_sep># remove the compiled protobufs self.cleanup()<block_end><def_stmt>compile_proto self# compile the protobufs <block_start>os.system('cd anna && protoc -I=../../../../include/proto --python_out=. '+'kvs.proto')<line_sep>os.system('cd anna && protoc -I=../../../../include/proto --python_out=. '+'functions.proto')<block_end><def_stmt>cleanup self<block_start>os.system('rm anna/kvs_pb2.py')<block_end><block_end>setup(name='Anna' version='0.1' packages=['anna' ] license='Apache v2' long_description='Client for the Anna KVS' install_requires=['zmq' 'protobuf'] cmdclass={'install':InstallWrapper})<line_sep>
<import_from_stmt>kqueen.config current_config<import_from_stmt>prometheus_client multiprocess<import_stmt>multiprocessing<import_stmt>os<line_sep>app_config=current_config()<line_sep>bind="{host}:{port}".format(host=app_config.get('KQUEEN_HOST') port=app_config.get('KQUEEN_PORT') )<line_sep>timeout=180<line_sep>workers=multiprocessing.cpu_count()<times>2+1<line_sep>worker_class='gthread'<line_sep># check for prometheus settings <if_stmt>'prometheus_multiproc_dir'<not><in>os.environ<block_start><raise>Exception('Variable prometheus_multiproc_dir is required')<block_end><def_stmt>child_exit server worker<block_start>multiprocess.mark_process_dead(worker.pid)<block_end>
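A hypothetical launcher sketch showing the precondition this config enforces: prometheus_multiproc_dir has to be in the environment before gunicorn imports the file, otherwise the check above raises; the config filename and the application path are assumptions.

import os
import subprocess
import tempfile

# prometheus_multiproc_dir must exist in the environment before gunicorn
# imports this config module, otherwise the module-level check raises.
metrics_dir = tempfile.mkdtemp(prefix="prometheus_")
env = dict(os.environ, prometheus_multiproc_dir=metrics_dir)

# "gunicorn_config.py" and "kqueen.server:app" are assumed names; substitute
# the real config path and WSGI entry point.
subprocess.run(
    ["gunicorn", "--config", "gunicorn_config.py", "kqueen.server:app"],
    env=env,
    check=True,
)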
<import_from_stmt>amqpstorm management<if_stmt>__name__<eq>'__main__'# If using a self-signed certificate, change verify=True to point at your CA bundle. # You can disable certificate verification for testing by passing in verify=False. <block_start>API=management.ManagementApi('https://rmq.amqpstorm.io:15671' 'guest' 'guest' verify=<true>)<line_sep>API.user.create('my_user' 'password')<line_sep># Get a user print(API.user.get('my_user'))<line_sep># User that does not exist throws an exception API.user.delete('my_user')<try_stmt><block_start>API.user.get('NOT_FOUND')<block_end><except_stmt>management.ApiError<as>why<block_start><if_stmt>why.error_code<eq>404<block_start>print('User not found')<block_end><block_end><block_end>
<import_stmt>os<import_stmt>sphinx<line_sep>os.chdir('../../../docs_sphinx')<line_sep>sphinx.main(['sphinx-build' '-b' 'doctest' '.' '../docs' '-D' 'exclude_patterns=reference'])<line_sep>
# from seedwork.domain.services import DomainService # from seedwork.domain.value_objects import UUID # from .entities import Listing, Seller # from .repositories import ListingRepository # from .rules import ( # ListingMustBeInDraftState, # SellerMustBeEligibleForAddingNextListing, # ) # class CatalogService: # def publish_listing(self, listing: Listing, seller: Seller): # self.check_rule(ListingMustBeInDraftState(listing.status)) # self.check_rule(SellerMustBeEligibleForAddingNextListing(seller)) # listing.publish()
<import_from_stmt>vit.formatter.start Start<class_stmt>StartRemaining(Start)<block_start><def_stmt>format_datetime self start task<block_start><return>self.remaining(start)<block_end><block_end>
<import_from_stmt>tests.common DummyPostData<import_from_stmt>wtforms.fields IntegerField<import_from_stmt>wtforms.form Form<class_stmt>F(Form)<block_start>a=IntegerField()<line_sep>b=IntegerField(default=48)<block_end><def_stmt>test_integer_field <block_start>form=F(DummyPostData(a=["v"] b=["-15"]))<assert_stmt>form.a.data<is><none><assert_stmt>form.a.raw_data<eq>["v"]<assert_stmt>form.a()<eq>"""<input id="a" name="a" type="number" value="v">"""<assert_stmt>form.b.data<eq>-15<assert_stmt>form.b()<eq>"""<input id="b" name="b" type="number" value="-15">"""<assert_stmt><not>form.a.validate(form)<assert_stmt>form.b.validate(form)<line_sep>form=F(DummyPostData(a=[] b=[""]))<assert_stmt>form.a.data<is><none><assert_stmt>form.a.raw_data<eq>[]<assert_stmt>form.b.data<is><none><assert_stmt>form.b.raw_data<eq>[""]<assert_stmt><not>form.validate()<assert_stmt>len(form.b.process_errors)<eq>1<assert_stmt>len(form.b.errors)<eq>1<line_sep>form=F(b=9)<assert_stmt>form.b.data<eq>9<assert_stmt>form.a._value()<eq>""<assert_stmt>form.b._value()<eq>"9"<line_sep>form=F(DummyPostData() data=dict(b="v"))<assert_stmt>form.b.data<is><none><assert_stmt>form.a._value()<eq>""<assert_stmt>form.b._value()<eq>""<assert_stmt><not>form.validate()<assert_stmt>len(form.b.process_errors)<eq>1<assert_stmt>len(form.b.errors)<eq>1<block_end>
<import_stmt>sys re<import_from_stmt>datetime date<line_sep>version=sys.argv[1]<line_sep>release_date=date.today().strftime('%Y-%m-%d')<line_sep>major,minor,patch=version.split('.')<def_stmt>replace file_path pattern replacement<block_start>updated=re.sub(pattern replacement open(file_path).read())<with_stmt>open(file_path 'w')<as>f<block_start>f.write(updated)<block_end><block_end># Update changelog SEP='---------------------'<line_sep>NEXT=f'Next\n{SEP}'<line_sep>changelog_header=f'{NEXT}\n\n{version} ({release_date})\n{SEP}'<line_sep>replace('CHANGELOG.md' NEXT changelog_header)<line_sep># Update Doxyfile DOXY_VERSION='PROJECT_NUMBER = '<line_sep>replace('Doxyfile' DOXY_VERSION+'.*' DOXY_VERSION+version)<line_sep># Update CMakeLists.txt replace('CMakeLists.txt' '''SET\\(CBOR_VERSION_MAJOR "0"\\) SET\\(CBOR_VERSION_MINOR "7"\\) SET\\(CBOR_VERSION_PATCH "0"\\)''' f'''SET(CBOR_VERSION_MAJOR "{major}") SET(CBOR_VERSION_MINOR "{minor}") SET(CBOR_VERSION_PATCH "{patch}")''')<line_sep># Update Sphinx replace('doc/source/conf.py' """version = '.*' release = '.*'""" f"""version = '{major}.{minor}' release = '{major}.{minor}.{patch}'""")<line_sep>
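A quick sketch of how the replace() helper above behaves, using a throwaway file; the file name, contents, and version strings are made up for illustration.

# Scratch demonstration of the regex-based replace() helper defined above.
with open('scratch_version.txt', 'w') as f:
    f.write("version = '0.7.0'\n")

replace('scratch_version.txt', r"version = '.*'", "version = '0.8.0'")

print(open('scratch_version.txt').read())  # version = '0.8.0'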
<import_stmt>cProfile<import_stmt>json<import_stmt>os<import_stmt>pstats<import_from_stmt>logging getLogger<import_from_stmt>shutil rmtree<import_from_stmt>time sleep<import_from_stmt>typing List<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>whylogs.app.config SessionConfig WriterConfig<import_from_stmt>whylogs.app.session session_from_config<line_sep>script_dir=os.path.dirname(os.path.realpath(__file__))<line_sep>TEST_LOGGER=getLogger(__name__)<def_stmt>count_features json_profile_filename<block_start><if_stmt><not>os.path.isfile(json_profile_filename)<block_start><raise>ValueError(f"{json_profile_filename} is not a json file but trying to open it to count features")<block_end>profile=get_json_profile(json_profile_filename)<if_stmt>profile<and>profile.get("columns")<block_start><return>len(profile["columns"].keys())<block_end><return>0<block_end><def_stmt>get_json_profile json_profile_filename<block_start>profile={}<if_stmt>os.path.exists(json_profile_filename)<and>os.stat(json_profile_filename).st_size<g>0<block_start><with_stmt>open(json_profile_filename)<as>profile_file<block_start>profile=json.load(profile_file)<block_end><block_end><return>profile<block_end><def_stmt>assert_all_elements_equal data:List<block_start><if_stmt><not>data<or>len(data)<eq>1<block_start><return><true><block_end>first=data[0]<for_stmt>element iter(data)<block_start><assert_stmt>first[0]<eq>element[0] f"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}"<block_end><block_end>@pytest.mark.load<def_stmt>test_log_rotation_concurrency tmpdir<block_start>log_rotation_interval="1s"<line_sep>sleep_interval=2<line_sep>test_path=tmpdir.mkdir("log_rotation_concurrency_repro")<line_sep>writer_config=WriterConfig("local" ["json"] test_path.realpath() filename_template="dataset_summary-$dataset_timestamp")<line_sep># Load the full lending club 1000 csv, to get a chance at hitting the bug. 
csv_path=os.path.join(script_dir "lending_club_1000.csv")<line_sep>full_df=pd.read_csv(csv_path)<line_sep># full_df has shape (1000, 151) so create a test df with 4x size by iteratively appending to self 2 times <for_stmt>_ range(2)<block_start>full_df=full_df.append(full_df)<block_end>TEST_LOGGER.info(f"test dataframe has shape {full_df.shape}")<line_sep># Create a whylogs logging session session_config=SessionConfig("project" "pipeline" writers=[writer_config])<line_sep>session=session_from_config(session_config)<line_sep>TEST_LOGGER.info(f"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause")<line_sep>profiler=cProfile.Profile()<line_sep>profiler.enable()<with_stmt>session.logger(tags={"datasetId":"model-1"} with_rotation_time=log_rotation_interval)<as>ylog<block_start>ylog.log_dataframe(full_df)# Log a larger dataframe to increase chance of rotation before seeing all columns sleep(sleep_interval)<line_sep>ylog.log_dataframe(full_df.head(n=2))# Log a smaller dataframe to get more features before rotation sleep(sleep_interval)<block_end>profiler.disable()<line_sep>stats=pstats.Stats(profiler).sort_stats("cumulative")<line_sep>TEST_LOGGER.info(stats.print_stats(10))<line_sep>output_files=[]<for_stmt>root,subdir,file_names os.walk(test_path)<block_start><if_stmt><not>file_names<block_start><continue><block_end><if_stmt>subdir<block_start><for_stmt>directory subdir<block_start><for_stmt>file file_names<block_start>full_file_path=os.path.join(root directory file)<line_sep>output_files<augadd>[full_file_path]<block_end><block_end><block_end><else_stmt><block_start><for_stmt>file file_names<block_start>full_file_path=os.path.join(root file)<line_sep>output_files<augadd>[full_file_path]<block_end><block_end><block_end><assert_stmt>len(output_files)<g>0 "No output files were generated during stress test"<line_sep>TEST_LOGGER.debug(f"Generated {len(output_files)} dataset summary files.")<line_sep>feature_counts=[]<for_stmt>filename output_files<block_start>feature_count=count_features(filename)<if_stmt>feature_count<g>0<block_start>feature_counts.append((count_features(filename) filename))<block_end><block_end><assert_stmt>len(feature_counts)<g>0 f"feature counts are all empty, we expect some empty files with aggressive log rotation but not all empty!"<line_sep>TEST_LOGGER.info(f"Feature counts all same, first file with features was {feature_counts[0]}")<line_sep>TEST_LOGGER.debug(f"There were {len(feature_counts)} files with features.")<line_sep>assert_all_elements_equal(feature_counts)<line_sep>rmtree(test_path ignore_errors=<true>)<line_sep>TEST_LOGGER.debug(f"End cleaning up test directory {test_path}")<block_end>
<import_stmt>shutil<import_stmt>unittest<import_stmt>os<import_from_stmt>running_modes.configurations TransferLearningLoggerConfig GeneralConfigurationEnvelope<import_from_stmt>running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration LinkInventLearningRateConfiguration<import_from_stmt>running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration LinkInventTransferLearningConfiguration<import_from_stmt>running_modes.constructors.transfer_learning_mode_constructor TransferLearningModeConstructor<import_from_stmt>running_modes.utils set_default_device_cuda<import_from_stmt>running_modes.enums.logging_mode_enum LoggingModeEnum<import_from_stmt>running_modes.enums.running_mode_enum RunningModeEnum<import_from_stmt>reinvent_models.model_factory.enums.model_type_enum ModelTypeEnum<import_from_stmt>unittest_reinvent.fixtures.paths MAIN_TEST_PATH SMILES_SET_LINK_INVENT_PATH LINK_INVENT_PRIOR_PATH<import_from_stmt>unittest_reinvent.fixtures.utils count_empty_files<class_stmt>TestLinkInventTransferLearning(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>set_default_device_cuda()<line_sep>lm_enum=LoggingModeEnum()<line_sep>rm_enum=RunningModeEnum()<line_sep>mt_enum=ModelTypeEnum()<line_sep>self.workfolder=os.path.join(MAIN_TEST_PATH mt_enum.LINK_INVENT+rm_enum.TRANSFER_LEARNING)<if_stmt><not>os.path.isdir(self.workfolder)<block_start>os.makedirs(self.workfolder)<block_end>self.log_dir=os.path.join(self.workfolder "test_log")<line_sep>log_config=TransferLearningLoggerConfig(logging_path=self.log_dir recipient=lm_enum.LOCAL job_name="test_job")<line_sep>self.lr_config=LinkInventLearningRateConfiguration()<line_sep>self.parameters=LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH output_path=self.workfolder input_smiles_path=SMILES_SET_LINK_INVENT_PATH validation_smiles_path=<none> num_epochs=2 sample_size=10 learning_rate=self.lr_config)<line_sep>self.general_config=GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT logging=vars(log_config) run_type=rm_enum.TRANSFER_LEARNING version="3.0" parameters=vars(self.parameters))<line_sep>self.runner=TransferLearningModeConstructor(self.general_config)<block_end><def_stmt>tearDown self<block_start><if_stmt>os.path.isdir(self.workfolder)<block_start>shutil.rmtree(self.workfolder)<block_end><block_end><def_stmt>_model_saved_and_logs_exist self<block_start>self.assertTrue(os.path.isfile(os.path.join(self.workfolder self.parameters.model_file_name)))<line_sep>self.assertTrue(os.path.isdir(self.log_dir))<line_sep>self.assertEqual(count_empty_files(self.log_dir) 0)<block_end><def_stmt>test_no_validation self<block_start>self.parameters.validation_smiles_path=<none><line_sep>self.runner.run()<line_sep>self._model_saved_and_logs_exist()<block_end><def_stmt>test_with_validation self<block_start>self.parameters.validation_smiles_path=SMILES_SET_LINK_INVENT_PATH<line_sep>self.runner.run()<line_sep>self._model_saved_and_logs_exist()<block_end><block_end>
<import_from_stmt>.core BaseRunner<import_from_stmt>.solo SingleThreadASGIRunner SingleThreadRunner SingleThreadWSGIRunner<import_from_stmt>.threadpool ThreadPoolASGIRunner ThreadPoolRunner ThreadPoolWSGIRunner<line_sep>
# Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module provides a gRPC service for updating remote job info to MLMD."""<import_from_stmt>concurrent futures<import_from_stmt>typing Optional<import_from_stmt>absl logging<import_stmt>grpc<import_from_stmt>tfx.orchestration metadata<import_from_stmt>tfx.proto.orchestration execution_watcher_pb2<import_from_stmt>tfx.proto.orchestration execution_watcher_pb2_grpc<import_from_stmt>ml_metadata.proto metadata_store_pb2<def_stmt>generate_service_stub address:str creds:Optional[grpc.ChannelCredentials]=<none> <arrow>execution_watcher_pb2_grpc.ExecutionWatcherServiceStub<block_start>"""Generates a gRPC service stub for a given server address."""<line_sep>channel=grpc.secure_channel(address creds)<if>creds<else>grpc.insecure_channel(address)<line_sep><return>execution_watcher_pb2_grpc.ExecutionWatcherServiceStub(channel)<block_end><class_stmt>ExecutionWatcher(execution_watcher_pb2_grpc.ExecutionWatcherServiceServicer)<block_start>"""A gRPC service server for updating remote job info to MLMD. Attributes: local_address: Local network address to the server. address: Remote network address to the server, same as local_address if not configured. """<def_stmt>__init__ self port:int mlmd_connection:metadata.Metadata execution:metadata_store_pb2.Execution address:Optional[str]=<none> creds:Optional[grpc.ServerCredentials]=<none><block_start>"""Initializes the gRPC server. Args: port: Which port the service will be using. mlmd_connection: ML metadata connection. execution: The MLMD Execution to keep track of. address: Remote address used to contact the server. Should be formatted as an ipv4 or ipv6 address in the format `address:port`. If left as None, server will use local address. creds: gRPC server credentials. If left as None, server will use an insecure port. 
"""<line_sep>super().__init__()<line_sep>self._port=port<line_sep>self._address=address<line_sep>self._creds=creds<line_sep>self._mlmd_connection=mlmd_connection<line_sep>self._server=self._create_server()<if_stmt><not>execution.HasField('id')<block_start><raise>ValueError('execution id must be set to be tracked by ExecutionWatcher.')<block_end>self._execution=execution<block_end><def_stmt>UpdateExecutionInfo self request:execution_watcher_pb2.UpdateExecutionInfoRequest context:grpc.ServicerContext<arrow>execution_watcher_pb2.UpdateExecutionInfoResponse<block_start>"""Updates the `custom_properties` field of Execution object in MLMD."""<line_sep>logging.info('Received request to update execution info: updates %s, '<concat>'execution_id %s' request.updates request.execution_id)<if_stmt>request.execution_id<ne>self._execution.id<block_start>context.set_code(grpc.StatusCode.NOT_FOUND)<line_sep>context.set_details('Execution with given execution_id not tracked by server: '<concat>f'{request.execution_id}')<line_sep><return>execution_watcher_pb2.UpdateExecutionInfoResponse()<block_end><for_stmt>key,value request.updates.items()<block_start>self._execution.custom_properties[key].CopyFrom(value)<block_end># Only the execution is needed <with_stmt>self._mlmd_connection<as>m<block_start>m.store.put_executions((self._execution ))<block_end><return>execution_watcher_pb2.UpdateExecutionInfoResponse()<block_end><def_stmt>_create_server self<block_start>"""Creates a gRPC server and add `self` on to it."""<line_sep>result=grpc.server(futures.ThreadPoolExecutor())<line_sep>execution_watcher_pb2_grpc.add_ExecutionWatcherServiceServicer_to_server(self result)<if_stmt>self._creds<is><none><block_start>result.add_insecure_port(self.local_address)<block_end><else_stmt><block_start>result.add_secure_port(self.local_address self._creds)<block_end><return>result<block_end>@property<def_stmt>local_address self<arrow>str# Local network address to the server. <block_start><return>f'localhost:{self._port}'<block_end>@property<def_stmt>address self<arrow>str<block_start><return>self._address<or>self.local_address<block_end><def_stmt>start self<block_start>"""Starts the server."""<line_sep>self._server.start()<block_end><def_stmt>stop self<block_start>"""Stops the server."""<line_sep>self._server.stop(grace=<none>)<block_end><block_end>