content
stringlengths
0
1.55M
<import_from_stmt>model.model.pipeline.pipeline ProcessUnit<import_from_stmt>watchmen.monitor.model.pipeline_monitor UnitRunStatus<import_from_stmt>watchmen.pipeline.core.context.stage_context StageContext<class_stmt>UnitContext<block_start>stageContext:StageContext<line_sep>unit:ProcessUnit<line_sep>unitStatus:UnitRunStatus<def_stmt>__init__ self stageContext unit<block_start>self.stageContext=stageContext<line_sep>self.unit=unit<line_sep>self.unitStatus=UnitRunStatus()<block_end><block_end>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The context retrieval method for distribute coordinator."""<import_stmt>threading<line_sep>_worker_context=threading.local()<def_stmt>get_current_worker_context <block_start>"""Returns the current task context."""<try_stmt><block_start><return>_worker_context.current<block_end><except_stmt>AttributeError<block_start><return><none><block_end><block_end>
<import_stmt>os<import_stmt>shutil<import_from_stmt>pathlib Path<import_stmt>conda_content_trust.signing<as>cct_signing<class_stmt>RepoSigner<block_start><def_stmt>sign_repodata self repodata_fn pkg_mgr_key<block_start>final_fn=self.in_folder/"repodata_signed.json"<line_sep>print("copy" repodata_fn final_fn)<line_sep>shutil.copyfile(repodata_fn final_fn)<line_sep>cct_signing.sign_all_in_repodata(str(final_fn) pkg_mgr_key)<line_sep>print(f"Signed {final_fn}")<block_end><def_stmt>__init__ self in_folder pkg_mgr_key<block_start>self.in_folder=Path(in_folder).resolve()<line_sep>f=os.path.join(self.in_folder "repodata.json")<if_stmt>os.path.isfile(f)<block_start>self.sign_repodata(Path(f) pkg_mgr_key)<block_end><block_end><block_end>
<class_stmt>MachineBaseStorage<block_start>"""Base class for storage backends Extending classes should implement the five methods in this base class. Slack Machine takes care of a lot of details regarding the persistent storage of data. So storage backends **do not** have to deal with the following, because Slack Machine takes care of these: - Serialization/Deserialization of data - Namespacing of keys (so data stored by different plugins doesn't clash) """<def_stmt>__init__ self settings<block_start>self.settings=settings<block_end><def_stmt>get self key<block_start>"""Retrieve data by key :param key: key for which to retrieve data :return: the raw data for the provided key, as (byte)string. Should return ``None`` when the key is unknown or the data has expired. """<line_sep><raise>NotImplementedError<block_end><def_stmt>set self key value expires=<none><block_start>"""Store data by key :param key: the key under which to store the data :param value: data as (byte)string :param expires: optional expiration time in seconds, after which the data should not be returned any more. """<line_sep><raise>NotImplementedError<block_end><def_stmt>delete self key<block_start>"""Delete data by key :param key: key for which to delete the data """<line_sep><raise>NotImplementedError<block_end><def_stmt>has self key<block_start>"""Check if the key exists :param key: key to check :return: ``True/False`` wether the key exists """<line_sep><raise>NotImplementedError<block_end><def_stmt>size self<block_start>"""Calculate the total size of the storage :return: total size of storage in bytes (integer) """<line_sep><raise>NotImplementedError<block_end><block_end>
<import_stmt>struct<import_from_stmt>collections namedtuple<def_stmt>read_1 f<block_start><return>f.read(1)[0]<block_end><def_stmt>read_2 f<block_start><return>struct.unpack('<H' f.read(2))[0]<block_end><def_stmt>read_4 f<block_start><return>struct.unpack('<I' f.read(4))[0]<block_end><def_stmt>read_8 f<block_start><return>struct.unpack('<Q' f.read(8))[0]<block_end><def_stmt>read_buffer f<block_start>length=read_4(f)<line_sep><return>f.read(length)<block_end><def_stmt>read_str f<block_start>s=read_buffer(f)<assert_stmt>(s[-1]<eq>0)<line_sep><return>s[0:-1]<block_end>LogMessage=namedtuple('LogMessage' ['msg'])<line_sep>Open=namedtuple('Open' ['flags' 'mode' 'fd' 'path'])<line_sep>Mmap=namedtuple('Mmap' ['offset' 'prot' 'flags' 'fd' 'region_id' 'start' 'length'])<line_sep>Munmap=namedtuple('Munmap' ['offset' 'region_id' 'start' 'length' 'unk1' 'unk2'])<line_sep>StoreInfo=namedtuple('StoreInfo' ['msg'])<line_sep>Store=namedtuple('Store' ['region_id' 'offset' 'data'])<line_sep>ProcessMap=namedtuple('ProcessMap' ['msg'])<line_sep># etnaviv specific Commit=namedtuple('Commit' [])<def_stmt>parse_mmt_file f<block_start><while_stmt><true><block_start>ch=f.read(1)<if_stmt>ch<eq>b''<block_start><return><block_end><elif_stmt>ch<eq>b'='<or>ch<eq>b'-'# Comment <block_start>s=b''<while_stmt><true># read until \n <block_start>ch=f.read(1)<if_stmt>ch<eq>b'\n'<block_start><break><block_end><else_stmt><block_start>s<augadd>ch<block_end><block_end><yield>LogMessage(s)<block_end><elif_stmt>ch<eq>b'o'# open <block_start>flags=read_4(f)<line_sep>mode=read_4(f)<line_sep>fd=read_4(f)<line_sep>path=read_str(f)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>Open(flags mode fd path)<block_end><elif_stmt>ch<eq>b'M'# mmap <block_start>offset=read_8(f)<line_sep>prot=read_4(f)<line_sep>flags=read_4(f)<line_sep>fd=read_4(f)<line_sep>region_id=read_4(f)<line_sep>start=read_8(f)<line_sep>length=read_8(f)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>Mmap(offset prot flags fd region_id start length)<block_end><elif_stmt>ch<eq>b'u'# munmap <block_start>offset=read_8(f)<line_sep>region_id=read_4(f)<line_sep>start=read_8(f)<line_sep>length=read_8(f)<line_sep>unk1=read_8(f)<line_sep>unk2=read_8(f)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>Munmap(offset region_id start length unk1 unk2)<block_end><elif_stmt>ch<eq>b'x'# store_info <block_start>info=read_str(f)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>StoreInfo(info)<block_end><elif_stmt>ch<eq>b'w'# store <block_start>region_id=read_4(f)<line_sep>offset=read_4(f)<line_sep>length=read_1(f)<line_sep>data=f.read(length)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>Store(region_id offset data)<block_end><elif_stmt>ch<eq>b'c'# commit <block_start><assert_stmt>(read_1(f)<eq>10)<line_sep><yield>Commit()<block_end><elif_stmt>ch<eq>b'y'# process map <block_start><assert_stmt>(read_8(f)<eq>1)<line_sep>msg=read_buffer(f)<assert_stmt>(read_1(f)<eq>10)<line_sep><yield>ProcessMap(msg)<block_end><else_stmt><block_start>print('Unknown ' ch)<line_sep>exit(1)<block_end><block_end><block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>.. JiebaSegmenter<line_sep>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>path_dict_file=os.path.join(cur_dir 'dict.txt')<def_stmt>test_jieba_segmenter <block_start>segmenter=JiebaSegmenter(mode='accurate')<line_sep>text='今天是个大晴天!安迪回来以后,我们准备去动物园。'<line_sep>docs_chunks=segmenter.segment(np.stack([text text]))<assert_stmt>len(docs_chunks)<eq>2<for_stmt>chunks docs_chunks<block_start><assert_stmt>len(chunks)<eq>14<block_end><block_end><def_stmt>test_jieba_user_dir <block_start>segmenter=JiebaSegmenter()<line_sep>text='今天是个大晴天!安迪回来以后,我们准备去动物园。thisisnotachineseword'<line_sep>docs_chunks=segmenter.segment(np.stack([text text]))<assert_stmt>len(docs_chunks)<eq>2<for_stmt>chunks docs_chunks<block_start><assert_stmt>len(chunks)<eq>15<block_end>segmenter=JiebaSegmenter(user_dict_file=path_dict_file)<line_sep>text='今天是个大晴天!安迪回来以后,我们准备去动物园。thisisnotachineseword'<line_sep>docs_chunks=segmenter.segment(np.stack([text text]))<assert_stmt>len(docs_chunks)<eq>2<for_stmt>chunks docs_chunks<block_start><assert_stmt>len(chunks)<eq>20<block_end><block_end><def_stmt>test_jieba_user_dir_file_not_found <block_start><with_stmt>pytest.raises(FileNotFoundError)<block_start>JiebaSegmenter(user_dict_file='/this/path/does/not/exist.txt')<block_end><block_end>
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <import_stmt>math<def_stmt>f_gold n<block_start>l=math.sqrt(n)<line_sep>sq=l<times>l<if_stmt>(sq<eq>n)<block_start><return>l<times>4<block_end><else_stmt><block_start>row=n/l<line_sep>perimeter=2<times>(l+row)<if_stmt>(n%l<ne>0)<block_start>perimeter<augadd>2<block_end><return>perimeter<block_end><block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[(45 ) (80 ) (54 ) (48 ) (83 ) (68 ) (32 ) (20 ) (68 ) (66 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
# -*- coding: utf-8 -*- <import_stmt>sys<line_sep>#reload(sys) #sys.setdefaultencoding('utf8') #1.将问题ID和TOPIC对应关系保持到字典里:process question_topic_train_set.txt #from:question_id,topics(topic_id1,topic_id2,topic_id3,topic_id4,topic_id5) # to:(question_id,topic_id1) # (question_id,topic_id2) #read question_topic_train_set.txt <import_stmt>codecs<line_sep>#1.################################################################################################################ print("process question_topic_train_set.txt,started...")<line_sep>q_t='question_topic_train_set.txt'<line_sep>q_t_file=codecs.open(q_t 'r' 'utf8')<line_sep>lines=q_t_file.readlines()<line_sep>question_topic_dict={}<for_stmt>i,line enumerate(lines)<block_start><if_stmt>i%300000<eq>0<block_start>print(i)<block_end>#print(line) question_id,topic_list_string=line.split('\t')<line_sep>#print(question_id) #print(topic_list_string) topic_list=topic_list_string.replace("\n" "").split(",")<line_sep>question_topic_dict[question_id]=topic_list<line_sep>#for ii,topic in enumerate(topic_list): # print(ii,topic) #print("=====================================") #if i>10: # print(question_topic_dict) # break <block_end>print("process question_topic_train_set.txt,ended...")<line_sep>################################################################################################################### ################################################################################################################### #2.处理问题--得到问题ID:问题的表示,存成字典。proces question. for every question form a a list of string to reprensent it. <import_stmt>codecs<line_sep>print("process question started11...")<line_sep>q='question_train_set.txt'<line_sep>q_file=codecs.open(q 'r' 'utf8')<line_sep>q_lines=q_file.readlines()<line_sep>questionid_words_representation={}<line_sep>question_representation=[]<line_sep>length_desc=30<for_stmt>i,line enumerate(q_lines)#print("line:") #print(line) <block_start>element_lists=line.split('\t')#['c324,c39','w305...','c'] question_id=element_lists[0]<line_sep>#print("question_id:",element_lists[0]) #for i,q_e in enumerate(element_lists): # print("e:",q_e) #question_representation=[x for x in element_lists[2].split(",")] #+ #TODO this is only for title's word. no more. 
title_words=[x<for>x element_lists[2].strip().split(",")][-length_desc:]<line_sep>#print("title_words:",title_words) title_c=[x<for>x element_lists[1].strip().split(",")][-length_desc:]<line_sep>#print("title_c:", title_c) desc_words=[x<for>x element_lists[4].strip().split(",")][-length_desc:]<line_sep>#print("desc_words:", desc_words) desc_c=[x<for>x element_lists[3].strip().split(",")][-length_desc:]<line_sep>#print("desc_c:", desc_c) question_representation=title_words+title_c+desc_words+desc_c<line_sep>question_representation=" ".join(question_representation)<line_sep>#print("question_representation:",question_representation) #print("question_representation:",question_representation) questionid_words_representation[question_id]=question_representation<block_end>q_file.close()<line_sep>print("proces question ended2...")<line_sep>##################################################################################################################### ################################################################################################################### # 3.获得模型需要的训练数据。以{问题的表示:TOPIC_ID}的形式的列表 # save training data,testing data: question __label__topic_id <import_stmt>codecs<import_stmt>random<line_sep>print("saving traininig data.started1...")<line_sep>count=0<line_sep>train_zhihu='train-zhihu6-title-desc.txt'<line_sep>test_zhihu='test-zhihu6-title-desc.txt'<line_sep>valid_zhihu='valid-zhihu6-title-desc.txt'<line_sep>data_list=[]<line_sep>multi_label_flag=<true><def_stmt>split_list listt<block_start>random.shuffle(listt)<line_sep>list_len=len(listt)<line_sep>train_len=0.95<line_sep>valid_len=0.025<line_sep>train=listt[0:int(list_len<times>train_len)]<line_sep>valid=listt[int(list_len<times>train_len):int(list_len<times>(train_len+valid_len))]<line_sep>test=listt[int(list_len<times>(train_len+valid_len)):]<line_sep><return>train valid test<block_end><for_stmt>question_id,question_representation questionid_words_representation.items()# print("===================>") # print('question_id',question_id) # print("question_representation:",question_representation) # get label_id for this question_id by using:question_topic_dict <block_start>topic_list=question_topic_dict[question_id]<line_sep># print("topic_list:",topic_list) # if count>5: # ii=0 # ii/0 <if_stmt><not>multi_label_flag<block_start><for_stmt>topic_id topic_list<block_start>data_list.append((question_representation topic_id))#single-label <block_end><block_end><else_stmt><block_start>data_list.append((question_representation topic_list))#multi-label <block_end>count=count+1<block_end># random shuffle list random.shuffle(data_list)<def_stmt>write_data_to_file_system file_name data<block_start>file=codecs.open(file_name 'a' 'utf8')<for_stmt>d data# print(d) <block_start>question_representation,topic_id=d<line_sep>question_representation_=" ".join(question_representation)<line_sep>file.write(question_representation_+" __label__"+str(topic_id)+"\n")<block_end>file.close()<block_end><def_stmt>write_data_to_file_system_multilabel file_name data<block_start>file=codecs.open(file_name 'a' 'utf8')<for_stmt>d data<block_start>question_representation,topic_id_list=d<line_sep>topic_id_list_=" ".join(topic_id_list)<line_sep>file.write(question_representation+" __label__"+str(topic_id_list_)+"\n")<block_end>file.close()<block_end>train_data,valid_data,test_data=split_list(data_list)<if_stmt><not>multi_label_flag#single label <block_start>write_data_to_file_system(train_zhihu train_data)<line_sep>write_data_to_file_system(valid_zhihu 
valid_data)<line_sep>write_data_to_file_system(test_zhihu test_data)<block_end><else_stmt>#multi-label <block_start>write_data_to_file_system_multilabel(train_zhihu train_data)<line_sep>write_data_to_file_system_multilabel(valid_zhihu valid_data)<line_sep>write_data_to_file_system_multilabel(test_zhihu test_data)<block_end>print("saving traininig data.ended...")<line_sep>######################################################################################################################
<import_stmt>fastNLP<as>FN<import_stmt>argparse<import_stmt>os<import_stmt>random<import_stmt>numpy<import_stmt>torch<def_stmt>get_argparser <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--lr' type=float required=<true>)<line_sep>parser.add_argument('--w_decay' type=float required=<true>)<line_sep>parser.add_argument('--lr_decay' type=float required=<true>)<line_sep>parser.add_argument('--bsz' type=int required=<true>)<line_sep>parser.add_argument('--ep' type=int required=<true>)<line_sep>parser.add_argument('--drop' type=float required=<true>)<line_sep>parser.add_argument('--gpu' type=str required=<true>)<line_sep>parser.add_argument('--log' type=str default=<none>)<line_sep><return>parser<block_end><def_stmt>add_model_args parser<block_start>parser.add_argument('--nhead' type=int default=6)<line_sep>parser.add_argument('--hdim' type=int default=50)<line_sep>parser.add_argument('--hidden' type=int default=300)<line_sep><return>parser<block_end><def_stmt>set_gpu gpu_str<block_start>os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"<line_sep>os.environ['CUDA_VISIBLE_DEVICES']=gpu_str<block_end><def_stmt>set_rng_seeds seed=<none><block_start><if_stmt>seed<is><none><block_start>seed=numpy.random.randint(0 65536)<block_end>random.seed(seed)<line_sep>numpy.random.seed(seed)<line_sep>torch.random.manual_seed(seed)<line_sep>torch.cuda.manual_seed_all(seed)<line_sep># print('RNG_SEED {}'.format(seed)) <return>seed<block_end><class_stmt>TensorboardCallback(FN.Callback)<block_start>""" 接受以下一个或多个字符串作为参数: - "model" - "loss" - "metric" """<def_stmt>__init__ self *options<block_start>super(TensorboardCallback self).__init__()<line_sep>args={"model" "loss" "metric"}<for_stmt>opt options<block_start><if_stmt>opt<not><in>args<block_start><raise>ValueError("Unrecognized argument {}. 
Expect one of {}".format(opt args))<block_end><block_end>self.options=options<line_sep>self._summary_writer=<none><line_sep>self.graph_added=<false><block_end><def_stmt>on_train_begin self<block_start>save_dir=self.trainer.save_path<if_stmt>save_dir<is><none><block_start>path=os.path.join("./" 'tensorboard_logs_{}'.format(self.trainer.start_time))<block_end><else_stmt><block_start>path=os.path.join(save_dir 'tensorboard_logs_{}'.format(self.trainer.start_time))<block_end>self._summary_writer=SummaryWriter(path)<block_end><def_stmt>on_batch_begin self batch_x batch_y indices<block_start><if_stmt>"model"<in>self.options<and>self.graph_added<is><false># tesorboardX 这里有大bug,暂时没法画模型图 # from fastNLP.core.utils import _build_args # inputs = _build_args(self.trainer.model, **batch_x) # args = tuple([value for value in inputs.values()]) # args = args[0] if len(args) == 1 else args # self._summary_writer.add_graph(self.trainer.model, torch.zeros(32, 2)) <block_start>self.graph_added=<true><block_end><block_end><def_stmt>on_backward_begin self loss<block_start><if_stmt>"loss"<in>self.options<block_start>self._summary_writer.add_scalar("loss" loss.item() global_step=self.trainer.step)<block_end><if_stmt>"model"<in>self.options<block_start><for_stmt>name,param self.trainer.model.named_parameters()<block_start><if_stmt>param.requires_grad<block_start>self._summary_writer.add_scalar(name+"_mean" param.mean() global_step=self.trainer.step)<line_sep># self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.trainer.step) self._summary_writer.add_scalar(name+"_grad_mean" param.grad.mean() global_step=self.trainer.step)<block_end><block_end><block_end><block_end><def_stmt>on_valid_end self eval_result metric_key<block_start><if_stmt>"metric"<in>self.options<block_start><for_stmt>name,metric eval_result.items()<block_start><for_stmt>metric_key,metric_val metric.items()<block_start>self._summary_writer.add_scalar("valid_{}_{}".format(name metric_key) metric_val global_step=self.trainer.step)<block_end><block_end><block_end><block_end><def_stmt>on_train_end self<block_start>self._summary_writer.close()<del_stmt>self._summary_writer<block_end><def_stmt>on_exception self exception<block_start><if_stmt>hasattr(self "_summary_writer")<block_start>self._summary_writer.close()<del_stmt>self._summary_writer<block_end><block_end><block_end>
<import_from_stmt>django template<import_from_stmt>django.template.defaultfilters stringfilter<import_from_stmt>django.utils.html conditional_escape<import_from_stmt>django.utils.safestring mark_safe<import_from_stmt>nntpchan.frontend.models Newsgroup Post<import_stmt>re<import_from_stmt>urllib.parse urlparse<import_from_stmt>html unescape<line_sep>register=template.Library()<line_sep>re_postcite=re.compile('>> ?([0-9a-fA-F]+)')<line_sep>re_boardlink=re.compile('>>> ?/([a-zA-Z0-9\.]+[a-zA-Z0-9])/')<line_sep>re_redtext=re.compile('== ?(.+) ?==')<line_sep>re_psytext=re.compile('@@ ?(.+) ?@@')<def_stmt>greentext text esc<block_start>return_text=''<line_sep>f=<false><for_stmt>line text.split('\n')<block_start>line=line.strip()<if_stmt>len(line)<l>2<block_start><continue><block_end><if_stmt>line[0]<eq>'>'<and>line[1]<ne>'>'<block_start>return_text<augadd>'<span class="greentext">%s </span>'%esc(line)+'\n'<line_sep>f=<true><block_end><else_stmt><block_start>return_text<augadd>esc(line)+'\n'<block_end><block_end><return>return_text f<block_end><def_stmt>blocktext text esc delim='' css='' tag='span'<block_start>parts=text.split(delim)<line_sep>f=<false><if_stmt>len(parts)<g>1<block_start>parts.reverse()<line_sep>return_text=''<while_stmt>len(parts)<g>0<block_start>return_text<augadd>esc(parts.pop())<if_stmt>len(parts)<g>0<block_start>f=<true><line_sep>return_text<augadd>'<{} class="{}">%s</{}>'.format(tag css tag)%esc(parts.pop())<block_end><block_end><return>return_text f<block_end><else_stmt><block_start><return>text f<block_end><block_end>redtext=<lambda>t e:blocktext(t e '==' 'redtext')<line_sep>psytext=<lambda>t e:blocktext(t e '@@' 'psy')<line_sep>codeblock=<lambda>t e:blocktext(t e '[code]' 'code' 'pre')<def_stmt>postcite text esc<block_start>return_text=''<line_sep>filtered=<false><for_stmt>line text.split('\n')<block_start><for_stmt>word line.split(' ')<block_start>match=re_postcite.match(unescape(word))<if_stmt>match<block_start>posthash=match.groups()[0]<line_sep>posts=Post.objects.filter(posthash__startswith=posthash)<if_stmt>len(posts)<g>0<block_start>filtered=<true><line_sep>return_text<augadd>'<a href="%s" class="postcite">&gt;&gt%s</a> '%(posts[0].get_absolute_url() posthash)<block_end><else_stmt><block_start>return_text<augadd>'<span class="greentext">&gt;&gt;%s</span> '%match.string<block_end><block_end><elif_stmt>filtered<block_start>return_text<augadd>word+' '<block_end><else_stmt><block_start>return_text<augadd>esc(word)+' '<block_end><block_end>return_text<augadd>'\n'<block_end><return>return_text filtered<block_end><def_stmt>boardlink text esc<block_start>return_text=''<line_sep>filtered=<false><for_stmt>line text.split('\n')<block_start><for_stmt>word line.split(' ')<block_start>match=re_boardlink.match(unescape(word))<if_stmt>match<block_start>name=match.groups()[0]<line_sep>group=Newsgroup.objects.filter(name=name)<if_stmt>len(group)<g>0<block_start>filtered=<true><line_sep>return_text<augadd>'<a href="%s" class="boardlink">%s</a> '%(group[0].get_absolute_url() esc(match.string))<block_end><else_stmt><block_start>return_text<augadd>'<span class="greentext">%s</span> '%esc(match.string)<block_end><block_end><else_stmt><block_start>return_text<augadd>esc(word)+' '<block_end><block_end>return_text<augadd>'\n'<block_end><return>return_text filtered<block_end><def_stmt>urlify text esc<block_start>return_text=''<line_sep>filtered=<false><for_stmt>line text.split('\n')<block_start><for_stmt>word line.split(' 
')<block_start>u=urlparse(word)<if_stmt>u.scheme<ne>''<and>u.netloc<ne>''<block_start>return_text<augadd>'<a href="%s">%s</a> '%(u.geturl() esc(word))<line_sep>filtered=<true><block_end><else_stmt><block_start>return_text<augadd>esc(word)+' '<block_end><block_end>return_text<augadd>'\n'<block_end><return>return_text filtered<block_end>line_funcs=[greentext redtext urlify psytext codeblock postcite boardlink ]<line_sep>@register.filter(needs_autoescape=<true> name='memepost')<def_stmt>memepost text autoescape=<true><block_start>text,_=line_funcs[0](text conditional_escape)<for_stmt>f line_funcs[1:]<block_start>text,_=f(text <lambda>x:x)<block_end><return>mark_safe(text)<block_end>@register.filter(name='truncate')@stringfilter<def_stmt>truncate text truncate=500<block_start><if_stmt>len(text)<g>truncate<block_start><return>text[:truncate]+'...'<block_end><return>text<block_end>
<import_stmt>torch<import_stmt>sys<import_stmt>os<line_sep>sys.path.append(os.getcwd())<line_sep>sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))<import_from_stmt>unimodals.MVAE TSEncoder TSDecoder# noqa <import_from_stmt>utils.helper_modules Sequential2# noqa <import_from_stmt>objective_functions.objectives_for_supervised_learning MFM_objective# noqa <import_from_stmt>torch nn# noqa <import_from_stmt>unimodals.common_models MLP# noqa <import_from_stmt>training_structures.Supervised_Learning train test# noqa <import_from_stmt>datasets.affect.get_data get_dataloader# noqa <import_from_stmt>fusions.common_fusions Concat# noqa classes=2<line_sep>n_latent=256<line_sep>dim_0=35<line_sep>dim_1=74<line_sep>dim_2=300<line_sep>timestep=50<line_sep># mosi_data.pkl, mosei_senti_data.pkl # mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl # raw_path: mosi.hdf5, mosei.hdf5, sarcasm_raw_text.pkl, humor_raw_text.pkl traindata,validdata,test_robust=get_dataloader('/home/paul/MultiBench/mosi_raw.pkl' task='classification' robust_test=<false> max_pad=<true> max_seq_len=timestep)<line_sep>encoders=[TSEncoder(dim_0 30 n_latent timestep returnvar=<false>).cuda() TSEncoder(dim_1 30 n_latent timestep returnvar=<false>).cuda() TSEncoder(dim_2 30 n_latent timestep returnvar=<false>).cuda()]<line_sep>decoders=[TSDecoder(dim_0 30 n_latent timestep).cuda() TSDecoder(dim_1 30 n_latent timestep).cuda() TSDecoder(dim_2 30 n_latent timestep).cuda()]<line_sep>fuse=Sequential2(Concat() MLP(3<times>n_latent n_latent n_latent<floordiv>2)).cuda()<line_sep>intermediates=[MLP(n_latent n_latent<floordiv>2 n_latent<floordiv>2).cuda() MLP(n_latent n_latent<floordiv>2 n_latent<floordiv>2).cuda() MLP(n_latent n_latent<floordiv>2 n_latent<floordiv>2).cuda()]<line_sep>head=MLP(n_latent<floordiv>2 20 classes).cuda()<line_sep>argsdict={'decoders':decoders 'intermediates':intermediates}<line_sep>additional_modules=decoders+intermediates<line_sep>objective=MFM_objective(2.0 [torch.nn.MSELoss() torch.nn.MSELoss() torch.nn.MSELoss()] [1.0 1.0 1.0])<line_sep>train(encoders fuse head traindata validdata 200 additional_modules objective=objective objective_args_dict=argsdict save='mosi_mfm_best.pt')<line_sep>print("Testing:")<line_sep>model=torch.load('mosi_mfm_best.pt').cuda()<line_sep>test(model=model test_dataloaders_all=test_robust dataset='mosi' is_packed=<false> no_robust=<true>)<line_sep>
# coding: utf8 <import_stmt>pytest<import_stmt>os<import_from_stmt>os.path join exists<line_sep>@pytest.fixture(params=['classify_image' 'classify_slice' 'classify_patch'])<def_stmt>classify_commands request<block_start>out_filename='fold-0/cnn_classification/best_balanced_accuracy/DB-TEST_image_level_prediction.tsv'<if_stmt>request.param<eq>'classify_image'<block_start>data_folder='data/models/image_model_baseline_AD_CN_single_fold/'<line_sep>test_input=['classify' 'data/classify/OASIS_test' 'data/classify/OASIS_test/data.tsv' data_folder '--prefix_output' 'DB-TEST' '-cpu']<line_sep>output_files=join(data_folder out_filename)<block_end><elif_stmt>request.param<eq>'classify_slice'<block_start>data_folder='data/models/slice_model_baseline_AD_CN_single_fold/'<line_sep>test_input=['classify' 'data/classify/OASIS_test' 'data/classify/OASIS_test/data.tsv' data_folder '--prefix_output' 'DB-TEST' '-cpu']<line_sep>output_files=join(data_folder out_filename)<block_end><elif_stmt>request.param<eq>'classify_patch'<block_start>data_folder='data/models/patch_model_baseline_AD_CN_multicnn_single_fold/'<line_sep>test_input=['classify' 'data/classify/OASIS_test' 'data/classify/OASIS_test/data.tsv' data_folder '--prefix_output' 'DB-TEST' '-cpu']<line_sep>output_files=join(data_folder out_filename)<block_end><else_stmt><block_start><raise>NotImplementedError("Test %s is not implemented."%request.param)<block_end><return>test_input output_files<block_end><def_stmt>test_classify classify_commands<block_start>test_input=classify_commands[0]<line_sep>output_files=classify_commands[1]<line_sep>flag_error=<not>os.system("clinicadl "+" ".join(test_input))<assert_stmt>flag_error<assert_stmt>exists(output_files)<block_end>
_base_=['../../universenet/models/universenet50_2008.py' '../../_base_/datasets/coco_detection_mstrain_480_960.py' '../../_base_/schedules/schedule_1x.py' '../../_base_/default_runtime.py']<line_sep>model=dict(neck=dict(_delete_=<true> type='FPN' in_channels=[256 512 1024 2048] out_channels=256 start_level=1 add_extra_convs='on_output' num_outs=5) bbox_head=dict(type='GFLHead' stacked_convs=4))<line_sep>data=dict(samples_per_gpu=4)<line_sep>optimizer=dict(type='SGD' lr=0.01 momentum=0.9 weight_decay=0.0001)<line_sep>optimizer_config=dict(_delete_=<true> grad_clip=dict(max_norm=35 norm_type=2))<line_sep>lr_config=dict(warmup_iters=1000)<line_sep>fp16=dict(loss_scale=512.)<line_sep>
<import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))<import_from_stmt>cakechat.utils.text_processing get_processed_corpus_path load_processed_dialogs_from_json FileTextLinesIterator get_dialog_lines_and_conditions ProcessedLinesIterator get_flatten_dialogs<import_from_stmt>cakechat.utils.w2v.model _get_w2v_model<as>get_w2v_model<import_from_stmt>cakechat.config TRAIN_CORPUS_NAME VOCABULARY_MAX_SIZE WORD_EMBEDDING_DIMENSION W2V_WINDOW_SIZE USE_SKIP_GRAM<if_stmt>__name__<eq>'__main__'<block_start>processed_corpus_path=get_processed_corpus_path(TRAIN_CORPUS_NAME)<line_sep>dialogs=load_processed_dialogs_from_json(FileTextLinesIterator(processed_corpus_path) text_field_name='text' condition_field_name='condition')<line_sep>training_dialogs_lines_for_w2v,_=get_dialog_lines_and_conditions(get_flatten_dialogs(dialogs) text_field_name='text' condition_field_name='condition')<line_sep>tokenized_training_lines=ProcessedLinesIterator(training_dialogs_lines_for_w2v processing_callbacks=[str.split])<line_sep>get_w2v_model(tokenized_lines=tokenized_training_lines corpus_name=TRAIN_CORPUS_NAME voc_size=VOCABULARY_MAX_SIZE vec_size=WORD_EMBEDDING_DIMENSION window_size=W2V_WINDOW_SIZE skip_gram=USE_SKIP_GRAM)<block_end>
<import_from_stmt>spacy.lang.en English<line_sep>nlp=English()<line_sep>doc=nlp("<NAME> is a PERSON")<line_sep># Look up the hash for the string label "PERSON" person_hash=____.____.____[____]<line_sep>print(person_hash)<line_sep># Look up the person_hash to get the string person_string=____.____.____[____]<line_sep>print(person_string)<line_sep>
# coding: utf-8 __author__='deff'<import_stmt>re<class_stmt>Tools<block_start>@staticmethod<def_stmt>xml_assent word<block_start>symbola=re.compile('>')<line_sep>word=symbola.sub('&lt;' word)<line_sep>symbolb=re.compile('<')<line_sep>word=symbolb.sub('&gt;' word)<line_sep>symbolc=re.compile('&')<line_sep>word=symbolc.sub('&amp;' word)<line_sep>symbold=re.compile('\'')<line_sep>word=symbold.sub('&apos;' word)<line_sep>symbole=re.compile('\"')<line_sep>word=symbole.sub('&quot;' word)<line_sep><return>word<block_end><block_end>
# The MIT License (MIT) # # Copyright (c) 2015 by Teradata # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_from_stmt>teradata pulljson<import_stmt>unittest<import_stmt>sys<if_stmt>sys.version_info[0]<eq>2<block_start><import_from_stmt>StringIO StringIO# @UnresolvedImport #@UnusedImport <block_end><else_stmt><block_start><import_from_stmt>io StringIO<block_end># @UnresolvedImport @UnusedImport @Reimport <class_stmt>TestJSONPullParser(unittest.TestCase)<block_start><def_stmt>testNextEvent self<block_start>stream=StringIO("""{"key1":"value", "key2":100, "key3":null, "key4": true, "key5":false, "key6":-201.50E1, "key7":{"key8":"value2", "key9":null}, "key10":["value3", 10101010101010101010101, null, {} ] }""")<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep># Start of object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep># Key1 - "value" event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key1")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value "value")<line_sep>self.assertEqual(event.valueType pulljson.STRING)<line_sep># Key2 - 100 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key2")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value 100)<line_sep>self.assertEqual(event.valueType pulljson.NUMBER)<line_sep># Key3 - null event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key3")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertIsNone(event.value)<line_sep>self.assertEqual(event.valueType pulljson.NULL)<line_sep># Key4 - true event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key4")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertTrue(event.value)<line_sep>self.assertEqual(event.valueType pulljson.BOOLEAN)<line_sep># Key5 - false event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key5")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type 
pulljson.FIELD_VALUE)<line_sep>self.assertFalse(event.value)<line_sep>self.assertEqual(event.valueType pulljson.BOOLEAN)<line_sep># Key6 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key6")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value -2015)<line_sep>self.assertEqual(event.valueType pulljson.NUMBER)<line_sep># Key7 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key7")<line_sep># Start of key7 object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep># Key8 - value2 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key8")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value "value2")<line_sep>self.assertEqual(event.valueType pulljson.STRING)<line_sep># Key9 - null event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key9")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertIsNone(event.value)<line_sep># End of key7 object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_OBJECT)<line_sep># Key10 - array[0] - value3 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key10")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_ARRAY)<line_sep># Key10 - array[0] - value3 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.ARRAY_VALUE)<line_sep>self.assertEqual(event.value "value3")<line_sep>self.assertEqual(event.valueType pulljson.STRING)<line_sep>self.assertEqual(event.arrayIndex 0)<line_sep># Key10 - array[1] - 10101010101010101010101 event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.ARRAY_VALUE)<line_sep>self.assertEqual(event.value 10101010101010101010101)<line_sep>self.assertEqual(event.valueType pulljson.NUMBER)<line_sep>self.assertEqual(event.arrayIndex 1)<line_sep># Key10 - array[2] - null event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.ARRAY_VALUE)<line_sep>self.assertIsNone(event.value)<line_sep>self.assertEqual(event.valueType pulljson.NULL)<line_sep>self.assertEqual(event.arrayIndex 2)<line_sep># Key10 - array[3] - object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep>self.assertEqual(event.arrayIndex 3)<line_sep># Key10 - array[3] - object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_OBJECT)<line_sep>self.assertEqual(event.arrayIndex 3)<line_sep># End of key 10 array. 
event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_ARRAY)<line_sep># End of object event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertIsNone(event)<block_end><def_stmt>testDocumentIncomplete self<block_start>stream=StringIO('{"key":"value"')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key")<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_INCOMPLETE_ERROR cm.exception.msg)<block_end><def_stmt>testEmptyName self<block_start>stream=StringIO('{:"value"}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testExtraWhiteSpace self<block_start>stream=StringIO('{\n\t "key"\n\t\t: "\t value\n"} ')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value "key")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value "\t value\n")<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertIsNone(event)<block_end><def_stmt>testEscapeCharacter self<block_start>stream=StringIO('{"\\"ke\\"y\\\\" : "va\\"l\\"ue"} ')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<line_sep>self.assertEqual(event.value '"ke"y\\')<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_VALUE)<line_sep>self.assertEqual(event.value 'va"l"ue')<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertIsNone(event)<block_end><def_stmt>testEmptyArray self<block_start>stream=StringIO('[]')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_ARRAY)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.END_ARRAY)<line_sep>event=reader.nextEvent()<line_sep>self.assertIsNone(event)<block_end><def_stmt>testMissingColon self<block_start>stream=StringIO('{"key" "value"}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testCommaInsteadOfColon 
self<block_start>stream=StringIO('{"key","value"}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testColonInsteadOfComma self<block_start>stream=StringIO('["key":"value"]')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_ARRAY)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testNumberLiteral self<block_start>stream=StringIO('1')<line_sep>reader=pulljson.JSONPullParser(stream)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testStringLiteral self<block_start>stream=StringIO('"This is a test"')<line_sep>reader=pulljson.JSONPullParser(stream)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testObjectMissingValue self<block_start>stream=StringIO('{"key":}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.FIELD_NAME)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testArrayMissingValue self<block_start>stream=StringIO('[1, ,2}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_ARRAY)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.ARRAY_VALUE)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testArrayInObject self<block_start>stream=StringIO('{[]}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>event=reader.nextEvent()<line_sep>self.assertEqual(event.type pulljson.START_OBJECT)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>event=reader.nextEvent()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testReadObject self<block_start>stream=StringIO('{"key1":[0,1,2,3,4,{"value":"5"}], "key2":\ {"key1":[0,1,2,3,4,{"value":"5"}]}}')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>obj=reader.readObject()<line_sep>self.assertEqual(len(obj) 2)<for_stmt>i range(0 2)<block_start>self.assertEqual(len(obj["key1"]) 6)<for_stmt>i range(0 5)<block_start>self.assertEqual(obj["key1"][i] i)<block_end>self.assertEqual(obj["key1"][5]["value"] "5")<if_stmt>i<eq>1<block_start>obj=obj["key2"]<line_sep>self.assertEqual(len(obj) 1)<block_end><block_end><block_end><def_stmt>testReadArray 
self<block_start>stream=StringIO('[0,1,2,3,4,[0,1,2,3,4,[0,1,2,3,4]],[0,1,2,3,4]]')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>arr=reader.readArray()<line_sep>self.assertEqual(len(arr) 7)<for_stmt>i range(0 5)<block_start>self.assertEqual(arr[i] i)<block_end><for_stmt>i range(0 5)<block_start>self.assertEqual(arr[5][i] i)<block_end><for_stmt>i range(0 5)<block_start>self.assertEqual(arr[5][5][i] i)<block_end><for_stmt>i range(0 5)<block_start>self.assertEqual(arr[6][i] i)<block_end><block_end><def_stmt>testArraySyntaxError self<block_start>stream=StringIO('[[0,1][0,1]]')<line_sep>reader=pulljson.JSONPullParser(stream)<with_stmt>self.assertRaises(pulljson.JSONParseError)<as>cm<block_start>reader.readArray()<block_end>self.assertEqual(cm.exception.code pulljson.JSON_SYNTAX_ERROR cm.exception.msg)<block_end><def_stmt>testIterateArray self<block_start>stream=StringIO('[{"key0}":["}\\"","\\"}","}"]}, {"key1}":["}","\\"}","}"]}, '<concat>'{"key2}":["}","}","\\"}"]}]')<line_sep>reader=pulljson.JSONPullParser(stream)<line_sep>i=0<for_stmt>x reader.expectArray()<block_start>self.assertEqual(len(x["key"+str(i)+"}"]) 3)<line_sep>i<augadd>1<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# -*- coding: utf-8 -*- # Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pkg_resources<import_from_stmt>blockdiag.utils.logging warning<line_sep>drawers={}<def_stmt>init_imagedrawers debug=<false><block_start><for_stmt>drawer pkg_resources.iter_entry_points('blockdiag_imagedrawers')<block_start><try_stmt><block_start>module=drawer.load()<if_stmt>hasattr(module 'setup')<block_start>module.setup(module)<block_end><block_end><except_stmt>Exception<as>exc<block_start><if_stmt>debug<block_start>warning('Failed to load %s: %r'%(drawer.module_name exc))<block_end><block_end><block_end><block_end><def_stmt>install_imagedrawer ext drawer<block_start>drawers[ext]=drawer<block_end><def_stmt>create _format filename **kwargs<block_start><if_stmt>len(drawers)<eq>0<block_start>init_imagedrawers(debug=kwargs.get('debug'))<block_end>_format=_format.lower()<if_stmt>_format<in>drawers<block_start>drawer=drawers[_format](filename **kwargs)<block_end><else_stmt><block_start>msg='failed to load %s image driver'%_format<line_sep><raise>RuntimeError(msg)<block_end><if_stmt>'linejump'<in>kwargs.get('filters' [])<block_start><import_from_stmt>blockdiag.imagedraw.filters.linejump LineJumpDrawFilter<line_sep>jumpsize=kwargs.get('jumpsize' 0)<line_sep>drawer=LineJumpDrawFilter(drawer jumpsize)<block_end><return>drawer<block_end>
"""Abstract data and type/shape inference."""<import_from_stmt>.aliasing *<import_from_stmt>.amerge *<import_from_stmt>.data *<import_from_stmt>.infer *<import_from_stmt>.loop *<import_from_stmt>.macro *<import_from_stmt>.ref *<import_from_stmt>.to_abstract *<import_from_stmt>.utils *<line_sep>
""" Name : c9_52_impacto_of_correlation_on_efficient_frontier.py Book : Python for Finance (2nd ed.) Publisher: Packt Publishing Ltd. Author : <NAME> Date : 6/6/2017 email : <EMAIL> <EMAIL> """<import_from_stmt>matplotlib.finance quotes_historical_yahoo_ochl<as>getData<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np pandas<as>pd scipy<as>sp<import_from_stmt>numpy.linalg inv pinv<line_sep>begYear,endYear=2012 2016<line_sep>stocks=['IBM' 'WMT']<def_stmt>ret_monthly ticker# function 1 <block_start>x=getData(ticker (begYear 1 1) (endYear 12 31) asobject=<true> adjusted=<true>)<line_sep>logret=np.log(x.aclose[1:]/x.aclose[:-1])<line_sep>date=[]<line_sep>d0=x.date<for_stmt>i range(0 np.size(logret))<block_start>date.append(''.join([d0[i].strftime("%Y") d0[i].strftime("%m")]))<block_end>y=pd.DataFrame(logret date columns=[ticker])<line_sep><return>y.groupby(y.index).sum()<block_end><def_stmt>std_f ticker<block_start>x=ret_monthly(ticker)<line_sep><return>sp.std(x)<block_end><def_stmt>objFunction W R target_ret<block_start>stock_mean=np.mean(R axis=0)<line_sep>port_mean=np.dot(W stock_mean)# portfolio mean #cov=np.cov(R.T) # var-cov matrix cov=cov0<line_sep>port_var=np.dot(np.dot(W cov) W.T)# portfolio variance penalty=2000<times>abs(port_mean-target_ret)# penalty 4 deviation <return>np.sqrt(port_var)+penalty# objective function <block_end>R0=ret_monthly(stocks[0])# starting from 1st stock n_stock=len(stocks)# number of stocks std1=std_f(stocks[0])<line_sep>std2=std_f(stocks[1])<for_stmt>jj sp.arange(1)<block_start>k=0.1<times>std1<times>std2<line_sep>#cov0=sp.array([[0.00266285,0.00037303],[0.00037303,0.0021296]]) #cov0=sp.array([[std1**2,k],[k,std2**2]]) cov0=sp.array([[std1<power>2 0.00037303] [0.00037303 std2<power>2]])<for_stmt>i xrange(1 n_stock)# merge with other stocks <block_start>x=ret_monthly(stocks[i])<line_sep>R0=pd.merge(R0 x left_index=<true> right_index=<true>)<line_sep>R=np.array(R0)<block_end>out_mean,out_std,out_weight=[] [] []<line_sep>stockMean=np.mean(R axis=0)<for_stmt>r np.linspace(np.min(stockMean) np.max(stockMean) num=100)<block_start>W=np.ones([n_stock])/n_stock# starting from equal weights b_=[(0 1)<for>i range(n_stock)]<line_sep># bounds, here no short c_=({'type':'eq' 'fun':<lambda>W:sum(W)-1.})#constraint result=sp.optimize.minimize(objFunction W (R r) method='SLSQP' constraints=c_ bounds=b_)<if_stmt><not>result.success# handle error raise <block_start>BaseException(result.message)<block_end>out_mean.append(round(r 4))# 4 decimal places std_=round(np.std(np.sum(R<times>result.x axis=1)) 6)<line_sep>out_std.append(std_)<line_sep>out_weight.append(result.x)<block_end>plt.title('Efficient Frontier')<line_sep>plt.xlabel('Standard Deviation of the porfolio (Risk))')<line_sep>plt.ylabel('Return of the portfolio')<line_sep>plt.figtext(0.5 0.75 str(n_stock)+' stock are used: ')<line_sep>plt.figtext(0.5 0.7 ' '+str(stocks))<line_sep>plt.figtext(0.5 0.65 'Time period: '+str(begYear)+' ------ '+str(endYear))<line_sep>plt.plot(out_std out_mean '--')<block_end>plt.show()<line_sep>
<import_from_stmt>unittest TestCase<import_from_stmt>authlib.oauth2.rfc7591 ClientMetadataClaims<import_from_stmt>authlib.jose.errors InvalidClaimError<class_stmt>ClientMetadataClaimsTest(TestCase)<block_start><def_stmt>test_validate_redirect_uris self<block_start>claims=ClientMetadataClaims({'redirect_uris':['foo']} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><def_stmt>test_validate_client_uri self<block_start>claims=ClientMetadataClaims({'client_uri':'foo'} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><def_stmt>test_validate_logo_uri self<block_start>claims=ClientMetadataClaims({'logo_uri':'foo'} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><def_stmt>test_validate_tos_uri self<block_start>claims=ClientMetadataClaims({'tos_uri':'foo'} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><def_stmt>test_validate_policy_uri self<block_start>claims=ClientMetadataClaims({'policy_uri':'foo'} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><def_stmt>test_validate_jwks_uri self<block_start>claims=ClientMetadataClaims({'jwks_uri':'foo'} {})<line_sep>self.assertRaises(InvalidClaimError claims.validate)<block_end><block_end>
<import_from_stmt>typing Dict List<import_stmt>torch<import_from_stmt>functools partial<import_from_stmt>backprop.models PathModel<import_from_stmt>torch.optim.adamw AdamW<import_from_stmt>sentence_transformers SentenceTransformer<class_stmt>STModel(PathModel)<block_start>""" Class for models which are initialised from Sentence Transformers Attributes: model_path: path to ST model name: string identifier for the model. Lowercase letters and numbers. No spaces/special characters except dashes. max_length: Max supported token length for vectorisation description: String description of the model. tasks: List of supported task strings details: Dictionary of additional details about the model init_model: Class used to initialise model device: Device for model. Defaults to "cuda" if available. """<def_stmt>__init__ self model_path init_model=SentenceTransformer name:str=<none> description:str=<none> tasks:List[str]=<none> details:Dict=<none> max_length=512 device=<none><block_start>init_model=partial(init_model device=device)<line_sep>tasks=["text-vectorisation"]<line_sep>PathModel.__init__(self model_path name=name description=description details=details tasks=tasks init_model=init_model device=device)<line_sep>self.max_length=max_length<block_end>@staticmethod<def_stmt>list_models <block_start><import_from_stmt>.models_list models<line_sep><return>models<block_end>@torch.no_grad()<def_stmt>__call__ self task_input task="text-vectorisation" return_tensor=<false><block_start>""" Uses the model for the text-vectorisation task Args: task_input: input dictionary according to the ``text-vectorisation`` task specification task: text-vectorisation """<line_sep>is_list=<false><if_stmt>task<eq>"text-vectorisation"<block_start>input_ids=<none><line_sep>attention_mask=<none><line_sep>text=task_input.get("text")<if_stmt>type(text)<eq>list<block_start>is_list=<true><block_end><else_stmt><block_start>text=[text]<block_end>features=self.model.tokenizer(text truncation=<true> padding=<true> return_tensors="pt").to(self._model_device)<line_sep>text_vecs=self.vectorise(features)<if_stmt><not>return_tensor<block_start>text_vecs=text_vecs.tolist()<block_end>output=text_vecs<if_stmt><not>is_list<block_start>output=output[0]<block_end><return>output<block_end><else_stmt><block_start><raise>ValueError(f"Unsupported task '{task}'")<block_end><block_end><def_stmt>training_step self params task="text-vectorisation"<block_start>text=params["text"]<line_sep><return>self.vectorise(text)<block_end><def_stmt>process_batch self params task="text-vectorisation"<block_start><if_stmt>task<eq>"text-vectorisation"<block_start>max_length=params["max_length"]<or>self.max_length<if_stmt>max_length<g>self.max_length<block_start><raise>ValueError(f"This model has a max_length limit of {self.max_length}")<block_end>text=params["text"]<line_sep><return>self.model.tokenizer(text truncation=<true> padding="max_length" return_tensors="pt")<block_end><block_end><def_stmt>vectorise self features<block_start><return>self.model.forward(features)["sentence_embedding"]<block_end><def_stmt>configure_optimizers self<block_start><return>AdamW(params=self.model.parameters() lr=2e-5 eps=1e-6 correct_bias=<false>)<block_end><block_end>
# table_border_syntax.py - Base classes for table with borders: Pandoc, # Emacs Org mode, Simple, reStrucutredText # Copyright (C) 2012 Free Software Foundation, Inc. # Author: <NAME> # Package: SublimeTableEditor # Homepage: https://github.com/vkocubinsky/SublimeTableEditor # This file is part of SublimeTableEditor. # SublimeTableEditor is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # SublimeTableEditor is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>. <import_from_future_stmt> print_function<import_from_future_stmt> division<import_stmt>re<try_stmt><block_start><import_from_stmt>. table_base<as>tbase<block_end><except_stmt>ValueError<block_start><import_stmt>table_base<as>tbase<block_end><class_stmt>SeparatorRow(tbase.Row)<block_start><def_stmt>__init__ self table separator='-' size=0<block_start>tbase.Row.__init__(self table)<line_sep>self.separator=separator<for_stmt>i range(size)<block_start>self.columns.append(SeparatorColumn(self self.separator))<block_end><block_end><def_stmt>new_empty_column self<block_start><return>SeparatorColumn(self self.separator)<block_end><def_stmt>create_column self text<block_start><return>SeparatorColumn(self self.separator)<block_end><def_stmt>is_header_separator self<block_start><return><true><block_end><def_stmt>is_separator self<block_start><return><true><block_end><def_stmt>render self<block_start>r=self.syntax.hline_out_border<for_stmt>ind,column enumerate(self.columns)<block_start><if_stmt>ind<ne>0<block_start>r<augadd>self.syntax.hline_in_border<block_end>r<augadd>column.render()<block_end>r<augadd>self.syntax.hline_out_border<line_sep><return>r<block_end><block_end><class_stmt>SeparatorColumn(tbase.Column)<block_start><def_stmt>__init__ self row separator<block_start>tbase.Column.__init__(self row)<line_sep>self.separator=separator<block_end><def_stmt>min_len self# '---' or '===' <block_start><return>3<block_end><def_stmt>render self<block_start><return>self.separator<times>self.col_len<block_end><block_end><class_stmt>BorderTableDriver(tbase.TableDriver)<block_start><def_stmt>editor_insert_single_hline self table table_pos<block_start>table.rows.insert(table_pos.row_num+1 SeparatorRow(table '-'))<line_sep>table.pack()<line_sep><return>("Single separator row inserted" tbase.TablePos(table_pos.row_num table_pos.field_num))<block_end><def_stmt>editor_insert_double_hline self table table_pos<block_start>table.rows.insert(table_pos.row_num+1 SeparatorRow(table '='))<line_sep>table.pack()<line_sep><return>("Double separator row inserted" tbase.TablePos(table_pos.row_num table_pos.field_num))<block_end><def_stmt>editor_insert_hline_and_move self table table_pos<block_start>table.rows.insert(table_pos.row_num+1 SeparatorRow(table '-'))<line_sep>table.pack()<if_stmt>table_pos.row_num+2<l>len(table)<block_start><if_stmt>table[table_pos.row_num+2].is_separator()<block_start>table.insert_empty_row(table_pos.row_num+2)<block_end><block_end><else_stmt><block_start>table.insert_empty_row(table_pos.row_num+2)<block_end><return>("Single separator row inserted" 
tbase.TablePos(table_pos.row_num+2 0))<block_end><block_end><class_stmt>BorderTableParser(tbase.BaseTableParser)<block_start><def_stmt>_is_single_row_separator self str_cols<block_start><if_stmt>len(str_cols)<eq>0<block_start><return><false><block_end><for_stmt>col str_cols<block_start><if_stmt><not>re.match(r"^\s*[\-]+\s*$" col)<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>_is_double_row_separator self str_cols<block_start><if_stmt>len(str_cols)<eq>0<block_start><return><false><block_end><for_stmt>col str_cols<block_start><if_stmt><not>re.match(r"^\s*[\=]+\s*$" col)<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>create_row self table line<block_start><if_stmt>self._is_single_row_separator(line.str_cols())<block_start>row=SeparatorRow(table '-')<block_end><elif_stmt>self._is_double_row_separator(line.str_cols())<block_start>row=SeparatorRow(table '=')<block_end><else_stmt><block_start>row=self.create_data_row(table line)<block_end><return>row<block_end><def_stmt>create_data_row self table line<block_start><return>tbase.DataRow(table)<block_end><block_end>
<import_stmt>functools<import_stmt>inspect<import_stmt>reprlib<import_stmt>sys<import_stmt>traceback<import_from_stmt>. constants<def_stmt>_get_function_source func<block_start>func=inspect.unwrap(func)<if_stmt>inspect.isfunction(func)<block_start>code=func.__code__<line_sep><return>(code.co_filename code.co_firstlineno)<block_end><if_stmt>isinstance(func functools.partial)<block_start><return>_get_function_source(func.func)<block_end><if_stmt>isinstance(func functools.partialmethod)<block_start><return>_get_function_source(func.func)<block_end><return><none><block_end><def_stmt>_format_callback_source func args<block_start>func_repr=_format_callback(func args <none>)<line_sep>source=_get_function_source(func)<if_stmt>source<block_start>func_repr<augadd>f' at {source[0]}:{source[1]}'<block_end><return>func_repr<block_end><def_stmt>_format_args_and_kwargs args kwargs<block_start>"""Format function arguments and keyword arguments. Special case for a single parameter: ('hello',) is formatted as ('hello'). """<line_sep># use reprlib to limit the length of the output items=[]<if_stmt>args<block_start>items.extend(reprlib.repr(arg)<for>arg args)<block_end><if_stmt>kwargs<block_start>items.extend(f'{k}={reprlib.repr(v)}'<for>k,v kwargs.items())<block_end><return>'({})'.format(', '.join(items))<block_end><def_stmt>_format_callback func args kwargs suffix=''<block_start><if_stmt>isinstance(func functools.partial)<block_start>suffix=_format_args_and_kwargs(args kwargs)+suffix<line_sep><return>_format_callback(func.func func.args func.keywords suffix)<block_end><if_stmt>hasattr(func '__qualname__')<and>func.__qualname__<block_start>func_repr=func.__qualname__<block_end><elif_stmt>hasattr(func '__name__')<and>func.__name__<block_start>func_repr=func.__name__<block_end><else_stmt><block_start>func_repr=repr(func)<block_end>func_repr<augadd>_format_args_and_kwargs(args kwargs)<if_stmt>suffix<block_start>func_repr<augadd>suffix<block_end><return>func_repr<block_end><def_stmt>extract_stack f=<none> limit=<none><block_start>"""Replacement for traceback.extract_stack() that only does the necessary work for asyncio debug mode. """<if_stmt>f<is><none><block_start>f=sys._getframe().f_back<block_end><if_stmt>limit<is><none># Limit the amount of work to a reasonable amount, as extract_stack() # can be called for each coroutine and future in debug mode. <block_start>limit=constants.DEBUG_STACK_DEPTH<block_end>stack=traceback.StackSummary.extract(traceback.walk_stack(f) limit=limit lookup_lines=<false>)<line_sep>stack.reverse()<line_sep><return>stack<block_end>
aprutil_build_rule=""" cc_library( name = "aprutil", srcs = [ "@mod_pagespeed//third_party/aprutil:aprutil_pagespeed_memcache_c", 'buckets/apr_brigade.c', 'buckets/apr_buckets.c', 'buckets/apr_buckets_alloc.c', 'buckets/apr_buckets_eos.c', 'buckets/apr_buckets_file.c', 'buckets/apr_buckets_flush.c', 'buckets/apr_buckets_heap.c', 'buckets/apr_buckets_mmap.c', 'buckets/apr_buckets_pipe.c', 'buckets/apr_buckets_pool.c', 'buckets/apr_buckets_refcount.c', 'buckets/apr_buckets_simple.c', 'buckets/apr_buckets_socket.c', 'crypto/apr_md5.c', 'crypto/getuuid.c', 'crypto/uuid.c', #'dbm/apr_dbm.c', #'dbm/apr_dbm_sdbm.c', #'dbm/sdbm/sdbm.c', #'dbm/sdbm/sdbm_hash.c', #'dbm/sdbm/sdbm_lock.c', #'dbm/sdbm/sdbm_pair.c', 'encoding/apr_base64.c', 'hooks/apr_hooks.c', #'ldap/apr_ldap_stub.c', #'ldap/apr_ldap_url.c', 'memcache/apr_memcache.c', 'misc/apr_date.c', 'misc/apr_queue.c', 'misc/apr_reslist.c', 'misc/apr_rmm.c', 'misc/apr_thread_pool.c', 'misc/apu_dso.c', 'misc/apu_version.c', 'strmatch/apr_strmatch.c', 'uri/apr_uri.c', 'xlate/xlate.c', ], hdrs = [ "@mod_pagespeed//third_party/aprutil:aprutil_pagespeed", "crypto/crypt_blowfish.h", #"test/test_apu.h", #"test/abts_tests.h", #"test/testutil.h", #"test/abts.h", "dbm/sdbm/sdbm_private.h", "dbm/sdbm/sdbm_pair.h", "dbm/sdbm/sdbm_tune.h", "include/apr_siphash.h", "include/apr_dbm.h", "include/apr_xlate.h", "include/apr_ldap_url.h", "include/apu_version.h", "include/apr_redis.h", "include/private/apr_dbd_odbc_v2.h", "include/private/apr_dbm_private.h", "include/private/apu_internal.h", "include/private/apr_dbd_internal.h", "include/private/apr_crypto_internal.h", "include/apr_md5.h", "include/apu_errno.h", "include/apr_xml.h", "include/apr_sdbm.h", "include/apr_md4.h", "include/apr_hooks.h", "include/apr_date.h", "include/apr_reslist.h", "include/apr_memcache.h", "include/apr_uuid.h", "include/apr_base64.h", "include/apr_sha1.h", "include/apr_uri.h", "include/apr_queue.h", "include/apr_ldap_option.h", "include/apr_optional.h", "include/apr_dbd.h", "include/apr_anylock.h", "include/apr_strmatch.h", "include/apr_optional_hooks.h", "include/apr_thread_pool.h", "include/apr_buckets.h", "include/apr_rmm.h", "include/apr_ldap_rebind.h", "include/apr_ldap_init.h", "include/apr_crypto.h", ], copts = [ "-Ithird_party/aprutil/gen/arch/linux/x64/include/", "-Ithird_party/aprutil/gen/arch/linux/x64/include/private", "-Iexternal/aprutil/include/", "-Iexternal/aprutil/include/private/", "-Iexternal/aprutil/include/arch/unix/", "-Iexternal/aprutil/", "-Iexternal/apr/include/", "-Iexternal/apr/include/arch/unix/", "-Ithird_party/apr/gen/arch/linux/x64/include/", ], deps = [ "@apr//:apr", ], visibility = ["//visibility:public"], ) """<line_sep># find | grep .h$ | while read line; do echo "\"$line\","; done
"""Calculates the batch_grad derivative."""<import_from_future_stmt> annotations<import_from_stmt>typing TYPE_CHECKING Callable List Tuple<import_from_stmt>torch Tensor<import_from_stmt>torch.nn Module<import_from_stmt>backpack.core.derivatives.basederivatives BaseParameterDerivatives<import_from_stmt>backpack.extensions.firstorder.base FirstOrderModuleExtension<import_from_stmt>backpack.utils.subsampling subsample<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>backpack.extensions.firstorder BatchGrad<block_end><class_stmt>BatchGradBase(FirstOrderModuleExtension)<block_start>"""Calculates the batch_grad derivative. Passes the calls for the parameters to the derivatives class. Implements functions with method names from params. If child class wants to overwrite these methods - for example to support an additional external module - it can do so using the interface for parameter "param1":: param1(ext, module, g_inp, g_out, bpQuantities): return batch_grads In this case, the method is not overwritten by this class. """<def_stmt>__init__ self derivatives:BaseParameterDerivatives params:List[str]<arrow><none><block_start>"""Initializes all methods. If the param method has already been defined, it is left unchanged. Args: derivatives: Derivatives object used to apply parameter Jacobians. params: List of parameter names. """<line_sep>self._derivatives=derivatives<for_stmt>param_str params<block_start><if_stmt><not>hasattr(self param_str)<block_start>setattr(self param_str self._make_param_function(param_str))<block_end><block_end>super().__init__(params=params)<block_end><def_stmt>_make_param_function self param_str:str<arrow>Callable[[BatchGrad Module Tuple[Tensor] Tuple[Tensor] <none>] Tensor]<block_start>"""Creates a function that calculates batch_grad w.r.t. param. Args: param_str: Parameter name. Returns: Function that calculates batch_grad wrt param """<def_stmt>param_function ext:BatchGrad module:Module g_inp:Tuple[Tensor] g_out:Tuple[Tensor] bpQuantities:<none> <arrow>Tensor<block_start>"""Calculates batch_grad with the help of derivatives object. Args: ext: extension that is used module: module that performed forward pass g_inp: input gradient tensors g_out: output gradient tensors bpQuantities: additional quantities for second order Returns: Scaled individual gradients """<line_sep>subsampling=ext.get_subsampling()<line_sep>batch_axis=0<line_sep><return>self._derivatives.param_mjp(param_str module g_inp g_out subsample(g_out[0] dim=batch_axis subsampling=subsampling) sum_batch=<false> subsampling=subsampling )<block_end><return>param_function<block_end><block_end>
# coding:utf-8 # <def_stmt>two words<block_start>""" :param words: :return: """<line_sep>new=[]<line_sep>s=len(words)<for_stmt>index range(s)<block_start>w=words[index]<for_stmt>next_index range(index+1 s)<block_start>next_w=words[next_index]<line_sep>new.append(frozenset([w next_w]))<block_end><block_end><return>new<block_end>poemfile=open("five_poem.txt").readlines()<line_sep>feature=[]<line_sep>n=1<line_sep>length=len(poemfile)<for_stmt>poemline poemfile<block_start>print("finish:%.5f"%(n/length))<line_sep>poemline=poemline.strip().replace("\n" "")<line_sep>sentences=poemline.split(".")<line_sep>temp=[]<for_stmt>sen sentences<block_start><if_stmt>len(sen)<ne>5<block_start><continue><block_end>temp.append(sen[:2])<block_end>feature.append(temp)<line_sep>n<augadd>1<block_end>size=len(feature)<line_sep>word_fre=dict()<for_stmt>fea feature<block_start><for_stmt>word set(fea)<block_start>word_fre[word]=word_fre.get(word 0)+1/size<block_end><block_end>two_fre=dict()<line_sep>two_feature=[]<line_sep># <for_stmt>fea feature<block_start>fea=list(set(fea))<line_sep>two_feature.append(two(fea))<block_end><for_stmt>fea two_feature<block_start><for_stmt>word fea<block_start>two_fre[word]=two_fre.get(word 0)+1/size<block_end><block_end># pro=dict()<for_stmt>k,v two_fre.items()<block_start>event=list(k)<line_sep># key=event[0]<if_stmt>key<not><in>pro<block_start>pro[key]=[]<block_end>pro[key].append([event[1] two_fre[k]/word_fre[key]])<line_sep>key=event[1]<if_stmt>key<not><in>pro<block_start>pro[key]=[]<block_end>pro[key].append([event[0] two_fre[k]/word_fre[key]])<block_end># <import_stmt>json<line_sep>out=open("pro.json" "w")<line_sep>json.dump(pro out)<line_sep>
<class_stmt>Pizza<block_start><def_stmt>__init__ self toppings<block_start>self.toppings=toppings<block_end><def_stmt>__repr__ self<block_start><return>"Pizza with "+" and ".join(self.toppings)<block_end>@classmethod<def_stmt>recommend cls<block_start>"""Recommend some pizza with arbitrary toppings."""<line_sep><return>cls(['spam' 'ham' 'eggs'])<block_end><block_end><class_stmt>VikingPizza(Pizza)<block_start>@classmethod<def_stmt>recommend cls<block_start>"""Use same recommendation as super but add extra spam"""<line_sep>recommended=super().recommend()<line_sep>recommended.toppings<augadd>['spam']<times>5<line_sep><return>recommended<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("Ordinary pizza recommendation:" Pizza.recommend())<line_sep>print("Viking pizza recommendation:" VikingPizza.recommend())<block_end>
"""Hacked up script to sort modifiers"""<line_sep># Couldn't find a tool for this """ import io with io.open('modifiers.py', 'rt') as f: iter_lines = iter(f) while 1: line = next(iter_lines, None) if line.startswith('class ExpressionModifiers('): break defs = [] while 1: line = next(iter_lines, None) if line is None: break if line.lstrip().startswith('def'): defs.append([]) if defs: defs[-1].append(line) for d in sorted(defs, key=lambda m: m[0]): print ''.join(d), """<line_sep>
# EXAMPLE_EVALUATE Code to evaluate example results on ROxford and RParis datasets. # Revisited protocol has 3 difficulty setups: Easy (E), Medium (M), and Hard (H), # and evaluates the performance using mean average precision (mAP), as well as mean precision @ k (mP@k) # # More details about the revisited annotation and evaluation can be found in: # <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Revisiting Oxford and Paris: Large-Scale Image Retrieval Benchmarking, CVPR 2018 # # Authors: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., 2018 # Added diffusion: <NAME>. <import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>scipy.io loadmat<import_from_stmt>dataset configdataset<import_from_stmt>download download_datasets download_features<import_from_stmt>evaluate compute_map<line_sep>#--------------------------------------------------------------------- # Set data folder and testing parameters #--------------------------------------------------------------------- # Set data folder, change if you have downloaded the data somewhere else data_root=os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) 'data')<line_sep># Check, and, if necessary, download test data (Oxford and Pairs), # revisited annotation, and example feature vectors for evaluation download_datasets(data_root)<line_sep>download_features(data_root)<line_sep># Set test dataset: roxford5k | rparis6k test_dataset='roxford5k'<line_sep>#--------------------------------------------------------------------- # Evaluate #--------------------------------------------------------------------- print('>> {}: Evaluating test dataset...'.format(test_dataset))<line_sep># config file for the dataset # separates query image list from database image list, when revisited protocol used cfg=configdataset(test_dataset os.path.join(data_root 'datasets'))<line_sep># load query and database features print('>> {}: Loading features...'.format(test_dataset))<line_sep>features=loadmat(os.path.join(data_root 'features' '{}_resnet_rsfm120k_gem.mat'.format(test_dataset)))<line_sep>Q=features['Q']<line_sep>X=features['X']<line_sep>K=100# approx 50 mutual nns QUERYKNN=10<line_sep>R=2000<line_sep>alpha=0.9<import_from_stmt>diffussion *<line_sep># perform search print('>> {}: Retrieval...'.format(test_dataset))<line_sep>sim=np.dot(X.T Q)<line_sep>qsim=sim_kernel(sim).T<line_sep>sortidxs=np.argsort(-qsim axis=1)<for_stmt>i range(len(qsim))<block_start>qsim[i sortidxs[i QUERYKNN:]]=0<block_end>qsim=sim_kernel(qsim)<line_sep>A=np.dot(X.T X)<line_sep>W=sim_kernel(A).T<line_sep>W=topK_W(W K)<line_sep>Wn=normalize_connection_graph(W)<line_sep>plain_ranks=np.argsort(-sim axis=0)<line_sep>cg_ranks=cg_diffusion(qsim Wn alpha)<line_sep>cg_trunk_ranks=dfs_trunk(sim A alpha=alpha QUERYKNN=QUERYKNN)<line_sep>fast_spectral_ranks=fsr_rankR(qsim Wn alpha R)<line_sep>alg_names=['Plain' 'Diffusion cg' 'Diffusion trunkated' 'Spectral R=2000']<line_sep>alg_ranks=[plain_ranks cg_ranks cg_trunk_ranks fast_spectral_ranks]<for_stmt>rn range(len(alg_names))<block_start>ranks=alg_ranks[rn]<line_sep>name=alg_names[rn]<line_sep># revisited evaluation gnd=cfg['gnd']<line_sep># evaluate ranks ks=[1 5 10]<line_sep># search for easy gnd_t=[]<for_stmt>i range(len(gnd))<block_start>g={}<line_sep>g['ok']=np.concatenate([gnd[i]['easy']])<line_sep>g['junk']=np.concatenate([gnd[i]['junk'] gnd[i]['hard']])<line_sep>gnd_t.append(g)<block_end>mapE,apsE,mprE,prsE=compute_map(ranks gnd_t ks)<line_sep># search for easy & hard gnd_t=[]<for_stmt>i 
range(len(gnd))<block_start>g={}<line_sep>g['ok']=np.concatenate([gnd[i]['easy'] gnd[i]['hard']])<line_sep>g['junk']=np.concatenate([gnd[i]['junk']])<line_sep>gnd_t.append(g)<block_end>mapM,apsM,mprM,prsM=compute_map(ranks gnd_t ks)<line_sep># search for hard gnd_t=[]<for_stmt>i range(len(gnd))<block_start>g={}<line_sep>g['ok']=np.concatenate([gnd[i]['hard']])<line_sep>g['junk']=np.concatenate([gnd[i]['junk'] gnd[i]['easy']])<line_sep>gnd_t.append(g)<block_end>mapH,apsH,mprH,prsH=compute_map(ranks gnd_t ks)<line_sep>print(name)<line_sep>print('>> {}: mAP E: {}, M: {}, H: {}'.format(test_dataset np.around(mapE<times>100 decimals=2) np.around(mapM<times>100 decimals=2) np.around(mapH<times>100 decimals=2)))<line_sep>print('>> {}: mP@k{} E: {}, M: {}, H: {}'.format(test_dataset np.array(ks) np.around(mprE<times>100 decimals=2) np.around(mprM<times>100 decimals=2) np.around(mprH<times>100 decimals=2)))<block_end>
<import_stmt>platform<import_stmt>signal<import_from_stmt>xv_leak_tools.exception XVEx<import_from_stmt>xv_leak_tools.helpers unused<import_from_stmt>xv_leak_tools.log L<import_from_stmt>xv_leak_tools.test_device.desktop_device DesktopDevice<import_from_stmt>xv_leak_tools.test_device.connector_helper ConnectorHelper<import_from_stmt>xv_leak_tools.process XVProcessException<line_sep># TODO: consider a UnixDevice as ancestor of MacOSDevice, LinuxDevice <class_stmt>LinuxDevice(DesktopDevice)<block_start><def_stmt>__init__ self config connector<block_start>super().__init__(config connector)<line_sep>self._connector_helper=ConnectorHelper(self)<block_end>@staticmethod<def_stmt>local_ips <block_start><raise>XVEx("TODO: Local IPs for Linux")<block_end>@staticmethod<def_stmt>open_app binary_path root=<false><block_start>unused(root)<if_stmt>binary_path<is><none><block_start>L.debug('Application has no binary path; not opening')<block_end># TODO: open the application here <block_end>@staticmethod<def_stmt>close_app binary_path root=<false><block_start>unused(root)<if_stmt>binary_path<is><none><block_start>L.debug('Application has no binary path; not closing')<block_end># TODO: close the application here <block_end><def_stmt>os_name self<block_start><return>'linux'<block_end><def_stmt>os_version self<block_start><return>" ".join(platform.linux_distribution())<block_end><def_stmt>report_info self<block_start>info=super().report_info()<line_sep>commands=[['uname' '-a'] ['lsb_release' '-a'] ['lscpu'] ]<for_stmt>command commands<block_start><try_stmt><block_start>info<augadd>self._connector_helper.check_command(command)[0]<block_end><except_stmt>XVProcessException<as>ex<block_start>L.warning("Couldn't get system info using command {}:\n{}".format(command ex))<block_end><block_end><return>info<block_end><def_stmt>kill_process self pid<block_start>L.debug("Killing process {}".format(pid))<line_sep><return>self._connector_helper.execute_scriptlet('remote_os_kill.py' [pid int(signal.SIGKILL)] root=<true>)<block_end><def_stmt>pgrep self process_name<block_start>L.debug("pgrep-ing for {}".format(process_name))<line_sep><return>self._connector_helper.execute_scriptlet('pgrep.py' [process_name] root=<true>)<block_end><def_stmt>command_line_for_pid self pid<block_start><return>self._connector_helper.execute_scriptlet('command_line_for_pid.py' [pid] root=<true>)<block_end><block_end>
MINI14='1.4GHz Mac mini'<class_stmt>AppleFactory<block_start><class_stmt>MacMini14<block_start><def_stmt>__init__ self<block_start>self.memory=4# in gigabytes self.hdd=500# in gigabytes self.gpu='Intel HD Graphics 5000'<block_end><def_stmt>__str__ self<block_start>info=(f'Model: {MINI14}' f'Memory: {self.memory}GB' f'Hard Disk: {self.hdd}GB' f'Graphics Card: {self.gpu}')<line_sep><return>'\n'.join(info)<block_end><block_end><def_stmt>build_computer self model<block_start><if_stmt>model<eq>MINI14<block_start><return>self.MacMini14()<block_end><else_stmt><block_start>print(f"I don't know how to build {model}")<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>afac=AppleFactory()<line_sep>mac_mini=afac.build_computer(MINI14)<line_sep>print(mac_mini)<block_end>
# A script to temporarily install and run the addon. Useful for running # blender-mesh-to-json via blender CLI where you might be in a # continuous integration environment that doesn't have the addon # installed # # blender file.blend --python $(mesh2json) # -> becomes -> # blender file.blend --python /path/to/run-addon <import_stmt>bpy<import_stmt>os<line_sep># Get the absolute path to the addon dir=os.path.dirname(__file__)<line_sep>addonFilePath=dir+'/blender-mesh-to-json.py'<line_sep># Install and enable the addon temporarily (since we aren't saving our user preferences) # We just want to have access to the addon during this blender session bpy.ops.preferences.addon_install(filepath=addonFilePath)<line_sep>bpy.ops.preferences.addon_enable(module='blender-mesh-to-json')<line_sep># Run our addon bpy.ops.import_export.mesh2json()<line_sep>
GET_HUB={'HubArn':'arn:aws:securityhub:us-east-1:000000000000:hub/default' 'SubscribedAt':'2020-12-03T11:05:17.571Z' 'AutoEnableControls':<true> }<line_sep>
"""Tests for treadmill's linux direct system call interface."""<line_sep>
""" Create holdem game record table """<import_from_stmt>yoyo step<line_sep>__depends__={'20211109_01_xKblp-change-comments-on-black-jack-record'}<line_sep>steps=[step("CREATE TABLE `holdemGameRecord` ( `userID` BIGINT NOT NULL , `moneyInvested` BIGINT NOT NULL , `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose or fold; 2 represent win;' , `tableID` BIGINT NOT NULL , `time` TIMESTAMP NOT NULL , `tableUUID` VARCHAR(64) NOT NULL ) ENGINE = InnoDB;")]<line_sep>
<import_from_stmt>chariot.transformer.text.base TextNormalizer<class_stmt>LowerNormalizer(TextNormalizer)<block_start><def_stmt>__init__ self copy=<true><block_start>super().__init__(copy)<block_end><def_stmt>apply self text<block_start><return>text.lower()<block_end><block_end>
<import_from_stmt>io IOBase<import_from_stmt>typing Sized Tuple<import_from_stmt>torch.utils.data IterDataPipe<import_from_stmt>torch.utils.data.datapipes.utils.common deprecation_warning_torchdata<class_stmt>HTTPReaderIterDataPipe(IterDataPipe[Tuple[str IOBase]])<block_start>r""" :class:`HTTPReaderIterDataPipe` Iterable DataPipe to load file url(s) (http url(s) pointing to file(s)), yield file url and IO stream in a tuple Args: datapipe: Iterable DataPipe providing urls timeout: Timeout for http request """<def_stmt>__init__ self datapipe timeout=<none><block_start>self.datapipe=datapipe<line_sep>self.timeout=timeout<line_sep>deprecation_warning_torchdata(type(self).__name__)<block_end><def_stmt>__iter__ self<block_start><import_from_stmt>requests HTTPError RequestException Session<for_stmt>url self.datapipe<block_start><try_stmt><block_start><with_stmt>Session()<as>session<block_start><if_stmt>self.timeout<is><none><block_start>r=session.get(url stream=<true>)<block_end><else_stmt><block_start>r=session.get(url timeout=self.timeout stream=<true>)<block_end><block_end><return>url r.raw<block_end><except_stmt>HTTPError<as>e<block_start><raise>Exception(f"Could not get the file. [HTTP Error] {e.response}.")<block_end><except_stmt>RequestException<as>e<block_start><raise>Exception(f"Could not get the file at {url}. [RequestException] {e.response}.")<block_end><except_stmt>Exception<block_start><raise><block_end><block_end><block_end><def_stmt>__len__ self<arrow>int<block_start><if_stmt>isinstance(self.datapipe Sized)<block_start><return>len(self.datapipe)<block_end><raise>TypeError("{} instance doesn't have valid length".format(type(self).__name__))<block_end><block_end>
<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_array_equal<import_from_stmt>scipy.stats.contingency crosstab<line_sep>@pytest.mark.parametrize('sparse' [<false> <true>])<def_stmt>test_crosstab_basic sparse<block_start>a=[0 0 9 9 0 0 9]<line_sep>b=[2 1 3 1 2 3 3]<line_sep>expected_avals=[0 9]<line_sep>expected_bvals=[1 2 3]<line_sep>expected_count=np.array([[1 2 1] [1 0 2]])<line_sep>(avals bvals),count=crosstab(a b sparse=sparse)<line_sep>assert_array_equal(avals expected_avals)<line_sep>assert_array_equal(bvals expected_bvals)<if_stmt>sparse<block_start>assert_array_equal(count.A expected_count)<block_end><else_stmt><block_start>assert_array_equal(count expected_count)<block_end><block_end><def_stmt>test_crosstab_basic_1d # Verify that a single input sequence works as expected. <block_start>x=[1 2 3 1 2 3 3]<line_sep>expected_xvals=[1 2 3]<line_sep>expected_count=np.array([2 2 3])<line_sep>(xvals ),count=crosstab(x)<line_sep>assert_array_equal(xvals expected_xvals)<line_sep>assert_array_equal(count expected_count)<block_end><def_stmt>test_crosstab_basic_3d # Verify the function for three input sequences. <block_start>a='a'<line_sep>b='b'<line_sep>x=[0 0 9 9 0 0 9 9]<line_sep>y=[a a a a b b b a]<line_sep>z=[1 2 3 1 2 3 3 1]<line_sep>expected_xvals=[0 9]<line_sep>expected_yvals=[a b]<line_sep>expected_zvals=[1 2 3]<line_sep>expected_count=np.array([[[1 1 0] [0 1 1]] [[2 0 1] [0 0 1]]])<line_sep>(xvals yvals zvals),count=crosstab(x y z)<line_sep>assert_array_equal(xvals expected_xvals)<line_sep>assert_array_equal(yvals expected_yvals)<line_sep>assert_array_equal(zvals expected_zvals)<line_sep>assert_array_equal(count expected_count)<block_end>@pytest.mark.parametrize('sparse' [<false> <true>])<def_stmt>test_crosstab_levels sparse<block_start>a=[0 0 9 9 0 0 9]<line_sep>b=[1 2 3 1 2 3 3]<line_sep>expected_avals=[0 9]<line_sep>expected_bvals=[0 1 2 3]<line_sep>expected_count=np.array([[0 1 2 1] [0 1 0 2]])<line_sep>(avals bvals),count=crosstab(a b levels=[<none> [0 1 2 3]] sparse=sparse)<line_sep>assert_array_equal(avals expected_avals)<line_sep>assert_array_equal(bvals expected_bvals)<if_stmt>sparse<block_start>assert_array_equal(count.A expected_count)<block_end><else_stmt><block_start>assert_array_equal(count expected_count)<block_end><block_end>@pytest.mark.parametrize('sparse' [<false> <true>])<def_stmt>test_crosstab_extra_levels sparse# The pair of values (-1, 3) will be ignored, because we explicitly # request the counted `a` values to be [0, 9]. 
<block_start>a=[0 0 9 9 0 0 9 -1]<line_sep>b=[1 2 3 1 2 3 3 3]<line_sep>expected_avals=[0 9]<line_sep>expected_bvals=[0 1 2 3]<line_sep>expected_count=np.array([[0 1 2 1] [0 1 0 2]])<line_sep>(avals bvals),count=crosstab(a b levels=[[0 9] [0 1 2 3]] sparse=sparse)<line_sep>assert_array_equal(avals expected_avals)<line_sep>assert_array_equal(bvals expected_bvals)<if_stmt>sparse<block_start>assert_array_equal(count.A expected_count)<block_end><else_stmt><block_start>assert_array_equal(count expected_count)<block_end><block_end><def_stmt>test_validation_at_least_one <block_start><with_stmt>pytest.raises(TypeError match='At least one')<block_start>crosstab()<block_end><block_end><def_stmt>test_validation_same_lengths <block_start><with_stmt>pytest.raises(ValueError match='must have the same length')<block_start>crosstab([1 2] [1 2 3 4])<block_end><block_end><def_stmt>test_validation_sparse_only_two_args <block_start><with_stmt>pytest.raises(ValueError match='only two input sequences')<block_start>crosstab([0 1 1] [8 8 9] [1 3 3] sparse=<true>)<block_end><block_end><def_stmt>test_validation_len_levels_matches_args <block_start><with_stmt>pytest.raises(ValueError match='number of input sequences')<block_start>crosstab([0 1 1] [8 8 9] levels=([0 1 2 3] ))<block_end><block_end>
""" Helper module for import * without __all__ """<line_sep>all_import2=3<line_sep>all_import3=3<line_sep>all_override=<true><line_sep>
<import_stmt>json<import_stmt>random<import_stmt>string<def_stmt>lambda_handler event context# print(event) # print(context) <block_start>letters=string.ascii_lowercase<line_sep>value=''.join(random.choice(letters)<for>i range(10))<line_sep><return>{'statusCode':200 "headers":json.dumps({'Access-Control-Allow-Origin':'*'}) "body":json.dumps(value)}<block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>mmdeploy.core FUNCTION_REWRITER<line_sep>@FUNCTION_REWRITER.register_rewriter('mmdet3d.models.detectors.voxelnet.VoxelNet.simple_test')<def_stmt>voxelnet__simple_test ctx self voxels num_points coors img_metas=<none> imgs=<none> rescale=<false><block_start>"""Test function without augmentation. Rewrite this func to remove model post process. Args: voxels (torch.Tensor): Point features or raw points in shape (N, M, C). num_points (torch.Tensor): Number of points in each pillar. coors (torch.Tensor): Coordinates of each voxel. img_metas (list[dict]): Contains pcd meta info. Returns: List: Result of model. """<line_sep>x=self.extract_feat(voxels num_points coors img_metas)<line_sep>bbox_preds,scores,dir_scores=self.bbox_head(x)<line_sep><return>bbox_preds scores dir_scores<block_end>@FUNCTION_REWRITER.register_rewriter('mmdet3d.models.detectors.voxelnet.VoxelNet.extract_feat')<def_stmt>voxelnet__extract_feat ctx self voxels num_points coors img_metas=<none><block_start>"""Extract features from points. Rewrite this func to remove voxelize op. Args: voxels (torch.Tensor): Point features or raw points in shape (N, M, C). num_points (torch.Tensor): Number of points in each pillar. coors (torch.Tensor): Coordinates of each voxel. img_metas (list[dict]): Contains pcd meta info. Returns: torch.Tensor: Features from points. """<line_sep>voxel_features=self.voxel_encoder(voxels num_points coors)<line_sep>batch_size=coors[-1 0]+1# refactor <assert_stmt>batch_size<eq>1<line_sep>x=self.middle_encoder(voxel_features coors batch_size)<line_sep>x=self.backbone(x)<if_stmt>self.with_neck<block_start>x=self.neck(x)<block_end><return>x<block_end>
# Copyright (c) 2021 Agenium Scale # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # What does this script? # ---------------------- # # This script generates code for each architecture, the base C/C++ APIs and # the advanced C++ API. Each part to be generated is handled by a # `gen_*.py` file. This script simply calls the `doit` function of each # `gen_*.py` module. Names are self-explanatory. # # ----------------------------------------------------------------------------- # First thing we do is check whether python3 is used <import_stmt>sys<if_stmt>sys.version_info[0]<l>3<block_start>print('Only Python 3 is supported')<line_sep>sys.exit(1)<block_end># ----------------------------------------------------------------------------- # Imports <import_stmt>argparse<import_stmt>os<import_stmt>re<import_stmt>common<import_stmt>gen_archis<import_stmt>gen_base_apis<import_stmt>gen_adv_cxx_api<import_stmt>gen_adv_c_api<import_stmt>gen_tests<import_stmt>gen_src<import_stmt>gen_doc<import_stmt>gen_friendly_but_not_optimized<import_stmt>gen_modules<import_stmt>gen_scalar_utilities<import_stmt>get_sleef_code<line_sep># Dir of this script script_dir=os.path.dirname(__file__)<if_stmt>script_dir<eq>''<block_start>script_dir='.'<block_end># ----------------------------------------------------------------------------- # Arguments parsing <def_stmt>parse_args args<block_start><def_stmt>parse_simd value## Split .simd now <block_start>values={'x86':common.x86_simds 'arm':common.arm_simds 'ppc':common.ppc_simds 'all':common.simds }.get(value value.split(','))<line_sep>## Check that all simd are valid ret=[]<for_stmt>simd values<block_start><if_stmt>simd<not><in>common.simds<block_start><raise>argparse.ArgumentTypeError("SIMD '{}' not found in {}".format(simd common.simds))<block_end>ret<augadd>common.simds_deps[simd]<block_end><return>list(set(ret))<block_end><def_stmt>parse_match value<block_start><if_stmt>value<is><none><block_start><return><none><block_end><else_stmt><block_start><return>re.compile(value)<block_end><block_end># In pratice, we either generate all or all except tests and we never # change default directories for code generation. So we remove unused # options and regroup some into --library. 
parser=argparse.ArgumentParser(description='This is NSIMD generation script.')<line_sep>parser.add_argument('--force' '-f' action='store_true' help='Generate all files even if they already exist')<line_sep>parser.add_argument('--list-files' '-L' action='store_true' default=<false> help='List files that will be created by hatch.py')<line_sep>parser.add_argument('--all' '-A' action='store_true' help='Generate code for the library and its tests')<line_sep>parser.add_argument('--library' '-l' action='store_true' help='Generate code of the library (C and C++ APIs)')<line_sep>parser.add_argument('--sleef' '-s' action='store_true' default=<false> help='Compile Sleef')<line_sep>parser.add_argument('--tests' '-t' action='store_true' help='Generate tests in C and C++')<line_sep>parser.add_argument('--doc' '-d' action='store_true' help='Generate all documentation')<line_sep>parser.add_argument('--enable-clang-format' '-F' action='store_false' default=<true> help='Disable Clang Format (mainly for speed on Windows)')<line_sep>parser.add_argument('--sve-emulate-bool' action='store_true' default=<false> help='Use normal SVE vector to emulate predicates.')<line_sep>parser.add_argument('--simd' '-D' type=parse_simd default='all' help='List of SIMD extensions (separated by a comma)')<line_sep>parser.add_argument('--match' '-m' type=parse_match default=<none> help='Regex used to filter generation on operator names')<line_sep>parser.add_argument('--verbose' '-v' action='store_true' default=<none> help='Enable verbose mode')<line_sep>parser.add_argument('--simple-license' action='store_true' default=<false> help='Put a simple copyright statement instead of the whole license')<line_sep>opts=parser.parse_args(args)<line_sep># When -L has been chosen, we want to list all files and so we have to # turn to True other parameters <if_stmt>opts.list_files<block_start>opts.library=<true><line_sep>opts.tests=<true><line_sep>opts.force=<true><line_sep>opts.doc=<true><block_end># We set variables here because all the code depends on them + we do want # to keep the possibility to change them in the future opts.archis=opts.library<line_sep>opts.base_apis=opts.library<line_sep>opts.adv_cxx_api=opts.library<line_sep>opts.adv_c_api=opts.library<line_sep>opts.friendly_but_not_optimized=opts.library<line_sep>opts.src=opts.library<line_sep>opts.scalar_utilities=opts.library<line_sep>opts.sleef_version='3.5.1'<line_sep>opts.include_dir=os.path.join(script_dir '..' 'include' 'nsimd')<line_sep>opts.tests_dir=os.path.join(script_dir '..' 'tests')<line_sep>opts.src_dir=os.path.join(script_dir '..' 
'src')<line_sep><return>opts<block_end># ----------------------------------------------------------------------------- # Entry point <def_stmt>main <block_start>opts=parse_args(sys.argv[1:])<line_sep>opts.script_dir=script_dir<line_sep>opts.modules_list=<none><line_sep>opts.platforms_list=<none><line_sep>## Gather all SIMD dependencies opts.simd=common.get_simds_deps_from_opts(opts)<line_sep>common.myprint(opts 'List of SIMD: {}'.format(', '.join(opts.simd)))<if_stmt>opts.archis<eq><true><or>opts.all<eq><true><block_start>gen_archis.doit(opts)<block_end><if_stmt>opts.base_apis<eq><true><or>opts.all<eq><true><block_start>gen_base_apis.doit(opts)<block_end><if_stmt>opts.adv_cxx_api<eq><true><or>opts.all<eq><true><block_start>gen_adv_cxx_api.doit(opts)<block_end><if_stmt>opts.adv_c_api<eq><true><or>opts.all<eq><true><block_start>gen_adv_c_api.doit(opts)<block_end><if_stmt>opts.tests<eq><true><or>opts.all<eq><true><block_start>gen_tests.doit(opts)<block_end><if_stmt>opts.src<eq><true><or>opts.all<eq><true><block_start>gen_src.doit(opts)<block_end><if_stmt>opts.sleef<eq><true><or>opts.all<eq><true><block_start>get_sleef_code.doit(opts)<block_end><if_stmt>opts.scalar_utilities<eq><true><or>opts.all<eq><true><block_start>gen_scalar_utilities.doit(opts)<block_end><if_stmt>opts.friendly_but_not_optimized<eq><true><or>opts.all<eq><true><block_start>gen_friendly_but_not_optimized.doit(opts)<block_end>gen_modules.doit(opts)# this must be here after all NSIMD <if_stmt>opts.doc<eq><true><or>opts.all<eq><true><block_start>gen_doc.doit(opts)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>os<import_stmt>sys<import_stmt>re<import_stmt>collections<import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>json<import_stmt>itertools<import_stmt>pickle<import_stmt>gc<import_stmt>gzip<import_stmt>argparse<def_stmt>tokenize sent<block_start>'''Return the tokens of a sentence including punctuation. >>> tokenize('Bob dropped the apple. Where is the apple?') ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] '''<line_sep><return>re.findall('(?:\w+)|\S' sent)<block_end><def_stmt>list_to_map l<block_start>'''Convert a list of values to a map from values to indices'''<line_sep><return>{val:i<for>i,val enumerate(l)}<block_end><def_stmt>parse_stories lines<block_start>''' Parse stories provided in the bAbi tasks format, with knowledge graph. '''<line_sep>data=[]<line_sep>story=[]<for_stmt>line lines<block_start><if_stmt>line[-1]<eq>"\n"<block_start>line=line[:-1]<block_end>nid,line=line.split(' ' 1)<line_sep>nid=int(nid)<if_stmt>nid<eq>1<block_start>story=[]<line_sep>questions=[]<block_end><if_stmt>'\t'<in>line<block_start>q,apre=line.split('\t')[:2]<line_sep>a=apre.split(',')<line_sep>q=tokenize(q)<line_sep>substory=[x<for>x story<if>x]<line_sep>data.append((substory q a))<line_sep>story.append('')<block_end><else_stmt><block_start>line,graph=line.split('=' 1)<line_sep>sent=tokenize(line)<line_sep>graph_parsed=json.loads(graph)<line_sep>story.append((sent graph_parsed))<block_end><block_end><return>data<block_end><def_stmt>get_stories taskname<block_start><with_stmt>open(taskname 'r')<as>f<block_start>lines=f.readlines()<block_end><return>parse_stories(lines)<block_end><def_stmt>get_max_sentence_length stories<block_start><return>max((max((len(sentence)<for>(sentence graph) sents_graphs))<for>(sents_graphs query answer) stories))<block_end><def_stmt>get_max_query_length stories<block_start><return>max((len(query)<for>(sents_graphs query answer) stories))<block_end><def_stmt>get_max_num_queries stories<block_start><return>max((len(queries)<for>(sents_graphs query answer) stories))<block_end><def_stmt>get_max_nodes_per_iter stories<block_start>result=0<for_stmt>(sents_graphs query answer) stories<block_start>prev_nodes=set()<for_stmt>(sentence graph) sents_graphs<block_start>cur_nodes=set(graph["nodes"])<line_sep>new_nodes=len(cur_nodes-prev_nodes)<if_stmt>new_nodes<g>result<block_start>result=new_nodes<block_end>prev_nodes=cur_nodes<block_end><block_end><return>result<block_end><def_stmt>get_buckets stories max_ignore_unbatched=100 max_pad_amount=25<block_start>sentencecounts=[len(sents_graphs)<for>(sents_graphs query answer) stories]<line_sep>countpairs=sorted(collections.Counter(sentencecounts).items())<line_sep>buckets=[]<line_sep>smallest_left_val=0<line_sep>num_unbatched=max_ignore_unbatched<for_stmt>val,ct countpairs<block_start>num_unbatched<augadd>ct<if_stmt>val-smallest_left_val<g>max_pad_amount<or>num_unbatched<g>max_ignore_unbatched<block_start>buckets.append(val)<line_sep>smallest_left_val=val<line_sep>num_unbatched=0<block_end><block_end><if_stmt>buckets[-1]<ne>countpairs[-1][0]<block_start>buckets.append(countpairs[-1][0])<block_end><return>buckets<block_end>PAD_WORD="<PAD>"<def_stmt>get_wordlist stories<block_start>words=[PAD_WORD]+sorted(list(set((word<for>(sents_graphs query answer) stories<for>wordbag itertools.chain((s<for>s,g sents_graphs) [query])<for>word wordbag))))<line_sep>wordmap=list_to_map(words)<line_sep><return>words wordmap<block_end><def_stmt>get_answer_list stories<block_start>words=sorted(list(set(word<for>(sents_graphs 
query answer) stories<for>word answer)))<line_sep>wordmap=list_to_map(words)<line_sep><return>words wordmap<block_end><def_stmt>pad_story story num_sentences sentence_length<block_start><def_stmt>pad lst dlen pad<block_start><return>lst+[pad]<times>(dlen-len(lst))<block_end>sents_graphs,query,answer=story<line_sep>padded_sents_graphs=[(pad(s sentence_length PAD_WORD) g)<for>s,g sents_graphs]<line_sep>padded_query=pad(query sentence_length PAD_WORD)<line_sep>sentgraph_padding=(pad([] sentence_length PAD_WORD) padded_sents_graphs[-1][1])<line_sep><return>(pad(padded_sents_graphs num_sentences sentgraph_padding) padded_query answer)<block_end><def_stmt>get_unqualified_id s<block_start><return>s.split("#")[0]<block_end><def_stmt>get_graph_lists stories<block_start>node_words=sorted(list(set(get_unqualified_id(node)<for>(sents_graphs query answer) stories<for>sent,graph sents_graphs<for>node graph["nodes"])))<line_sep>nodemap=list_to_map(node_words)<line_sep>edge_words=sorted(list(set(get_unqualified_id(edge["type"])<for>(sents_graphs query answer) stories<for>sent,graph sents_graphs<for>edge graph["edges"])))<line_sep>edgemap=list_to_map(edge_words)<line_sep><return>node_words nodemap edge_words edgemap<block_end><def_stmt>convert_graph graphs nodemap edgemap new_nodes_per_iter dynamic=<true><block_start>num_node_ids=len(nodemap)<line_sep>num_edge_types=len(edgemap)<line_sep>full_size=len(graphs)<times>new_nodes_per_iter+1<line_sep>prev_size=1<line_sep>processed_nodes=[]<line_sep>index_map={}<line_sep>all_num_nodes=[]<line_sep>all_node_ids=[]<line_sep>all_node_strengths=[]<line_sep>all_edges=[]<if_stmt><not>dynamic<block_start>processed_nodes=list(nodemap.keys())<line_sep>index_map=nodemap.copy()<line_sep>prev_size=num_node_ids<line_sep>full_size=prev_size<line_sep>new_nodes_per_iter=0<block_end><for_stmt>g graphs<block_start>active_nodes=g["nodes"]<line_sep>active_edges=g["edges"]<line_sep>new_nodes=[e<for>e active_nodes<if>e<not><in>processed_nodes]<line_sep>num_new_nodes=len(new_nodes)<if_stmt><not>dynamic<block_start><assert_stmt>num_new_nodes<eq>0 "Cannot create more nodes in non-dynamic mode!\n{}".format(graphs)<block_end>new_node_strengths=np.zeros([new_nodes_per_iter] np.float32)<line_sep>new_node_strengths[:num_new_nodes]=1.0<line_sep>new_node_ids=np.zeros([new_nodes_per_iter num_node_ids] np.float32)<for_stmt>i,node enumerate(new_nodes)<block_start>new_node_ids[i nodemap[get_unqualified_id(node)]]=1.0<line_sep>index_map[node]=prev_size+i<block_end>next_edges=np.zeros([full_size full_size num_edge_types])<for_stmt>edge active_edges<block_start>next_edges[index_map[edge["from"]] index_map[edge["to"]] edgemap[get_unqualified_id(edge["type"])]]=1.0<block_end>processed_nodes.extend(new_nodes)<line_sep>prev_size<augadd>new_nodes_per_iter<line_sep>all_num_nodes.append(num_new_nodes)<line_sep>all_node_ids.append(new_node_ids)<line_sep>all_edges.append(next_edges)<line_sep>all_node_strengths.append(new_node_strengths)<block_end><return>np.stack(all_num_nodes) np.stack(all_node_strengths) np.stack(all_node_ids) np.stack(all_edges)<block_end><def_stmt>convert_story story wordmap answer_map graph_node_map graph_edge_map new_nodes_per_iter dynamic=<true><block_start>""" Converts a story in format ([(sentence, graph)], [(index, question_arr, answer)]) to a consolidated story in format (sentence_arr, [graph_arr_dict], [(index, question_arr, answer)]) and also replaces words according to the input maps """<line_sep>sents_graphs,query,answer=story<line_sep>sentence_arr=[[wordmap[w]<for>w s]<for>s,g 
sents_graphs]<line_sep>graphs=convert_graph([g<for>s,g sents_graphs] graph_node_map graph_edge_map new_nodes_per_iter dynamic)<line_sep>query_arr=[wordmap[w]<for>w query]<line_sep>answer_arr=[answer_map[w]<for>w answer]<line_sep><return>(sentence_arr graphs query_arr answer_arr)<block_end><def_stmt>process_story s bucket_len<block_start><return>convert_story(pad_story(s bucket_len sentence_length) wordmap answer_map graph_node_map graph_edge_map new_nodes_per_iter dynamic)<block_end><def_stmt>bucket_stories stories buckets wordmap answer_map graph_node_map graph_edge_map sentence_length new_nodes_per_iter dynamic=<true><block_start><return>[[process_story(story bmax)<for>story stories<if>bstart<l>len(story[0])<le>bmax]<for>bstart,bmax zip([0]+buckets buckets)]<block_end><def_stmt>prepare_stories stories dynamic=<true><block_start>sentence_length=max(get_max_sentence_length(stories) get_max_query_length(stories))<line_sep>buckets=get_buckets(stories)<line_sep>wordlist,wordmap=get_wordlist(stories)<line_sep>anslist,ansmap=get_answer_list(stories)<line_sep>new_nodes_per_iter=get_max_nodes_per_iter(stories)<line_sep>graph_node_list,graph_node_map,graph_edge_list,graph_edge_map=get_graph_lists(stories)<line_sep>bucketed=bucket_stories(stories buckets wordmap ansmap graph_node_map graph_edge_map sentence_length new_nodes_per_iter dynamic)<line_sep><return>sentence_length new_nodes_per_iter buckets wordlist anslist graph_node_list graph_edge_list bucketed<block_end><def_stmt>print_batch story wordlist anslist file=sys.stdout<block_start>sents,query,answer=story<for_stmt>batch,(s q a) enumerate(zip(sents query answer))<block_start>file.write("Story {}\n".format(batch))<for_stmt>sent s<block_start>file.write(" ".join([wordlist[word]<for>word sent])+"\n")<block_end>file.write(" ".join(wordlist[word]<for>word q)+"\n")<line_sep>file.write(" ".join(anslist[word]<for>word a.nonzero()[1])+"\n")<block_end><block_end>MetadataList=collections.namedtuple("MetadataList" ["sentence_length" "new_nodes_per_iter" "buckets" "wordlist" "anslist" "graph_node_list" "graph_edge_list"])<line_sep>PreppedStory=collections.namedtuple("PreppedStory" ["converted" "sentences" "query" "answer"])<def_stmt>generate_metadata stories dynamic=<true><block_start>sentence_length=max(get_max_sentence_length(stories) get_max_query_length(stories))<line_sep>buckets=get_buckets(stories)<line_sep>wordlist,wordmap=get_wordlist(stories)<line_sep>anslist,ansmap=get_answer_list(stories)<line_sep>new_nodes_per_iter=get_max_nodes_per_iter(stories)<line_sep>graph_node_list,graph_node_map,graph_edge_list,graph_edge_map=get_graph_lists(stories)<line_sep>metadata=MetadataList(sentence_length new_nodes_per_iter buckets wordlist anslist graph_node_list graph_edge_list)<line_sep><return>metadata<block_end><def_stmt>preprocess_stories stories savedir dynamic=<true> metadata_file=<none><block_start><if_stmt>metadata_file<is><none><block_start>metadata=generate_metadata(stories dynamic)<block_end><else_stmt><block_start><with_stmt>open(metadata_file 'rb')<as>f<block_start>metadata=pickle.load(f)<block_end><block_end>buckets=get_buckets(stories)<line_sep>sentence_length,new_nodes_per_iter,old_buckets,wordlist,anslist,graph_node_list,graph_edge_list=metadata<line_sep>metadata=metadata._replace(buckets=buckets)<if_stmt><not>os.path.exists(savedir)<block_start>os.makedirs(savedir)<block_end><with_stmt>open(os.path.join(savedir 'metadata.p') 'wb')<as>f<block_start>pickle.dump(metadata f)<block_end>bucketed_files=[[]<for>_ buckets]<for_stmt>i,story 
enumerate(stories)<block_start>bucket_idx,cur_bucket=next(((i bmax)<for>(i (bstart bmax)) enumerate(zip([0]+buckets buckets))<if>bstart<l>len(story[0])<le>bmax) (<none> <none>))<assert_stmt>cur_bucket<is><not><none> "Couldn't put story of length {} into buckets {}".format(len(story[0]) buckets)<line_sep>bucket_dir=os.path.join(savedir "bucket_{}".format(cur_bucket))<if_stmt><not>os.path.exists(bucket_dir)<block_start>os.makedirs(bucket_dir)<block_end>story_fn=os.path.join(bucket_dir "story_{}.pz".format(i))<line_sep>sents_graphs,query,answer=story<line_sep>sents=[s<for>s,g sents_graphs]<line_sep>cvtd=convert_story(pad_story(story cur_bucket sentence_length) list_to_map(wordlist) list_to_map(anslist) list_to_map(graph_node_list) list_to_map(graph_edge_list) new_nodes_per_iter dynamic)<line_sep>prepped=PreppedStory(cvtd sents query answer)<with_stmt>gzip.open(story_fn 'wb')<as>zf<block_start>pickle.dump(prepped zf)<block_end>bucketed_files[bucket_idx].append(os.path.relpath(story_fn savedir))<line_sep>gc.collect()<block_end># we don't want to use too much memory, so try to clean it up <with_stmt>open(os.path.join(savedir 'file_list.p') 'wb')<as>f<block_start>pickle.dump(bucketed_files f)<block_end><block_end><def_stmt>main file dynamic metadata_file=<none><block_start>stories=get_stories(file)<line_sep>dirname,ext=os.path.splitext(file)<line_sep>preprocess_stories(stories dirname dynamic metadata_file)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Parse a graph file')<line_sep>parser.add_argument("file" help="Graph file to parse")<line_sep>parser.add_argument("--static" dest="dynamic" action="store_false" help="Don't use dynamic nodes")<line_sep>parser.add_argument("--metadata-file" default=<none> help="Use this particular metadata file instead of building it from scratch")<line_sep>args=vars(parser.parse_args())<line_sep>main(**args)<block_end>
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>threading<try_stmt><block_start><import_from_stmt>unittest mock<block_end><except_stmt>ImportError# pragma: NO PY3 COVER <block_start><import_stmt>mock<block_end><import_stmt>pytest<import_from_stmt>google.cloud.ndb utils<class_stmt>Test_asbool<block_start>@staticmethod<def_stmt>test_None <block_start><assert_stmt>utils.asbool(<none>)<is><false><block_end>@staticmethod<def_stmt>test_bool <block_start><assert_stmt>utils.asbool(<true>)<is><true><assert_stmt>utils.asbool(<false>)<is><false><block_end>@staticmethod<def_stmt>test_truthy_int <block_start><assert_stmt>utils.asbool(0)<is><false><assert_stmt>utils.asbool(1)<is><true><block_end>@staticmethod<def_stmt>test_truthy_string <block_start><assert_stmt>utils.asbool("Y")<is><true><assert_stmt>utils.asbool("f")<is><false><block_end><block_end><def_stmt>test_code_info <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.code_info()<block_end><block_end><def_stmt>test_decorator <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.decorator()<block_end><block_end><def_stmt>test_frame_info <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.frame_info()<block_end><block_end><def_stmt>test_func_info <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.func_info()<block_end><block_end><def_stmt>test_gen_info <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.gen_info()<block_end><block_end><def_stmt>test_get_stack <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.get_stack()<block_end><block_end><class_stmt>Test_logging_debug<block_start>@staticmethod@mock.patch("google.cloud.ndb.utils.DEBUG" <false>)<def_stmt>test_noop <block_start>log=mock.Mock(spec=("debug" ))<line_sep>utils.logging_debug(log "hello dad! {} {where}" "I'm" where="in jail")<line_sep>log.debug.assert_not_called()<block_end>@staticmethod@mock.patch("google.cloud.ndb.utils.DEBUG" <true>)<def_stmt>test_log_it <block_start>log=mock.Mock(spec=("debug" ))<line_sep>utils.logging_debug(log "hello dad! {} {where}" "I'm" where="in jail")<line_sep>log.debug.assert_called_once_with("hello dad! 
I'm in jail")<block_end><block_end><def_stmt>test_positional <block_start>@utils.positional(2)<def_stmt>test_func a=1 b=2 **kwargs<block_start><return>a b<block_end>@utils.positional(1)<def_stmt>test_func2 a=3 **kwargs<block_start><return>a<block_end><with_stmt>pytest.raises(TypeError)<block_start>test_func(1 2 3)<block_end><with_stmt>pytest.raises(TypeError)<block_start>test_func2(1 2)<block_end><assert_stmt>test_func(4 5 x=0)<eq>(4 5)<assert_stmt>test_func(6)<eq>(6 2)<assert_stmt>test_func2(6)<eq>6<block_end><def_stmt>test_keyword_only <block_start>@utils.keyword_only(foo=1 bar=2 baz=3)<def_stmt>test_kwonly **kwargs<block_start><return>kwargs["foo"] kwargs["bar"] kwargs["baz"]<block_end><with_stmt>pytest.raises(TypeError)<block_start>test_kwonly(faz=4)<block_end><assert_stmt>test_kwonly()<eq>(1 2 3)<assert_stmt>test_kwonly(foo=3 bar=5 baz=7)<eq>(3 5 7)<assert_stmt>test_kwonly(baz=7)<eq>(1 2 7)<block_end><def_stmt>test_threading_local <block_start><assert_stmt>utils.threading_local<is>threading.local<block_end><def_stmt>test_tweak_logging <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.tweak_logging()<block_end><block_end><def_stmt>test_wrapping <block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>utils.wrapping()<block_end><block_end>
<import_stmt>unittest<import_from_stmt>coldtype.pens.cairopen CairoPen<import_from_stmt>pathlib Path<import_from_stmt>coldtype.color hsl<import_from_stmt>coldtype.geometry Rect<import_from_stmt>coldtype.text.composer StSt Font<import_from_stmt>coldtype.pens.datpen DATPen DATPens<import_from_stmt>PIL Image<import_stmt>drawBot<as>db<import_stmt>imagehash<import_stmt>contextlib<line_sep>co=Font.Cacheable("assets/ColdtypeObviously-VF.ttf")<line_sep>renders=Path("test/renders/cairo")<line_sep>renders.mkdir(parents=<true> exist_ok=<true>)<def_stmt>hash_img path<block_start><if_stmt>path.exists()<block_start><return>(imagehash.colorhash(Image.open(path)) imagehash.average_hash(Image.open(path)))<block_end><else_stmt><block_start><return>-1<block_end><block_end>@contextlib.contextmanager<def_stmt>test_image test:unittest.TestCase path rect=Rect(300 300)<block_start>img=(renders/path)<line_sep>hash_before=hash_img(img)<if_stmt>img.exists()<block_start>img.unlink()<block_end><yield>(img rect)<line_sep>hash_after=hash_img(img)<line_sep>test.assertEqual(hash_after hash_before)<line_sep>test.assertEqual(img.exists() <true>)<block_end><class_stmt>TestCairoPen(unittest.TestCase)<block_start><def_stmt>test_cairo_pdf self<block_start>r=Rect(300 300)<line_sep>pdf=renders/"test_cairo.pdf"<line_sep>dp=(StSt("CDEL" co 100 wdth=0.5).pens().align(r))<line_sep>CairoPen.Composite(dp r pdf)<line_sep>self.assertEqual(len(dp) 4)<line_sep>self.assertEqual(type(dp) DATPens)<block_end><def_stmt>test_cairo_png self<block_start><with_stmt>test_image(self "test_cairo.png")<as>(i r)<block_start>rr=Rect(0 0 100 100)<line_sep>dp=(DATPen().define(r=rr c=75).gs("$r↗ $r↓|↘|$c $r↖|↙|$c").align(r).scale(1.2).rotate(180).f(hsl(0.5 a=0.1)).s(hsl(0.9)).sw(5))<line_sep>CairoPen.Composite(dp r i)<line_sep>self.assertEqual(len(dp.value) 4)<line_sep>self.assertEqual(type(dp) DATPen)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <import_from_stmt>.dialog_bot DialogBot<import_from_stmt>.teams_bot TeamsBot<line_sep>__all__=["DialogBot" "TeamsBot"]<line_sep>
<import_from_stmt>typing List<import_stmt>databases<import_stmt>pytest<import_stmt>sqlalchemy<import_stmt>ormar<import_from_stmt>tests.settings DATABASE_URL<line_sep>database=databases.Database(DATABASE_URL force_rollback=<true>)<line_sep>metadata=sqlalchemy.MetaData()<class_stmt>BaseMeta(ormar.ModelMeta)<block_start>database=database<line_sep>metadata=metadata<block_end><class_stmt>Language(ormar.Model)<block_start><class_stmt>Meta(BaseMeta)<block_start>tablename="languages"<block_end>id:int=ormar.Integer(primary_key=<true>)<line_sep>name:str=ormar.String(max_length=100)<line_sep>level:str=ormar.String(max_length=150 default="Beginner")<block_end><class_stmt>CringeLevel(ormar.Model)<block_start><class_stmt>Meta(BaseMeta)<block_start>tablename="levels"<block_end>id:int=ormar.Integer(primary_key=<true>)<line_sep>name:str=ormar.String(max_length=100)<line_sep>language=ormar.ForeignKey(Language)<block_end><class_stmt>NickName(ormar.Model)<block_start><class_stmt>Meta(BaseMeta)<block_start>tablename="nicks"<block_end>id:int=ormar.Integer(primary_key=<true>)<line_sep>name:str=ormar.String(max_length=100 nullable=<false> name="hq_name")<line_sep>is_lame:bool=ormar.Boolean(nullable=<true>)<line_sep>level:CringeLevel=ormar.ForeignKey(CringeLevel)<block_end><class_stmt>HQ(ormar.Model)<block_start><class_stmt>Meta(BaseMeta)<block_start>tablename="hqs"<block_end>id:int=ormar.Integer(primary_key=<true>)<line_sep>name:str=ormar.String(max_length=100 nullable=<false> name="hq_name")<line_sep>nicks:List[NickName]=ormar.ManyToMany(NickName)<block_end><class_stmt>Company(ormar.Model)<block_start><class_stmt>Meta(BaseMeta)<block_start>tablename="companies"<block_end>id:int=ormar.Integer(primary_key=<true>)<line_sep>name:str=ormar.String(max_length=100 nullable=<false> name="company_name")<line_sep>founded:int=ormar.Integer(nullable=<true>)<line_sep>hq:HQ=ormar.ForeignKey(HQ related_name="companies")<block_end>@pytest.fixture(autouse=<true> scope="module")<def_stmt>create_test_database <block_start>engine=sqlalchemy.create_engine(DATABASE_URL)<line_sep>metadata.drop_all(engine)<line_sep>metadata.create_all(engine)<line_sep><yield><line_sep>metadata.drop_all(engine)<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_load_all_fk_rel <block_start><async_keyword><with_stmt>database<block_start><async_keyword><with_stmt>database.transaction(force_rollback=<true>)<block_start>hq=<await>HQ.objects.create(name="Main")<line_sep>company=<await>Company.objects.create(name="Banzai" founded=1988 hq=hq)<line_sep>hq=<await>HQ.objects.get(name="Main")<line_sep><await>hq.load_all()<assert_stmt>hq.companies[0]<eq>company<assert_stmt>hq.companies[0].name<eq>"Banzai"<assert_stmt>hq.companies[0].founded<eq>1988<line_sep>hq2=<await>HQ.objects.select_all().get(name="Main")<assert_stmt>hq2.companies[0]<eq>company<assert_stmt>hq2.companies[0].name<eq>"Banzai"<assert_stmt>hq2.companies[0].founded<eq>1988<block_end><block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_load_all_many_to_many <block_start><async_keyword><with_stmt>database<block_start><async_keyword><with_stmt>database.transaction(force_rollback=<true>)<block_start>nick1=<await>NickName.objects.create(name="BazingaO" is_lame=<false>)<line_sep>nick2=<await>NickName.objects.create(name="Bazinga20" 
is_lame=<true>)<line_sep>hq=<await>HQ.objects.create(name="Main")<line_sep><await>hq.nicks.add(nick1)<line_sep><await>hq.nicks.add(nick2)<line_sep>hq=<await>HQ.objects.get(name="Main")<line_sep><await>hq.load_all()<assert_stmt>hq.nicks[0]<eq>nick1<assert_stmt>hq.nicks[0].name<eq>"BazingaO"<assert_stmt>hq.nicks[1]<eq>nick2<assert_stmt>hq.nicks[1].name<eq>"Bazinga20"<line_sep>hq2=<await>HQ.objects.select_all().get(name="Main")<assert_stmt>hq2.nicks[0]<eq>nick1<assert_stmt>hq2.nicks[0].name<eq>"BazingaO"<assert_stmt>hq2.nicks[1]<eq>nick2<assert_stmt>hq2.nicks[1].name<eq>"Bazinga20"<block_end><block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_load_all_with_order <block_start><async_keyword><with_stmt>database<block_start><async_keyword><with_stmt>database.transaction(force_rollback=<true>)<block_start>nick1=<await>NickName.objects.create(name="Barry" is_lame=<false>)<line_sep>nick2=<await>NickName.objects.create(name="Joe" is_lame=<true>)<line_sep>hq=<await>HQ.objects.create(name="Main")<line_sep><await>hq.nicks.add(nick1)<line_sep><await>hq.nicks.add(nick2)<line_sep>hq=<await>HQ.objects.get(name="Main")<line_sep><await>hq.load_all(order_by="-nicks__name")<assert_stmt>hq.nicks[0]<eq>nick2<assert_stmt>hq.nicks[0].name<eq>"Joe"<assert_stmt>hq.nicks[1]<eq>nick1<assert_stmt>hq.nicks[1].name<eq>"Barry"<line_sep><await>hq.load_all()<assert_stmt>hq.nicks[0]<eq>nick1<assert_stmt>hq.nicks[1]<eq>nick2<line_sep>hq2=(<await>HQ.objects.select_all().order_by("-nicks__name").get(name="Main"))<assert_stmt>hq2.nicks[0]<eq>nick2<assert_stmt>hq2.nicks[1]<eq>nick1<line_sep>hq3=<await>HQ.objects.select_all().get(name="Main")<assert_stmt>hq3.nicks[0]<eq>nick1<assert_stmt>hq3.nicks[1]<eq>nick2<block_end><block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_loading_reversed_relation <block_start><async_keyword><with_stmt>database<block_start><async_keyword><with_stmt>database.transaction(force_rollback=<true>)<block_start>hq=<await>HQ.objects.create(name="Main")<line_sep><await>Company.objects.create(name="Banzai" founded=1988 hq=hq)<line_sep>company=<await>Company.objects.get(name="Banzai")<line_sep><await>company.load_all()<assert_stmt>company.hq<eq>hq<line_sep>company2=<await>Company.objects.select_all().get(name="Banzai")<assert_stmt>company2.hq<eq>hq<block_end><block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_loading_nested <block_start><async_keyword><with_stmt>database<block_start><async_keyword><with_stmt>database.transaction(force_rollback=<true>)<block_start>language=<await>Language.objects.create(name="English")<line_sep>level=<await>CringeLevel.objects.create(name="High" language=language)<line_sep>level2=<await>CringeLevel.objects.create(name="Low" language=language)<line_sep>nick1=<await>NickName.objects.create(name="BazingaO" is_lame=<false> level=level)<line_sep>nick2=<await>NickName.objects.create(name="Bazinga20" is_lame=<true> 
level=level2)<line_sep>hq=<await>HQ.objects.create(name="Main")<line_sep><await>hq.nicks.add(nick1)<line_sep><await>hq.nicks.add(nick2)<line_sep>hq=<await>HQ.objects.get(name="Main")<line_sep><await>hq.load_all(follow=<true>)<assert_stmt>hq.nicks[0]<eq>nick1<assert_stmt>hq.nicks[0].name<eq>"BazingaO"<assert_stmt>hq.nicks[0].level.name<eq>"High"<assert_stmt>hq.nicks[0].level.language.name<eq>"English"<assert_stmt>hq.nicks[1]<eq>nick2<assert_stmt>hq.nicks[1].name<eq>"Bazinga20"<assert_stmt>hq.nicks[1].level.name<eq>"Low"<assert_stmt>hq.nicks[1].level.language.name<eq>"English"<line_sep>hq2=<await>HQ.objects.select_all(follow=<true>).get(name="Main")<assert_stmt>hq2.nicks[0]<eq>nick1<assert_stmt>hq2.nicks[0].name<eq>"BazingaO"<assert_stmt>hq2.nicks[0].level.name<eq>"High"<assert_stmt>hq2.nicks[0].level.language.name<eq>"English"<assert_stmt>hq2.nicks[1]<eq>nick2<assert_stmt>hq2.nicks[1].name<eq>"Bazinga20"<assert_stmt>hq2.nicks[1].level.name<eq>"Low"<assert_stmt>hq2.nicks[1].level.language.name<eq>"English"<line_sep>hq5=<await>HQ.objects.select_all().get(name="Main")<assert_stmt>len(hq5.nicks)<eq>2<line_sep><await>hq5.nicks.select_all(follow=<true>).all()<assert_stmt>hq5.nicks[0]<eq>nick1<assert_stmt>hq5.nicks[0].name<eq>"BazingaO"<assert_stmt>hq5.nicks[0].level.name<eq>"High"<assert_stmt>hq5.nicks[0].level.language.name<eq>"English"<assert_stmt>hq5.nicks[1]<eq>nick2<assert_stmt>hq5.nicks[1].name<eq>"Bazinga20"<assert_stmt>hq5.nicks[1].level.name<eq>"Low"<assert_stmt>hq5.nicks[1].level.language.name<eq>"English"<line_sep><await>hq.load_all(follow=<true> exclude="nicks__level__language")<assert_stmt>len(hq.nicks)<eq>2<assert_stmt>hq.nicks[0].level.language<is><none><assert_stmt>hq.nicks[1].level.language<is><none><line_sep>hq3=(<await>HQ.objects.select_all(follow=<true>).exclude_fields("nicks__level__language").get(name="Main"))<assert_stmt>len(hq3.nicks)<eq>2<assert_stmt>hq3.nicks[0].level.language<is><none><assert_stmt>hq3.nicks[1].level.language<is><none><line_sep><await>hq.load_all(follow=<true> exclude="nicks__level__language__level")<assert_stmt>len(hq.nicks)<eq>2<assert_stmt>hq.nicks[0].level.language<is><not><none><assert_stmt>hq.nicks[0].level.language.level<is><none><assert_stmt>hq.nicks[1].level.language<is><not><none><assert_stmt>hq.nicks[1].level.language.level<is><none><line_sep><await>hq.load_all(follow=<true> exclude="nicks__level")<assert_stmt>len(hq.nicks)<eq>2<assert_stmt>hq.nicks[0].level<is><none><assert_stmt>hq.nicks[1].level<is><none><line_sep><await>hq.load_all(follow=<true> exclude="nicks")<assert_stmt>len(hq.nicks)<eq>0<block_end><block_end><block_end>
# Copyright 2014, Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain # rights in this software. <import_from_stmt>behave *<import_stmt>nose<import_stmt>numpy<import_stmt>toyplot.data<line_sep>@given(u'values from -1000 to -1')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-1000 -1 100)<block_end>@given(u'values from -1000 to -0.01')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-1000 -0.01 100)<block_end>@given(u'values from -1000 to 0')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-1000 0 100)<block_end>@given(u'values from -1000 to 0.5')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-1000 0.5 100)<block_end>@given(u'values from -0.5 to 1000')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-0.5 1000 100)<block_end>@given(u'values from 0 to 1000')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(0 1000 100)<block_end>@given(u'values from 0.01 to 1000')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(0.01 1000 100)<block_end>@given(u'values from 1 to 1000')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(1 1000 100)<block_end>@given(u'values from -1000 to 1000')<def_stmt>step_impl context<block_start>context.x=numpy.linspace(-1000 1000 100)<block_end>@given(u'log 10 axes on x and y')<def_stmt>step_impl context<block_start>context.axes=context.canvas.cartesian(xscale="log10" yscale="log10")<block_end>@given(u'log 2 axes on x and y')<def_stmt>step_impl context<block_start>context.axes=context.canvas.cartesian(xscale="log2" yscale="log2")<block_end>@given(u'log 10 axes on x and y with custom format')<def_stmt>step_impl context<block_start>context.axes=context.canvas.cartesian(xscale="log10" yscale="log10")<line_sep>context.axes.x.ticks.locator=toyplot.locator.Log(base=10 format="{base}^{exponent}")<line_sep>context.axes.y.ticks.locator=toyplot.locator.Log(base=10 format="{base}^{exponent}")<block_end>@when(u'plotting x, x with markers')<def_stmt>step_impl context<block_start>context.axes.plot(context.x context.x marker="o")<block_end>@given(u'squared values from 0 to 10')<def_stmt>step_impl context<block_start>context.values=numpy.linspace(0 10)<power>2<block_end>@given(u'squared values from -10 to 0')<def_stmt>step_impl context<block_start>context.values=-(numpy.linspace(10 0)<power>2)<block_end>@given(u'log 10 axes on y with domain min 10')<def_stmt>step_impl context<block_start>context.axes=context.canvas.cartesian(yscale="log10")<line_sep>context.axes.y.domain.min=10<block_end>@given(u'log 10 axes on y with domain max -10')<def_stmt>step_impl context<block_start>context.axes=context.canvas.cartesian(yscale="log10")<line_sep>context.axes.y.domain.max=-10<block_end>@when(u'plotting the values with bars')<def_stmt>step_impl context<block_start>context.axes.bars(context.values)<block_end>
# SPDX-FileCopyrightText: 2021 <NAME> # SPDX-License-Identifier: MIT <import_stmt>board<import_from_stmt>adafruit_led_animation.animation.sparkle Sparkle<import_from_stmt>adafruit_led_animation.color PURPLE<import_from_stmt>adafruit_led_animation.sequence AnimationSequence<import_from_stmt>adafruit_is31fl3741.adafruit_ledglasses MUST_BUFFER LED_Glasses<import_from_stmt>adafruit_is31fl3741.led_glasses_animation LED_Glasses_Animation<line_sep>glasses=LED_Glasses(board.I2C() allocate=MUST_BUFFER)<line_sep>glasses.set_led_scaling(255)<line_sep>glasses.global_current=0xFE<line_sep>glasses.enable=<true><line_sep>pixels=LED_Glasses_Animation(glasses)<line_sep>anim2=Sparkle(pixels 0.05 PURPLE)<line_sep>group=AnimationSequence(anim2 advance_interval=5 auto_reset=<true> auto_clear=<true>)<while_stmt><true><block_start>group.animate()<block_end>
<import_stmt>abc<import_stmt>logging<line_sep>__all__=["multihash" "Comparable" "is_in_class" "get_class" "get_context"]<line_sep>log=logging.getLogger(__name__)<def_stmt>multihash *args<block_start>"""Multi-argument order-sensitive hash. Args: *args: Objects to hash. Returns: int: Hash. """<line_sep><return>hash(args)<block_end><class_stmt>Comparable<block_start>"""A mixin that makes instances of the class comparable. Requires the subclass to just implement `__le__`. """<line_sep>__metaclass__=abc.ABCMeta<def_stmt>__eq__ self other<block_start><return>self<le>other<le>self<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end>@abc.abstractmethod<def_stmt>__le__ self other<block_start><pass><block_end># pragma: no cover <def_stmt>__lt__ self other<block_start><return>self<le>other<and>self<ne>other<block_end><def_stmt>__ge__ self other<block_start><return>other.__le__(self)<block_end><def_stmt>__gt__ self other<block_start><return>self<ge>other<and>self<ne>other<block_end><def_stmt>is_comparable self other<block_start>"""Check whether this object is comparable with another one. Args: other (:class:`.util.Comparable`): Object to check comparability with. Returns: bool: `True` if the object is comparable with `other` and `False` otherwise. """<line_sep><return>self<l>other<or>self<eq>other<or>self<g>other<block_end><block_end><def_stmt>is_in_class f<block_start>"""Check if a function is part of a class. Args: f (function): Function to check. Returns: bool: `True` if `f` is part of a class, else `False`. """<line_sep>parts=f.__qualname__.split(".")<line_sep><return>len(parts)<ge>2<and>parts[-2]<ne>"<locals>"<block_end><def_stmt>_split_parts f<block_start>qualified_name=f.__module__+"."+f.__qualname__<line_sep><return>qualified_name.split(".")<block_end><def_stmt>get_class f<block_start>"""Assuming that `f` is part of a class, get the fully qualified name of the class. Args: f (function): Method to get class name for. Returns: str: Fully qualified name of class. """<line_sep>parts=_split_parts(f)<line_sep><return>".".join(parts[:-1])<block_end><def_stmt>get_context f<block_start>"""Get the fully qualified name of the context for `f`. If `f` is part of a class, then the context corresponds to the scope of the class. If `f` is not part of a class, then the context corresponds to the scope of the function. Args: f (function): Method to get context for. Returns: str: Context. """<line_sep>parts=_split_parts(f)<if_stmt>is_in_class(f)# Split off function name and class. <block_start><return>".".join(parts[:-2])<block_end><else_stmt># Split off function name only. <block_start><return>".".join(parts[:-1])<block_end><block_end>
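# Illustrative usage sketch (not part of the original module): "Version" and
# "demo" are hypothetical names added only to show the helpers above. Per the
# Comparable docstring, implementing __le__ is enough to derive the rest.
class Version(Comparable):
    def __init__(self, major, minor):
        self.major, self.minor = major, minor

    def __le__(self, other):
        return (self.major, self.minor) <= (other.major, other.minor)


def demo():
    assert Version(1, 2) < Version(1, 3)        # __lt__/__ne__ derived from __le__
    assert Version(1, 2) == Version(1, 2)       # __eq__ is "self <= other <= self"
    assert Version(2, 0).is_comparable(Version(1, 9))
    assert multihash(1, 2) != multihash(2, 1)   # order-sensitive, unlike hashing a frozenset
    assert is_in_class(Version.__le__)
    assert get_class(Version.__le__).endswith(".Version")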
<import_from_stmt>.base_renderer *<import_stmt>os.path<line_sep>__file__=os.path.normpath(os.path.abspath(__file__))<line_sep>__path__=os.path.dirname(__file__)<line_sep>@renderer<class_stmt>MediaWikiRenderer(CommandlineRenderer)<block_start><def_stmt>__init__ self<block_start>super(MediaWikiRenderer self).__init__(executable='ruby' args=['-rubygems' os.path.join(__path__ 'bin/mw2html.rb')])<block_end>@classmethod<def_stmt>is_enabled cls filename syntax<block_start><if_stmt>syntax<eq>'text.html.mediawiki'<block_start><return><true><block_end><return>filename.endswith('.mediawiki')<or>filename.endswith('.wiki')<block_end><block_end>
<import_stmt>psutil#Library to get System details <import_stmt>time<import_stmt>pyttsx3# Library for text to speech Offline <import_from_stmt>win10toast ToastNotifier# also need to install win32api (This is for Notifications) <import_stmt>threading# To make notification and speech work at same time toaster=ToastNotifier()<line_sep>x=pyttsx3.init()<line_sep>x.setProperty('rate' 130)<line_sep>x.setProperty('volume' 8)<line_sep>count=0<def_stmt>show_notification show_text<block_start>toaster.show_toast(show_text icon_path='battery.ico' duration=10)<line_sep># loop the toaster over some period of time <while_stmt>toaster.notification_active()<block_start>time.sleep(0.1)<block_end><block_end><def_stmt>monitor <block_start><while_stmt>(<true>)<block_start>time.sleep(10)<line_sep>battery=psutil.sensors_battery()<line_sep>plugged=battery.power_plugged<line_sep>percent=int(battery.percent)<if_stmt>percent<eq>100<block_start><if_stmt>plugged<eq><true><block_start>processThread=threading.Thread(target=show_notification args=("Laptop Fully Charged" ))# <- note extra ',' processThread.start()<line_sep>x.say("Laptop is Fully Charged Please plug out the cable")<line_sep>x.runAndWait()<block_end><block_end><elif_stmt>percent<eq>90<block_start><if_stmt>plugged<eq><true><block_start><if_stmt>count<eq>0<block_start>processThread=threading.Thread(target=show_notification args=("Your Battery at 90% Please plug out the cable" ))# <- note extra ',' processThread.start()<line_sep>x.say("Your battery at 90% ")<line_sep>x.runAndWait()<line_sep>count=count+1<block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>monitor()<block_end>
<import_stmt>os<line_sep>code_lines=list()<line_sep>notation_lines=list()<line_sep>blank_lines=list()<def_stmt>process_file filename<block_start><global>code_lines<line_sep><global>notation_lines<line_sep><global>blank_lines<with_stmt>open(filename 'r')<as>file<block_start><for_stmt>line file.readlines()<block_start>_line=line.strip()<if_stmt><not>_line<block_start>blank_lines.append(_line)<block_end><elif_stmt>_line.startswith('#')<block_start>notation_lines.append(_line)<block_end><else_stmt><block_start>code_lines.append(_line)<block_end><block_end><block_end><block_end><def_stmt>show_result <block_start><global>code_lines<line_sep><global>notation_lines<line_sep><global>blank_lines<line_sep>print('-'<times>20)<line_sep>print('code:' len(code_lines))<for_stmt>line code_lines<block_start>print(line)<block_end>print('-'<times>20)<line_sep>print('notation:' len(notation_lines))<for_stmt>line notation_lines<block_start>print(line)<block_end>print('-'<times>20)<line_sep>print('blank:' len(blank_lines))<line_sep>code_lines.clear()<line_sep>notation_lines.clear()<line_sep>blank_lines.clear()<block_end><def_stmt>process_files path='../6'<block_start>files=os.listdir(path)<for_stmt>file files<block_start><if_stmt>file.endswith('.py')<block_start>print('='<times>30)<line_sep>print('current file:' os.path.join(path file))<line_sep>process_file(os.path.join(path file))<line_sep>show_result()<block_end><block_end><block_end>process_files()<line_sep>
VERSION='0.0.1'<line_sep>default_app_config='jet_django.apps.JetDjangoConfig'<line_sep>
<import_stmt>pytest<import_stmt>requests<import_from_stmt>suite.resources_utils ensure_connection<line_sep>@pytest.mark.ingresses@pytest.mark.parametrize('ingress_controller, expected_responses' [pytest.param({"extra_args":["-health-status=true" "-health-status-uri=/something-va(l)id/blabla"]} {"/something-va(l)id/blabla":200 "/nginx-health":404} id="custom-health-status-uri") pytest.param({"extra_args":["-health-status=true"]} {"/something-va(l)id/blabla":404 "/nginx-health":200} id="default-health-status-uri") pytest.param({"extra_args":["-health-status=false"]} {"/something-va(l)id/blabla":404 "/nginx-health":404} id="disable-health-status")] indirect=["ingress_controller"])<class_stmt>TestHealthStatusURI<block_start><def_stmt>test_response_code self ingress_controller_endpoint ingress_controller expected_responses<block_start><for_stmt>uri expected_responses<block_start>req_url=f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}{uri}"<line_sep>ensure_connection(req_url expected_responses[uri])<line_sep>resp=requests.get(req_url)<assert_stmt>resp.status_code<eq>expected_responses[uri] f"Expected {expected_responses[uri]} code for {uri} but got {resp.status_code}"<block_end><block_end><block_end>
<import_stmt>arrow<import_stmt>json<import_stmt>requests<def_stmt>kanban_webhook event context<block_start>input_body=json.loads(event['body'])<line_sep>print(event['body'])<line_sep>action=input_body["action"]<line_sep>action_type=action["type"]<if_stmt>action_type<eq>"createCard"<block_start>list_name,card_name=get_create_card(action["data"])<block_end><elif_stmt>action_type<eq>"updateCard"<block_start>list_name,card_name=get_update_card(action["data"])<block_end>kanban_list=["DOING" "BREAK" "DONE"]<if_stmt>list_name<in>kanban_list<block_start>payload=make_payload(action=list_name msg=card_name)<line_sep>r=send_to_kino({"text":payload})<line_sep>response={"statusCode":r.status_code}<block_end><else_stmt><block_start>response={"statusCode":400}<block_end><return>response<block_end><def_stmt>get_create_card action_data<block_start>list_name=action_data["list"]["name"].upper()<line_sep>card_name=action_data["card"]["name"]<line_sep><return>list_name card_name<block_end><def_stmt>get_update_card action_data<block_start>list_name=action_data["listAfter"]["name"].upper()<line_sep>card_name=action_data["card"]["name"]<line_sep><return>list_name card_name<block_end><def_stmt>make_payload action=<none> msg=<none> time=<none><block_start><if_stmt>time<is><none><block_start>now=arrow.now()<line_sep>time=now.format(" MMMM d, YYYY")+" at "+now.format("HH:mmA")<block_end>payload={"action":"KANBAN_"+action "msg":msg "time":time}<line_sep><return>json.dumps(payload)<block_end><def_stmt>send_to_kino data<block_start><return>requests.post("https://hooks.slack.com/services/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" data=json.dumps(data))<block_end>
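# Illustrative local invocation (not in the original handler): a minimal
# Trello-style "createCard" body. Note kanban_webhook posts to the hard-coded
# Slack webhook above, so the returned status code depends on that endpoint.
if __name__ == "__main__":
    fake_event = {
        "body": json.dumps({
            "action": {
                "type": "createCard",
                "data": {"list": {"name": "doing"}, "card": {"name": "write tests"}},
            }
        })
    }
    print(kanban_webhook(fake_event, None))  # e.g. {'statusCode': 200}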
# This function app is to ensure the code outside main() function # should only get loaded once in __init__.py <import_from_stmt>.count invoke get_invoke_count reset_count<line_sep>invoke()<def_stmt>main req<block_start>count=get_invoke_count()<line_sep>reset_count()<line_sep><return>f'executed count = {count}'<block_end>
""" ShadeSketch https://github.com/qyzdao/ShadeSketch Learning to Shadow Hand-drawn Sketches <NAME>, <NAME>, <NAME> Copyright (C) 2020 The respective authors and Project HAT. All rights reserved. Licensed under MIT license. """<import_stmt>tensorflow<as>tf<line_sep># import keras keras=tf.keras<line_sep>K=keras.backend<line_sep>Layer=keras.layers.Layer<line_sep>Conv2D=keras.layers.Conv2D<line_sep>InputSpec=keras.layers.InputSpec<line_sep>image_data_format=K.image_data_format<line_sep>activations=keras.activations<line_sep>initializers=keras.initializers<line_sep>regularizers=keras.regularizers<line_sep>constraints=keras.constraints<class_stmt>Composite(Layer)<block_start><def_stmt>__init__ self data_format='channels_last' **kwargs<block_start>self.data_format=data_format<line_sep>super(Composite self).__init__(**kwargs)<block_end><def_stmt>call self inputs<block_start>line_inputs,shade_inputs=inputs<line_sep><return>line_inputs+(shade_inputs+1)<times>0.25<block_end><def_stmt>compute_output_shape self input_shape<block_start><return>input_shape[0]<block_end><block_end><class_stmt>PixelwiseConcat(Layer)<block_start><def_stmt>__init__ self data_format='channels_last' **kwargs<block_start>self.data_format=data_format<line_sep>super(PixelwiseConcat self).__init__(**kwargs)<block_end><def_stmt>call self inputs<block_start>pixel_inputs,unit_inputs=inputs<if_stmt>self.data_format<eq>'channels_first'<block_start>repeated_unit_inputs=tf.tile(K.expand_dims(K.expand_dims(unit_inputs 2) 2) [1 K.shape(pixel_inputs)[2] K.shape(pixel_inputs)[3] 1])<block_end><elif_stmt>self.data_format<eq>'channels_last'<block_start>repeated_unit_inputs=tf.tile(K.expand_dims(K.expand_dims(unit_inputs 1) 1) [1 K.shape(pixel_inputs)[1] K.shape(pixel_inputs)[2] 1])<block_end><return>K.concatenate([pixel_inputs repeated_unit_inputs])<block_end><def_stmt>compute_output_shape self input_shape<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start><return>(input_shape[0][0] input_shape[0][1]+input_shape[1][1] input_shape[0][2] input_shape[0][3])<block_end><elif_stmt>self.data_format<eq>'channels_last'<block_start><return>(input_shape[0][0] input_shape[0][1] input_shape[0][2] input_shape[0][3]+input_shape[1][1])<block_end><block_end><block_end><class_stmt>SubPixelConv2D(Conv2D)<block_start><def_stmt>__init__ self filters kernel_size r padding='same' data_format=<none> strides=(1 1) activation=<none> use_bias=<true> kernel_initializer='glorot_uniform' bias_initializer='zeros' kernel_regularizer=<none> bias_regularizer=<none> activity_regularizer=<none> kernel_constraint=<none> bias_constraint=<none> **kwargs<block_start>super(SubPixelConv2D self).__init__(filters=r<times>r<times>filters kernel_size=kernel_size strides=strides padding=padding data_format=data_format activation=activation use_bias=use_bias kernel_initializer=kernel_initializer bias_initializer=bias_initializer kernel_regularizer=kernel_regularizer bias_regularizer=bias_regularizer activity_regularizer=activity_regularizer kernel_constraint=kernel_constraint bias_constraint=bias_constraint **kwargs)<line_sep>self.r=r<if_stmt>hasattr(tf.nn 'depth_to_space')<block_start>self.depth_to_space=tf.nn.depth_to_space<block_end><else_stmt><block_start>self.depth_to_space=tf.depth_to_space<block_end><block_end><def_stmt>phase_shift self I<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start><return>self.depth_to_space(I self.r data_format="NCHW")<block_end><elif_stmt>self.data_format<eq>'channels_last'<block_start><return>self.depth_to_space(I 
self.r data_format="NHWC")<block_end><block_end><def_stmt>call self inputs<block_start><return>self.phase_shift(super(SubPixelConv2D self).call(inputs))<block_end><def_stmt>compute_output_shape self input_shape<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start>n,c,h,w=super(SubPixelConv2D self).compute_output_shape(input_shape)<block_end><elif_stmt>self.data_format<eq>'channels_last'<block_start>n,h,w,c=super(SubPixelConv2D self).compute_output_shape(input_shape)<block_end><if_stmt>h<is><not><none><block_start>h=int(self.r<times>h)<block_end><if_stmt>w<is><not><none><block_start>w=int(self.r<times>w)<block_end>c=int(c/(self.r<times>self.r))<if_stmt>self.data_format<eq>'channels_first'<block_start><return>(n c h w)<block_end><elif_stmt>self.data_format<eq>'channels_last'<block_start><return>(n h w c)<block_end><block_end><def_stmt>get_config self<block_start>config=super(Conv2D self).get_config()<line_sep>config.pop('rank')<line_sep>config.pop('dilation_rate')<line_sep>config['filters']<augdiv>self.r<times>self.r<line_sep>config['r']=self.r<line_sep><return>config<block_end><block_end><class_stmt>SelfAttention(Layer)<block_start><def_stmt>__init__ self data_format='channels_last' activation=<none> use_bias=<true> kernel_initializer='glorot_uniform' bias_initializer='zeros' kernel_regularizer=<none> bias_regularizer=<none> activity_regularizer=<none> kernel_constraint=<none> bias_constraint=<none> **kwargs<block_start>super(SelfAttention self).__init__(**kwargs)<line_sep>self.data_format=data_format<line_sep>self.activation=activations.get(activation)<line_sep>self.use_bias=use_bias<line_sep>self.kernel_initializer=initializers.get(kernel_initializer)<line_sep>self.bias_initializer=initializers.get(bias_initializer)<line_sep>self.kernel_regularizer=regularizers.get(kernel_regularizer)<line_sep>self.bias_regularizer=regularizers.get(bias_regularizer)<line_sep>self.activity_regularizer=regularizers.get(activity_regularizer)<line_sep>self.kernel_constraint=constraints.get(kernel_constraint)<line_sep>self.bias_constraint=constraints.get(bias_constraint)<block_end><def_stmt>build self input_shape<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start>channel_axis=1<block_end><else_stmt><block_start>channel_axis=-1<block_end>kernel_size=(1 1)<line_sep>self.filters=int(input_shape[channel_axis])<line_sep>self.kernel_f=self.add_weight(shape=kernel_size+(self.filters self.filters<floordiv>8) initializer=self.kernel_initializer name='kernel_f' regularizer=self.kernel_regularizer constraint=self.kernel_constraint)<line_sep>self.kernel_g=self.add_weight(shape=kernel_size+(self.filters self.filters<floordiv>8) initializer=self.kernel_initializer name='kernel_g' regularizer=self.kernel_regularizer constraint=self.kernel_constraint)<line_sep>self.kernel_h=self.add_weight(shape=kernel_size+(self.filters self.filters) initializer=self.kernel_initializer name='kernel_h' regularizer=self.kernel_regularizer constraint=self.kernel_constraint)<if_stmt>self.use_bias<block_start>self.bias_f=self.add_weight(shape=(self.filters<floordiv>8 ) initializer=self.bias_initializer name='bias_f' regularizer=self.bias_regularizer constraint=self.bias_constraint)<line_sep>self.bias_g=self.add_weight(shape=(self.filters<floordiv>8 ) initializer=self.bias_initializer name='bias_g' regularizer=self.bias_regularizer constraint=self.bias_constraint)<line_sep>self.bias_h=self.add_weight(shape=(self.filters ) initializer=self.bias_initializer name='bias_h' regularizer=self.bias_regularizer 
constraint=self.bias_constraint)<block_end><else_stmt><block_start>self.bias_f=<none><line_sep>self.bias_g=<none><line_sep>self.bias_h=<none><block_end>self.gamma=self.add_weight(name='gamma' shape=(1 ) initializer=initializers.Constant(0))<line_sep>super(SelfAttention self).build(input_shape)<block_end><def_stmt>call self inputs<block_start>f=K.conv2d(inputs self.kernel_f data_format=self.data_format strides=(1 1) dilation_rate=(1 1))<line_sep># [bs, h, w, c'] g=K.conv2d(inputs self.kernel_g data_format=self.data_format strides=(1 1) dilation_rate=(1 1))<line_sep># [bs, h, w, c'] h=K.conv2d(inputs self.kernel_h data_format=self.data_format strides=(1 1) dilation_rate=(1 1))<line_sep># [bs, h, w, c] <if_stmt>self.use_bias<block_start>f=K.bias_add(f self.bias_f data_format=self.data_format)# [bs, h, w, c'] g=K.bias_add(g self.bias_g data_format=self.data_format)# [bs, h, w, c'] h=K.bias_add(h self.bias_h data_format=self.data_format)<block_end># [bs, h, w, c] # N = h * w s=K.dot(K.batch_flatten(g) K.transpose(K.batch_flatten(f)))# # [bs, N, N] beta=K.softmax(s)# attention map o=K.dot(beta K.batch_flatten(h))# [bs, N, C] o=K.reshape(o K.shape(inputs))# [bs, h, w, C] <return>self.activation(self.gamma<times>o+inputs)<block_end><def_stmt>compute_output_shape self input_shape<block_start><return>input_shape<block_end><def_stmt>get_config self<block_start>config={'activation':activations.serialize(self.activation) 'data_format':self.data_format 'use_bias':self.use_bias 'kernel_initializer':initializers.serialize(self.kernel_initializer) 'bias_initializer':initializers.serialize(self.bias_initializer) 'kernel_regularizer':regularizers.serialize(self.kernel_regularizer) 'bias_regularizer':regularizers.serialize(self.bias_regularizer) 'activity_regularizer':regularizers.serialize(self.activity_regularizer) 'kernel_constraint':constraints.serialize(self.kernel_constraint) 'bias_constraint':constraints.serialize(self.bias_constraint)}<line_sep>base_config=super(SelfAttention self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end>""" Implementation of Coordinate Channel keras-coordconv MIT License Copyright (c) 2018 <NAME> https://github.com/titu1994/keras-coordconv/blob/master/coord.py """<class_stmt>_CoordinateChannel(Layer)<block_start>""" Adds Coordinate Channels to the input tensor. # Arguments rank: An integer, the rank of the input data-uniform, e.g. "2" for 2D convolution. use_radius: Boolean flag to determine whether the radius coordinate should be added for 2D rank inputs or not. data_format: A string, one of `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, ..., channels)` while `"channels_first"` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". # Input shape ND tensor with shape: `(samples, channels, *)` if `data_format` is `"channels_first"` or ND tensor with shape: `(samples, *, channels)` if `data_format` is `"channels_last"`. # Output shape ND tensor with shape: `(samples, channels + 2, *)` if `data_format` is `"channels_first"` or 5D tensor with shape: `(samples, *, channels + 2)` if `data_format` is `"channels_last"`. 
# References: - [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247) """<def_stmt>__init__ self rank use_radius=<false> data_format='channels_last' **kwargs<block_start>super(_CoordinateChannel self).__init__(**kwargs)<if_stmt>data_format<not><in>[<none> 'channels_first' 'channels_last']<block_start><raise>ValueError('`data_format` must be either "channels_last", "channels_first" '<concat>'or None.')<block_end>self.rank=rank<line_sep>self.use_radius=use_radius<line_sep>self.data_format=data_format<line_sep>self.axis=1<if>image_data_format()<eq>'channels_first'<else>-1<line_sep>self.input_spec=InputSpec(min_ndim=2)<line_sep>self.supports_masking=<true><block_end><def_stmt>build self input_shape<block_start><assert_stmt>len(input_shape)<ge>2<line_sep>input_dim=input_shape[self.axis]<line_sep>self.input_spec=InputSpec(min_ndim=self.rank+2 axes={self.axis:input_dim})<line_sep>self.built=<true><block_end><def_stmt>call self inputs training=<none> mask=<none><block_start>input_shape=K.shape(inputs)<if_stmt>self.rank<eq>1<block_start>input_shape=[input_shape[i]<for>i range(3)]<line_sep>batch_shape,dim,channels=input_shape<line_sep>xx_range=tf.tile(K.expand_dims(K.arange(0 dim) axis=0) K.stack([batch_shape 1]))<line_sep>xx_range=K.expand_dims(xx_range axis=-1)<line_sep>xx_channels=K.cast(xx_range K.floatx())<line_sep>xx_channels=xx_channels/K.cast(dim-1 K.floatx())<line_sep>xx_channels=(xx_channels<times>2)-1.<line_sep>outputs=K.concatenate([inputs xx_channels] axis=-1)<block_end><if_stmt>self.rank<eq>2<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start>inputs=K.permute_dimensions(inputs [0 2 3 1])<line_sep>input_shape=K.shape(inputs)<block_end>input_shape=[input_shape[i]<for>i range(4)]<line_sep>batch_shape,dim1,dim2,channels=input_shape<line_sep>xx_ones=tf.ones(K.stack([batch_shape dim2]) dtype='int32')<line_sep>xx_ones=K.expand_dims(xx_ones axis=-1)<line_sep>xx_range=tf.tile(K.expand_dims(K.arange(0 dim1) axis=0) K.stack([batch_shape 1]))<line_sep>xx_range=K.expand_dims(xx_range axis=1)<line_sep>xx_channels=K.batch_dot(xx_ones xx_range axes=[2 1])<line_sep>xx_channels=K.expand_dims(xx_channels axis=-1)<line_sep>xx_channels=K.permute_dimensions(xx_channels [0 2 1 3])<line_sep>yy_ones=tf.ones(K.stack([batch_shape dim1]) dtype='int32')<line_sep>yy_ones=K.expand_dims(yy_ones axis=1)<line_sep>yy_range=tf.tile(K.expand_dims(K.arange(0 dim2) axis=0) K.stack([batch_shape 1]))<line_sep>yy_range=K.expand_dims(yy_range axis=-1)<line_sep>yy_channels=K.batch_dot(yy_range yy_ones axes=[2 1])<line_sep>yy_channels=K.expand_dims(yy_channels axis=-1)<line_sep>yy_channels=K.permute_dimensions(yy_channels [0 2 1 3])<line_sep>xx_channels=K.cast(xx_channels K.floatx())<line_sep>xx_channels=xx_channels/K.cast(dim1-1 K.floatx())<line_sep>xx_channels=(xx_channels<times>2)-1.<line_sep>yy_channels=K.cast(yy_channels K.floatx())<line_sep>yy_channels=yy_channels/K.cast(dim2-1 K.floatx())<line_sep>yy_channels=(yy_channels<times>2)-1.<line_sep>outputs=K.concatenate([inputs xx_channels yy_channels] axis=-1)<if_stmt>self.use_radius<block_start>rr=K.sqrt(K.square(xx_channels-0.5)+K.square(yy_channels-0.5))<line_sep>outputs=K.concatenate([outputs rr] axis=-1)<block_end><if_stmt>self.data_format<eq>'channels_first'<block_start>outputs=K.permute_dimensions(outputs [0 3 1 2])<block_end><block_end><if_stmt>self.rank<eq>3<block_start><if_stmt>self.data_format<eq>'channels_first'<block_start>inputs=K.permute_dimensions(inputs [0 2 3 4 
1])<line_sep>input_shape=K.shape(inputs)<block_end>input_shape=[input_shape[i]<for>i range(5)]<line_sep>batch_shape,dim1,dim2,dim3,channels=input_shape<line_sep>xx_ones=tf.ones(K.stack([batch_shape dim3]) dtype='int32')<line_sep>xx_ones=K.expand_dims(xx_ones axis=-1)<line_sep>xx_range=tf.tile(K.expand_dims(K.arange(0 dim2) axis=0) K.stack([batch_shape 1]))<line_sep>xx_range=K.expand_dims(xx_range axis=1)<line_sep>xx_channels=K.batch_dot(xx_ones xx_range axes=[2 1])<line_sep>xx_channels=K.expand_dims(xx_channels axis=-1)<line_sep>xx_channels=K.permute_dimensions(xx_channels [0 2 1 3])<line_sep>xx_channels=K.expand_dims(xx_channels axis=1)<line_sep>xx_channels=tf.tile(xx_channels [1 dim1 1 1 1])<line_sep>yy_ones=tf.ones(K.stack([batch_shape dim2]) dtype='int32')<line_sep>yy_ones=K.expand_dims(yy_ones axis=1)<line_sep>yy_range=tf.tile(K.expand_dims(K.arange(0 dim3) axis=0) K.stack([batch_shape 1]))<line_sep>yy_range=K.expand_dims(yy_range axis=-1)<line_sep>yy_channels=K.batch_dot(yy_range yy_ones axes=[2 1])<line_sep>yy_channels=K.expand_dims(yy_channels axis=-1)<line_sep>yy_channels=K.permute_dimensions(yy_channels [0 2 1 3])<line_sep>yy_channels=K.expand_dims(yy_channels axis=1)<line_sep>yy_channels=tf.tile(yy_channels [1 dim1 1 1 1])<line_sep>zz_range=tf.tile(K.expand_dims(K.arange(0 dim1) axis=0) K.stack([batch_shape 1]))<line_sep>zz_range=K.expand_dims(zz_range axis=-1)<line_sep>zz_range=K.expand_dims(zz_range axis=-1)<line_sep>zz_channels=tf.tile(zz_range [1 1 dim2 dim3])<line_sep>zz_channels=K.expand_dims(zz_channels axis=-1)<line_sep>xx_channels=K.cast(xx_channels K.floatx())<line_sep>xx_channels=xx_channels/K.cast(dim2-1 K.floatx())<line_sep>xx_channels=xx_channels<times>2-1.<line_sep>yy_channels=K.cast(yy_channels K.floatx())<line_sep>yy_channels=yy_channels/K.cast(dim3-1 K.floatx())<line_sep>yy_channels=yy_channels<times>2-1.<line_sep>zz_channels=K.cast(zz_channels K.floatx())<line_sep>zz_channels=zz_channels/K.cast(dim1-1 K.floatx())<line_sep>zz_channels=zz_channels<times>2-1.<line_sep>outputs=K.concatenate([inputs zz_channels xx_channels yy_channels] axis=-1)<if_stmt>self.data_format<eq>'channels_first'<block_start>outputs=K.permute_dimensions(outputs [0 4 1 2 3])<block_end><block_end><return>outputs<block_end><def_stmt>compute_output_shape self input_shape<block_start><assert_stmt>input_shape<and>len(input_shape)<ge>2<assert_stmt>input_shape[self.axis]<if_stmt>self.use_radius<and>self.rank<eq>2<block_start>channel_count=3<block_end><else_stmt><block_start>channel_count=self.rank<block_end>output_shape=list(input_shape)<line_sep>output_shape[self.axis]=input_shape[self.axis]+channel_count<line_sep><return>tuple(output_shape)<block_end><def_stmt>get_config self<block_start>config={'rank':self.rank 'use_radius':self.use_radius 'data_format':self.data_format}<line_sep>base_config=super(_CoordinateChannel self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end><class_stmt>CoordinateChannel1D(_CoordinateChannel)<block_start>""" Adds Coordinate Channels to the input tensor of rank 1. # Arguments data_format: A string, one of `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, ..., channels)` while `"channels_first"` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". 
# Input shape 3D tensor with shape: `(batch_size, steps, input_dim)` # Output shape 3D tensor with shape: `(batch_size, steps, input_dim + 2)` # References: - [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247) """<def_stmt>__init__ self data_format=<none> **kwargs<block_start>super(CoordinateChannel1D self).__init__(rank=1 use_radius=<false> data_format=data_format **kwargs)<block_end><def_stmt>get_config self<block_start>config=super(CoordinateChannel1D self).get_config()<line_sep>config.pop('rank')<line_sep>config.pop('use_radius')<line_sep><return>config<block_end><block_end><class_stmt>CoordinateChannel2D(_CoordinateChannel)<block_start>""" Adds Coordinate Channels to the input tensor. # Arguments use_radius: Boolean flag to determine whether the radius coordinate should be added for 2D rank inputs or not. data_format: A string, one of `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, ..., channels)` while `"channels_first"` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if `data_format` is `"channels_first"` or 4D tensor with shape: `(samples, rows, cols, channels)` if `data_format` is `"channels_last"`. # Output shape 4D tensor with shape: `(samples, channels + 2/3, rows, cols)` if `data_format` is `"channels_first"` or 4D tensor with shape: `(samples, rows, cols, channels + 2/3)` if `data_format` is `"channels_last"`. If `use_radius` is set, then will have 3 additional filters, else only 2 additional filters will be added. # References: - [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247) """<def_stmt>__init__ self use_radius=<false> data_format=<none> **kwargs<block_start>super(CoordinateChannel2D self).__init__(rank=2 use_radius=use_radius data_format=data_format **kwargs)<block_end><def_stmt>get_config self<block_start>config=super(CoordinateChannel2D self).get_config()<line_sep>config.pop('rank')<line_sep><return>config<block_end><block_end>
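# Illustrative sketch (not from the original project): wiring the custom layers
# defined above into a small Keras model. Shapes and filter counts are arbitrary
# and chosen only for demonstration; the layer classes are assumed to be in scope.
def build_demo_model(input_shape=(64, 64, 1)):
    inputs = keras.Input(shape=input_shape)
    x = CoordinateChannel2D()(inputs)                      # append x/y coordinate channels scaled to [-1, 1]
    x = Conv2D(32, 3, padding="same", activation="relu")(x)
    x = SelfAttention()(x)                                 # self-attention block defined above
    x = SubPixelConv2D(filters=1, kernel_size=3, r=2)(x)   # 2x upsampling via depth_to_space
    return keras.Model(inputs, x)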
<import_from_stmt>django.contrib.auth.models Permission<def_stmt>assign_perm perm group<block_start>""" Assigns a permission to a group """<if_stmt><not>isinstance(perm Permission)<block_start><try_stmt><block_start>app_label,codename=perm.split('.' 1)<block_end><except_stmt>ValueError<block_start><raise>ValueError("For global permissions, first argument must be in"<concat>" format: 'app_label.codename' (is %r)"%perm)<block_end>perm=Permission.objects.get(content_type__app_label=app_label codename=codename)<block_end>group.permissions.add(perm)<line_sep><return>perm<block_end><def_stmt>remove_perm perm group<block_start>""" Removes a permission from a group """<if_stmt><not>isinstance(perm Permission)<block_start><try_stmt><block_start>app_label,codename=perm.split('.' 1)<block_end><except_stmt>ValueError<block_start><raise>ValueError("For global permissions, first argument must be in"<concat>" format: 'app_label.codename' (is %r)"%perm)<block_end>perm=Permission.objects.get(content_type__app_label=app_label codename=codename)<block_end>group.permissions.remove(perm)<line_sep><return><block_end>
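# Illustrative usage sketch (assumes a Django project with django.contrib.auth
# installed; "auth.change_user" is one of Django's built-in permissions).
from django.contrib.auth.models import Group


def grant_and_revoke_demo():
    editors, _ = Group.objects.get_or_create(name="editors")
    perm = assign_perm("auth.change_user", editors)    # "app_label.codename" form
    assert editors.permissions.filter(pk=perm.pk).exists()
    remove_perm("auth.change_user", editors)           # accepts the same string form
    assert not editors.permissions.filter(pk=perm.pk).exists()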
"""\ Acora - a multi-keyword search engine based on Aho-Corasick trees. Usage:: >>> from acora import AcoraBuilder Collect some keywords:: >>> builder = AcoraBuilder('ab', 'bc', 'de') >>> builder.add('a', 'b') Generate the Acora search engine:: >>> ac = builder.build() Search a string for all occurrences:: >>> ac.findall('abc') [('a', 0), ('ab', 0), ('b', 1), ('bc', 1)] >>> ac.findall('abde') [('a', 0), ('ab', 0), ('b', 1), ('de', 2)] """<import_from_future_stmt> absolute_import<import_stmt>sys<line_sep>IS_PY3=sys.version_info[0]<ge>3<if_stmt>IS_PY3<block_start>unicode=str<block_end>FILE_BUFFER_SIZE=32<times>1024<class_stmt>PyAcora(object)<block_start>"""A simple (and very slow) Python implementation of the Acora search engine. """<line_sep>transitions=<none><def_stmt>__init__ self machine transitions=<none><block_start><if_stmt>transitions<is><not><none># old style format <block_start>start_state=machine<line_sep>self.transitions=dict([((state.id char) (target_state.id target_state.matches))<for>((state char) target_state) transitions.items()])<block_end><else_stmt># new style Machine format <block_start>start_state=machine.start_state<line_sep>ignore_case=machine.ignore_case<line_sep>self.transitions=transitions={}<line_sep>child_states=machine.child_states<line_sep>child_targets={}<line_sep>state_matches={}<line_sep>needs_bytes_conversion=<none><for_stmt>state child_states<block_start>state_id=state.id<line_sep>child_targets[state_id],state_matches[state_id]=(_merge_targets(state ignore_case))<if_stmt>needs_bytes_conversion<is><none><and>state_matches[state_id]<block_start><if_stmt>IS_PY3<block_start>needs_bytes_conversion=any(isinstance(s bytes)<for>s state_matches[state_id])<block_end><elif_stmt>any(isinstance(s unicode)<for>s state_matches[state_id])# in Py2, some keywords might be str even though we're processing unicode <block_start>needs_bytes_conversion=<false><block_end><block_end><block_end><if_stmt>needs_bytes_conversion<is><none><and><not>IS_PY3<block_start>needs_bytes_conversion=<true><block_end><if_stmt>needs_bytes_conversion<block_start><if_stmt>IS_PY3<block_start>convert=ord<block_end><else_stmt><block_start><import_from_stmt>codecs latin_1_encode<def_stmt>convert s<block_start><return>latin_1_encode(s)[0]<block_end><block_end><block_end><else_stmt><block_start>convert=<none><block_end>get_child_targets=child_targets.get<line_sep>get_matches=state_matches.get<line_sep>state_id=start_state.id<for_stmt>ch,child _merge_targets(start_state ignore_case)[0].items()<block_start>child_id=child.id<if_stmt>convert<is><not><none><block_start>ch=convert(ch)<block_end>transitions[(state_id ch)]=(child_id get_matches(child_id))<block_end><for_stmt>state child_states<block_start>state_id=state.id<for_stmt>ch,child get_child_targets(state_id).items()<block_start>child_id=child.id<if_stmt>convert<is><not><none><block_start>ch=convert(ch)<block_end>transitions[(state_id ch)]=(child_id get_matches(child_id))<block_end><block_end><block_end>self.start_state=start_state.id<block_end><def_stmt>finditer self s<block_start>"""Iterate over all occurrences of any keyword in the string. Returns (keyword, offset) pairs. 
"""<line_sep>state=self.start_state<line_sep>start_state=(state [])<line_sep>next_state=self.transitions.get<line_sep>pos=0<for_stmt>char s<block_start>pos<augadd>1<line_sep>state,matches=next_state((state char) start_state)<if_stmt>matches<block_start><for_stmt>match matches<block_start><yield>(match pos-len(match))<block_end><block_end><block_end><block_end><def_stmt>findall self s<block_start>"""Find all occurrences of any keyword in the string. Returns a list of (keyword, offset) pairs. """<line_sep><return>list(self.finditer(s))<block_end><def_stmt>filefind self f<block_start>"""Iterate over all occurrences of any keyword in a file. Returns (keyword, offset) pairs. """<line_sep>opened=<false><if_stmt><not>hasattr(f 'read')<block_start>f=open(f 'rb')<line_sep>opened=<true><block_end><try_stmt><block_start>state=self.start_state<line_sep>start_state=(state ())<line_sep>next_state=self.transitions.get<line_sep>pos=0<while_stmt>1<block_start>data=f.read(FILE_BUFFER_SIZE)<if_stmt><not>data<block_start><break><block_end><for_stmt>char data<block_start>pos<augadd>1<line_sep>state,matches=next_state((state char) start_state)<if_stmt>matches<block_start><for_stmt>match matches<block_start><yield>(match pos-len(match))<block_end><block_end><block_end><block_end><block_end><finally_stmt><block_start><if_stmt>opened<block_start>f.close()<block_end><block_end><block_end><def_stmt>filefindall self f<block_start>"""Find all occurrences of any keyword in a file. Returns a list of (keyword, offset) pairs. """<line_sep><return>list(self.filefind(f))<block_end><block_end># import from shared Python/Cython module <import_from_stmt>acora._acora insert_bytes_keyword insert_unicode_keyword build_trie<as>_build_trie build_MachineState<as>_MachineState merge_targets<as>_merge_targets <line_sep># import from Cython module if available <try_stmt><block_start><import_from_stmt>acora._cacora UnicodeAcora BytesAcora insert_bytes_keyword insert_unicode_keyword <block_end><except_stmt>ImportError# C module not there ... <block_start>UnicodeAcora=BytesAcora=PyAcora<block_end><class_stmt>AcoraBuilder(object)<block_start>"""The main builder class for an Acora search engine. Add keywords by calling ``.add(*keywords)`` or by passing them into the constructor. Then build the search engine by calling ``.build()``. Builds a case insensitive search engine when passing ``ignore_case=True``, and a case sensitive engine otherwise. """<line_sep>ignore_case=<false><def_stmt>__init__ self *keywords **kwargs<block_start><if_stmt>kwargs<block_start>self.ignore_case=kwargs.pop('ignore_case' <false>)<if_stmt>kwargs<block_start><raise>TypeError("%s() got unexpected keyword argument %s"%(self.__class__.__name__ next(iter(kwargs))))<block_end><block_end><if_stmt>len(keywords)<eq>1<and>isinstance(keywords[0] (list tuple))<block_start>keywords=keywords[0]<block_end>self.for_unicode=<none><line_sep>self.state_counter=1<line_sep>self.keywords=set()<line_sep>self.tree=_MachineState(0)<if_stmt>keywords<block_start>self.update(keywords)<block_end><block_end><def_stmt>__update self keywords<block_start>"""Add more keywords to the search engine builder. Adding keywords does not impact previously built search engines. 
"""<if_stmt><not>keywords<block_start><return><block_end>self.tree=<none><line_sep>self.keywords.update(keywords)<if_stmt>self.for_unicode<is><none><block_start><for_stmt>keyword keywords<block_start><if_stmt>isinstance(keyword unicode)<block_start>self.for_unicode=<true><block_end><elif_stmt>isinstance(keyword bytes)<block_start>self.for_unicode=<false><block_end><else_stmt><block_start><raise>TypeError("keywords must be either bytes or unicode, not mixed (got %s)"%type(keyword))<block_end><break><block_end><block_end># validate input string types marker=object()<if_stmt>self.for_unicode<block_start><for_stmt>keyword keywords<block_start><if_stmt><not>isinstance(keyword unicode)<block_start><break><block_end><block_end><else_stmt><block_start>keyword=marker<block_end><block_end><else_stmt><block_start><for_stmt>keyword keywords<block_start><if_stmt><not>isinstance(keyword bytes)<block_start><break><block_end><block_end><else_stmt><block_start>keyword=marker<block_end><block_end><if_stmt>keyword<is><not>marker<block_start><raise>TypeError("keywords must be either bytes or unicode, not mixed (got %s)"%type(keyword))<block_end><block_end><def_stmt>add self *keywords<block_start>"""Add more keywords to the search engine builder. Adding keywords does not impact previously built search engines. """<if_stmt>keywords<block_start>self.update(keywords)<block_end><block_end><def_stmt>build self ignore_case=<none> acora=<none><block_start>"""Build a search engine from the aggregated keywords. Builds a case insensitive search engine when passing ``ignore_case=True``, and a case sensitive engine otherwise. """<if_stmt>acora<is><none><block_start><if_stmt>self.for_unicode<block_start>acora=UnicodeAcora<block_end><else_stmt><block_start>acora=BytesAcora<block_end><block_end><if_stmt>self.for_unicode<eq><false><and>ignore_case<block_start><import_stmt>sys<if_stmt>sys.version_info[0]<ge>3<block_start><raise>ValueError("Case insensitive search is not supported for byte strings in Python 3")<block_end><block_end><if_stmt>ignore_case<is><not><none><and>ignore_case<ne>self.ignore_case# must rebuild tree <block_start>builder=type(self)(ignore_case=ignore_case)<line_sep>builder.update(self.keywords)<line_sep><return>builder.build(acora=acora)<block_end><return>acora(_build_trie(self.tree ignore_case=self.ignore_case))<block_end><def_stmt>update self keywords<block_start>for_unicode=self.for_unicode<line_sep>ignore_case=self.ignore_case<line_sep>insert_keyword=insert_unicode_keyword<if>for_unicode<else>insert_bytes_keyword<for_stmt>keyword keywords<block_start><if_stmt>for_unicode<is><none><block_start>for_unicode=self.for_unicode=isinstance(keyword unicode)<line_sep>insert_keyword=(insert_unicode_keyword<if>for_unicode<else>insert_bytes_keyword)<block_end><elif_stmt>for_unicode<ne>isinstance(keyword unicode)<block_start><raise>TypeError("keywords must be either bytes or unicode, not mixed (got %s)"%type(keyword))<block_end>self.state_counter=insert_keyword(self.tree keyword self.state_counter ignore_case)<block_end>self.keywords.update(keywords)<block_end><block_end>### convenience functions <def_stmt>search s *keywords<block_start>"""Convenience function to search a string for keywords. """<line_sep>acora=AcoraBuilder(keywords).build()<line_sep><return>acora.findall(s)<block_end><def_stmt>search_ignore_case s *keywords<block_start>"""Convenience function to search a string for keywords. Case insensitive version. 
"""<line_sep>acora=AcoraBuilder(keywords ignore_case=<true>).build()<line_sep><return>acora.findall(s)<block_end>
<import_stmt>json<import_from_stmt>decimal Decimal<import_from_stmt>pymongo MongoClient<import_from_stmt>appkernel PropertyRequiredException<import_from_stmt>appkernel.configuration config<import_from_stmt>appkernel.repository mongo_type_converter_to_dict mongo_type_converter_from_dict<import_from_stmt>.utils *<import_stmt>pytest<import_from_stmt>jsonschema validate<def_stmt>setup_module module<block_start>config.mongo_database=MongoClient(host='localhost')['appkernel']<block_end><def_stmt>setup_function function<block_start>""" executed before each method call """<line_sep>print('\n\nSETUP ==> ')<line_sep>Project.delete_all()<line_sep>User.delete_all()<block_end><def_stmt>test_required_field <block_start>project=Project()<with_stmt>pytest.raises(PropertyRequiredException)<block_start>project.finalise_and_validate()<block_end><with_stmt>pytest.raises(PropertyRequiredException)<block_start>project.update(name=<none>)<line_sep>project.finalise_and_validate()<block_end>project.update(name='some_name')<line_sep>project.finalise_and_validate()<block_end><def_stmt>test_append_to_non_existing_non_defined_element <block_start>project=Project().update(name='strange project')<line_sep>project.append_to(users=Task().update(name='some_task' description='some description'))<line_sep>project.finalise_and_validate()<assert_stmt>'users'<in>project.__dict__<assert_stmt>len(project.users)<eq>1<assert_stmt>isinstance(project.users[0] Task)<line_sep>print(('{}'.format(project)))<block_end><def_stmt>test_append_to_non_existing_element <block_start>project=Project().update(name='strange project')<line_sep>project.append_to(tasks=Task().update(name='some_task' description='some description'))<line_sep>project.finalise_and_validate()<assert_stmt>'tasks'<in>project.__dict__<assert_stmt>len(project.tasks)<eq>1<assert_stmt>isinstance(project.tasks[0] Task)<line_sep>print(('{}'.format(project)))<block_end><def_stmt>test_remove_non_existing_element <block_start><with_stmt>pytest.raises(AttributeError)<block_start>project=Project().update(name='strange project')<line_sep>project.remove_from(tasks=Task())<block_end><with_stmt>pytest.raises(AttributeError)<block_start>project=Project().update(name='strange project')<line_sep>project.remove_from(tasks=<none>)<block_end><with_stmt>pytest.raises(AttributeError)<block_start>project=Project().update(name='strange project')<line_sep>project.remove_from(somehtings=Task())<block_end><block_end><def_stmt>test_remove_existing_defined_element <block_start>task1=Task().update(name='some_task' description='some description')<line_sep>task2=Task().update(name='some_other_task' description='some other description')<line_sep>task3=Task().update(name='a third task' description='some third description')<line_sep>project=Project().update(name='strange project')<line_sep>project.append_to(tasks=[task1 task2])<line_sep>project.finalise_and_validate()<assert_stmt>len(project.tasks)<eq>2<line_sep>project.append_to(tasks=task3)<line_sep>project.finalise_and_validate()<assert_stmt>len(project.tasks)<eq>3<line_sep>print(('{}'.format(project)))<line_sep>project.remove_from(tasks=task1)<assert_stmt>len(project.tasks)<eq>2<line_sep>print(('{}'.format(project)))<block_end><def_stmt>test_generator <block_start>task=Task()<line_sep>task.name='some task name'<line_sep>task.description='some task description'<line_sep>task.finalise_and_validate()<line_sep>print(('\nTask:\n {}'.format(task)))<assert_stmt>task.id<is><not><none><and>task.id.startswith('U')<block_end><def_stmt>test_converter 
<block_start>user=create_and_save_a_user('test user' 'test password' 'test description')<line_sep>print(('\n{}'.format(user.dumps(pretty_print=<true>))))<assert_stmt>user.password.startswith('<PASSWORD>')<line_sep>hash1=user.password<line_sep>user.save()<assert_stmt>user.password.startswith('<PASSWORD>')<assert_stmt>hash1<eq>user.password<block_end><def_stmt>test_nested_object_serialisation <block_start>portfolio=create_a_portfolion_with_owner()<line_sep>print((portfolio.dumps(pretty_print=<true>)))<line_sep>check_portfolio(portfolio)<block_end><def_stmt>test_describe_model <block_start>user_spec=User.get_parameter_spec()<line_sep>print(User.get_paramater_spec_as_json())<assert_stmt>'name'<in>user_spec<assert_stmt>user_spec.get('name').get('required')<assert_stmt>user_spec.get('name').get('type')<eq>'str'<assert_stmt>len(user_spec.get('name').get('validators'))<eq>2<for_stmt>validator user_spec.get('name').get('validators')<block_start><if_stmt>validator.get('type')<eq>'Regexp'<block_start><assert_stmt>validator.get('value')<eq>'[A-Za-z0-9-_]'<block_end><block_end><assert_stmt>user_spec.get('roles').get('sub_type')<eq>'str'<block_end><def_stmt>test_describe_rich_model <block_start>project_spec=Project.get_parameter_spec()<line_sep>print(Project.get_paramater_spec_as_json())<assert_stmt>project_spec.get('created').get('required')<assert_stmt>project_spec.get('created').get('type')<eq>'datetime'<assert_stmt>project_spec.get('name').get('required')<assert_stmt>project_spec.get('name').get('type')<eq>'str'<line_sep>name_validators=project_spec.get('name').get('validators')<assert_stmt>len(name_validators)<eq>1<assert_stmt>name_validators[0].get('type')<eq>'NotEmpty'<assert_stmt>name_validators[0].get('value')<is><none><or>'null'<line_sep>tasks=project_spec.get('tasks')<assert_stmt><not>tasks.get('required')<assert_stmt>'sub_type'<in>tasks<assert_stmt>tasks.get('type')<eq>'list'<line_sep>task=tasks.get('sub_type')<assert_stmt>task.get('type')<eq>'Task'<assert_stmt>'props'<in>task<line_sep>props=task.get('props')<assert_stmt><not>props.get('closed_date').get('required')<assert_stmt>props.get('closed_date').get('type')<eq>'datetime'<assert_stmt>props.get('closed_date').get('validators')[0].get('type')<eq>'Past'<block_end><def_stmt>test_json_schema <block_start>json_schema=Project.get_json_schema()<line_sep>print('\n{}'.format(json.dumps(json_schema indent=2)))<line_sep>print('===========')<line_sep>project=create_rich_project()<line_sep>print(project.dumps(pretty_print=<true>))<assert_stmt>json_schema.get('title')<eq>'Project Schema'<assert_stmt>'title'<in>json_schema<assert_stmt>json_schema.get('type')<eq>'object'<assert_stmt>'name'<in>json_schema.get('required')<assert_stmt>'created'<in>json_schema.get('required')<assert_stmt>'definitions'<in>json_schema<assert_stmt>json_schema.get('additionalProperties')<line_sep>definitions=json_schema.get('definitions')<assert_stmt>'Task'<in>definitions<assert_stmt>len(definitions.get('Task').get('required'))<eq>6<assert_stmt>'id'<in>definitions.get('Task').get('properties')<line_sep>closed_date=definitions.get('Task').get('properties').get('closed_date')<assert_stmt>'string'<in>closed_date.get('type')<assert_stmt>len(closed_date.get('type'))<eq>2<assert_stmt>closed_date.get('format')<eq>'date-time'<line_sep>completed=definitions.get('Task').get('properties').get('completed')<assert_stmt>'boolean'<in>completed.get('type')<assert_stmt>len(completed.get('type'))<eq>1<line_sep>validate(json.loads(project.dumps()) json_schema)<line_sep># todo: check the enum / 
make a negative test # validator = Draft4Validator(json_schema) # errors = sorted(validator.iter_errors(project.dumps()), key=lambda e: e.path) # for error in errors: # print('{}'.format(error.message, list(error.path))) <block_end><def_stmt>test_json_schema_primitives_types <block_start>json_schema=Stock.get_json_schema()<line_sep>print(json.dumps(json_schema indent=2))<line_sep>props=json_schema.get('properties')<line_sep>opentypes=props.get('open').get('type')<assert_stmt>'number'<in>opentypes<assert_stmt>len(opentypes)<eq>1<line_sep>item_types=props.get('history').get('items').get('type')<assert_stmt>'number'<in>item_types<line_sep>len(item_types)<eq>1<line_sep>stock=create_a_stock()<line_sep>validate(json.loads(stock.dumps()) json_schema)<block_end><def_stmt>test_json_schema_complex # print json.dumps(Portfolio.get_parameter_spec(True), indent=2) <block_start>json_schema=Portfolio.get_json_schema()<line_sep>print(json.dumps(json_schema indent=2))<line_sep>stock_definition=json_schema.get('definitions').get('Stock')<assert_stmt>stock_definition.get('properties').get('updated').get('format')<eq>'date-time'<assert_stmt>stock_definition.get('properties').get('code').get('pattern')<eq>'[A-Za-z0-9-_]'<assert_stmt>stock_definition.get('properties').get('code').get('maxLength')<eq>4<assert_stmt>stock_definition.get('properties').get('open').get('minimum')<eq>0<line_sep>open_types=stock_definition.get('properties').get('open').get('type')<assert_stmt>'number'<in>open_types<assert_stmt>len(open_types)<eq>1<line_sep>sequence_types=stock_definition.get('properties').get('sequence').get('type')<assert_stmt>'number'<in>sequence_types<assert_stmt>len(sequence_types)<eq>2<assert_stmt>stock_definition.get('properties').get('sequence').get('minimum')<eq>1<assert_stmt>stock_definition.get('properties').get('sequence').get('maximum')<eq>100<assert_stmt>stock_definition.get('properties').get('sequence').get('multipleOf')<eq>1.0<line_sep>history_types=stock_definition.get('properties').get('history').get('type')<assert_stmt>'array'<in>history_types<assert_stmt>len(history_types)<eq>2<line_sep>portfolio=create_portfolio('My Portfolio')<line_sep>validate(json.loads(portfolio.dumps()) json_schema)<block_end><def_stmt>test_json_schema_in_mongo_compat_mode <block_start>json_schema=Project.get_json_schema(mongo_compatibility=<true>)<line_sep>print('\n\n{}'.format(json.dumps(json_schema indent=2)))<line_sep>print('===========')<line_sep>task_spec=json_schema.get('properties').get('tasks')<assert_stmt>len(task_spec.get('items').get('required'))<eq>5<line_sep>priority_spec=task_spec.get('items').get('properties').get('priority')<assert_stmt>len(priority_spec.get('enum'))<eq>3<line_sep>closed_date_spec=task_spec.get('items').get('properties').get('closed_date')<assert_stmt>len(closed_date_spec.get('bsonType'))<eq>2<assert_stmt>'bsonType'<in>json_schema<assert_stmt>'id'<not><in>json_schema<assert_stmt>'$schema'<not><in>json_schema<assert_stmt>'definitions'<not><in>json_schema<for_stmt>prop json_schema.get('properties').items()<block_start><assert_stmt>'format'<not><in>prop[1]<assert_stmt>'bsonType'<in>prop[1]<block_end><for_stmt>prop task_spec.get('items').get('properties').items()<block_start><assert_stmt>'format'<not><in>prop[1]<assert_stmt>'bsonType'<or>'enum'<in>prop[1]<block_end>project=create_rich_project()<line_sep>print(project.dumps(pretty_print=<true>))<line_sep>validate(json.loads(project.dumps()) json_schema)<block_end><def_stmt>__assert_product_dict 
product_dict:dict<block_start><assert_stmt>'name'<in>product_dict<assert_stmt>'description'<in>product_dict<assert_stmt>'size'<in>product_dict<assert_stmt>product_dict.get('size')<eq>'M'<assert_stmt>'price'<in>product_dict<assert_stmt>isinstance(product_dict.get('price') dict)<line_sep>price_dict=product_dict.get('price')<assert_stmt>'_type'<in>price_dict<assert_stmt>price_dict.get('_type')<eq>'money.money.Money'<assert_stmt>price_dict.get('currency')<eq>'EUR'<block_end><def_stmt>test_custom_object_marshalling <block_start>product=Product(code='TRX' name='White T-Shirt' description='a stylish white shirt' size=ProductSize.M price=Money(10.50 'EUR'))<line_sep>product_dict=Model.to_dict(product)<line_sep>__assert_product_dict(product_dict)<line_sep>amount=product_dict.get('price').get('amount')<assert_stmt>isinstance(amount Decimal)<assert_stmt>amount<eq>10.5<line_sep>product_json=product.dumps(pretty_print=<true>)<line_sep>print('JSON: \n{}'.format(product_json))<line_sep>reloaded_product=Product.loads(product_json)<assert_stmt>reloaded_product<is><not><none><and>isinstance(reloaded_product Product)<assert_stmt>reloaded_product.name<eq>product.name<assert_stmt>reloaded_product.description<eq>product.description<assert_stmt>reloaded_product.size<eq>product.size<assert_stmt>isinstance(reloaded_product.price Money)<assert_stmt>reloaded_product.price<eq>product.price<block_end><def_stmt>test_custom_converter_function <block_start>product=Product(code='TRX' name='White T-Shirt' description='a stylish white shirt' size=ProductSize.M price=Money(10.50 'EUR'))<line_sep>product_dict=Model.to_dict(product converter_func=mongo_type_converter_to_dict)<line_sep>__assert_product_dict(product_dict)<line_sep>amount=product_dict.get('price').get('amount')<assert_stmt>isinstance(amount float)<line_sep>product_json=product.dumps(pretty_print=<true>)<line_sep>print('JSON: \n{}'.format(product_json))<line_sep>reloaded_product=Model.from_dict(product_dict Product converter_func=mongo_type_converter_from_dict)<assert_stmt>isinstance(reloaded_product.price Money)<assert_stmt>isinstance(reloaded_product.price.amount Decimal)<block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.nn functional<as>F<class_stmt>CustomRNN(nn.Module)<block_start><def_stmt>__init__ self input_size output_size hidden_size batch_first=<true> W_scale=1e-1 f_hidden=<none><block_start>super(CustomRNN self).__init__()<line_sep>self.input_size=input_size<line_sep>self.output_size=output_size<line_sep>self.hidden_size=hidden_size<line_sep>self.f_hidden=f_hidden<line_sep>self.W1=nn.Parameter((torch.rand(hidden_size input_size)-0.5)<times>W_scale)<line_sep>self.W2=nn.Parameter((torch.rand(hidden_size hidden_size)-0.5)<times>W_scale)<line_sep>self.W3=nn.Parameter((torch.rand(output_size hidden_size)-0.5)<times>W_scale)<line_sep>self.b_h=nn.Parameter(torch.zeros(hidden_size))<block_end><def_stmt>forward self x<block_start>h1=torch.zeros(x.shape[0] self.hidden_size)<line_sep>ys=[]<for_stmt>i,xi enumerate(x.chunk(x.size(1) dim=1))<block_start>h1=(torch.matmul(self.W2 h1.t())+torch.matmul(self.W1 xi.t())).t()+self.b_h<if_stmt>self.f_hidden<is><not><none><block_start>h1=getattr(F self.f_hidden)(h1)<block_end>y=torch.matmul(self.W3 h1.t()).t()<line_sep>ys.append(y)<block_end>ys=torch.stack(ys dim=1)<line_sep><return>ys<block_end><block_end><class_stmt>CustomRes(nn.Module)<block_start><def_stmt>__init__ self input_size output_size hidden_size batch_first=<true> W_scale=1e-1 f_hidden=<none><block_start>super(CustomRes self).__init__()<line_sep>self.input_size=input_size<line_sep>self.output_size=output_size<line_sep>self.hidden_size=hidden_size<line_sep>self.f_hidden=f_hidden<line_sep>self.W1=torch.nn.Parameter((torch.rand(hidden_size input_size)-0.5)<times>W_scale)<line_sep>self.W2=torch.nn.Parameter((torch.rand(hidden_size hidden_size)-0.5)<times>W_scale)<line_sep>self.W3=torch.nn.Parameter((torch.rand(output_size hidden_size)-0.5)<times>W_scale)<line_sep>self.b_h=torch.nn.Parameter(torch.zeros(hidden_size))<block_end><def_stmt>forward self x<block_start>h1=torch.zeros(x.shape[0] self.hidden_size)<line_sep>ys=[]<for_stmt>i,xi enumerate(x.chunk(x.size(1) dim=1))<block_start>hprev=h1<line_sep>h1=(torch.matmul(self.W2 h1.t())+torch.matmul(self.W1 xi.t())).t()+self.b_h<if_stmt>self.f_hidden<is><not><none><block_start>h1=getattr(F self.f_hidden)(h1)<block_end>y=torch.matmul(self.W3 h1.t()).t()<line_sep>ys.append(y)<line_sep>h1=h1+hprev<block_end>ys=torch.stack(ys dim=1)<line_sep><return>ys<block_end><block_end><class_stmt>CustomLSTM(nn.Module)<block_start><def_stmt>__init__ self input_size output_size hidden_size batch_first=<true> W_scale=1e-1<block_start>super(CustomLSTM self).__init__()<line_sep>self.input_size=input_size<line_sep>self.output_size=output_size<line_sep>self.hidden_size=hidden_size<line_sep>self.lstm=nn.LSTM(input_size hidden_size batch_first=batch_first)<line_sep>self.W3=torch.nn.Parameter((torch.rand(output_size hidden_size)-0.5))<block_end><def_stmt>forward self x# out should have size [N_batch, T, N_hidden] <block_start>out,hidden=self.lstm(x.unsqueeze(2))<line_sep># print(torch.max(x, 1)) # print(x[:, 100]) # print(out[:, 100, 0].detach()) # ys should have size [N_batch, T, N_classes] ys=torch.matmul(out self.W3.t())<line_sep><return>ys<block_end><block_end>
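# Hypothetical shape check, added for illustration; it is not part of the original file.
# It only exercises the three classes defined above: with a 2-D [batch, time] input each
# module requires input_size=1 and returns an output of shape [batch, time, output_size].
# The concrete sizes below are arbitrary assumptions.
<if_stmt>__name__<eq>'__main__'<block_start>batch=4<line_sep>timesteps=20<line_sep>x=torch.rand(batch timesteps)<for_stmt>net (CustomRNN(1 3 16) CustomRes(1 3 16) CustomLSTM(1 3 16))<block_start>y=net(x)<line_sep># every model returns one prediction vector per timestep <assert_stmt>y.shape<eq>(batch timesteps 3)<block_end><block_end>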
# # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # This file incorporates work covered by the following license notice: # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed # with this work for additional information regarding copyright # ownership. The ASF licenses this file to you under the Apache # License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0 . # <import_from_stmt>com.sun.star.beans PropertyValue<line_sep>''' Simplifies handling Arrays of PropertyValue. To make a use of this class, instantiate it, and call the put(propName,propValue) method. caution: propName should always be a String. When finished, call the getProperties() method to get an array of the set properties. '''<class_stmt>Properties(dict)<block_start>@classmethod<def_stmt>getPropertyValue self props propName<block_start><for_stmt>i props<block_start><if_stmt>propName<eq>i.Name<block_start><return>i.Value<block_end><block_end><raise>AttributeError("Property '"+propName+"' not found.")<block_end>@classmethod<def_stmt>hasPropertyValue self props propName<block_start><for_stmt>i props<block_start><if_stmt>propName<eq>i.Name<block_start><return><true><block_end><block_end><return><false><block_end>@classmethod<def_stmt>getProperties self _map<block_start>pv=[]<for_stmt>k,v _map.items()<block_start>pv.append(self.createProperty(k v))<block_end><return>pv<block_end>@classmethod<def_stmt>createProperty self name value handle=<none><block_start>pv=PropertyValue()<line_sep>pv.Name=name<line_sep>pv.Value=value<if_stmt>handle<is><not><none><block_start>pv.Handle=handle<block_end><return>pv<block_end><def_stmt>getProperties1 self<block_start><return>self.getProperties(self)<block_end><block_end>
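# Hypothetical usage sketch, added for illustration; it is not part of the original file and
# assumes a running UNO/LibreOffice environment. Properties acts as a plain dict whose entries
# can be converted into PropertyValue structs; the keys and values below are made up.
<def_stmt>_example_properties_usage <block_start>props=Properties()<line_sep>props['Hidden']=<true><line_sep>props['Overwrite']=<false><line_sep>pv_list=props.getProperties1()# a list of PropertyValue structs <assert_stmt>Properties.hasPropertyValue(pv_list 'Hidden')<assert_stmt>Properties.getPropertyValue(pv_list 'Hidden')<line_sep><return>pv_list<block_end>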
# # Copyright (c) 2013-present, <NAME> # All rights reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # #Program for text written in one Indic script to another based on Unicode mappings. # # @author <NAME> # <import_stmt>sys string itertools re os<import_from_stmt>collections defaultdict<import_from_stmt>indicnlp common<import_from_stmt>indicnlp langinfo<import_from_stmt>indicnlp.script indic_scripts<as>isc<import_from_stmt>indicnlp.transliterate.sinhala_transliterator SinhalaDevanagariTransliterator<as>sdt<import_stmt>pandas<as>pd<line_sep>OFFSET_TO_ITRANS={}<line_sep>ITRANS_TO_OFFSET=defaultdict(list)<line_sep>DUPLICATE_ITRANS_REPRESENTATIONS={}<def_stmt>init <block_start>""" To be called by library loader, do not call it in your program """<line_sep>### Load the ITRANS-script offset map. The map was initially generated using the snippet below (uses the old itrans transliterator) ### The map is modified as needed to accomodate extensions and corrections to the mappings # # base=0x900 # l=[] # for i in range(0,0x80): # c=chr(base+i) # itrans=ItransTransliterator.to_itrans(c,'hi') # l.append((hex(i),c,itrans)) # print(l) # # pd.DataFrame(l,columns=['offset_hex','devnag_char','itrans']).to_csv('offset_itrans_map.csv',index=False,encoding='utf-8') itrans_map_fname=os.path.join(common.get_resources_path() 'transliterate' 'offset_itrans_map.csv')<line_sep>#itrans_map_fname=r'D:\src\python_sandbox\src\offset_itrans_map.csv' itrans_df=pd.read_csv(itrans_map_fname encoding='utf-8')<line_sep><global>OFFSET_TO_ITRANS ITRANS_TO_OFFSET DUPLICATE_ITRANS_REPRESENTATIONS<for_stmt>r itrans_df.iterrows()<block_start>itrans=r[1]['itrans']<line_sep>o=int(r[1]['offset_hex'] base=16)<line_sep>OFFSET_TO_ITRANS[o]=itrans<if_stmt>langinfo.is_consonant_offset(o)### for consonants, strip the schwa - add halant offset <block_start>ITRANS_TO_OFFSET[itrans[:-1]].extend([o 0x4d])<block_end><else_stmt>### the append assumes that the maatra always comes after independent vowel in the df <block_start>ITRANS_TO_OFFSET[itrans].append(o)<block_end>DUPLICATE_ITRANS_REPRESENTATIONS={'A':'aa' 'I':'ii' 'U':'uu' 'RRi':'R^i' 'RRI':'R^I' 'LLi':'L^i' 'LLI':'L^I' 'L':'ld' 'w':'v' 'x':'kSh' 'gj':'j~n' 'dny':'j~n' '.n':'.m' 'M':'.m' 'OM':'AUM'}<block_end><block_end><class_stmt>UnicodeIndicTransliterator(object)<block_start>""" Base class for rule-based transliteration among Indian languages. Script pair specific transliterators should derive from this class and override the transliterate() method. 
They can call the super class 'transliterate()' method to avail of the common transliteration """<line_sep>@staticmethod<def_stmt>_correct_tamil_mapping offset# handle missing unaspirated and voiced plosives in Tamil script # replace by unvoiced, unaspirated plosives # for first 4 consonant rows of varnamala # exception: ja has a mapping in Tamil <block_start><if_stmt>offset<ge>0x15<and>offset<le>0x28<and>offset<ne>0x1c<and><not>((offset-0x15)%5<eq>0<or>(offset-0x15)%5<eq>4)<block_start>subst_char=(offset-0x15)<floordiv>5<line_sep>offset=0x15+5<times>subst_char<block_end># for 5th consonant row of varnamala <if_stmt>offset<in>[0x2b 0x2c 0x2d]<block_start>offset=0x2a<block_end># 'sh' becomes 'Sh' <if_stmt>offset<eq>0x36<block_start>offset=0x37<block_end><return>offset<block_end>@staticmethod<def_stmt>transliterate text lang1_code lang2_code<block_start>""" convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang1_code: language 2 code """<if_stmt>lang1_code<in>langinfo.SCRIPT_RANGES<and>lang2_code<in>langinfo.SCRIPT_RANGES# if Sinhala is source, do a mapping to Devanagari first <block_start><if_stmt>lang1_code<eq>'si'<block_start>text=sdt.sinhala_to_devanagari(text)<line_sep>lang1_code='hi'<block_end># if Sinhala is target, make Devanagiri the intermediate target org_lang2_code=''<if_stmt>lang2_code<eq>'si'<block_start>lang2_code='hi'<line_sep>org_lang2_code='si'<block_end>trans_lit_text=[]<for_stmt>c text<block_start>newc=c<line_sep>offset=ord(c)-langinfo.SCRIPT_RANGES[lang1_code][0]<if_stmt>offset<ge>langinfo.COORDINATED_RANGE_START_INCLUSIVE<and>offset<le>langinfo.COORDINATED_RANGE_END_INCLUSIVE<and>c<ne>'\u0964'<and>c<ne>'\u0965'<block_start><if_stmt>lang2_code<eq>'ta'# tamil exceptions <block_start>offset=UnicodeIndicTransliterator._correct_tamil_mapping(offset)<block_end>newc=chr(langinfo.SCRIPT_RANGES[lang2_code][0]+offset)<block_end>trans_lit_text.append(newc)<block_end># if Sinhala is source, do a mapping to Devanagari first <if_stmt>org_lang2_code<eq>'si'<block_start><return>sdt.devanagari_to_sinhala(''.join(trans_lit_text))<block_end><return>''.join(trans_lit_text)<block_end><else_stmt><block_start><return>text<block_end><block_end><block_end><class_stmt>ItransTransliterator(object)<block_start>""" Transliterator between Indian scripts and ITRANS """<line_sep>@staticmethod<def_stmt>to_itrans text lang_code<block_start><if_stmt>lang_code<in>langinfo.SCRIPT_RANGES<block_start><if_stmt>lang_code<eq>'ml'# Change from chillus characters to corresponding consonant+halant <block_start>text=text.replace('\u0d7a' '\u0d23\u0d4d')<line_sep>text=text.replace('\u0d7b' '\u0d28\u0d4d')<line_sep>text=text.replace('\u0d7c' '\u0d30\u0d4d')<line_sep>text=text.replace('\u0d7d' '\u0d32\u0d4d')<line_sep>text=text.replace('\u0d7e' '\u0d33\u0d4d')<line_sep>text=text.replace('\u0d7f' '\u0d15\u0d4d')<block_end>offsets=[isc.get_offset(c lang_code)<for>c text]<line_sep>### naive lookup # itrans_l = [ OFFSET_TO_ITRANS.get(o, '-' ) for o in offsets ] itrans_l=[]<for_stmt>o offsets<block_start>itrans=OFFSET_TO_ITRANS.get(o 
chr(langinfo.SCRIPT_RANGES[lang_code][0]+o))<if_stmt>langinfo.is_halanta_offset(o)<block_start>itrans=''<if_stmt>len(itrans_l)<g>0<block_start>itrans_l.pop()<block_end><block_end><elif_stmt>langinfo.is_vowel_sign_offset(o)<and>len(itrans_l)<g>0<block_start>itrans_l.pop()<block_end>itrans_l.extend(itrans)<block_end><return>''.join(itrans_l)<block_end><else_stmt><block_start><return>text<block_end><block_end>@staticmethod<def_stmt>from_itrans text lang<block_start>""" TODO: Document this method properly TODO: A little hack is used to handle schwa: needs to be documented TODO: check for robustness """<line_sep>MAXCODE=4### TODO: Needs to be fixed ## handle_duplicate_itrans_representations <for_stmt>k,v DUPLICATE_ITRANS_REPRESENTATIONS.items()<block_start><if_stmt>k<in>text<block_start>text=text.replace(k v)<block_end><block_end>start=0<line_sep>match=<none><line_sep>solution=[]<line_sep>i=start+1<while_stmt>i<le>len(text)<block_start>itrans=text[start:i]<line_sep># print('===') # print('i: {}'.format(i)) # if i<len(text): # print('c: {}'.format(text[i-1])) # print('start: {}'.format(start)) # print('itrans: {}'.format(itrans)) <if_stmt>itrans<in>ITRANS_TO_OFFSET<block_start>offs=ITRANS_TO_OFFSET[itrans]<line_sep>## single element list - no problem ## except when it is 'a' ## 2 element list of 2 kinds: ### 1. alternate char for independent/dependent vowel ### 2. consonant + halant <if_stmt>len(offs)<eq>2<and>langinfo.is_vowel_offset(offs[0])### 1. alternate char for independent/dependent vowel ## if previous is a consonant, then use the dependent vowel <block_start><if_stmt>len(solution)<g>0<and>langinfo.is_halanta(solution[-1] lang)<block_start>offs=[offs[1]]## dependent vowel <block_end><else_stmt><block_start>offs=[offs[0]]<block_end><block_end>## independent vowel c=''.join([langinfo.offset_to_char(x lang)<for>x offs])<line_sep>match=(i c)<block_end><elif_stmt>len(itrans)<eq>1## unknown character <block_start>match=(i itrans)<block_end><elif_stmt>i<l>len(text)<and>(i-start)<l>MAXCODE+1## continue matching till MAXCODE length substring <block_start>i=i+1<line_sep><continue><block_end><else_stmt><block_start>solution.extend(match[1])<line_sep># start=i-1 start=match[0]<line_sep>i=start<line_sep>match=<none><block_end># print('match done') # print('match: {}'.format(match)) i=i+1<block_end>### flush matches <if_stmt>match<is><not><none><block_start>solution.extend(match[1])<block_end>#### post-processing ## delete unecessary halants # print(''.join(solution)) temp_out=list(''.join(solution))<line_sep>rem_indices=[]<for_stmt>i range(len(temp_out)-1)<block_start><if_stmt>langinfo.is_halanta(temp_out[i] lang)<and>(langinfo.is_vowel_sign(temp_out[i+1] lang)<or>langinfo.is_nukta(temp_out[i+1] lang)<or>temp_out[i+1]<eq>langinfo.offset_to_char(0x7f lang))<block_start>rem_indices.append(i)<block_end><block_end># if temp_out[i]==langinfo.offset_to_char(0x7f,lang): # rem_indices.append(i) <for_stmt>i reversed(rem_indices)<block_start>temp_out.pop(i)<block_end>out=''.join(temp_out)<line_sep>## delete schwa placeholder out=out.replace(langinfo.offset_to_char(0x7f lang) '')<line_sep><return>out<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<l>4<block_start>print("Usage: python unicode_transliterate.py <command> <infile> <outfile> <src_language> <tgt_language>")<line_sep>sys.exit(1)<block_end><if_stmt>sys.argv[1]<eq>'transliterate'<block_start>src_language=sys.argv[4]<line_sep>tgt_language=sys.argv[5]<with_stmt>open(sys.argv[2] 'r' 
encoding='utf-8')<as>ifile<block_start><with_stmt>open(sys.argv[3] 'w' encoding='utf-8')<as>ofile<block_start><for_stmt>line ifile.readlines()<block_start>transliterated_line=UnicodeIndicTransliterator.transliterate(line src_language tgt_language)<line_sep>ofile.write(transliterated_line)<block_end><block_end><block_end><block_end><elif_stmt>sys.argv[1]<eq>'romanize'<block_start>language=sys.argv[4]<line_sep>### temp fix to replace anusvara with corresponding nasal #r1_nasal=re.compile(ur'\u0902([\u0915-\u0918])') #r2_nasal=re.compile(ur'\u0902([\u091a-\u091d])') #r3_nasal=re.compile(ur'\u0902([\u091f-\u0922])') #r4_nasal=re.compile(ur'\u0902([\u0924-\u0927])') #r5_nasal=re.compile(ur'\u0902([\u092a-\u092d])') <with_stmt>open(sys.argv[2] 'r' encoding='utf-8')<as>ifile<block_start><with_stmt>open(sys.argv[3] 'w' encoding='utf-8')<as>ofile<block_start><for_stmt>line ifile.readlines()### temp fix to replace anusvara with corresponding nasal #line=r1_nasal.sub(u'\u0919\u094D\\1',line) #line=r2_nasal.sub(u'\u091e\u094D\\1',line) #line=r3_nasal.sub(u'\u0923\u094D\\1',line) #line=r4_nasal.sub(u'\u0928\u094D\\1',line) #line=r5_nasal.sub(u'\u092e\u094D\\1',line) <block_start>transliterated_line=ItransTransliterator.to_itrans(line language)<line_sep>## temp fix to replace 'ph' to 'F' to match with Urdu transliteration scheme transliterated_line=transliterated_line.replace('ph' 'f')<line_sep>ofile.write(transliterated_line)<block_end><block_end><block_end><block_end><elif_stmt>sys.argv[1]<eq>'indicize'<block_start>language=sys.argv[4]<with_stmt>open(sys.argv[2] 'r' encoding='utf-8')<as>ifile<block_start><with_stmt>open(sys.argv[3] 'w' encoding='utf-8')<as>ofile<block_start><for_stmt>line ifile.readlines()<block_start>transliterated_line=ItransTransliterator.from_itrans(line language)<line_sep>ofile.write(transliterated_line)<block_end><block_end><block_end><block_end><block_end>
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for modules."""<import_stmt>os<import_stmt>six<if_stmt>six.PY2<block_start><import_stmt>imp# pylint: disable=g-import-not-at-top <block_end><else_stmt><block_start><import_stmt>importlib# pylint: disable=g-import-not-at-top <block_end><def_stmt>get_parent_dir module<block_start><return>os.path.abspath(os.path.join(os.path.dirname(module.__file__) ".."))<block_end><def_stmt>get_parent_dir_for_name module_name<block_start>"""Get parent directory for module with the given name. Args: module_name: Module name for e.g. tensorflow_estimator.python.estimator.api._v1.estimator. Returns: Path to the parent directory if module is found and None otherwise. Given example above, it should return: /pathtoestimator/tensorflow_estimator/python/estimator/api/_v1. """<line_sep>name_split=module_name.split(".")<if_stmt><not>name_split<block_start><return><none><block_end><if_stmt>six.PY2<block_start><try_stmt><block_start>spec=imp.find_module(name_split[0])<block_end><except_stmt>ImportError<block_start><return><none><block_end><if_stmt><not>spec<block_start><return><none><block_end>base_path=spec[1]<block_end><else_stmt><block_start><try_stmt><block_start>spec=importlib.util.find_spec(name_split[0])<block_end><except_stmt>ValueError<block_start><return><none><block_end><if_stmt><not>spec<or><not>spec.origin<block_start><return><none><block_end>base_path=os.path.dirname(spec.origin)<block_end><return>os.path.join(base_path *name_split[1:-1])<block_end>
<import_from_stmt>.args.qos_sai_args add_qos_sai_args<import_from_stmt>.args.buffer_args add_dynamic_buffer_calculation_args<line_sep># QoS pytest arguments <def_stmt>pytest_addoption parser<block_start>''' Adds option to QoS pytest Args: parser: pytest parser object Returns: None '''<line_sep>add_qos_sai_args(parser)<line_sep>add_dynamic_buffer_calculation_args(parser)<block_end>
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>sys<import_stmt>time<import_stmt>random<if_stmt>sys.version_info[:2]<le>(2 6)<block_start><try_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><except_stmt>ImportError<block_start>sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')<line_sep>sys.exit(1)<block_end><block_end><else_stmt><block_start><import_stmt>unittest<block_end><import_from_stmt>pyspark.context SparkConf SparkContext RDD<import_from_stmt>pyspark.streaming.context StreamingContext<import_from_stmt>pyspark.streaming.tests PySparkStreamingTestCase<import_from_stmt>mqtt MQTTUtils<class_stmt>MQTTStreamTests(PySparkStreamingTestCase)<block_start>timeout=20# seconds duration=1<def_stmt>setUp self<block_start>super(MQTTStreamTests self).setUp()<line_sep>MQTTTestUtilsClz=self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader().loadClass("org.apache.spark.streaming.mqtt.MQTTTestUtils")<line_sep>self._MQTTTestUtils=MQTTTestUtilsClz.newInstance()<line_sep>self._MQTTTestUtils.setup()<block_end><def_stmt>tearDown self<block_start><if_stmt>self._MQTTTestUtils<is><not><none><block_start>self._MQTTTestUtils.teardown()<line_sep>self._MQTTTestUtils=<none><block_end>super(MQTTStreamTests self).tearDown()<block_end><def_stmt>_randomTopic self<block_start><return>"topic-%d"%random.randint(0 10000)<block_end><def_stmt>_startContext self topic# Start the StreamingContext and also collect the result <block_start>stream=MQTTUtils.createStream(self.ssc "tcp://"+self._MQTTTestUtils.brokerUri() topic)<line_sep>result=[]<def_stmt>getOutput _ rdd<block_start><for_stmt>data rdd.collect()<block_start>result.append(data)<block_end><block_end>stream.foreachRDD(getOutput)<line_sep>self.ssc.start()<line_sep><return>result<block_end><def_stmt>test_mqtt_stream self<block_start>"""Test the Python MQTT stream API."""<line_sep>sendData="MQTT demo for spark streaming"<line_sep>topic=self._randomTopic()<line_sep>result=self._startContext(topic)<def_stmt>retry <block_start>self._MQTTTestUtils.publishData(topic sendData)<line_sep># Because "publishData" sends duplicate messages, here we should use > 0 self.assertTrue(len(result)<g>0)<line_sep>self.assertEqual(sendData result[0])<block_end># Retry it because we don't know when the receiver will start. self._retry_or_timeout(retry)<block_end><def_stmt>_start_context_with_paired_stream self topics<block_start>stream=MQTTUtils.createPairedStream(self.ssc "tcp://"+self._MQTTTestUtils.brokerUri() topics)<line_sep># Keep a set because records can potentially be repeated. 
result=set()<def_stmt>getOutput _ rdd<block_start><for_stmt>data rdd.collect()<block_start>result.add(data)<block_end><block_end>stream.foreachRDD(getOutput)<line_sep>self.ssc.start()<line_sep><return>result<block_end><def_stmt>test_mqtt_pair_stream self<block_start>"""Test the Python MQTT stream API with multiple topics."""<line_sep>data_records=["random string 1" "random string 2" "random string 3"]<line_sep>topics=[self._randomTopic() self._randomTopic() self._randomTopic()]<line_sep>topics_and_records=zip(topics data_records)<line_sep>result=self._start_context_with_paired_stream(topics)<def_stmt>retry <block_start><for_stmt>topic,data_record topics_and_records<block_start>self._MQTTTestUtils.publishData(topic data_record)<block_end># Sort the received records as they might be out of order. self.assertEqual(topics_and_records sorted(result key=<lambda>x:x[1]))<block_end># Retry it because we don't know when the receiver will start. self._retry_or_timeout(retry)<block_end><def_stmt>_retry_or_timeout self test_func<block_start>start_time=time.time()<while_stmt><true><block_start><try_stmt><block_start>test_func()<line_sep><break><block_end><except_stmt><block_start><if_stmt>time.time()-start_time<g>self.timeout<block_start><raise><block_end>time.sleep(0.01)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
#------------------------------------------------------------------------------ # query_one.py (Section 3.2) #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Copyright 2017, 2018, Oracle and/or its affiliates. All rights reserved. #------------------------------------------------------------------------------ <import_from_future_stmt> print_function<import_stmt>cx_Oracle<import_stmt>db_config<line_sep>con=cx_Oracle.connect(db_config.user db_config.pw db_config.dsn)<line_sep>cur=con.cursor()<line_sep>cur.execute("select * from dept order by deptno")<line_sep>row=cur.fetchone()<line_sep>print(row)<line_sep>row=cur.fetchone()<line_sep>print(row)<line_sep>
<import_stmt>time<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>reinvent_chemistry.utils get_indices_of_unique_smiles<import_from_stmt>reinvent_models.lib_invent.enums.generative_model_regime GenerativeModelRegimeEnum<import_from_stmt>reinvent_models.model_factory.configurations.model_configuration ModelConfiguration<import_from_stmt>reinvent_models.model_factory.enums.model_type_enum ModelTypeEnum<import_from_stmt>reinvent_models.model_factory.generative_model GenerativeModel<import_from_stmt>reinvent_models.model_factory.generative_model_base GenerativeModelBase<import_from_stmt>reinvent_scoring FinalSummary<import_from_stmt>reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter BaseDiversityFilter<import_from_stmt>reinvent_scoring.scoring.function.base_scoring_function BaseScoringFunction<import_from_stmt>running_modes.configurations ReinforcementLearningConfiguration<import_from_stmt>running_modes.constructors.base_running_mode BaseRunningMode<import_from_stmt>running_modes.reinforcement_learning.inception Inception<import_from_stmt>running_modes.reinforcement_learning.logging.base_reinforcement_logger BaseReinforcementLogger<import_from_stmt>running_modes.reinforcement_learning.margin_guard MarginGuard<import_from_stmt>running_modes.utils.general to_tensor<class_stmt>CoreReinforcementRunner(BaseRunningMode)<block_start><def_stmt>__init__ self critic:GenerativeModelBase actor:GenerativeModelBase configuration:ReinforcementLearningConfiguration scoring_function:BaseScoringFunction diversity_filter:BaseDiversityFilter inception:Inception logger:BaseReinforcementLogger<block_start>self._prior=critic<line_sep>self._agent=actor<line_sep>self._scoring_function=scoring_function<line_sep>self._diversity_filter=diversity_filter<line_sep>self.config=configuration<line_sep>self._logger=logger<line_sep>self._inception=inception<line_sep>self._margin_guard=MarginGuard(self)<line_sep>self._optimizer=torch.optim.Adam(self._agent.get_network_parameters() lr=self.config.learning_rate)<block_end><def_stmt>run self<block_start>self._logger.log_message("starting an RL run")<line_sep>start_time=time.time()<line_sep>self._disable_prior_gradients()<for_stmt>step range(self.config.n_steps)<block_start>seqs,smiles,agent_likelihood=self._sample_unique_sequences(self._agent self.config.batch_size)<line_sep># switch signs agent_likelihood=-agent_likelihood<line_sep>prior_likelihood=-self._prior.likelihood(seqs)<line_sep>score_summary:FinalSummary=self._scoring_function.get_final_score_for_step(smiles step)<line_sep>score=self._diversity_filter.update_score(score_summary step)<line_sep>augmented_likelihood=prior_likelihood+self.config.sigma<times>to_tensor(score)<line_sep>loss=torch.pow((augmented_likelihood-agent_likelihood) 2)<line_sep>loss,agent_likelihood=self._inception_filter(self._agent loss agent_likelihood prior_likelihood self.config.sigma smiles score)<line_sep>loss=loss.mean()<line_sep>self._optimizer.zero_grad()<line_sep>loss.backward()<line_sep>self._optimizer.step()<line_sep>self._stats_and_chekpoint(score start_time step smiles score_summary agent_likelihood prior_likelihood augmented_likelihood)<block_end>self._logger.save_final_state(self._agent self._diversity_filter)<line_sep>self._logger.log_out_input_configuration()<line_sep>self._logger.log_out_inception(self._inception)<block_end><def_stmt>_disable_prior_gradients self# There might be a more elegant way of disabling gradients <block_start><for_stmt>param 
self._prior.get_network_parameters()<block_start>param.requires_grad=<false><block_end><block_end><def_stmt>_stats_and_chekpoint self score start_time step smiles score_summary:FinalSummary agent_likelihood prior_likelihood augmented_likelihood<block_start>self._margin_guard.adjust_margin(step)<line_sep>mean_score=np.mean(score)<line_sep>self._margin_guard.store_run_stats(agent_likelihood prior_likelihood augmented_likelihood score)<line_sep>self._logger.timestep_report(start_time self.config.n_steps step smiles mean_score score_summary score agent_likelihood prior_likelihood augmented_likelihood self._diversity_filter)<line_sep>self._logger.save_checkpoint(step self._diversity_filter self._agent)<block_end><def_stmt>_sample_unique_sequences self agent batch_size<block_start>seqs,smiles,agent_likelihood=agent.sample(batch_size)<line_sep>unique_idxs=get_indices_of_unique_smiles(smiles)<line_sep>seqs_unique=seqs[unique_idxs]<line_sep>smiles_np=np.array(smiles)<line_sep>smiles_unique=smiles_np[unique_idxs]<line_sep>agent_likelihood_unique=agent_likelihood[unique_idxs]<line_sep><return>seqs_unique smiles_unique agent_likelihood_unique<block_end><def_stmt>_inception_filter self agent loss agent_likelihood prior_likelihood sigma smiles score<block_start>exp_smiles,exp_scores,exp_prior_likelihood=self._inception.sample()<if_stmt>len(exp_smiles)<g>0<block_start>exp_agent_likelihood=-agent.likelihood_smiles(exp_smiles)<line_sep>exp_augmented_likelihood=exp_prior_likelihood+sigma<times>exp_scores<line_sep>exp_loss=torch.pow((to_tensor(exp_augmented_likelihood)-exp_agent_likelihood) 2)<line_sep>loss=torch.cat((loss exp_loss) 0)<line_sep>agent_likelihood=torch.cat((agent_likelihood exp_agent_likelihood) 0)<block_end>self._inception.add(smiles score prior_likelihood)<line_sep><return>loss agent_likelihood<block_end><def_stmt>reset self reset_countdown=0<block_start>model_type_enum=ModelTypeEnum()<line_sep>model_regime=GenerativeModelRegimeEnum()<line_sep>actor_config=ModelConfiguration(model_type_enum.DEFAULT model_regime.TRAINING self.config.agent)<line_sep>self._agent=GenerativeModel(actor_config)<line_sep>self._optimizer=torch.optim.Adam(self._agent.get_network_parameters() lr=self.config.learning_rate)<line_sep>self._logger.log_message("Resetting Agent")<line_sep>self._logger.log_message(f"Adjusting sigma to: {self.config.sigma}")<line_sep><return>reset_countdown<block_end><block_end>
<import_from_stmt>. func<as>func_template<import_from_stmt>. data<as>data_template<import_from_stmt>. model<as>model_template<import_from_stmt>. criterion<as>criterion_template<import_from_stmt>. proxy<as>proxy_template<line_sep>__all__=['func_template' 'data_template' 'model_template' 'criterion_template' 'proxy_template' ]<line_sep>
<import_stmt>logging<import_from_stmt>cactus.listener.polling PollingListener<line_sep>logger=logging.getLogger(__name__)<try_stmt><block_start><import_from_stmt>cactus.listener.mac FSEventsListener<as>Listener<block_end><except_stmt>(ImportError OSError)<block_start>logger.debug("Failed to load FSEventsListener, falling back to PollingListener" exc_info=<true>)<line_sep>Listener=PollingListener<block_end>
<import_stmt>numpy<as>np<import_stmt>soundfile<as>sf<import_from_stmt>torch.utils data<class_stmt>Dataset_VoxCeleb2(data.Dataset)<block_start><def_stmt>__init__ self list_IDs base_dir nb_samp=0 labels={} cut=<true> return_label=<true> norm_scale=<true><block_start>''' self.list_IDs : list of strings (each string: utt key) self.labels : dictionary (key: utt key, value: label integer) self.nb_samp : integer, the number of timesteps for each mini-batch cut : (boolean) adjust utterance duration for mini-batch construction return_label : (boolean) norm_scale : (boolean) normalize scale alike SincNet github repo '''<line_sep>self.list_IDs=list_IDs<line_sep>self.nb_samp=nb_samp<line_sep>self.base_dir=base_dir<line_sep>self.labels=labels<line_sep>self.cut=cut<line_sep>self.return_label=return_label<line_sep>self.norm_scale=norm_scale<if_stmt>self.cut<and>self.nb_samp<eq>0<block_start><raise>ValueError('when adjusting utterance length, "nb_samp" should be input')<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.list_IDs)<block_end><def_stmt>__getitem__ self index<block_start>ID=self.list_IDs[index]<try_stmt><block_start>X,_=sf.read(self.base_dir+ID)<line_sep>X=X.astype(np.float64)<block_end><except_stmt><block_start><raise>ValueError('%s'%ID)<block_end><if_stmt>self.norm_scale<block_start>X=self._normalize_scale(X).astype(np.float32)<block_end>X=X.reshape(1 -1)#because of LayerNorm for the input <if_stmt>self.cut<block_start>nb_time=X.shape[1]<if_stmt>nb_time<g>self.nb_samp<block_start>start_idx=np.random.randint(low=0 high=nb_time-self.nb_samp)<line_sep>X=X[: start_idx:start_idx+self.nb_samp][0]<block_end><elif_stmt>nb_time<l>self.nb_samp<block_start>nb_dup=int(self.nb_samp/nb_time)+1<line_sep>X=np.tile(X (1 nb_dup))[: :self.nb_samp][0]<block_end><else_stmt><block_start>X=X[0]<block_end><block_end><if_stmt><not>self.return_label<block_start><return>X<block_end>y=self.labels[ID.split('/')[0]]<line_sep><return>X y<block_end><def_stmt>_normalize_scale self x<block_start>''' Normalize sample scale alike SincNet. 
'''<line_sep><return>x/np.max(np.abs(x))<block_end><block_end><class_stmt>TA_Dataset_VoxCeleb2(data.Dataset)<block_start><def_stmt>__init__ self list_IDs base_dir nb_samp=0 window_size=0 labels={} cut=<true> return_label=<true> norm_scale=<true><block_start>''' self.list_IDs : list of strings (each string: utt key) self.labels : dictionary (key: utt key, value: label integer) self.nb_samp : integer, the number of timesteps for each mini-batch cut : (boolean) adjust utterance duration for mini-batch construction return_label : (boolean) norm_scale : (boolean) normalize scale alike SincNet github repo '''<line_sep>self.list_IDs=list_IDs<line_sep>self.window_size=window_size<line_sep>self.nb_samp=nb_samp<line_sep>self.base_dir=base_dir<line_sep>self.labels=labels<line_sep>self.cut=cut<line_sep>self.return_label=return_label<line_sep>self.norm_scale=norm_scale<if_stmt>self.cut<and>self.nb_samp<eq>0<block_start><raise>ValueError('when adjusting utterance length, "nb_samp" should be input')<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.list_IDs)<block_end><def_stmt>__getitem__ self index<block_start>ID=self.list_IDs[index]<try_stmt><block_start>X,_=sf.read(self.base_dir+ID)<line_sep>X=X.astype(np.float64)<block_end><except_stmt><block_start><raise>ValueError('%s'%ID)<block_end><if_stmt>self.norm_scale<block_start>X=self._normalize_scale(X).astype(np.float32)<block_end>X=X.reshape(1 -1)<line_sep>list_X=[]<line_sep>nb_time=X.shape[1]<if_stmt>nb_time<l>self.nb_samp<block_start>nb_dup=int(self.nb_samp/nb_time)+1<line_sep>list_X.append(np.tile(X (1 nb_dup))[: :self.nb_samp][0])<block_end><elif_stmt>nb_time<g>self.nb_samp<block_start>step=self.nb_samp-self.window_size<line_sep>iteration=int((nb_time-self.window_size)/step)+1<for_stmt>i range(iteration)<block_start><if_stmt>i<eq>0<block_start>list_X.append(X[: :self.nb_samp][0])<block_end><elif_stmt>i<l>iteration-1<block_start>list_X.append(X[: i<times>step:i<times>step+self.nb_samp][0])<block_end><else_stmt><block_start>list_X.append(X[: -self.nb_samp:][0])<block_end><block_end><block_end><else_stmt><block_start>list_X.append(X[0])<block_end><if_stmt><not>self.return_label<block_start><return>list_X<block_end>y=self.labels[ID.split('/')[0]]<line_sep><return>list_X y<block_end><def_stmt>_normalize_scale self x<block_start>''' Normalize sample scale alike SincNet. '''<line_sep><return>x/np.max(np.abs(x))<block_end><block_end>
<import_from_stmt>framework.flask redirect<def_stmt>redirect_activity_to_search **kwargs<block_start><return>redirect('/search/')<block_end>
<import_from_stmt>.mixformer build_mixformer<import_from_stmt>.mixformer_online build_mixformer_online_score<line_sep>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """ Tests for TrainLoader and TestLoader classes when overriding the file names of the seismic and label data. """<import_stmt>tempfile<import_stmt>numpy<as>np<import_from_stmt>deepseismic_interpretation.dutchf3.data get_test_loader TrainPatchLoaderWithDepth TrainSectionLoaderWithDepth <import_stmt>pytest<import_stmt>yacs.config<import_stmt>os<line_sep># npy files dimensions IL=5<line_sep>XL=10<line_sep>D=8<line_sep>N_CLASSES=2<line_sep>CONFIG_FILE="./experiments/interpretation/dutchf3_patch/configs/unet.yaml"<with_stmt>open(CONFIG_FILE "rt")<as>f_read<block_start>config=yacs.config.load_cfg(f_read)<block_end><def_stmt>generate_npy_files path data<block_start>np.save(path data)<block_end><def_stmt>assert_dimensions test_section_loader<block_start><assert_stmt>test_section_loader.labels.shape[0]<eq>IL<assert_stmt>test_section_loader.labels.shape[1]<eq>XL<assert_stmt>test_section_loader.labels.shape[2]<eq>D<line_sep># Because add_section_depth_channels method add # 2 extra channels to a 1 channel section <assert_stmt>test_section_loader.seismic.shape[0]<eq>IL<assert_stmt>test_section_loader.seismic.shape[2]<eq>XL<assert_stmt>test_section_loader.seismic.shape[3]<eq>D<block_end><def_stmt>test_TestSectionLoader_should_load_data_from_test1_set <block_start><with_stmt>open(CONFIG_FILE "rt")<as>f_read<block_start>config=yacs.config.load_cfg(f_read)<block_end><with_stmt>tempfile.TemporaryDirectory()<as>data_dir<block_start>os.makedirs(os.path.join(data_dir "test_once"))<line_sep>os.makedirs(os.path.join(data_dir "splits"))<line_sep>seimic=np.zeros([IL XL D])<line_sep>generate_npy_files(os.path.join(data_dir "test_once" "test1_seismic.npy") seimic)<line_sep>labels=np.ones([IL XL D])<line_sep>generate_npy_files(os.path.join(data_dir "test_once" "test1_labels.npy") labels)<line_sep>txt_path=os.path.join(data_dir "splits" "section_test1.txt")<line_sep>open(txt_path "a").close()<line_sep>TestSectionLoader=get_test_loader(config)<line_sep>config.merge_from_list(["DATASET.ROOT" data_dir])<line_sep>test_set=TestSectionLoader(config split="test1")<line_sep>assert_dimensions(test_set)<block_end><block_end><def_stmt>test_TestSectionLoader_should_load_data_from_test2_set <block_start><with_stmt>tempfile.TemporaryDirectory()<as>data_dir<block_start>os.makedirs(os.path.join(data_dir "test_once"))<line_sep>os.makedirs(os.path.join(data_dir "splits"))<line_sep>seimic=np.zeros([IL XL D])<line_sep>generate_npy_files(os.path.join(data_dir "test_once" "test2_seismic.npy") seimic)<line_sep>A=np.load(os.path.join(data_dir "test_once" "test2_seismic.npy"))<line_sep>labels=np.ones([IL XL D])<line_sep>generate_npy_files(os.path.join(data_dir "test_once" "test2_labels.npy") labels)<line_sep>txt_path=os.path.join(data_dir "splits" "section_test2.txt")<line_sep>open(txt_path "a").close()<line_sep>TestSectionLoader=get_test_loader(config)<line_sep>config.merge_from_list(["DATASET.ROOT" data_dir])<line_sep>test_set=TestSectionLoader(config split="test2")<line_sep>assert_dimensions(test_set)<block_end><block_end><def_stmt>test_TestSectionLoader_should_load_data_from_path_override_data <block_start><with_stmt>tempfile.TemporaryDirectory()<as>data_dir<block_start>os.makedirs(os.path.join(data_dir "volume_name"))<line_sep>os.makedirs(os.path.join(data_dir "splits"))<line_sep>seimic=np.zeros([IL XL D])<line_sep>generate_npy_files(os.path.join(data_dir "volume_name" "seismic.npy") seimic)<line_sep>labels=np.ones([IL XL 
D])<line_sep>generate_npy_files(os.path.join(data_dir "volume_name" "labels.npy") labels)<line_sep>txt_path=os.path.join(data_dir "splits" "section_volume_name.txt")<line_sep>open(txt_path "a").close()<line_sep>TestSectionLoader=get_test_loader(config)<line_sep>config.merge_from_list(["DATASET.ROOT" data_dir])<line_sep>test_set=TestSectionLoader(config split="volume_name" is_transform=<true> augmentations=<none> seismic_path=os.path.join(data_dir "volume_name" "seismic.npy") label_path=os.path.join(data_dir "volume_name" "labels.npy") )<line_sep>assert_dimensions(test_set)<block_end><block_end><def_stmt>test_TrainPatchLoaderWithDepth_should_fail_on_missing_seismic_file tmpdir<block_start>""" Check for exception when training param is empty """<line_sep># Setup os.makedirs(os.path.join(tmpdir "volume_name"))<line_sep>os.makedirs(os.path.join(tmpdir "splits"))<line_sep>labels=np.ones([IL XL D])<line_sep>generate_npy_files(os.path.join(tmpdir "volume_name" "labels.npy") labels)<line_sep>txt_path=os.path.join(tmpdir "splits" "patch_volume_name.txt")<line_sep>open(txt_path "a").close()<line_sep>config.merge_from_list(["DATASET.ROOT" str(tmpdir)])<line_sep># Test <with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_=TrainPatchLoaderWithDepth(config split="volume_name" is_transform=<true> augmentations=<none> seismic_path=os.path.join(tmpdir "volume_name" "seismic.npy") label_path=os.path.join(tmpdir "volume_name" "labels.npy") )<block_end><assert_stmt>"does not exist"<in>str(excinfo.value)<block_end><def_stmt>test_TrainPatchLoaderWithDepth_should_fail_on_missing_label_file tmpdir<block_start>""" Check for exception when training param is empty """<line_sep># Setup os.makedirs(os.path.join(tmpdir "volume_name"))<line_sep>os.makedirs(os.path.join(tmpdir "splits"))<line_sep>seimic=np.zeros([IL XL D])<line_sep>generate_npy_files(os.path.join(tmpdir "volume_name" "seismic.npy") seimic)<line_sep>txt_path=os.path.join(tmpdir "splits" "patch_volume_name.txt")<line_sep>open(txt_path "a").close()<line_sep>config.merge_from_list(["DATASET.ROOT" str(tmpdir)])<line_sep># Test <with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_=TrainPatchLoaderWithDepth(config split="volume_name" is_transform=<true> augmentations=<none> seismic_path=os.path.join(tmpdir "volume_name" "seismic.npy") label_path=os.path.join(tmpdir "volume_name" "labels.npy") )<block_end><assert_stmt>"does not exist"<in>str(excinfo.value)<block_end><def_stmt>test_TrainPatchLoaderWithDepth_should_load_with_one_train_and_label_file tmpdir<block_start>""" Check for successful class instantiation w/ single npy file for train & label """<line_sep># Setup os.makedirs(os.path.join(tmpdir "volume_name"))<line_sep>os.makedirs(os.path.join(tmpdir "splits"))<line_sep>seimic=np.zeros([IL XL D])<line_sep>generate_npy_files(os.path.join(tmpdir "volume_name" "seismic.npy") seimic)<line_sep>labels=np.ones([IL XL D])<line_sep>generate_npy_files(os.path.join(tmpdir "volume_name" "labels.npy") labels)<line_sep>txt_dir=os.path.join(tmpdir "splits")<line_sep>txt_path=os.path.join(txt_dir "patch_volume_name.txt")<line_sep>open(txt_path "a").close()<line_sep>config.merge_from_list(["DATASET.ROOT" str(tmpdir)])<line_sep># Test train_set=TrainPatchLoaderWithDepth(config split="volume_name" is_transform=<true> augmentations=<none> seismic_path=os.path.join(tmpdir "volume_name" "seismic.npy") label_path=os.path.join(tmpdir "volume_name" "labels.npy") )<assert_stmt>train_set.labels.shape<eq>(IL XL 
D+2<times>config.TRAIN.PATCH_SIZE)<assert_stmt>train_set.seismic.shape<eq>(IL XL D+2<times>config.TRAIN.PATCH_SIZE)<block_end>
<import_stmt>asyncio<import_from_stmt>threading Thread<async_keyword><def_stmt>production_task <block_start>i=0<while_stmt>1# each iteration registers one consumption coroutine onto the loop that runs in the worker thread; thread_loop keeps receiving endless tasks that print i <block_start>asyncio.run_coroutine_threadsafe(consumption(i) thread_loop)<line_sep># note: run_coroutine_threadsafe may only be used with an event loop that is running in another thread <await>asyncio.sleep(2)# the await here is required i<augadd>1<block_end><block_end><async_keyword><def_stmt>consumption i<block_start><while_stmt><true><block_start>print("I am task {}".format(i))<line_sep><await>asyncio.sleep(1)<block_end><block_end><def_stmt>start_loop loop# run the event loop; the loop is passed in as a parameter <block_start>asyncio.set_event_loop(loop)<line_sep>loop.run_forever()<block_end># consumer loop thread_loop=asyncio.new_event_loop()# create a new event loop run_loop_thread=Thread(target=start_loop args=(thread_loop ))# run this event loop in a separate thread so it does not block the current main thread run_loop_thread.start()# start the thread; the coroutine event loop starts running with it # producer loop advocate_loop=asyncio.get_event_loop()# producer-task coroutines get registered onto this loop advocate_loop.run_until_complete(production_task())# run this loop
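# Hypothetical extension, added for illustration; it is not part of the original snippet and
# reuses only the names defined above. asyncio.run_coroutine_threadsafe returns a
# concurrent.futures.Future, so the producer can keep a handle on each scheduled consumer.
<async_keyword><def_stmt>production_task_with_handle <block_start>fut=asyncio.run_coroutine_threadsafe(consumption(0) thread_loop)<line_sep><await>asyncio.sleep(5)<line_sep>print(fut.done())# still False here, because consumption(0) loops forever <block_end>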
# Lint as: python3 # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RQMC support."""<import_from_stmt>tf_quant_finance.math.qmc utils<import_from_stmt>tf_quant_finance.math.qmc.digital_net digital_net_sample<import_from_stmt>tf_quant_finance.math.qmc.digital_net random_digital_shift<import_from_stmt>tf_quant_finance.math.qmc.digital_net random_scrambling_matrices<import_from_stmt>tf_quant_finance.math.qmc.digital_net scramble_generating_matrices<import_from_stmt>tf_quant_finance.math.qmc.lattice_rule lattice_rule_sample<import_from_stmt>tf_quant_finance.math.qmc.lattice_rule random_scrambling_vectors<import_from_stmt>tf_quant_finance.math.qmc.sobol sobol_generating_matrices<import_from_stmt>tf_quant_finance.math.qmc.sobol sobol_sample<import_from_stmt>tensorflow.python.util.all_util remove_undocumented# pylint: disable=g-direct-tensorflow-import _allowed_symbols=['digital_net_sample' 'lattice_rule_sample' 'random_digital_shift' 'random_scrambling_matrices' 'random_scrambling_vectors' 'scramble_generating_matrices' 'sobol_generating_matrices' 'sobol_sample' 'utils' ]<line_sep>remove_undocumented(__name__ _allowed_symbols)<line_sep>
""" Classes for GP models without any PP backend, using a given distance matrix. """<import_from_stmt>argparse Namespace<import_stmt>time<import_stmt>copy<import_stmt>numpy<as>np<import_from_stmt>scipy.spatial.distance cdist<import_from_stmt>bo.pp.pp_core DiscPP<import_from_stmt>bo.pp.gp.gp_utils kern_exp_quad kern_matern32 get_cholesky_decomp solve_upper_triangular solve_lower_triangular sample_mvn squared_euc_distmat kern_distmat<import_from_stmt>bo.util.print_utils suppress_stdout_stderr<class_stmt>MyGpDistmatPP(DiscPP)<block_start>""" GPs using a kernel specified by a given distance matrix, without any PP backend """<def_stmt>__init__ self data=<none> modelp=<none> printFlag=<true><block_start>""" Constructor """<line_sep>self.set_model_params(modelp)<line_sep>self.set_data(data)<line_sep>self.set_model()<line_sep>super(MyGpDistmatPP self).__init__()<if_stmt>printFlag<block_start>self.print_str()<block_end><block_end><def_stmt>set_model_params self modelp<block_start>""" Set self.modelp """<if_stmt>modelp<is><none><block_start><pass>#TODO <block_end>self.modelp=modelp<block_end><def_stmt>set_data self data<block_start>""" Set self.data """<if_stmt>data<is><none><block_start><pass>#TODO <block_end>self.data_init=copy.deepcopy(data)<line_sep>self.data=copy.deepcopy(self.data_init)<block_end><def_stmt>set_model self<block_start>""" Set GP regression model """<line_sep>self.model=self.get_model()<block_end><def_stmt>get_model self<block_start>""" Returns model object """<line_sep><return><none><block_end><def_stmt>infer_post_and_update_samples self print_result=<false><block_start>""" Update self.sample_list """<line_sep>self.sample_list=[Namespace(ls=self.modelp.kernp.ls alpha=self.modelp.kernp.alpha sigma=self.modelp.kernp.sigma)]<if_stmt>print_result<block_start>self.print_inference_result()<block_end><block_end><def_stmt>get_distmat self xmat1 xmat2<block_start>""" Get distance matrix """<line_sep>#return squared_euc_distmat(xmat1, xmat2, .5) <import_from_stmt>data Data<line_sep>self.distmat=Data.generate_distance_matrix<line_sep>#print('distmat') #print(self.distmat(xmat1, xmat2, self.modelp.distance)) <return>self.distmat(xmat1 xmat2 self.modelp.distance)<block_end><def_stmt>print_inference_result self<block_start>""" Print results of stan inference """<line_sep>print('*ls pt est = '+str(self.sample_list[0].ls)+'.')<line_sep>print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')<line_sep>print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')<line_sep>print('-----')<block_end><def_stmt>sample_pp_post_pred self nsamp input_list full_cov=<false><block_start>""" Sample from posterior predictive of PP. Inputs: input_list - list of np arrays size=(-1,) Returns: list (len input_list) of np arrays (size=(nsamp,1))."""<line_sep>samp=self.sample_list[0]<line_sep>postmu,postcov=self.gp_post(self.data.X self.data.y input_list samp.ls samp.alpha samp.sigma full_cov)<if_stmt>full_cov<block_start>ppred_list=list(sample_mvn(postmu postcov nsamp))<block_end><else_stmt><block_start>ppred_list=list(np.random.normal(postmu.reshape(-1 ) postcov.reshape(-1 ) size=(nsamp len(input_list))))<block_end><return>list(np.stack(ppred_list).T) ppred_list<block_end><def_stmt>sample_pp_pred self nsamp input_list lv=<none><block_start>""" Sample from predictive of PP for parameter lv. 
Returns: list (len input_list) of np arrays (size (nsamp,1))."""<if_stmt>lv<is><none><block_start>lv=self.sample_list[0]<block_end>postmu,postcov=self.gp_post(self.data.X self.data.y input_list lv.ls lv.alpha lv.sigma)<line_sep>pred_list=list(sample_mvn(postmu postcov 1))###TODO: sample from this mean nsamp times <return>list(np.stack(pred_list).T) pred_list<block_end><def_stmt>gp_post self x_train_list y_train_arr x_pred_list ls alpha sigma full_cov=<true><block_start>""" Compute parameters of GP posterior """<line_sep>kernel=<lambda>a b c d:kern_distmat(a b c d self.get_distmat)<line_sep>k11_nonoise=kernel(x_train_list x_train_list ls alpha)<line_sep>lmat=get_cholesky_decomp(k11_nonoise sigma 'try_first')<line_sep>smat=solve_upper_triangular(lmat.T solve_lower_triangular(lmat y_train_arr))<line_sep>k21=kernel(x_pred_list x_train_list ls alpha)<line_sep>mu2=k21.dot(smat)<line_sep>k22=kernel(x_pred_list x_pred_list ls alpha)<line_sep>vmat=solve_lower_triangular(lmat k21.T)<line_sep>k2=k22-vmat.T.dot(vmat)<if_stmt>full_cov<is><false><block_start>k2=np.sqrt(np.diag(k2))<block_end><return>mu2 k2<block_end># Utilities <def_stmt>print_str self<block_start>""" Print a description string """<line_sep>print('*MyGpDistmatPP with modelp='+str(self.modelp)+'.')<line_sep>print('-----')<block_end><block_end>
# import our libraries <import_stmt>time<import_stmt>datetime<import_from_stmt>datetime date<line_sep># get today's date today=date.today()<line_sep>print(today)<line_sep># create a custom date future_date=date(2020 1 31)<line_sep>print(future_date)<line_sep># let's create a time stamp time_stamp=time.time()<line_sep>print(time_stamp)<line_sep># create a date from a timestamp date_stamp=date.fromtimestamp(time_stamp)<line_sep>print(date_stamp)<line_sep># get components of a date print(date_stamp.year)<line_sep>print(date_stamp.month)<line_sep>print(date_stamp.day)<line_sep># ------------------------- PART TWO -------------------------- <import_from_stmt>datetime datetime date time<line_sep># create a date and a time my_date=date(2019 3 22)<line_sep>my_time=time(12 30)<line_sep># create a datetime my_datetime=datetime.combine(my_date my_time)<line_sep>print(my_datetime)<line_sep># get the different components print(my_datetime.year)<line_sep>print(my_datetime.month)<line_sep>print(my_datetime.day)<line_sep>print(my_datetime.hour)<line_sep>print(my_datetime.minute)<line_sep>
""" Various round-to-integer helpers. """<import_stmt>math<import_stmt>functools<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<line_sep>__all__=["noRound" "otRound" "maybeRound" "roundFunc" ]<def_stmt>noRound value<block_start><return>value<block_end><def_stmt>otRound value<block_start>"""Round float value to nearest integer towards ``+Infinity``. The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_) defines the required method for converting floating point values to fixed-point. In particular it specifies the following rounding strategy: for fractional values of 0.5 and higher, take the next higher integer; for other fractional values, truncate. This function rounds the floating-point value according to this strategy in preparation for conversion to fixed-point. Args: value (float): The input floating-point value. Returns float: The rounded value. """<line_sep># See this thread for how we ended up with this implementation: # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166 <return>int(math.floor(value+0.5))<block_end><def_stmt>maybeRound v tolerance round=otRound<block_start>rounded=round(v)<line_sep><return>rounded<if>abs(rounded-v)<le>tolerance<else>v<block_end><def_stmt>roundFunc tolerance round=otRound<block_start><if_stmt>tolerance<l>0<block_start><raise>ValueError("Rounding tolerance must be positive")<block_end><if_stmt>tolerance<eq>0<block_start><return>noRound<block_end><if_stmt>tolerance<ge>.5<block_start><return>round<block_end><return>functools.partial(maybeRound tolerance=tolerance round=round)<block_end>
<class_stmt>SharingCapabilities<block_start><def_stmt>__init__ self<block_start><pass><block_end>Disabled=0<line_sep>ExternalUserSharingOnly=1<line_sep>ExternalUserAndGuestSharing=2<line_sep>ExistingExternalUserSharingOnly=3<block_end>
# Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>numpy<as>np<import_stmt>os<def_stmt>generate_data mode='train' problem_type='binary'<block_start><assert_stmt>mode<eq>'train'<or>mode<eq>'test'<line_sep>rng=np.random.RandomState(1)<if_stmt>problem_type<eq>'binary'<block_start>labels=['POS' 'NEG']<block_end><else_stmt><block_start>labels=['POS' 'NEG' 'NEU']<block_end>texts=['aaa' 'bbb' 'ccc']<line_sep>counts={label:0<for>label labels}<if_stmt>mode<eq>'train'<block_start>n=1000<block_end><else_stmt><block_start>n=100<block_end>lns=[]<for_stmt>i range(n)<block_start>y=rng.choice(labels)<line_sep>counts[y]<augadd>1<line_sep>x=rng.choice(texts)<line_sep>lns.append('%s##%s\n'%(y x))<block_end>print(counts)<with_stmt>open('%s_input_%s.tribuo'%(mode problem_type) 'w')<as>f<block_start><for_stmt>ln lns<block_start>f.write(ln)<block_end><block_end><block_end><def_stmt>generate_models <block_start>lltypes=['L2R_LR' 'L2R_L2LOSS_SVC_DUAL' 'L2R_L2LOSS_SVC' 'L2R_L1LOSS_SVC_DUAL' 'MCSVM_CS' 'L1R_L2LOSS_SVC' 'L1R_LR' 'L2R_LR_DUAL']<for_stmt>lltype lltypes<block_start>cmd='./src/test/scripts/generate-model.sh %s %s %s %s'%(lltype lltype 'train_input_binary.tribuo' 'test_input_binary.tribuo')<line_sep>print(cmd)<line_sep>os.system(cmd)<block_end># multiclass model lltype='L2R_LR'<line_sep>cmd='./src/test/scripts/generate-model.sh %s %s %s %s'%(lltype lltype+'_multiclass' 'train_input_multiclass.tribuo' 'test_input_multiclass.tribuo')<line_sep>print(cmd)<line_sep>os.system(cmd)<block_end><if_stmt>__name__<eq>'__main__'<block_start>generate_data(mode='train')<line_sep>generate_data(mode='test')<line_sep>generate_data(mode='train' problem_type='multiclass')<line_sep>generate_data(mode='test' problem_type='multiclass')<line_sep>generate_models()<block_end>
# -*- coding: utf-8 -*- #!/usr/bin/python """ """<import_stmt>requests<import_stmt>time<import_from_stmt>raven Client<line_sep>client=Client('https://aee9ceb609b549fe8a85339e69c74150:8604fd36d8b04fbd9a70a81bdada5cdf@sentry.io/1223891')<line_sep>key="<KEY>"<def_stmt>check_api word<block_start>query_string={'api-key':key 'q':'"%s"'%word}<line_sep>req=requests.get('https://api.nytimes.com/svc/search/v2/articlesearch.json' params=query_string verify=<false>)<if_stmt>req.status_code<in>set([429 529 504])<block_start>time.sleep(50)<line_sep>client.captureMessage("NYT API RATELIMIT")<line_sep><return>check_api(word)<block_end><if_stmt>req.status_code<eq>500<block_start>client.captureMessage("NYT API 500" extra={'req':req 'word':word })<line_sep><return><false><block_end>result=req.json()<line_sep>num_results=len(result['response']['docs'])<line_sep><return>num_results<l>2<block_end>
<import_stmt>numpy<as>np<import_from_stmt>typing Iterable<import_from_stmt>...shader_base Nodes<import_from_stmt>..source1_shader_base Source1ShaderBase<class_stmt>Refract(Source1ShaderBase)<block_start>SHADER:str='refract'<line_sep>@property<def_stmt>bumpmap self<block_start>texture_path=self._vavle_material.get_param('$normalmap' <none>)<if_stmt>texture_path<is><not><none><block_start>image=self.load_texture_or_default(texture_path (0.5 0.5 1.0 1.0))<line_sep>image=self.convert_normalmap(image)<line_sep>image.colorspace_settings.is_data=<true><line_sep>image.colorspace_settings.name='Non-Color'<line_sep><return>image<block_end><return><none><block_end>@property<def_stmt>basetexture self<block_start>texture_path=self._vavle_material.get_param('$basetexture' <none>)<if_stmt>texture_path<is><not><none><block_start><return>self.load_texture_or_default(texture_path (0.3 0 0.3 1.0))<block_end><return><none><block_end>@property<def_stmt>color2 self<block_start>color_value,value_type=self._vavle_material.get_vector('$color2' [1 1 1])<line_sep>divider=255<if>value_type<is>int<else>1<line_sep>color_value=list(map(<lambda>a:a/divider color_value))<if_stmt>len(color_value)<eq>1<block_start>color_value=[color_value[0] color_value[0] color_value[0]]<block_end><elif_stmt>len(color_value)<g>3<block_start>color_value=color_value[:3]<block_end><return>color_value<block_end>@property<def_stmt>bluramount self<block_start>value=self._vavle_material.get_float('$bluramount' 0)<line_sep><return>value<block_end>@property<def_stmt>color self<block_start>color_value,value_type=self._vavle_material.get_vector('$color' [1 1 1])<line_sep>divider=255<if>value_type<is>int<else>1<line_sep>color_value=list(map(<lambda>a:a/divider color_value))<if_stmt>len(color_value)<eq>1<block_start>color_value=[color_value[0] color_value[0] color_value[0]]<block_end><elif_stmt>len(color_value)<g>3<block_start>color_value=color_value[:3]<block_end><return>color_value<block_end>@property<def_stmt>refracttint self<block_start>color_value,value_type=self._vavle_material.get_vector('$refracttint' [1 1 1])<line_sep>divider=255<if>value_type<is>int<else>1<line_sep>color_value=list(map(<lambda>a:a/divider color_value))<if_stmt>len(color_value)<eq>1<block_start>color_value=[color_value[0] color_value[0] color_value[0]]<block_end><return>color_value<block_end><def_stmt>create_nodes self material_name<block_start><if_stmt>super().create_nodes(material_name)<in>['UNKNOWN' 'LOADED']<block_start><return><block_end>self.bpy_material.blend_method='OPAQUE'<line_sep>self.bpy_material.shadow_method='NONE'<line_sep>self.bpy_material.use_screen_refraction=<true><line_sep>self.bpy_material.use_backface_culling=<true><line_sep>material_output=self.create_node(Nodes.ShaderNodeOutputMaterial)<line_sep>shader=self.create_node(Nodes.ShaderNodeBsdfPrincipled self.SHADER)<line_sep>self.connect_nodes(shader.outputs['BSDF'] material_output.inputs['Surface'])<line_sep>basetexture=self.basetexture<if_stmt>basetexture<block_start>self.create_and_connect_texture_node(basetexture shader.inputs['Base Color'] name='$basetexture')<block_end>bumpmap=self.bumpmap<if_stmt>bumpmap<block_start>normalmap_node=self.create_node(Nodes.ShaderNodeNormalMap)<line_sep>self.create_and_connect_texture_node(bumpmap normalmap_node.inputs['Color'] name='$bumpmap')<line_sep>self.connect_nodes(normalmap_node.outputs['Normal'] 
shader.inputs['Normal'])<line_sep>shader.inputs['Transmission'].default_value=1.0<line_sep>shader.inputs['Roughness'].default_value=self.bluramount<block_end><block_end><block_end>
<import_stmt>logging<import_stmt>logging.handlers<import_stmt>socket<import_from_stmt>dagster Field IntSource StringSource logger<class_stmt>ContextFilter(logging.Filter)<block_start>hostname=socket.gethostname()<def_stmt>filter self record<block_start>record.hostname=ContextFilter.hostname<line_sep><return><true><block_end><block_end>@logger({"log_level":Field(StringSource is_required=<false> default_value="INFO") "name":Field(StringSource is_required=<false> default_value="dagster_papertrail") "papertrail_address":Field(StringSource description="Papertrail URL" is_required=<true>) "papertrail_port":Field(IntSource description="Papertrail port" is_required=<true>) } description="A JSON-formatted console logger" )<def_stmt>papertrail_logger init_context<block_start>"""Use this logger to configure your Dagster pipeline to log to Papertrail. You'll need an active Papertrail account with URL and port. Example: .. code-block:: python @job(logger_defs={ "console": colored_console_logger, "papertrail": papertrail_logger, }) def simple_job(): ... simple_job.execute_in_process( run_config={ "loggers": { "console": { "config": { "log_level": "INFO", } }, "papertrail": { "config": { "log_level": "INFO", "name": "hello_pipeline", "papertrail_address": "127.0.0.1", "papertrail_port": 12345, } }, } } ) """<line_sep>level,name,papertrail_address,papertrail_port=(init_context.logger_config.get(k)<for>k ("log_level" "name" "papertrail_address" "papertrail_port"))<line_sep>klass=logging.getLoggerClass()<line_sep>logger_=klass(name level=level)<line_sep>log_format="%(asctime)s %(hostname)s "+name+": %(message)s"<line_sep>formatter=logging.Formatter(log_format datefmt="%b %d %H:%M:%S")<line_sep>handler=logging.handlers.SysLogHandler(address=(papertrail_address papertrail_port))<line_sep>handler.addFilter(ContextFilter())<line_sep>handler.setFormatter(formatter)<line_sep>logger_.addHandler(handler)<line_sep><return>logger_<block_end>
<import_from_stmt>typing Optional List Dict<import_from_stmt>cle.address_translator AddressTranslator<import_from_stmt>sortedcontainers SortedDict<import_from_stmt>.plugin KnowledgeBasePlugin<line_sep># TODO: Serializable <class_stmt>Patch<block_start><def_stmt>__init__ self addr new_bytes comment:Optional[str]=<none><block_start>self.addr=addr<line_sep>self.new_bytes=new_bytes<line_sep>self.comment=comment<block_end><def_stmt>__len__ self<block_start><return>len(self.new_bytes)<block_end><block_end><class_stmt>PatchManager(KnowledgeBasePlugin)<block_start>""" A placeholder-style implementation for a binary patch manager. This class should be significantly changed in the future when all data about loaded binary objects are loaded into angr knowledge base from CLE. As of now, it only stores byte-level replacements. Other angr components may choose to use or not use information provided by this manager. In other words, it is not transparent. Patches should not overlap, but it's user's responsibility to check for and avoid overlapping patches. """<def_stmt>__init__ self kb<block_start>super().__init__()<line_sep>self._patches:Dict[int Patch]=SortedDict()<line_sep>self._kb=kb<block_end><def_stmt>add_patch self addr new_bytes comment:Optional[str]=<none><block_start>self._patches[addr]=Patch(addr new_bytes comment=comment)<block_end><def_stmt>add_patch_obj self patch:Patch<block_start>self._patches[patch.addr]=patch<block_end><def_stmt>remove_patch self addr<block_start><if_stmt>addr<in>self._patches<block_start><del_stmt>self._patches[addr]<block_end><block_end><def_stmt>patch_addrs self<block_start><return>self._patches.keys()<block_end><def_stmt>get_patch self addr<block_start>""" Get patch at the given address. :param int addr: The address of the patch. :return: The patch if there is one starting at the address, or None if there isn't any. :rtype: Patch or None """<line_sep><return>self._patches.get(addr <none>)<block_end><def_stmt>get_all_patches self addr size<block_start>""" Retrieve all patches that cover a region specified by [addr, addr+size). :param int addr: The address of the beginning of the region. :param int size: Size of the region. :return: A list of patches. 
:rtype: list """<line_sep>patches=[]<for_stmt>patch_addr self._patches.irange(maximum=addr+size-1 reverse=<true>)<block_start>p=self._patches[patch_addr]<if_stmt>self.overlap(p.addr p.addr+len(p) addr addr+size)<block_start>patches.append(p)<block_end><else_stmt><block_start><break><block_end><block_end><return>patches[::-1]<block_end><def_stmt>keys self<block_start><return>self._patches.keys()<block_end><def_stmt>items self<block_start><return>self._patches.items()<block_end><def_stmt>values self<block_start><return>self._patches.values()<block_end><def_stmt>copy self<block_start>o=PatchManager(self._kb)<line_sep>o._patches=self._patches.copy()<line_sep><return>o<block_end>@staticmethod<def_stmt>overlap a0 a1 b0 b1<block_start><return>a0<le>b0<l>a1<or>a0<le>b1<l>a1<or>b0<le>a0<l>b1<block_end><def_stmt>apply_patches_to_binary self binary_bytes:Optional[bytes]=<none> patches:Optional[List[Patch]]=<none><arrow>bytes<block_start><if_stmt>patches<is><none><block_start>patches=sorted(list(self._patches.values()) key=<lambda>x:x.addr)<block_end><if_stmt>binary_bytes<is><none><block_start><with_stmt>open(self._kb._project.loader.main_object.binary "rb")<as>f<block_start>binary_bytes=f.read()<block_end><block_end><for_stmt>patch patches# convert addr to file offset <block_start>at=AddressTranslator.from_mva(patch.addr self._kb._project.loader.main_object)<line_sep>file_offset=at.to_raw()<if_stmt>file_offset<l>len(binary_bytes)<and>file_offset+len(patch.new_bytes)<l>len(binary_bytes)<block_start>binary_bytes=binary_bytes[:file_offset]+patch.new_bytes+binary_bytes[file_offset+len(patch.new_bytes):]<block_end><block_end><return>binary_bytes<block_end><block_end>KnowledgeBasePlugin.register_default('patches' PatchManager)<line_sep>
<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>skimage transform<import_from_stmt>skimage.transform estimate_transform<line_sep>source=np.array([(129 72) (302 76) (90 185) (326 193)])<line_sep>target=np.array([[0 0] [400 0] [0 400] [400 400]])<line_sep>tf=estimate_transform('projective' source target)<line_sep>H=tf.params# in older versions of skimage, this should be # H = tf._matrix print(H)<line_sep># H = np.array([[ 3.04026872e+00, 1.04929628e+00, -4.67743998e+02], # [ -1.44134582e-01, 6.23382067e+00, -4.30241727e+02], # [ 2.63620673e-05, 4.17694527e-03, 1.00000000e+00]]) <def_stmt>rectify xy<block_start>x=xy[: 0]<line_sep>y=xy[: 1]<line_sep># You must fill in your code here. # # Handy functions are: # # - np.dot (matrix multiplication) # - np.ones_like (make an array of ones the same shape as another array) # - np.column_stack # - A.T -- type .T after a matrix to transpose it # - x.reshape -- reshapes the array x # We need to provide the backward mapping HH=np.linalg.inv(H)<line_sep>homogeneous_coordinates=np.column_stack([x y np.ones_like(x)])<line_sep>xyz=np.dot(HH homogeneous_coordinates.T)<line_sep># We want one coordinate per row xyz=xyz.T<line_sep># Turn z into a column vector z=xyz[: 2]<line_sep>z=z.reshape([len(z) 1])<line_sep>xyz=xyz/z<line_sep><return>xyz[: :2]<block_end>image=plt.imread('../../images/chapel_floor.png')<line_sep>out=transform.warp(image rectify output_shape=(400 400))<line_sep>f,(ax0 ax1)=plt.subplots(1 2 figsize=(8 4))<line_sep>ax0.imshow(image)<line_sep>ax1.imshow(out)<line_sep>plt.show()<line_sep>
<def_stmt>can_shift target string<block_start><return>target<and>string<and>len(target)<eq>len(string)<and>string<in>target<times>2<block_end><assert_stmt>can_shift("abcde" "cdeab")<assert_stmt><not>can_shift("abc" "acb")<line_sep>
<import_stmt>inspect<import_stmt>numpy<import_from_stmt>amuse.units generic_unit_system<import_from_stmt>amuse datamodel<def_stmt>fill_grid_with_cloud_and_medium grid center=<none> radius=<none> rho_medium=1.0|generic_unit_system.mass/generic_unit_system.length<power>3 rho_cloud=0.1|generic_unit_system.mass/generic_unit_system.length<power>3 gamma=5.0/3.0 <block_start><pass><block_end><def_stmt>fill_grid_with_spherical_cloud grid center=<none> radius=<none> rho=1.0|generic_unit_system.mass/generic_unit_system.length<power>3 rhovx=0.0|generic_unit_system.mass/(generic_unit_system.time<times>generic_unit_system.length<power>2) rhovy=0.0|generic_unit_system.mass/(generic_unit_system.time<times>generic_unit_system.length<power>2) rhovz=0.0|generic_unit_system.mass/(generic_unit_system.time<times>generic_unit_system.length<power>2) energy=1.0|generic_unit_system.mass/(generic_unit_system.time<power>2<times>generic_unit_system.length) subgridsize=4 <block_start>radii=(grid.position-center).lengths()<if_stmt>subgridsize<le>1<block_start>selection=radii<le>radius<block_end><else_stmt><block_start>dr=grid.cellsize().length()<line_sep>selection=radii<l>(radius-dr)<block_end>grid.rho[selection]=rho(radii)<if>inspect.isroutine(rho)<else>rho<line_sep>grid.rhovx[selection]=rhovx<line_sep>grid.rhovy[selection]=rhovy<line_sep>grid.rhovz[selection]=rhovz<line_sep>grid.energy[selection]=energy<if_stmt>subgridsize<le>1<block_start><return><block_end>selection=numpy.logical_and(radii<ge>(radius-dr) radii<le>(radius+dr))<line_sep>subgrid=datamodel.Grid.create((subgridsize subgridsize subgridsize) grid.cellsize())<line_sep>subgrid.x<augsub>grid.cellsize()[0]/2.0<line_sep>subgrid.y<augsub>grid.cellsize()[1]/2.0<line_sep>subgrid.z<augsub>grid.cellsize()[2]/2.0<line_sep>x_indices,y_indices,z_indices=grid.indices()<line_sep>x_indices=x_indices[selection]<line_sep>y_indices=y_indices[selection]<line_sep>z_indices=z_indices[selection]<line_sep>position=subgrid.position<line_sep>centers=center-grid.position[selection]<line_sep>subgrid_rho=rho<times>numpy.ones_like(subgrid.x.number)<line_sep>subgrid_rhovx=rhovx<times>numpy.ones_like(subgrid.x.number)<line_sep>subgrid_rhovy=rhovy<times>numpy.ones_like(subgrid.x.number)<line_sep>subgrid_rhovz=rhovz<times>numpy.ones_like(subgrid.x.number)<line_sep>subgrid_energy=energy<times>numpy.ones_like(subgrid.x.number)<line_sep>update_grid_rho=grid.rho[selection]<line_sep>update_grid_rhovx=grid.rhovx[selection]<line_sep>update_grid_rhovy=grid.rhovy[selection]<line_sep>update_grid_rhovz=grid.rhovz[selection]<line_sep>update_grid_energy=grid.energy[selection]<for_stmt>i 
range(len(x_indices))<block_start>x_index=x_indices[i]<line_sep>y_index=y_indices[i]<line_sep>z_index=z_indices[i]<line_sep>center_of_cloud_for_subgrid=centers[i]<line_sep>radii=(position-center_of_cloud_for_subgrid).lengths()<line_sep>subgrid_rho[<ellipsis>]=update_grid_rho[i]<line_sep>subgrid_rhovx[<ellipsis>]=update_grid_rhovx[i]<line_sep>subgrid_rhovy[<ellipsis>]=update_grid_rhovy[i]<line_sep>subgrid_rhovz[<ellipsis>]=update_grid_rhovz[i]<line_sep>subgrid_energy[<ellipsis>]=update_grid_energy[i]<line_sep>subgrid_selection=radii<le>radius<line_sep>subgrid_rho[subgrid_selection]=rho<line_sep>subgrid_rhovx[subgrid_selection]=rhovx<line_sep>subgrid_rhovy[subgrid_selection]=rhovy<line_sep>subgrid_rhovz[subgrid_selection]=rhovz<line_sep>subgrid_energy[subgrid_selection]=energy<line_sep>update_grid_rho[i]=subgrid_rho.mean()<line_sep>update_grid_rhovx[i]=subgrid_rhovx.mean()<line_sep>update_grid_rhovy[i]=subgrid_rhovy.mean()<line_sep>update_grid_rhovz[i]=subgrid_rhovz.mean()<line_sep>update_grid_energy[i]=subgrid_energy.mean()<block_end>grid.rho[selection]=update_grid_rho<line_sep>grid.rhovx[selection]=update_grid_rhovx<line_sep>grid.rhovy[selection]=update_grid_rhovy<line_sep>grid.rhovz[selection]=update_grid_rhovz<line_sep>grid.energy[selection]=update_grid_energy<block_end><def_stmt>fill_grid_with_cloud_shock grid center=<none> radius=<none> ratio_densities=10.0 mach_number=2.7 gamma=5.0/3.0 subgridsize=4 <block_start>velocity_unit=generic_unit_system.length/generic_unit_system.time<line_sep>momentum_unit=generic_unit_system.mass/(generic_unit_system.time<times>generic_unit_system.length<power>2)<line_sep>density_unit=generic_unit_system.mass/generic_unit_system.length<power>3<line_sep>energy_unit=generic_unit_system.mass/(generic_unit_system.time<power>2<times>generic_unit_system.length)<line_sep>velocity_of_medium=(numpy.sqrt(gamma<times>(gamma-1.0)<times>ratio_densities)<times>mach_number)|velocity_unit<line_sep>rho_in_cloud=1.0|density_unit<line_sep>rhovx_in_cloud=0.0|momentum_unit<line_sep>rhovy_in_cloud=0.0|momentum_unit<line_sep>rhovz_in_cloud=0.0|momentum_unit<line_sep>energy_in_cloud=1.0|energy_unit<line_sep>rho_in_medium=1.0/ratio_densities|density_unit<line_sep>rhovx_in_medium=0.0|momentum_unit<line_sep>rhovy_in_medium=rho_in_medium<times>velocity_of_medium<line_sep>rhovz_in_medium=0.0|momentum_unit<line_sep>energy_in_medium=(1.0|energy_unit)+(0.5<times>rho_in_medium<times>velocity_of_medium<power>2)<line_sep>grid.rho=rho_in_medium<line_sep>grid.rhovx=rhovx_in_medium<line_sep>grid.rhovy=rhovy_in_medium<line_sep>grid.rhovz=rhovz_in_medium<line_sep>grid.energy=energy_in_medium<line_sep>fill_grid_with_spherical_cloud(grid center radius rho_in_cloud rhovx_in_cloud rhovy_in_cloud rhovz_in_cloud energy_in_cloud subgridsize)<block_end>
<import_stmt>quadratic_provers<as>q<line_sep>data=q.eval_across_field([1 2 3 4] 11)<line_sep>qproof=q.mk_quadratic_proof(data 4 11)<assert_stmt>q.check_quadratic_proof(data qproof 4 5 11)<line_sep>data2=q.eval_across_field(range(36) 97)<line_sep>cproof=q.mk_column_proof(data2 36 97)<assert_stmt>q.check_column_proof(data2 cproof 36 10 97)<line_sep>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<import_from_stmt>. MultiheadAttention<class_stmt>MultiBranch(nn.Module)<block_start><def_stmt>__init__ self branches embed_dim_list<block_start>super().__init__()<line_sep>self.branches=nn.ModuleList(branches)<line_sep>self.embed_dim_list=embed_dim_list<block_end><def_stmt>forward self query key value key_padding_mask=<none> incremental_state=<none> need_weights=<true> static_kv=<false> attn_mask=<none><block_start>tgt_len,bsz,embed_size=query.size()<assert_stmt>sum(self.embed_dim_list)<eq>embed_size<line_sep>out=[]<line_sep>attn=<none><line_sep>start=0<for_stmt>idx,embed_dim enumerate(self.embed_dim_list)<block_start>branch=self.branches[idx]<line_sep>branch_type=type(branch)<line_sep>q=query[<ellipsis> start:start+embed_dim]<if_stmt>key<is><not><none><block_start><assert_stmt>value<is><not><none><line_sep>k,v=key[<ellipsis> start:start+embed_dim] value[<ellipsis> start:start+embed_dim]<block_end>start<augadd>embed_dim<if_stmt>branch_type<eq>MultiheadAttention<block_start>x,attn=branch(q k v key_padding_mask incremental_state need_weights static_kv attn_mask)<block_end><else_stmt><block_start>mask=key_padding_mask<if_stmt>mask<is><not><none><block_start>q=q.masked_fill(mask.transpose(0 1).unsqueeze(2) 0)<block_end>x=branch(q.contiguous() incremental_state=incremental_state)<block_end>out.append(x)<block_end>out=torch.cat(out dim=-1)<line_sep><return>out attn<block_end><block_end>
# Copyright (c) 2017-2020 <NAME>. # Author: <NAME> # Email: <EMAIL> # Update: 2020 - 2 - 7 <import_stmt>re<import_from_stmt>concurrent futures<import_from_stmt>pathlib Path<import_stmt>copy<import_stmt>yaml<import_from_stmt>.VirtualFile ImageFile RawFile<import_from_stmt>..Util Config to_list<try_stmt><block_start><import_from_stmt>yaml FullLoader<as>_Loader<block_end><except_stmt>ImportError<block_start><import_from_stmt>yaml Loader<as>_Loader<block_end>IMAGE_SUF=('PNG' 'JPG' 'JPEG' 'BMP' 'TIFF' 'TIF' 'GIF')<line_sep>VIDEO_SUF={'NV12':'NV12' 'YUV':'YV12' 'YV12':'YV12' 'NV21':'NV21' 'YV21':'YV21' 'RGB':'RGB'}<def_stmt>_supported_image x:Path<block_start><return>x.suffix[1:].upper()<in>IMAGE_SUF<block_end><def_stmt>_supported_video x:Path<block_start><return>x.suffix[1:].upper()<in>VIDEO_SUF<block_end><def_stmt>_supported_suffix x:Path<block_start><return>_supported_image(x)<or>_supported_video(x)<block_end><class_stmt>Dataset(object)<block_start>""" Make a `dataset` object """<def_stmt>__init__ self *folders<block_start>self.dirs=list(map(Path folders))<line_sep>self.recursive=<true><line_sep>self.glob_patterns=('*' )<line_sep>self.inc_patterns=<none><line_sep>self.exc_patterns=<none><line_sep>self.as_video=<false><line_sep>self.compiled=<none><block_end><def_stmt>use_like_video_ self<block_start>self.as_video=<true><block_end><def_stmt>use_like_video self<block_start>d=copy.copy(self)<line_sep>d.compiled=<none><line_sep>d.use_like_video_()<line_sep><return>d<block_end><def_stmt>include_ self *pattern:str<block_start>self.glob_patterns=list(pattern)<line_sep>self.inc_patterns=<none><block_end><def_stmt>include self *pattern:str<block_start>d=copy.copy(self)<line_sep>d.compiled=<none><line_sep>d.include_(*pattern)<line_sep><return>d<block_end><def_stmt>include_reg_ self *reg:str<block_start>self.inc_patterns=[re.compile(r)<for>r reg]<line_sep>self.glob_patterns=('*' )<block_end><def_stmt>include_reg self *reg:str<block_start>d=copy.copy(self)<line_sep>d.compiled=<none><line_sep>d.include_reg_(*reg)<line_sep><return>d<block_end><def_stmt>exclude_ self *reg:str<block_start>self.exc_patterns=[re.compile(r)<for>r reg]<block_end><def_stmt>exclude self *reg:str<block_start>d=copy.copy(self)<line_sep>d.compiled=<none><line_sep>d.exclude_(*reg)<line_sep><return>d<block_end><def_stmt>compile self<block_start><if_stmt>self.compiled<block_start><return>self.compiled<block_end>files=[]<def_stmt>_exc x:Path<block_start><if_stmt>self.exc_patterns<block_start><for_stmt>reg self.exc_patterns<block_start><if_stmt>reg.search(str(x.absolute().as_posix()))<block_start><return><false><block_end><block_end><block_end><return><true><block_end><def_stmt>_inc x:Path<block_start><if_stmt>self.inc_patterns<block_start><for_stmt>reg self.inc_patterns<block_start><if_stmt>reg.search(str(x.absolute().as_posix()))<block_start><return><true><block_end><block_end><block_end><return><false><block_end><for_stmt>folder self.dirs<block_start><if_stmt><not>Path(folder).exists()<block_start><continue><block_end>nodes=[]<if_stmt>folder.is_file()# if points to a file rather than a directory <block_start>nodes.append(folder)<block_end>fn_glob=Path.rglob<if>self.recursive<else>Path.glob<for_stmt>pat self.glob_patterns<block_start>nodes<augadd>list(fn_glob(folder pat))<block_end><if_stmt>self.inc_patterns<block_start>nodes=filter(_inc nodes)<block_end>files<augadd>list(filter(_exc filter(_supported_suffix nodes)))<block_end>image_nodes=list(filter(_supported_image 
files))<if_stmt><not>self.as_video<block_start>self.compiled=Container(sorted(image_nodes) self.as_video)<line_sep><return>self.compiled<block_end>video_nodes=list(filter(_supported_video files))<line_sep>video_nodes<augadd>list(map(<lambda>x:x.parent image_nodes))<line_sep>video_nodes=list(set(video_nodes))# remove duplicated nodes self.compiled=Container(sorted(video_nodes) self.as_video)<line_sep><return>self.compiled<block_end><block_end><class_stmt>Container(object)<block_start>"""Frames container """<def_stmt>__init__ self urls is_video:bool<block_start><assert_stmt>isinstance(urls (list tuple))<line_sep>pool=futures.ThreadPoolExecutor(4)<line_sep>fs=[]<line_sep>self.nodes=[]<def_stmt>_parse_image_node url:Path<block_start><if_stmt>url.is_dir()<block_start><for_stmt>i filter(_supported_image url.glob('*'))<block_start>self.nodes.append(ImageFile(i rewind=<true>))<block_end><block_end><elif_stmt>_supported_image(url)<block_start>self.nodes.append(ImageFile(url rewind=<true>))<block_end><block_end><def_stmt>_parse_video_node url:Path<block_start><if_stmt>_supported_video(url)<block_start>size=re.findall("\\d+x\\d+" url.stem)<if_stmt>size<block_start>size=[int(x)<for>x size[0].split('x')]<line_sep>self.nodes.append(RawFile(url VIDEO_SUF[url.suffix[1:].upper()] size rewind=<true>))<block_end><block_end><elif_stmt>url.is_dir()<block_start>self.nodes.append(ImageFile(url))<block_end><block_end><for_stmt>j urls<block_start><if_stmt>is_video<block_start>fs.append(pool.submit(_parse_video_node j))<block_end><else_stmt><block_start>fs.append(pool.submit(_parse_image_node j))<block_end><block_end>futures.as_completed(fs)<line_sep>pool.shutdown()<line_sep>self.nodes=sorted(self.nodes key=<lambda>x:x.path)<block_end><def_stmt>__getitem__ self item<block_start><return>self.nodes[item]<block_end><def_stmt>__len__ self<block_start><return>len(self.nodes)<block_end>@property<def_stmt>capacity self<block_start><if_stmt><not>self.nodes<block_start><return>0<block_end>pos=0<line_sep>max_sz=0<line_sep>total_frames=0<for_stmt>i,n enumerate(self.nodes)<block_start>total_frames<augadd>n.frames<if_stmt>n.size()<g>max_sz<block_start>max_sz=n.size()<line_sep>pos=i<block_end><block_end>shape=self.nodes[pos].shape<line_sep>max_bpp=3<line_sep><return>shape[0]<times>shape[1]<times>max_bpp<times>total_frames<block_end><block_end><def_stmt>load_datasets describe_file key=''<block_start>"""load dataset described in YAML file"""<def_stmt>_extend_pattern url<block_start>_url=root/Path(url)<line_sep>url_p=_url<while_stmt><true><block_start><try_stmt><block_start><if_stmt>url_p.exists()<block_start><break><block_end><block_end><except_stmt>OSError<block_start>url_p=url_p.parent<line_sep><continue><block_end><if_stmt>url_p<eq>url_p.parent<block_start><break><block_end>url_p=url_p.parent<block_end># retrieve glob pattern url_r=str(_url.relative_to(url_p))<if_stmt>url_r<eq>'.'<and>url_p.is_dir()<block_start><return>str(Path(url)/'**/*')<block_end><return>url<block_end><def_stmt>_get_dataset desc use_as_video=<none> name=<none><block_start>dataset=Config(name=name)<for_stmt>i desc<block_start><if_stmt>i<not><in>('train' 'val' 'test')<block_start><continue><block_end><if_stmt>isinstance(desc[i] dict)<block_start>hr=to_list(desc[i].get('hr'))<line_sep>lr=to_list(desc[i].get('lr'))<block_end><else_stmt><block_start>hr=to_list(desc[i])<line_sep>lr=[]<block_end><if_stmt>use_as_video<block_start>hr_pattern=[x<if>x<not><in>all_path<and>x+'[video]'<not><in>all_path<else>all_path[x+'[video]']<for>x 
hr]<line_sep>lr_pattern=[x<if>x<not><in>all_path<and>x+'[video]'<not><in>all_path<else>all_path[x+'[video]']<for>x lr]<block_end><else_stmt><block_start>hr_pattern=[x<if>x<not><in>all_path<else>all_path[x]<for>x hr]<line_sep>lr_pattern=[x<if>x<not><in>all_path<else>all_path[x]<for>x lr]<block_end>hr_data=Dataset(root).include(*(_extend_pattern(x)<for>x hr_pattern))<line_sep>lr_data=Dataset(root).include(*(_extend_pattern(x)<for>x lr_pattern))<if>lr_pattern<else><none><line_sep>hr_data.recursive=<false><if_stmt>lr_data<is><not><none><block_start>lr_data.recursive=<false><block_end><if_stmt>use_as_video<block_start>hr_data.use_like_video_()<if_stmt>lr_data<is><not><none><block_start>lr_data.use_like_video_()<block_end><block_end>setattr(dataset i Config(hr=hr_data lr=lr_data))<block_end><return>dataset<block_end>datasets=Config()<with_stmt>open(describe_file 'r')<as>fd<block_start>config=yaml.load(fd Loader=_Loader)<line_sep>root=Path(config["Root"])<if_stmt><not>root.is_absolute()# make `root` relative to the file <block_start>root=Path(describe_file).resolve().parent/root<line_sep>root=root.resolve()<block_end>all_path=config["Path"]<if_stmt>key.upper()<in>config["Dataset"]<block_start><return>_get_dataset(config["Dataset"][key.upper()] name=key)<block_end><elif_stmt>key.upper()+'[video]'<in>config["Dataset"]<block_start><return>_get_dataset(config["Dataset"][key.upper()+'[video]'] <true> name=key)<block_end><elif_stmt>key.upper()<in>all_path<block_start><return>_get_dataset(Config(test=all_path[key.upper()]) name=key)<block_end><elif_stmt>key.upper()+'[video]'<in>all_path<block_start><return>_get_dataset(Config(test=all_path[key.upper()+'[video]']) <true> name=key)<block_end><for_stmt>name,value config["Dataset"].items()<block_start><if_stmt>'[video]'<in>name<block_start>name=name.replace('[video]' '')<line_sep>datasets[name]=_get_dataset(value <true> name=name)<block_end><else_stmt><block_start>datasets[name]=_get_dataset(value name=name)<block_end><block_end><for_stmt>name all_path<block_start><if_stmt>'[video]'<in>name<block_start>_name=name.replace('[video]' '')<line_sep>datasets[_name]=_get_dataset(Config(test=all_path[name]) <true> name=_name)<block_end><else_stmt><block_start>datasets[name]=_get_dataset(Config(test=all_path[name]) name=name)<block_end><block_end><return>datasets<block_end><block_end>
"""Miscellaneous utility functions."""<import_from_stmt>functools reduce<import_from_stmt>PIL Image<import_stmt>numpy<as>np<import_from_stmt>matplotlib.colors rgb_to_hsv hsv_to_rgb<import_stmt>spacy<import_stmt>re<import_stmt>cv2<import_stmt>time<import_from_stmt>keras_bert.tokenizer Tokenizer<import_from_stmt>keras_bert.loader load_trained_model_from_checkpoint load_vocabulary<import_from_stmt>keras_bert extract_embeddings<import_stmt>os<def_stmt>compose *funcs<block_start>"""Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ """<line_sep># return lambda x: reduce(lambda v, f: f(v), funcs, x) <if_stmt>funcs<block_start><return>reduce(<lambda>f g:<lambda>*a **kw:g(f(*a **kw)) funcs)<block_end><else_stmt><block_start><raise>ValueError('Composition of empty sequence not supported.')<block_end><block_end><def_stmt>letterbox_image image size<block_start>'''resize image with unchanged aspect ratio using padding'''<line_sep>iw,ih=image.size<line_sep>w,h=size<line_sep>scale=min(w/iw h/ih)<line_sep>nw=int(iw<times>scale)<line_sep>nh=int(ih<times>scale)<line_sep>image=image.resize((nw nh) Image.BICUBIC)<line_sep>new_image=Image.new('RGB' size (128 128 128))<line_sep>new_image.paste(image ((w-nw)<floordiv>2 (h-nh)<floordiv>2))<line_sep><return>new_image<block_end><def_stmt>rand a=0 b=1<block_start><return>np.random.rand()<times>(b-a)+a<block_end><def_stmt>get_bert_input text vocabs max_len=512<block_start>tokenizer=Tokenizer(vocabs cased=<false>)<line_sep>token=[]<line_sep>segment=[]<line_sep>token,segment=tokenizer.encode(text max_len=max_len)<line_sep>token.append(token)<line_sep>segment.append(segment)<line_sep>token.extend([0]<times>(max_len-len(token)))<line_sep>segment.extend([0]<times>(max_len-len(token)))<line_sep><return>[token segment]<block_end><def_stmt>seq_to_list s<block_start>''' note: 2018.10.3 use for process sentences '''<line_sep>t_str=s.lower()<for_stmt>i [r'\?' r'\!' r'\'' r'\"' r'\$' r'\:' r'\@' r'\(' r'\)' r'\,' r'\.' r'\;' r'\n']<block_start>t_str=re.sub(i '' t_str)<block_end><for_stmt>i [r'\-' r'\/']<block_start>t_str=re.sub(i ' ' t_str)<block_end>q_list=re.sub(r'\?' 
'' t_str.lower()).split(' ')<line_sep>q_list=list(filter(<lambda>x:len(x)<g>0 q_list))<line_sep><return>q_list<block_end><def_stmt>qlist_to_vec max_length q_list embed<block_start>''' note: 2018.10.3 use for process sentences '''<line_sep>glove_matrix=[]<line_sep>glove_dict={}<line_sep>q_len=len(q_list)<if_stmt>q_len<g>max_length<block_start>q_len=max_length<block_end><for_stmt>i range(max_length)<block_start><if_stmt>i<l>q_len<block_start>w=q_list[i]<if_stmt>w<not><in>glove_dict<block_start>glove_dict[w]=embed(u'%s'%w).vector<block_end>glove_matrix.append(glove_dict[w])<block_end><else_stmt><block_start>glove_matrix.append(np.zeros(300 dtype=float))<block_end><block_end><return>np.array(glove_matrix)<block_end><def_stmt>get_random_data annotation_line input_shape embed config train_mode=<true> max_boxes=1<block_start>'''random preprocessing for real-time data augmentation'''<line_sep>SEG_DIR=config['seg_gt_path']<line_sep>line=annotation_line.split()<line_sep>h,w=input_shape<line_sep>stop=len(line)<for_stmt>i range(1 len(line))<block_start><if_stmt>(line[i]<eq>'~')<block_start>stop=i<line_sep><break><block_end><block_end># print(line[1:stop]) box_=np.array([np.array(list(map(int box.split(','))))<for>box line[1:stop]])<line_sep>box=np.zeros([1 5])<line_sep>seg_id=box_[0][-1]<line_sep>box[0]=box_[0][:-1]<line_sep>seg_map=np.load(os.path.join(SEG_DIR str(seg_id)+'.npy'))<line_sep>seg_map_ori=np.array(seg_map).astype(np.float32)<line_sep>seg_map=Image.fromarray(seg_map_ori)<line_sep># print(np.shape(box)) # print(box) ##################################### #sentence process maxlength set to 20 and random choose one for train sentences=[]<line_sep>sent_stop=stop+1<for_stmt>i range(stop+1 len(line))<block_start><if_stmt>line[i]<eq>'~'<block_start>sentences.append(line[sent_stop:i])<line_sep>sent_stop=i+1<block_end><block_end>sentences.append(line[sent_stop:len(line)])<line_sep>choose_index=np.random.choice(len(sentences))<line_sep>sentence=sentences[choose_index]<line_sep># print(qlist) <if_stmt>config['use_bert']<block_start>vocabs=load_vocabulary(config['bert_path']+'/vocab.txt')<line_sep>word_vec=get_bert_input(sentence vocabs 512)<block_end><else_stmt><block_start>word_vec=qlist_to_vec(config['word_len'] sentence embed)<block_end># print(word_vec) # print(np.shape(word_vec)) ####################################### image=Image.open(os.path.join(config['image_path'] line[0]))<line_sep>iw,ih=image.size<line_sep>scale=min(w/iw h/ih)<line_sep>nw=int(iw<times>scale)<line_sep>nh=int(ih<times>scale)<line_sep>dx=(w-nw)<floordiv>2<line_sep>dy=(h-nh)<floordiv>2<line_sep>ori_image=image<line_sep>image=image.resize((nw nh) Image.BICUBIC)<line_sep>new_image=Image.new('RGB' (w h) (128 128 128))<line_sep>new_image.paste(image (dx dy))<line_sep>image_data=np.array(new_image)/255.<line_sep>seg_map=seg_map.resize((nw nh))<line_sep>new_map=Image.new('L' (w h) (0))<line_sep>new_map.paste(seg_map (dx dy))<line_sep>seg_map_data=np.array(new_map)<line_sep>seg_map_data=cv2.resize(seg_map_data (seg_map_data.shape[0]<floordiv>config['seg_out_stride'] seg_map_data.shape[0]<floordiv>config['seg_out_stride']) interpolation=cv2.INTER_NEAREST)<line_sep>seg_map_data=np.reshape(seg_map_data [np.shape(seg_map_data)[0] np.shape(seg_map_data)[1] 1])<line_sep># print(new_image.size) # correct boxes box_data=np.zeros((max_boxes 5))<if_stmt>len(box)<g>0<block_start><if_stmt>len(box)<g>max_boxes<block_start>box=box[:max_boxes]<block_end>box[: [0 2]]=box[: [0 2]]<times>scale+dx<line_sep>box[: [1 3]]=box[: [1 
3]]<times>scale+dy<line_sep>box_data[:len(box)]=box<block_end>box_data=box_data[: 0:4]#delete classfy <if_stmt><not>train_mode<block_start>word_vec=[qlist_to_vec(config['word_len'] sent embed)<for>sent sentences]<line_sep><return>image_data box_data word_vec ori_image sentences np.expand_dims(seg_map_ori -1)<block_end><return>image_data box_data word_vec seg_map_data<block_end><def_stmt>lr_step_decay lr_start=0.001 steps=[30 40]<block_start><def_stmt>get_lr epoch<block_start>decay_rate=len(steps)<for_stmt>i,e enumerate(steps)<block_start><if_stmt>epoch<l>e<block_start>decay_rate=i<line_sep><break><block_end><block_end>lr=lr_start/(10<power>(decay_rate))<line_sep><return>lr<block_end><return>get_lr<block_end>#powre decay <def_stmt>lr_power_decay lr_start=2.5e-4 lr_power=0.9 warm_up_lr=0. step_all=45<times>1414 warm_up_step=1000# step_per_epoch=3286 <block_start><def_stmt>warm_up base_lr lr cur_step end_step<block_start><return>base_lr+(lr-base_lr)<times>cur_step/end_step<block_end><def_stmt>get_learningrate epoch<block_start><if_stmt>epoch<l>warm_up_step<block_start>lr=warm_up(warm_up_lr lr_start epoch warm_up_step)<block_end><else_stmt><block_start>lr=lr_start<times>((1-float(epoch-warm_up_step)/(step_all-warm_up_step))<power>lr_power)<block_end><return>lr<line_sep># print("learning rate is", lr) <block_end><return>get_learningrate<block_end>
<import_from_stmt>typing TYPE_CHECKING Optional<import_from_stmt>django_countries countries<import_from_stmt>.interface ShippingMethodData<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>.models ShippingMethod<block_end><def_stmt>default_shipping_zone_exists zone_pk=<none><block_start><import_from_stmt>.models ShippingZone<line_sep><return>ShippingZone.objects.exclude(pk=zone_pk).filter(default=<true>)<block_end><def_stmt>get_countries_without_shipping_zone <block_start>"""Return countries that are not assigned to any shipping zone."""<import_from_stmt>.models ShippingZone<line_sep>covered_countries=set()<for_stmt>zone ShippingZone.objects.all()<block_start>covered_countries.update({c.code<for>c zone.countries})<block_end><return>(country[0]<for>country countries<if>country[0]<not><in>covered_countries)<block_end><def_stmt>convert_to_shipping_method_data shipping_method:Optional["ShippingMethod"] <arrow>Optional["ShippingMethodData"]<block_start><if_stmt><not>shipping_method<block_start><return><none><block_end><return>ShippingMethodData(id=str(shipping_method.id) name=shipping_method.name price=getattr(shipping_method "price" <none>) description=shipping_method.description type=shipping_method.type excluded_products=shipping_method.excluded_products channel_listings=shipping_method.channel_listings minimum_order_weight=shipping_method.minimum_order_weight maximum_order_weight=shipping_method.maximum_order_weight maximum_delivery_days=shipping_method.maximum_delivery_days minimum_delivery_days=shipping_method.minimum_delivery_days metadata=shipping_method.metadata private_metadata=shipping_method.private_metadata )<block_end>
""" Provides access to randomness generators. ========================================= .. versionadded:: 2014.7.0 """<import_stmt>base64<import_stmt>hashlib<import_stmt>random<import_stmt>salt.utils.pycrypto<import_from_stmt>salt.exceptions SaltInvocationError<line_sep>ALGORITHMS_ATTR_NAME="algorithms_guaranteed"<line_sep># Define the module's virtual name __virtualname__="random"<def_stmt>__virtual__ <block_start><return>__virtualname__<block_end><def_stmt>hash value algorithm="sha512"<block_start>""" .. versionadded:: 2014.7.0 Encodes a value with the specified encoder. value The value to be hashed. algorithm : sha512 The algorithm to use. May be any valid algorithm supported by hashlib. CLI Example: .. code-block:: bash salt '*' random.hash 'I am a string' md5 """<if_stmt>isinstance(value str)# Under Python 3 we must work with bytes <block_start>value=value.encode(__salt_system_encoding__)<block_end><if_stmt>hasattr(hashlib ALGORITHMS_ATTR_NAME)<and>algorithm<in>getattr(hashlib ALGORITHMS_ATTR_NAME)<block_start>hasher=hashlib.new(algorithm)<line_sep>hasher.update(value)<line_sep>out=hasher.hexdigest()<block_end><elif_stmt>hasattr(hashlib algorithm)<block_start>hasher=hashlib.new(algorithm)<line_sep>hasher.update(value)<line_sep>out=hasher.hexdigest()<block_end><else_stmt><block_start><raise>SaltInvocationError("You must specify a valid algorithm.")<block_end><return>out<block_end><def_stmt>str_encode value encoder="base64"<block_start>""" .. versionadded:: 2014.7.0 value The value to be encoded. encoder : base64 The encoder to use on the subsequent string. CLI Example: .. code-block:: bash salt '*' random.str_encode 'I am a new string' base64 """<if_stmt>isinstance(value str)<block_start>value=value.encode(__salt_system_encoding__)<block_end><if_stmt>encoder<eq>"base64"<block_start><try_stmt><block_start>out=base64.b64encode(value)<line_sep>out=out.decode(__salt_system_encoding__)<block_end><except_stmt>TypeError<block_start><raise>SaltInvocationError("Value must be an encode-able string")<block_end><block_end><else_stmt><block_start><try_stmt><block_start>out=value.encode(encoder)<block_end><except_stmt>LookupError<block_start><raise>SaltInvocationError("You must specify a valid encoder")<block_end><except_stmt>AttributeError<block_start><raise>SaltInvocationError("Value must be an encode-able string")<block_end><block_end><return>out<block_end><def_stmt>get_str length=20 chars=<none> lowercase=<true> uppercase=<true> digits=<true> punctuation=<true> whitespace=<false> printable=<false> <block_start>""" .. versionadded:: 2014.7.0 .. versionchanged:: 3004.0 Changed the default character set used to include symbols and implemented arguments to control the used character set. Returns a random string of the specified length. length : 20 Any valid number of bytes. chars : None .. versionadded:: 3004.0 String with any character that should be used to generate random string. This argument supersedes all other character controlling arguments. lowercase : True .. versionadded:: 3004.0 Use lowercase letters in generated random string. (see :py:data:`string.ascii_lowercase`) This argument is superseded by chars. uppercase : True .. versionadded:: 3004.0 Use uppercase letters in generated random string. (see :py:data:`string.ascii_uppercase`) This argument is superseded by chars. digits : True .. versionadded:: 3004.0 Use digits in generated random string. (see :py:data:`string.digits`) This argument is superseded by chars. printable : False .. 
versionadded:: 3004.0 Use printable characters in generated random string and includes lowercase, uppercase, digits, punctuation and whitespace. (see :py:data:`string.printable`) It is disabled by default as includes whitespace characters which some systems do not handle well in passwords. This argument also supersedes all other classes because it includes them. This argument is superseded by chars. punctuation : True .. versionadded:: 3004.0 Use punctuation characters in generated random string. (see :py:data:`string.punctuation`) This argument is superseded by chars. whitespace : False .. versionadded:: 3004.0 Use whitespace characters in generated random string. (see :py:data:`string.whitespace`) It is disabled by default as some systems do not handle whitespace characters in passwords well. This argument is superseded by chars. CLI Example: .. code-block:: bash salt '*' random.get_str 128 salt '*' random.get_str 128 chars='abc123.!()' salt '*' random.get_str 128 lowercase=False whitespace=True """<line_sep><return>salt.utils.pycrypto.secure_password(length=length chars=chars lowercase=lowercase uppercase=uppercase digits=digits punctuation=punctuation whitespace=whitespace printable=printable )<block_end><def_stmt>shadow_hash crypt_salt=<none> password=<none> algorithm="<PASSWORD>"<block_start>""" Generates a salted hash suitable for /etc/shadow. crypt_salt : None Salt to be used in the generation of the hash. If one is not provided, a random salt will be generated. password : None Value to be salted and hashed. If one is not provided, a random password will be generated. algorithm : sha512 Hash algorithm to use. CLI Example: .. code-block:: bash salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5 """<line_sep><return>salt.utils.pycrypto.gen_hash(crypt_salt password algorithm)<block_end><def_stmt>rand_int start=1 end=10 seed=<none><block_start>""" Returns a random integer number between the start and end number. .. versionadded:: 2015.5.3 start : 1 Any valid integer number end : 10 Any valid integer number seed : Optional hashable object .. versionchanged:: 2019.2.0 Added seed argument. Will return the same result when run with the same seed. CLI Example: .. code-block:: bash salt '*' random.rand_int 1 10 """<if_stmt>seed<is><not><none><block_start>random.seed(seed)<block_end><return>random.randint(start end)<block_end><def_stmt>seed range=10 hash=<none><block_start>""" Returns a random number within a range. Optional hash argument can be any hashable object. If hash is omitted or None, the id of the minion is used. .. versionadded:: 2015.8.0 hash: None Any hashable object. range: 10 Any valid integer number CLI Example: .. code-block:: bash salt '*' random.seed 10 hash=None """<if_stmt>hash<is><none><block_start>hash=__grains__["id"]<block_end>random.seed(hash)<line_sep><return>random.randrange(range)<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.friendship friendship<def_stmt>test_friendship <block_start>"""Test module friendship.py by downloading friendship.csv and testing shape of extracted data has 0 rows and 7 columns """<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=friendship(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(0 7)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise><block_end><block_end>
<import_stmt>heapq<line_sep>H=[21 1 45 78 3 5]<line_sep># Convert to a heap heapq.heapify(H)<line_sep>print(H)<line_sep># Add element heapq.heappush(H 8)<line_sep>print(H)<line_sep>
""" The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0). https://creativecommons.org/licenses/by/4.0/ https://creativecommons.org/licenses/by/4.0/legalcode Copyright (c) COLONOLNUTTY """<import_from_stmt>typing Union List Tuple Iterator<import_from_stmt>buffs.buff Buff<import_from_stmt>distributor.shared_messages IconInfoData<import_from_stmt>protocolbuffers.Localization_pb2 LocalizedString<import_from_stmt>server_commands.argument_helpers TunableInstanceParam OptionalTargetParam<import_from_stmt>sims.sim_info SimInfo<import_from_stmt>sims4.commands Command CommandType CheatOutput<import_from_stmt>sims4.resources Types<import_from_stmt>sims4communitylib.enums.buffs_enum CommonBuffId<import_from_stmt>sims4communitylib.enums.strings_enum CommonStringId<import_from_stmt>sims4communitylib.enums.types.component_types CommonComponentType<import_from_stmt>sims4communitylib.exceptions.common_exceptions_handler CommonExceptionHandler<import_from_stmt>sims4communitylib.logging.has_class_log HasClassLog<import_from_stmt>sims4communitylib.mod_support.mod_identity CommonModIdentity<import_from_stmt>sims4communitylib.modinfo ModInfo<import_from_stmt>sims4communitylib.notifications.common_basic_notification CommonBasicNotification<import_from_stmt>sims4communitylib.utils.common_component_utils CommonComponentUtils<import_from_stmt>sims4communitylib.utils.localization.common_localization_utils CommonLocalizationUtils<import_from_stmt>sims4communitylib.utils.sims.common_sim_name_utils CommonSimNameUtils<import_from_stmt>sims4communitylib.utils.sims.common_sim_utils CommonSimUtils<class_stmt>CommonBuffUtils(HasClassLog)<block_start>"""Utilities for manipulating Buffs on Sims. """<line_sep># noinspection PyMissingOrEmptyDocstring @classmethod<def_stmt>get_mod_identity cls<arrow>CommonModIdentity<block_start><return>ModInfo.get_identity()<block_end># noinspection PyMissingOrEmptyDocstring @classmethod<def_stmt>get_log_identifier cls<arrow>str<block_start><return>'common_buff_utils'<block_end>@staticmethod<def_stmt>has_fertility_boosting_buff sim_info:SimInfo<arrow>bool<block_start>"""has_fertility_boosting_buff(sim_info) Determine if any fertility boosting buffs are currently active on a sim. .. note:: Fertility Boosting Buffs: - Fertility Potion - Fertility Potion Masterwork - Fertility Potion Normal - Fertility Potion Outstanding - Massage Table Fertility Boost - Massage Table Fertility Boost Incense :param sim_info: The Sim to check. :type sim_info: SimInfo :return: True, if they have any fertility boosting buffs. False, if not. :rtype: bool """<line_sep>buff_ids=(CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_MASTERWORK CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_NORMAL CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_OUTSTANDING CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST_INCENSE)<line_sep><return>CommonBuffUtils.has_buff(sim_info *buff_ids)<block_end>@staticmethod<def_stmt>has_morning_person_buff sim_info:SimInfo<arrow>bool<block_start>"""has_morning_person_buff(sim_info) Determine if any Morning Person Trait buffs are currently active on a Sim. :param sim_info: The Sim to check. :type sim_info: SimInfo :return: True, if they have any morning person buffs. False, if not. 
:rtype: bool """<line_sep>buff_ids=(CommonBuffId.TRAIT_MORNING_PERSON CommonBuffId.TRAIT_MORNING_PERSON_ACTIVE CommonBuffId.TRAIT_MORNING_PERSON_CHECK_ACTIVE)<line_sep><return>CommonBuffUtils.has_buff(sim_info *buff_ids)<block_end>@staticmethod<def_stmt>has_night_owl_buff sim_info:SimInfo<arrow>bool<block_start>"""has_night_owl_buff(sim_info) Determine if any Night Owl Trait buffs are currently active on a sim. :param sim_info: The Sim to check. :type sim_info: SimInfo :return: True, if they have any night owl buffs. False, if not. :rtype: bool """<line_sep>buff_ids=(CommonBuffId.TRAIT_NIGHT_OWL CommonBuffId.TRAIT_NIGHT_OWL_ACTIVE CommonBuffId.TRAIT_NIGHT_OWL_CHECK_ACTIVE)<line_sep><return>CommonBuffUtils.has_buff(sim_info *buff_ids)<block_end>@staticmethod<def_stmt>has_buff sim_info:SimInfo *buffs:Union[int CommonBuffId Buff]<arrow>bool<block_start>"""has_buff(sim_info, *buffs) Determine if any of the specified buffs are currently active on a sim. :param sim_info: The sim being checked. :type sim_info: SimInfo :param buffs: The identifiers of Buffs. :type buffs: Union[int, CommonBuffId, Buff] :return: True, if the sim has any of the specified buffs. :rtype: int """<if_stmt>sim_info<is><none><block_start><raise>AssertionError('Argument sim_info was None')<block_end><if_stmt><not>CommonComponentUtils.has_component(sim_info CommonComponentType.BUFF)<block_start><return><false><block_end><if_stmt><not>buffs<block_start><return><false><block_end>buff_ids=[CommonBuffUtils.get_buff_id(buff)<for>buff buffs]<line_sep>sim_buff_ids=CommonBuffUtils.get_buff_ids(sim_info)<for_stmt>sim_buff_id sim_buff_ids<block_start><if_stmt>sim_buff_id<in>buff_ids<block_start><return><true><block_end><block_end><return><false><block_end>@staticmethod<def_stmt>get_buffs sim_info:SimInfo<arrow>List[Buff]<block_start>"""get_buffs(sim_info) Retrieve all buffs currently active on a Sim. :param sim_info: The Sim to retrieve the buffs of. :type sim_info: SimInfo :return: A collection of currently active buffs on the Sim. :rtype: Tuple[Buff] """<if_stmt>sim_info<is><none><block_start><raise>AssertionError('Argument sim_info was None')<block_end><if_stmt><not>CommonComponentUtils.has_component(sim_info CommonComponentType.BUFF)<block_start><return>list()<block_end><import_from_stmt>objects.components.buff_component BuffComponent<line_sep>buff_component:BuffComponent=CommonComponentUtils.get_component(sim_info CommonComponentType.BUFF)<line_sep>buffs=list()<for_stmt>buff buff_component<block_start><if_stmt>buff<is><none><or><not>isinstance(buff Buff)<block_start><continue><block_end>buffs.append(buff)<block_end><return>buffs<block_end>@staticmethod<def_stmt>get_buff_ids sim_info:SimInfo<arrow>List[int]<block_start>"""get_buff_ids(sim_info) Retrieve decimal identifiers for all Buffs of a sim. :param sim_info: The sim to checked. :type sim_info: SimInfo :return: A collection of Buff identifiers on a Sim. 
:rtype: List[int] """<if_stmt>sim_info<is><none><block_start><raise>AssertionError('Argument sim_info was None')<block_end><if_stmt><not>CommonComponentUtils.has_component(sim_info CommonComponentType.BUFF)<block_start><return>list()<block_end>buff_ids=list()<line_sep>sim_buffs=CommonBuffUtils.get_buffs(sim_info)<for_stmt>buff sim_buffs<block_start>buff_id=CommonBuffUtils.get_buff_id(buff)<if_stmt>buff_id<is><none><block_start><continue><block_end>buff_ids.append(buff_id)<block_end><return>buff_ids<block_end>@classmethod<def_stmt>add_buff cls sim_info:SimInfo *buffs:Union[int CommonBuffId] buff_reason:Union[int str LocalizedString CommonStringId]=<none><arrow>bool<block_start>"""add_buff(sim_info, *buffs, buff_reason=None) Add the specified buffs to a sim. :param sim_info: The sim to add the specified buffs to. :type sim_info: SimInfo :param buffs: An iterable of identifiers of buffs being added. :type buffs: Union[int, CommonBuffId, Buff] :param buff_reason: The text that will display when the player hovers over the buffs. What caused the buffs to be added. :type buff_reason: Union[int, str, LocalizedString, CommonStringId], optional :return: True, if all of the specified buffs were successfully added. False, if not. :rtype: bool """<if_stmt>sim_info<is><none><block_start><raise>AssertionError('Argument sim_info was None')<block_end><if_stmt><not>CommonComponentUtils.has_component(sim_info CommonComponentType.BUFF)<block_start>cls.get_log().format_with_message('Failed to add Buff to Sim. They did not have a Buff component!' buffs=buffs sim=sim_info buff_reason=buff_reason)<line_sep><return><false><block_end>localized_buff_reason=<none><if_stmt>buff_reason<is><not><none><block_start>localized_buff_reason=CommonLocalizationUtils.create_localized_string(buff_reason)<block_end>has_any=<false><line_sep>success=<true><for_stmt>buff_id buffs<block_start>buff=CommonBuffUtils.load_buff_by_id(buff_id)<if_stmt>buff<is><none><block_start>cls.get_log().format_with_message('No buff found using identifier.' buffs=buffs sim=sim_info buff_reason=buff_reason buff_id=buff_id)<line_sep><continue><block_end><if_stmt><not>sim_info.add_buff_from_op(buff buff_reason=localized_buff_reason)<block_start>cls.get_log().format_with_message('Failed to add buff for unknown reasons.' buff=buff sim=sim_info buff_reason=buff_reason)<line_sep>success=<false><block_end><else_stmt><block_start>cls.get_log().format_with_message('Successfully added buff.' buff=buff sim=sim_info buff_reason=buff_reason)<line_sep>has_any=<true><block_end><block_end>cls.get_log().format_with_message('Finished adding buffs to Sim.' buffs=buffs sim=sim_info buff_reason=buff_reason success=success has_any=has_any)<line_sep><return>success<and>has_any<block_end>@staticmethod<def_stmt>remove_buff sim_info:SimInfo *buffs:Union[int CommonBuffId Buff]<arrow>bool<block_start>"""remove_buff(sim_info, *buffs) Remove the specified buffs from a sim. :param sim_info: The sim to remove the specified buffs from. :type sim_info: SimInfo :param buffs: An iterable of identifiers of buffs being removed. :type buffs: Union[int, CommonBuffId, Buff] :return: True, if all of the specified buffs were successfully removed. False, if not. 
:rtype: bool """<if_stmt>sim_info<is><none><block_start><raise>AssertionError('Argument sim_info was None')<block_end><if_stmt><not>CommonComponentUtils.has_component(sim_info CommonComponentType.BUFF)<block_start><return><false><block_end>has_any=<false><line_sep>success=<true><for_stmt>buff buffs<block_start>buff=CommonBuffUtils.load_buff_by_id(buff)<if_stmt>buff<is><none><block_start><continue><block_end>sim_info.remove_buff_by_type(buff)<line_sep>has_any=<true><if_stmt>CommonBuffUtils.has_buff(sim_info buff)<block_start>success=<false><block_end><block_end><return>success<and>has_any<block_end>@staticmethod<def_stmt>get_buff_id buff_identifier:Union[int Buff]<arrow>Union[int <none>]<block_start>"""get_buff_id(buff_identifier) Retrieve the decimal identifier of a Buff. :param buff_identifier: The identifier or instance of a Buff. :type buff_identifier: Union[int, Buff] :return: The decimal identifier of the Buff or None if the Buff does not have an id. :rtype: Union[int, None] """<if_stmt>isinstance(buff_identifier int)<block_start><return>buff_identifier<block_end><return>getattr(buff_identifier 'guid64' <none>)<block_end>@staticmethod<def_stmt>get_buff_name buff:Buff<arrow>Union[str <none>]<block_start>"""get_buff_name(buff) Retrieve the Name of a Buff. :param buff: An instance of a Buff. :type buff: Buff :return: The name of a Buff or None if a problem occurs. :rtype: Union[str, None] """<if_stmt>buff<is><none><block_start><return><none><block_end># noinspection PyBroadException <try_stmt><block_start><return>buff.__class__.__name__<or>''<block_end><except_stmt><block_start><return>''<block_end><block_end>@staticmethod<def_stmt>get_buff_names buffs:Iterator[Buff]<arrow>Tuple[str]<block_start>"""get_buff_names(buffs) Retrieve the Names of a collection of Buffs. :param buffs: A collection of Buff instances. :type buffs: Iterator[Buff] :return: A collection of names for all specified Buffs. :rtype: Tuple[str] """<if_stmt>buffs<is><none><or><not>buffs<block_start><return>tuple()<block_end>names:List[str]=[]<for_stmt>buff buffs# noinspection PyBroadException <block_start><try_stmt><block_start>name=CommonBuffUtils.get_buff_name(buff)<if_stmt><not>name<block_start><continue><block_end><block_end><except_stmt><block_start><continue><block_end>names.append(name)<block_end><return>tuple(names)<block_end>@staticmethod<def_stmt>load_buff_by_id buff:Union[int CommonBuffId Buff]<arrow>Union[Buff <none>]<block_start>"""load_buff_by_id(buff) Load an instance of a Buff by its identifier. :param buff: The identifier of a Buff. :type buff: Union[int, CommonBuffId, Buff] :return: An instance of a Buff matching the decimal identifier or None if not found. 
:rtype: Union[Buff, None] """<if_stmt>isinstance(buff Buff)<block_start><return>buff<block_end># noinspection PyBroadException <try_stmt><block_start>buff:int=int(buff)<block_end><except_stmt><block_start>buff:Buff=buff<line_sep><return>buff<block_end><import_from_stmt>sims4.resources Types<import_from_stmt>sims4communitylib.utils.common_resource_utils CommonResourceUtils<line_sep><return>CommonResourceUtils.load_instance(Types.BUFF buff)<block_end><block_end>@Command('s4clib.add_buff' command_type=CommandType.Live)<def_stmt>_common_add_buff buff:TunableInstanceParam(Types.BUFF) opt_sim:OptionalTargetParam=<none> buff_reason:str=<none> _connection:int=<none><block_start><import_from_stmt>server_commands.argument_helpers get_optional_target<line_sep>output=CheatOutput(_connection)<if_stmt>buff<is><none><block_start>output('Failed, Buff not specified or Buff did not exist! s4clib.add_buff <buff_name_or_id> [opt_sim=None]')<line_sep><return><block_end>sim_info=CommonSimUtils.get_sim_info(get_optional_target(opt_sim _connection))<if_stmt>sim_info<is><none><block_start>output('Failed, no Sim was specified or the specified Sim was not found!')<line_sep><return><block_end>sim_name=CommonSimNameUtils.get_full_name(sim_info)<line_sep>output('Adding buff {} to Sim {}'.format(str(buff) sim_name))<try_stmt><block_start><if_stmt>CommonBuffUtils.add_buff(sim_info buff buff_reason=buff_reason)<block_start>output('Successfully added buff.')<block_end><else_stmt><block_start>output('Failed to add buff.')<block_end><block_end><except_stmt>Exception<as>ex<block_start>CommonExceptionHandler.log_exception(ModInfo.get_identity() 'Failed to add buff {} to Sim {}.'.format(str(buff) sim_name) exception=ex)<line_sep>output('Failed to add buff {} to Sim {}. {}'.format(str(buff) sim_name str(ex)))<block_end><block_end>@Command('s4clib.remove_buff' command_type=CommandType.Live)<def_stmt>_common_remove_buff buff:TunableInstanceParam(Types.BUFF) opt_sim:OptionalTargetParam=<none> _connection:int=<none><block_start><import_from_stmt>server_commands.argument_helpers get_optional_target<line_sep>output=CheatOutput(_connection)<if_stmt>buff<is><none><block_start>output('Failed, Buff not specified or Buff did not exist! s4clib.remove_buff <buff_name_or_id> [opt_sim=None]')<line_sep><return><block_end>sim_info=CommonSimUtils.get_sim_info(get_optional_target(opt_sim _connection))<if_stmt>sim_info<is><none><block_start>output('Failed, no Sim was specified or the specified Sim was not found!')<line_sep><return><block_end>sim_name=CommonSimNameUtils.get_full_name(sim_info)<line_sep>output('Removing buff {} from Sim {}'.format(str(buff) sim_name))<try_stmt><block_start><if_stmt>CommonBuffUtils.remove_buff(sim_info buff)<block_start>output('Successfully removed buff.')<block_end><else_stmt><block_start>output('Failed to remove buff.')<block_end><block_end><except_stmt>Exception<as>ex<block_start>CommonExceptionHandler.log_exception(ModInfo.get_identity() 'Failed to remove buff {} from Sim {}.'.format(str(buff) sim_name) exception=ex)<line_sep>output('Failed to remove buff {} from Sim {}. 
{}'.format(str(buff) sim_name str(ex)))<block_end><block_end>@Command('s4clib.show_active_buffs' command_type=CommandType.Live)<def_stmt>_common_show_active_buffs opt_sim:OptionalTargetParam=<none> _connection:int=<none><block_start><import_from_stmt>server_commands.argument_helpers get_optional_target<line_sep>output=CheatOutput(_connection)<line_sep>sim=get_optional_target(opt_sim _connection)<line_sep>sim_info=CommonSimUtils.get_sim_info(sim)<if_stmt>sim_info<is><none><block_start>output('Failed, no Sim was specified or the specified Sim was not found!')<line_sep><return><block_end>sim_name=CommonSimNameUtils.get_full_name(sim_info)<line_sep>output('Showing active buffs of Sim {}'.format(sim_name))<try_stmt><block_start>sim_buff_strings:List[str]=list()<for_stmt>buff CommonBuffUtils.get_buffs(sim_info)<block_start>buff_name=CommonBuffUtils.get_buff_name(buff)<line_sep>buff_id=CommonBuffUtils.get_buff_id(buff)<line_sep>sim_buff_strings.append('{} ({})'.format(buff_name buff_id))<block_end>sim_buff_strings=sorted(sim_buff_strings key=<lambda>x:x)<line_sep>sim_buffs=', '.join(sim_buff_strings)<line_sep>text=''<line_sep>text<augadd>'Active Buffs:\n{}\n\n'.format(sim_buffs)<line_sep>CommonBasicNotification(CommonLocalizationUtils.create_localized_string('{} Active Buffs ({})'.format(sim_name CommonSimUtils.get_sim_id(sim_info))) CommonLocalizationUtils.create_localized_string(text)).show(icon=IconInfoData(obj_instance=CommonSimUtils.get_sim_instance(sim_info)))<block_end><except_stmt>Exception<as>ex<block_start>CommonExceptionHandler.log_exception(ModInfo.get_identity() 'Failed to show active buffs of Sim {}.'.format(sim_name) exception=ex)<line_sep>output('Failed to show active buffs of Sim {}. {}'.format(sim_name str(ex)))<block_end><block_end>
<import_stmt>unittest<import_from_stmt>tests.recipes.recipe_lib_test BaseTestForMakeRecipe<class_stmt>TestLibffiRecipe(BaseTestForMakeRecipe unittest.TestCase)<block_start>""" An unittest for recipe :mod:`~pythonforandroid.recipes.libffi` """<line_sep>recipe_name="libffi"<line_sep>sh_command_calls=["./autogen.sh" "autoreconf" "./configure"]<def_stmt>test_get_include_dirs self<block_start>list_of_includes=self.recipe.get_include_dirs(self.arch)<line_sep>self.assertIsInstance(list_of_includes list)<line_sep>self.assertTrue(list_of_includes[0].endswith("include"))<block_end><block_end>