content
stringlengths
0
1.55M
# Copyright 2015, Pinterest, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of job metadata included in job tokens. Job object describes job inputs, outputs, and all information required to execute a job (e.g., a command line of a shell job or class name of a data job)."""<import_stmt>abc<import_from_stmt>pinball.config.utils get_log<import_from_stmt>pinball.persistence.token_data TokenData<import_from_stmt>pinball.workflow.name Name<line_sep>__author__='<NAME>'<line_sep>__copyright__='Copyright 2015, Pinterest, Inc.'<line_sep>__credits__=[__author__]<line_sep>__license__='Apache'<line_sep>__version__='2.0'<line_sep>LOG=get_log('pinball.workflow.worker')<class_stmt>Job(TokenData)<block_start>"""Parent class for specialized job types."""<line_sep>__metaclass__=abc.ABCMeta<line_sep>IS_CONDITION=<false><def_stmt>__init__ self name=<none> inputs=<none> outputs=<none> emails=<none> max_attempts=1 retry_delay_sec=0 warn_timeout_sec=<none> abort_timeout_sec=<none><block_start>self.name=name<line_sep>self.inputs=inputs<if>inputs<is><not><none><else>[]<line_sep>self.outputs=outputs<if>outputs<is><not><none><else>[]<line_sep>self.emails=emails<if>emails<is><not><none><else>[]<line_sep>self.max_attempts=max_attempts<line_sep>self.retry_delay_sec=retry_delay_sec<line_sep>self.warn_timeout_sec=warn_timeout_sec<line_sep>self.abort_timeout_sec=abort_timeout_sec<assert_stmt>self.max_attempts<g>0<line_sep>self.disabled=<false><line_sep>self.history=[]<line_sep>self.events=[]<block_end>@property<def_stmt>_COMPATIBILITY_ATTRIBUTES self<block_start><return>{'emails':[] 'disabled':<false> 'max_attempts':1 'events':[] 'warn_timeout_sec':<none> 'abort_timeout_sec':<none> 'retry_delay_sec':0 }<block_end>@abc.abstractmethod<def_stmt>info self<block_start><return><block_end><def_stmt>retry self<block_start>"""Decide if the job should be retried. Returns: True if the job should be retried, otherwise False. """<if_stmt><not>self.history<block_start><return><false><block_end>last_record=self.history[-1]<line_sep>current_instance=last_record.instance<assert_stmt>last_record.exit_code<ne>0<line_sep>failed_runs=0<for_stmt>record reversed(self.history)<block_start><if_stmt>record.instance<ne>current_instance<block_start><break><block_end><if_stmt>record.exit_code<ne>0# There may have been successful runs in the past if we are # re-doing an execution. <block_start>failed_runs<augadd>1<block_end><if_stmt>failed_runs<ge>self.max_attempts<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>truncate_history self<block_start><if_stmt>self.IS_CONDITION<and>len(self.history)<g>self.max_attempts<block_start>self.history=self.history[-self.max_attempts:]<block_end><block_end><def_stmt>reload self new_job<block_start>"""Reload job config from a new config. Configuration elements defining the workflow topology (inputs and outputs), execution history, or run-time values (events) are not modified. Args: new_job: The new job configuration to update from. 
"""<assert_stmt>self.__class__<eq>new_job.__class__<line_sep>self.emails=new_job.emails<line_sep>self.max_attempts=new_job.max_attempts<block_end><block_end><class_stmt>ShellJob(Job)<block_start>"""Shell job runs a command when executed."""<def_stmt>__init__ self name=<none> inputs=<none> outputs=<none> emails=<none> max_attempts=1 retry_delay_sec=0 warn_timeout_sec=<none> abort_timeout_sec=<none> command=<none> cleanup_template=<none><block_start>super(ShellJob self).__init__(name inputs outputs emails max_attempts retry_delay_sec warn_timeout_sec abort_timeout_sec)<line_sep>self.command=command<line_sep>self.cleanup_template=cleanup_template<block_end>@property<def_stmt>_COMPATIBILITY_ATTRIBUTES self<block_start>result=super(ShellJob self)._COMPATIBILITY_ATTRIBUTES<line_sep>result['cleanup_template']=<none><line_sep><return>result<block_end><def_stmt>__str__ self<block_start><return>('ShellJob(name=%s, inputs=%s, outputs=%s, emails=%s, '<concat>'max_attempts=%d, retry_delay_sec=%d, warn_timeout_sec=%s, '<concat>'abort_timeout_sec=%s, disabled=%s, command=%s, '<concat>'cleanup_template=%s, events=%s, history=%s)'%(self.name self.inputs self.outputs self.emails self.max_attempts self.retry_delay_sec self.warn_timeout_sec self.abort_timeout_sec self.disabled self.command self.cleanup_template self.events self.history))<block_end><def_stmt>__repr__ self<block_start><return>self.__str__()<block_end><def_stmt>info self<block_start><return>'command=%s'%self.command<block_end><def_stmt>reload self new_job<block_start>super(ShellJob self).reload(new_job)<line_sep>self.command=new_job.command<line_sep>self.cleanup_template=new_job.cleanup_template<block_end>@staticmethod<def_stmt>_get_command_attributes template<block_start>"""Extract attributes from a command string template. E.g., for template 'ls %(dir1)s %(dir2)s' the result is ['dir1', 'dir2']. Args: template: The template to extract attributes from. Returns: The list of named attributes extracted from the template. """<class_stmt>Extractor<block_start>"""Helper class extracting attributes from a string template. """<def_stmt>__init__ self<block_start>self.attributes=set()<block_end><def_stmt>__getitem__ self attribute<block_start>self.attributes.add(attribute)<line_sep><return>0<block_end><block_end>extractor=Extractor()<try_stmt><block_start>template%extractor<block_end><except_stmt>ValueError<block_start>LOG.exception('failed to customize template %s' template)<block_end><return>list(extractor.attributes)<block_end><def_stmt>_consolidate_event_attributes self<block_start>"""Consolidate attributes in triggering events. Iterate over events in the most recent execution record and combine them into one dictionary mapping attribute names to their values. If multiple events contain the same attribute, the return value will be a comma separated string of values from all those events. Returns: Dictionary of consolidated event attribute key-values. """<assert_stmt>self.history<line_sep>last_execution_record=self.history[-1]<line_sep>result={}<for_stmt>event last_execution_record.events<block_start><for_stmt>key,value event.attributes.items()<block_start>new_value=result.get(key)<if_stmt>new_value<block_start>new_value<augadd>',%s'%value<block_end><else_stmt><block_start>new_value=value<block_end>result[key]=new_value<block_end><block_end><return>result<block_end><def_stmt>customize_command self<block_start>"""Specialize the command with attribute values extracted from events. 
Returns: Job command with parameter values replaced by attributes extracted from the triggering events. If a parameter is not present in the event attribute set, it is replaced with an empty string. """<line_sep>attributes={}<line_sep>command_attributes=ShellJob._get_command_attributes(self.command)<for_stmt>attribute command_attributes<block_start>attributes[attribute]=''<block_end>event_attributes=self._consolidate_event_attributes()<line_sep>attributes.update(event_attributes)<try_stmt><block_start><return>self.command%attributes<block_end><except_stmt>ValueError<block_start>LOG.exception('failed to customize command %s' self.command)<line_sep><return>self.command<block_end><block_end><block_end><class_stmt>ShellConditionJob(ShellJob)<block_start>IS_CONDITION=<true><def_stmt>__init__ self name=<none> outputs=<none> emails=<none> max_attempts=10 retry_delay_sec=5<times>60 warn_timeout_sec=<none> abort_timeout_sec=<none> command=<none> cleanup_template=<none><block_start>super(ShellConditionJob self).__init__(name=name inputs=[Name.WORKFLOW_START_INPUT] outputs=outputs emails=emails max_attempts=max_attempts retry_delay_sec=retry_delay_sec warn_timeout_sec=warn_timeout_sec abort_timeout_sec=abort_timeout_sec command=command cleanup_template=cleanup_template)<block_end><block_end>
<import_from_stmt>.xglob *<line_sep>
<import_from_stmt>ctypes Structure Union<import_from_stmt>ctypes.wintypes DWORD LONG LPWSTR ULARGE_INTEGER VARIANT_BOOL WORD <import_from_stmt>comtypes GUID<import_from_stmt>comtypes.automation VARTYPE VT_BOOL VT_CLSID VT_LPWSTR VT_UI4<import_from_stmt>future.utils python_2_unicode_compatible<class_stmt>PROPVARIANT_UNION(Union)<block_start>_fields_=[('lVal' LONG) ('uhVal' ULARGE_INTEGER) ('boolVal' VARIANT_BOOL) ('pwszVal' LPWSTR) ('puuid' GUID) ]<block_end><class_stmt>PROPVARIANT(Structure)<block_start>_fields_=[('vt' VARTYPE) ('reserved1' WORD) ('reserved2' WORD) ('reserved3' WORD) ('union' PROPVARIANT_UNION) ]<def_stmt>GetValue self<block_start>vt=self.vt<if_stmt>vt<eq>VT_BOOL<block_start><return>self.union.boolVal<ne>0<block_end><elif_stmt>vt<eq>VT_LPWSTR# return Marshal.PtrToStringUni(union.pwszVal) <block_start><return>self.union.pwszVal<block_end><elif_stmt>vt<eq>VT_UI4<block_start><return>self.union.lVal<block_end><elif_stmt>vt<eq>VT_CLSID# TODO # return (Guid)Marshal.PtrToStructure(union.puuid, typeof(Guid)) <block_start><return><block_end><else_stmt><block_start><return>"%s:?"%(vt)<block_end><block_end><block_end>@python_2_unicode_compatible<class_stmt>PROPERTYKEY(Structure)<block_start>_fields_=[('fmtid' GUID) ('pid' DWORD) ]<def_stmt>__str__ self<block_start><return>"%s %s"%(self.fmtid self.pid)<block_end><block_end>
<import_from_future_stmt> absolute_import<import_stmt>torch<import_from_stmt>allennlp.common Params<import_from_stmt>allennlp.data.vocabulary Vocabulary<import_from_stmt>allennlp.modules.token_embedders.embedding Embedding<import_from_stmt>allennlp.modules.seq2vec_encoders.seq2vec_encoder Seq2VecEncoder<import_from_stmt>allennlp.modules.time_distributed TimeDistributed<import_from_stmt>allennlp.modules.token_embedders.token_embedder TokenEmbedder<class_stmt>TokenCharactersEncoder(TokenEmbedder)<block_start>u""" A ``TokenCharactersEncoder`` takes the output of a :class:`~allennlp.data.token_indexers.TokenCharactersIndexer`, which is a tensor of shape (batch_size, num_tokens, num_characters), embeds the characters, runs a token-level encoder, and returns the result, which is a tensor of shape (batch_size, num_tokens, encoding_dim). We also optionally apply dropout after the token-level encoder. We take the embedding and encoding modules as input, so this class is itself quite simple. """<def_stmt>__init__ self embedding encoder dropout=0.0<block_start>super(TokenCharactersEncoder self).__init__()<line_sep>self._embedding=TimeDistributed(embedding)<line_sep>self._encoder=TimeDistributed(encoder)<if_stmt>dropout<g>0<block_start>self._dropout=torch.nn.Dropout(p=dropout)<block_end><else_stmt><block_start>self._dropout=<lambda>x:x<block_end><block_end><def_stmt>get_output_dim self<block_start><return>self._encoder._module.get_output_dim()<block_end># pylint: disable=protected-access <def_stmt>forward self token_characters# pylint: disable=arguments-differ <block_start>mask=(token_characters<ne>0).long()<line_sep><return>self._dropout(self._encoder(self._embedding(token_characters) mask))<block_end># The setdefault requires a custom from_params @classmethod<def_stmt>from_params cls vocab params# type: ignore # pylint: disable=arguments-differ <block_start>embedding_params=params.pop(u"embedding")<line_sep># Embedding.from_params() uses "tokens" as the default namespace, but we need to change # that to be "token_characters" by default. embedding_params.setdefault(u"vocab_namespace" u"token_characters")<line_sep>embedding=Embedding.from_params(vocab embedding_params)<line_sep>encoder_params=params.pop(u"encoder")<line_sep>encoder=Seq2VecEncoder.from_params(encoder_params)<line_sep>dropout=params.pop_float(u"dropout" 0.0)<line_sep>params.assert_empty(cls.__name__)<line_sep><return>cls(embedding encoder dropout)<block_end><block_end>TokenCharactersEncoder=TokenEmbedder.register(u"character_encoding")(TokenCharactersEncoder)<line_sep>
# Distributed under the MIT License. # See LICENSE.txt for details. <import_stmt>numpy<as>np<def_stmt>raise_or_lower_first_index tensor metric<block_start><return>np.einsum("ij,ikl" metric tensor)<block_end><def_stmt>trace_last_indices tensor metric<block_start><return>np.einsum("ij,kij" metric tensor)<block_end>
""" Official evaluation script for SQuAD version 2.0. Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 """<import_stmt>collections<import_stmt>json<import_stmt>re<import_stmt>string<def_stmt>get_raw_scores qa_ids actuals preds<block_start>""" Computes exact match and F1 scores without applying any unanswerable probability threshold. Args: qa_ids (list): Unique ids corresponding to the answers in `actuals`. actuals (list): List of ground truth answers. preds (dict): Dictionary with qa_id as keys and predicted answers as values. Returns: tuple: (exact_match, f1) """<line_sep># Helper functions <def_stmt>_normalize_answer s<block_start>"""Lower text and remove punctuation, articles and extra whitespace."""<def_stmt>remove_articles text<block_start>regex=re.compile(r"\b(a|an|the)\b" re.UNICODE)<line_sep><return>re.sub(regex " " text)<block_end><def_stmt>white_space_fix text<block_start><return>" ".join(text.split())<block_end><def_stmt>remove_punc text<block_start>exclude=set(string.punctuation)<line_sep><return>"".join(ch<for>ch text<if>ch<not><in>exclude)<block_end><def_stmt>lower text<block_start><return>text.lower()<block_end><return>white_space_fix(remove_articles(remove_punc(lower(s))))<block_end><def_stmt>_get_tokens s<block_start>"""Normalizes text and returns white-space tokenized tokens. """<if_stmt><not>s<block_start><return>[]<block_end><return>_normalize_answer(s).split()<block_end><def_stmt>_compute_exact a_gold a_pred<block_start>"""Compute the exact match between two sentences after normalization. Returns: int: 1 if two sentences match exactly after normalization, 0 otherwise. """<line_sep><return>int(_normalize_answer(a_gold)<eq>_normalize_answer(a_pred))<block_end><def_stmt>_compute_f1 a_gold a_pred<block_start>""" Compute F1 score based on token overlapping between two sentences. """<line_sep>gold_toks=_get_tokens(a_gold)<line_sep>pred_toks=_get_tokens(a_pred)<line_sep>common=collections.Counter(gold_toks)&collections.Counter(pred_toks)<line_sep>num_same=sum(common.values())<if_stmt>len(gold_toks)<eq>0<or>len(pred_toks)<eq>0# If either is no-answer, then F1 is 1 if they agree, 0 otherwise <block_start><return>int(gold_toks<eq>pred_toks)<block_end><if_stmt>num_same<eq>0<block_start><return>0<block_end>precision=1.0<times>num_same/len(pred_toks)<line_sep>recall=1.0<times>num_same/len(gold_toks)<line_sep>f1=(2<times>precision<times>recall)/(precision+recall)<line_sep><return>f1<block_end># Helper functions end exact_scores={}<line_sep>f1_scores={}<for_stmt>qid,gold_answers zip(qa_ids actuals)<block_start><if_stmt><not>gold_answers# For unanswerable questions, only correct answer is empty string <block_start>gold_answers=[""]<block_end><if_stmt>qid<not><in>preds<block_start>print("Missing prediction for %s"%qid)<line_sep><continue><block_end>a_pred=preds[qid]<line_sep># Take max over all gold answers <if_stmt>isinstance(gold_answers str)<block_start>gold_answers=[gold_answers]<block_end>exact_scores[qid]=max(_compute_exact(a a_pred)<for>a gold_answers)<line_sep>f1_scores[qid]=max(_compute_f1(a a_pred)<for>a gold_answers)<block_end><return>exact_scores f1_scores<block_end><def_stmt>find_best_thresh preds scores na_probs qid_to_has_ans unanswerable_exists=<false><block_start>""" Find the best threshold to determine a question is impossible to answer. Args: preds (dict): Dictionary with qa_id as keys and predicted answers as values. scores (dict): Dictionary with qa_id as keys and raw evaluation scores (exact_match or f1) as values. 
na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values. qid_to_has_ans (dict): Dictionary with qa_id as keys boolean values indicating if the question has answer as values. unanswerable_exists (bool, optional): Whether there is unanswerable questions in the data. Defaults to False. Returns: tuple: score after applying best threshold, best threshold, (score for answerable questions after applying best threshold, if unanswerable_exists=True) """<line_sep>num_no_ans=sum(1<for>k qid_to_has_ans<if><not>qid_to_has_ans[k])<line_sep># If na_prob > threshold, the question is considered as unanswerable by the prediction. # Initially, the threshold is 0. All questions are considered as unanswerable by the # predictions. So cur_score is the number of actual unanswerable questions (i.e. correctly # predicted as unanswerable in the data. cur_score=num_no_ans<line_sep>best_score=cur_score<line_sep>best_thresh=0.0<line_sep># Sorted in ascending order qid_list=sorted(na_probs key=<lambda>k:na_probs[k])<for_stmt>i,qid enumerate(qid_list)# When using the cur_na_prob as threshold, all predictions with na_prob > na_prob_cur are # considered as unanswerable. Current question is considered answerable. <block_start><if_stmt>qid<not><in>scores<block_start><continue><block_end><if_stmt>qid_to_has_ans[qid]# Current question has ground truth answer, the prediction is correct. The raw score # is added to cur_score <block_start>diff=scores[qid]<block_end><else_stmt># Current question doesn't have ground truth answer. <block_start><if_stmt>preds[qid]# Prediction is not empty, incorrect. cur_score -= 1 <block_start>diff=-1<block_end><else_stmt># Prediction is empty, correct, the original score 1 from num_no_ans is preserved. <block_start>diff=0<block_end><block_end>cur_score<augadd>diff<if_stmt>cur_score<g>best_score# When cur_score > best_score, the threshold can increase so that more questions are # considered as answerable and fewer questions are considered as unanswerable. # Imagine a PDF with two humps with some overlapping, the x axis is the na_prob. The # hump on the left is answerable questions and the hump on the right is unanswerable # questions. # At some point, the number of actual answerable questions decreases, and we got more # penalty from considering unanswerable questions as answerable than the score added # from actual answerable questions, we will not change the threshold anymore and the # optimal threshold is found. <block_start>best_score=cur_score<line_sep>best_thresh=na_probs[qid]<block_end><block_end><if_stmt><not>unanswerable_exists<block_start><return>100.0<times>best_score/len(scores) best_thresh<block_end><else_stmt><block_start>has_ans_score,has_ans_cnt=0 0<for_stmt>qid qid_list<block_start><if_stmt><not>qid_to_has_ans[qid]<block_start><continue><block_end>has_ans_cnt<augadd>1<if_stmt>qid<not><in>scores<block_start><continue><block_end>has_ans_score<augadd>scores[qid]<block_end><return>100.0<times>best_score/len(scores) best_thresh 1.0<times>has_ans_score/has_ans_cnt<block_end><block_end><def_stmt>find_all_best_thresh main_eval preds exact_raw f1_raw na_probs qid_to_has_ans unanswerable_exists=<false><block_start>""" Update raw evaluation scores by finding the best threshold to determine a question is impossible to answer. Args: main_eval (dict): Dictionary with raw evaluation scores without apply any threshold. preds (dict): Dictionary with qa_id as keys and predicted answers as values. 
exact_raw (dict): Dictionary with qa_id as keys and raw exact_match scores as values. f1_raw (dict): Dictionary with qa_id as keys and raw f1 scores as values. na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values. qid_to_has_ans (dict): Dictionary with qa_id as keys boolean values indicating if the question has answer as values. unanswerable_exists (bool, optional): Whether there is unanswerable questions in the data. Defaults to False. Returns: dict: Updated `main_eval` with scores after applying best threshold and best threshold for each score. """<line_sep>all_exact=find_best_thresh(preds exact_raw na_probs qid_to_has_ans unanswerable_exists)<line_sep>all_f1=find_best_thresh(preds f1_raw na_probs qid_to_has_ans unanswerable_exists)<line_sep>main_eval["best_exact"]=all_exact[0]<line_sep>main_eval["best_exact_thresh"]=all_exact[1]<line_sep>main_eval["best_f1"]=all_f1[0]<line_sep>main_eval["best_f1_thresh"]=all_f1[1]<if_stmt>unanswerable_exists<block_start>main_eval["has_ans_exact"]=all_exact[2]<line_sep>main_eval["has_ans_f1"]=all_f1[2]<block_end><block_end><def_stmt>evaluate_qa actual_dataset preds na_probs=<none> na_prob_thresh=0 unanswerable_exists=<false> out_file=<none><block_start>""" Evaluate question answering prediction results against ground truth answers. Args: Evaluates question answering model performance. Args: actual_dataset (:class:`utils_nlp.dataset.pytorch.QADataset`): Input question answering dataset with ground truth answers. preds (dict): The key of the dictionary is the qa_id in the original :class:`utils_nlp.dataset.pytorch.QADataset`. The values of the dictionary are the predicted answer texts in string type. na_probs (dict, optional): Dictionary of qa_id and unanswerable probability pairs. If None, unanswerable probabilities are all set to zero. Defaults to None. na_prob_thresh (float, optional): Probability threshold to predict a question to be unanswerable. For an unanswerable question, if `na_probs` > `na_prob_thresh`, the prediction is considered as correct. Otherwise, the prediction is considered as incorrect. Defaults to 0. out_file (str, optional): Path of the file to save the evaluation results to. Defaults to None. Returns: dict: A dictionary with exact_match and f1 values. 
"""<line_sep># Helper functions <def_stmt>_apply_no_ans_threshold scores na_probs qid_to_has_ans na_prob_thresh<block_start>"""Update the input scores by applying unanswerable probability threshold."""<line_sep>new_scores={}<for_stmt>qid,s scores.items()<block_start>pred_na=na_probs[qid]<g>na_prob_thresh<if_stmt>pred_na<block_start>new_scores[qid]=float(<not>qid_to_has_ans[qid])<block_end><else_stmt><block_start>new_scores[qid]=s<block_end><block_end><return>new_scores<block_end><def_stmt>_make_eval_dict exact_scores f1_scores qid_list=<none><block_start>"""Create a dictionary of evaluation results."""<if_stmt><not>qid_list<block_start>total=len(exact_scores)<line_sep><return>collections.OrderedDict([("exact" 100.0<times>sum(exact_scores.values())/total) ("f1" 100.0<times>sum(f1_scores.values())/total) ("total" total) ])<block_end><else_stmt><block_start>total=len(qid_list)<line_sep><return>collections.OrderedDict([("exact" 100.0<times>sum(exact_scores[k]<for>k qid_list)/total) ("f1" 100.0<times>sum(f1_scores[k]<for>k qid_list)/total) ("total" total) ])<block_end><block_end><def_stmt>_merge_eval main_eval new_eval prefix<block_start>"""Merge multiple evaluation result dictionaries."""<for_stmt>k new_eval<block_start>main_eval["%s_%s"%(prefix k)]=new_eval[k]<block_end><block_end># Helper functions end <if_stmt>na_probs<is><none><block_start>na_probs_available=<false><line_sep>na_probs={k:0.0<for>k preds}<block_end><else_stmt><block_start>na_probs_available=<true><block_end>qa_ids=[item.qa_id<for>item actual_dataset]<line_sep>actuals=[item.answer_text<for>item actual_dataset]<line_sep>qid_to_has_ans={qa_id:bool(ans)<for>(qa_id ans) zip(qa_ids actuals)}<line_sep>has_ans_qids=[k<for>k,v qid_to_has_ans.items()<if>v]<line_sep>no_ans_qids=[k<for>k,v qid_to_has_ans.items()<if><not>v]<line_sep>exact_raw,f1_raw=get_raw_scores(qa_ids actuals preds)<line_sep>exact_thresh=_apply_no_ans_threshold(exact_raw na_probs qid_to_has_ans na_prob_thresh)<line_sep>f1_thresh=_apply_no_ans_threshold(f1_raw na_probs qid_to_has_ans na_prob_thresh)<line_sep>out_eval=_make_eval_dict(exact_thresh f1_thresh)<if_stmt>has_ans_qids<block_start>has_ans_eval=_make_eval_dict(exact_thresh f1_thresh qid_list=has_ans_qids)<line_sep>_merge_eval(out_eval has_ans_eval "HasAns")<block_end><if_stmt>no_ans_qids<block_start>no_ans_eval=_make_eval_dict(exact_thresh f1_thresh qid_list=no_ans_qids)<line_sep>_merge_eval(out_eval no_ans_eval "NoAns")<block_end><if_stmt>na_probs_available<block_start>find_all_best_thresh(out_eval preds exact_raw f1_raw na_probs qid_to_has_ans unanswerable_exists)<block_end><if_stmt>out_file<block_start><with_stmt>open(out_file "w")<as>f<block_start>json.dump(out_eval f)<block_end><block_end><else_stmt><block_start>print(json.dumps(out_eval indent=2))<block_end><return>out_eval<block_end>
<import_stmt>panel<as>pn<line_sep>text=r""" ```math f(x) = \int_{-\infty}^\infty \hat f(\xi)\,e^{2 \pi i \xi x} \,d\xi ``` """<line_sep>app=pn.Column(pn.pane.Markdown(text))<line_sep>app.servable()<line_sep>
<import_stmt>logging<import_from_stmt>functools lru_cache<import_from_stmt>urllib.parse urlencode quote_plus<import_from_stmt>boto_utils fetch_job_manifest paginate<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>utils remove_none retry_wrapper<line_sep>logger=logging.getLogger(__name__)<def_stmt>save s3 client buf bucket key metadata source_version=<none><block_start>""" Save a buffer to S3, preserving any existing properties on the object """<line_sep># Get Object Settings request_payer_args,_=get_requester_payment(client bucket)<line_sep>object_info_args,_=get_object_info(client bucket key source_version)<line_sep>tagging_args,_=get_object_tags(client bucket key source_version)<line_sep>acl_args,acl_resp=get_object_acl(client bucket key source_version)<line_sep>extra_args={**request_payer_args **object_info_args **tagging_args **acl_args **{"Metadata":metadata} }<line_sep>logger.info("Object settings: %s" extra_args)<line_sep># Write Object Back to S3 logger.info("Saving updated object to s3://%s/%s" bucket key)<line_sep>contents=buf.read()<with_stmt>s3.open("s3://{}/{}".format(bucket key) "wb" **extra_args)<as>f<block_start>f.write(contents)<block_end>s3.invalidate_cache()# TODO: remove once https://github.com/dask/s3fs/issues/294 is resolved new_version_id=f.version_id<line_sep>logger.info("Object uploaded to S3")<line_sep># GrantWrite cannot be set whilst uploading therefore ACLs need to be restored separately write_grantees=",".join(get_grantees(acl_resp "WRITE"))<if_stmt>write_grantees<block_start>logger.info("WRITE grant found. Restoring additional grantees for object")<line_sep>client.put_object_acl(Bucket=bucket Key=key VersionId=new_version_id **{**request_payer_args **acl_args "GrantWrite":write_grantees })<block_end>logger.info("Processing of file s3://%s/%s complete" bucket key)<line_sep><return>new_version_id<block_end>@lru_cache()<def_stmt>get_requester_payment client bucket<block_start>""" Generates a dict containing the request payer args supported when calling S3. GetBucketRequestPayment call will be cached :returns tuple containing the info formatted for ExtraArgs and the raw response """<line_sep>request_payer=client.get_bucket_request_payment(Bucket=bucket)<line_sep><return>(remove_none({"RequestPayer":"requester"<if>request_payer["Payer"]<eq>"Requester"<else><none> }) request_payer )<block_end>@lru_cache()<def_stmt>get_object_info client bucket key version_id=<none><block_start>""" Generates a dict containing the non-ACL/Tagging args supported when uploading to S3. 
HeadObject call will be cached :returns tuple containing the info formatted for ExtraArgs and the raw response """<line_sep>kwargs={"Bucket":bucket "Key":key **get_requester_payment(client bucket)[0]}<if_stmt>version_id<block_start>kwargs["VersionId"]=version_id<block_end>object_info=client.head_object(**kwargs)<line_sep><return>(remove_none({"CacheControl":object_info.get("CacheControl") "ContentDisposition":object_info.get("ContentDisposition") "ContentEncoding":object_info.get("ContentEncoding") "ContentLanguage":object_info.get("ContentLanguage") "ContentType":object_info.get("ContentType") "Expires":object_info.get("Expires") "Metadata":object_info.get("Metadata") "ServerSideEncryption":object_info.get("ServerSideEncryption") "StorageClass":object_info.get("StorageClass") "SSECustomerAlgorithm":object_info.get("SSECustomerAlgorithm") "SSEKMSKeyId":object_info.get("SSEKMSKeyId") "WebsiteRedirectLocation":object_info.get("WebsiteRedirectLocation") }) object_info )<block_end>@lru_cache()<def_stmt>get_object_tags client bucket key version_id=<none><block_start>""" Generates a dict containing the Tagging args supported when uploading to S3 GetObjectTagging call will be cached :returns tuple containing tagging formatted for ExtraArgs and the raw response """<line_sep>kwargs={"Bucket":bucket "Key":key}<if_stmt>version_id<block_start>kwargs["VersionId"]=version_id<block_end>tagging=client.get_object_tagging(**kwargs)<line_sep><return>(remove_none({"Tagging":urlencode({tag["Key"]:tag["Value"]<for>tag tagging["TagSet"]} quote_via=quote_plus )}) tagging )<block_end>@lru_cache()<def_stmt>get_object_acl client bucket key version_id=<none><block_start>""" Generates a dict containing the ACL args supported when uploading to S3 GetObjectAcl call will be cached :returns tuple containing ACL formatted for ExtraArgs and the raw response """<line_sep>kwargs={"Bucket":bucket "Key":key **get_requester_payment(client bucket)[0]}<if_stmt>version_id<block_start>kwargs["VersionId"]=version_id<block_end>acl=client.get_object_acl(**kwargs)<line_sep>existing_owner={"id={}".format(acl["Owner"]["ID"])}<line_sep><return>(remove_none({"GrantFullControl":",".join(existing_owner|get_grantees(acl "FULL_CONTROL")) "GrantRead":",".join(get_grantees(acl "READ")) "GrantReadACP":",".join(get_grantees(acl "READ_ACP")) "GrantWriteACP":",".join(get_grantees(acl "WRITE_ACP")) }) acl )<block_end><def_stmt>get_grantees acl grant_type<block_start>prop_map={"CanonicalUser":("ID" "id") "AmazonCustomerByEmail":("EmailAddress" "emailAddress") "Group":("URI" "uri") }<line_sep>filtered=[grantee["Grantee"]<for>grantee acl.get("Grants")<if>grantee["Permission"]<eq>grant_type]<line_sep>grantees=set()<for_stmt>grantee filtered<block_start>identifier_type=grantee["Type"]<line_sep>identifier_prop=prop_map[identifier_type]<line_sep>grantees.add("{}={}".format(identifier_prop[1] grantee[identifier_prop[0]]))<block_end><return>grantees<block_end>@lru_cache()<def_stmt>validate_bucket_versioning client bucket<block_start>resp=client.get_bucket_versioning(Bucket=bucket)<line_sep>versioning_enabled=resp.get("Status")<eq>"Enabled"<line_sep>mfa_delete_enabled=resp.get("MFADelete")<eq>"Enabled"<if_stmt><not>versioning_enabled<block_start><raise>ValueError("Bucket {} does not have versioning enabled".format(bucket))<block_end><if_stmt>mfa_delete_enabled<block_start><raise>ValueError("Bucket {} has MFA Delete enabled".format(bucket))<block_end><return><true><block_end>@lru_cache()<def_stmt>fetch_manifest 
manifest_object<block_start><return>fetch_job_manifest(manifest_object)<block_end><def_stmt>delete_old_versions client input_bucket input_key new_version<block_start><try_stmt><block_start>resp=list(paginate(client client.list_object_versions ["Versions" "DeleteMarkers"] Bucket=input_bucket Prefix=input_key VersionIdMarker=new_version KeyMarker=input_key ))<line_sep>versions=[el[0]<for>el resp<if>el[0]<is><not><none>]<line_sep>delete_markers=[el[1]<for>el resp<if>el[1]<is><not><none>]<line_sep>versions.extend(delete_markers)<line_sep>sorted_versions=sorted(versions key=<lambda>x:x["LastModified"])<line_sep>version_ids=[v["VersionId"]<for>v sorted_versions]<line_sep>errors=[]<line_sep>max_deletions=1000<for_stmt>i range(0 len(version_ids) max_deletions)<block_start>resp=client.delete_objects(Bucket=input_bucket Delete={"Objects":[{"Key":input_key "VersionId":version_id}<for>version_id version_ids[i:i+max_deletions]] "Quiet":<true> } )<line_sep>errors.extend(resp.get("Errors" []))<block_end><if_stmt>len(errors)<g>0<block_start><raise>DeleteOldVersionsError(errors=["Delete object {} version {} failed: {}".format(e["Key"] e["VersionId"] e["Message"])<for>e errors])<block_end><block_end><except_stmt>ClientError<as>e<block_start><raise>DeleteOldVersionsError(errors=[str(e)])<block_end><block_end><def_stmt>verify_object_versions_integrity client bucket key from_version_id to_version_id<block_start><def_stmt>raise_exception msg<block_start><raise>IntegrityCheckFailedError(msg client bucket key to_version_id)<block_end>conflict_error_template="A {} ({}) was detected for the given object between read and write operations ({} and {})."<line_sep>not_found_error_template="Previous version ({}) has been deleted."<line_sep>object_versions=retry_wrapper(client.list_object_versions)(Bucket=bucket Prefix=key VersionIdMarker=to_version_id KeyMarker=key MaxKeys=1 )<line_sep>versions=object_versions.get("Versions" [])<line_sep>delete_markers=object_versions.get("DeleteMarkers" [])<line_sep>all_versions=versions+delete_markers<if_stmt><not>len(all_versions)<block_start><return>raise_exception(not_found_error_template.format(from_version_id))<block_end>prev_version=all_versions[0]<line_sep>prev_version_id=prev_version["VersionId"]<if_stmt>prev_version_id<ne>from_version_id<block_start>conflicting_version_type=("delete marker"<if>"ETag"<not><in>prev_version<else>"version")<line_sep><return>raise_exception(conflict_error_template.format(conflicting_version_type prev_version_id from_version_id to_version_id ))<block_end><return><true><block_end><def_stmt>rollback_object_version client bucket key version on_error<block_start>""" Delete newly created object version as soon as integrity conflict is detected """<try_stmt><block_start><return>client.delete_object(Bucket=bucket Key=key VersionId=version)<block_end><except_stmt>ClientError<as>e<block_start>err_message="ClientError: {}. Version rollback caused by version integrity conflict failed".format(str(e))<line_sep>on_error(err_message)<block_end><except_stmt>Exception<as>e<block_start>err_message="Unknown error: {}. 
Version rollback caused by version integrity conflict failed".format(str(e))<line_sep>on_error(err_message)<block_end><block_end><class_stmt>DeleteOldVersionsError(Exception)<block_start><def_stmt>__init__ self errors<block_start>super().__init__("\n".join(errors))<line_sep>self.errors=errors<block_end><block_end><class_stmt>IntegrityCheckFailedError(Exception)<block_start><def_stmt>__init__ self message client bucket key version_id<block_start>self.message=message<line_sep>self.client=client<line_sep>self.bucket=bucket<line_sep>self.key=key<line_sep>self.version_id=version_id<block_end><block_end>
<import_stmt>json<import_stmt>requests<import_from_stmt>urllib.parse quote<line_sep>octopus_server_uri='https://your.octopus.app/api'<line_sep>octopus_api_key='API-YOURAPIKEY'<line_sep>headers={'X-Octopus-ApiKey':octopus_api_key}<def_stmt>get_octopus_resource uri<block_start>response=requests.get(uri headers=headers)<line_sep>response.raise_for_status()<line_sep><return>json.loads(response.content.decode('utf-8'))<block_end><def_stmt>post_octopus_resource uri body<block_start>response=requests.post(uri headers=headers json=body)<line_sep>response.raise_for_status()<line_sep><return>json.loads(response.content.decode('utf-8'))<block_end><def_stmt>get_by_name uri name<block_start>resources=get_octopus_resource(uri)<line_sep><return>next((x<for>x resources['Items']<if>x['Name']<eq>name) <none>)<block_end>space_name='Default'<line_sep>environment_names=['Development' 'Test' 'Staging' 'Production']<line_sep>space=get_by_name('{0}/spaces?partialName={1}&skip=0&take=100'.format(octopus_server_uri quote(space_name)) space_name)<for_stmt>environment_name environment_names<block_start>existing_environment=get_by_name('{0}/{1}/environments?partialName={2}&skip=0&take=100'.format(octopus_server_uri space['Id'] quote(environment_name)) environment_name)<if_stmt>existing_environment<is><none><block_start>print('Creating environment \'{0}\''.format(environment_name))<line_sep>environment={'Name':environment_name}<line_sep>environment_resource=post_octopus_resource('{0}/{1}/environments'.format(octopus_server_uri space['Id']) environment)<line_sep>print('EnvironmentId: \'{0}\''.format(environment_resource['Id']))<block_end><else_stmt><block_start>print('Environment \'{0}\' already exists. Nothing to create :)'.format(environment_name))<block_end><block_end>
<import_from_stmt>.base script<as>base_script<import_from_stmt>.install_docker script<as>install_docker<import_from_stmt>.install_nvidia_docker script<as>install_nvidia_docker<import_from_stmt>.install_nvidia_drivers script<as>install_nvidia_drivers<line_sep>
#********************************************************************* # content = common functions # version = 0.1.0 # date = 2019-12-01 # # license = MIT <https://github.com/alexanderrichtertd> # author = <NAME> <<EMAIL>> #********************************************************************* <import_stmt>os<import_stmt>glob<import_stmt>json<import_stmt>time<import_stmt>webbrowser<line_sep># NO logging since it will break the init #********************************************************************* # FUNCTIONS <def_stmt>help name=''<block_start><import_from_stmt>tank Tank<if_stmt><not>name<and>os.getenv('SOFTWARE')<block_start>name=os.getenv('SOFTWARE')<block_end>project_help=Tank().data_project['HELP']<if_stmt>name<in>project_help<block_start>webbrowser.open(project_help[name])<block_end><else_stmt><block_start>webbrowser.open(project_help['default'])<block_end><block_end># GET all (sub) keys in dict <def_stmt>get_all_keys key_list dictonary=[]<block_start><for_stmt>key,items key_list.iteritems()<block_start>dictonary.append(key)<if_stmt>isinstance(items dict)<block_start>get_all_keys(items dictonary)<block_end><block_end><return>dictonary<block_end># decorator: return function duration time <def_stmt>get_duration func<block_start><def_stmt>timed *args **kw<block_start>startTime=time.time()<line_sep>resultTime=func(*args **kw)<line_sep>endTime=time.time()<line_sep>printResult='%r (%r, %r) %2.2f sec'%(func.__name__ args kw endTime-startTime)<line_sep>print(printResult)<line_sep><return>resultTime<block_end><return>timed<block_end><def_stmt>find_inbetween text first last<block_start><try_stmt><block_start>start=text.index(first)+len(first)<line_sep>end=text.index(last start)<block_end><except_stmt>ValueError<block_start><return>""<block_end><return>text[start:end]<block_end>#********************************************************************* # FOLDER # @BRIEF creates a folder, checks if it already exists, # creates the folder above if the path is a file <def_stmt>create_folder path<block_start><if_stmt>len(path.split('.'))<g>1<block_start>path=os.path.dirname(path)<block_end><if_stmt><not>os.path.exists(path)<block_start><try_stmt><block_start>os.makedirs(path)<block_end><except_stmt><block_start>print('CANT create folder: {}'.format(path))<block_end><block_end><block_end># @BRIEF opens folder even if file is given <def_stmt>open_folder path<block_start>path=os.path.normpath(path)<if_stmt>os.path.exists(path)<block_start><if_stmt>len(path.split('.'))<g>1<block_start>path=os.path.dirname(path)<block_end>webbrowser.open(path)<block_end><else_stmt><block_start>print('UNVALID path: {}'.format(path))<block_end><return>path<block_end>#********************************************************************* # FILES # @BRIEF get a file/folder list with specifics # # @PARAM path string. # file_type string/string[]. '*.py' # extension bool. True:[name.py] False:[name] # exclude string /string[]. '__init__.py' | '__init__' | ['btnReport48', 'btnHelp48'] # # @RETURN strint[]. 
<def_stmt>get_file_list path file_type='*' extension=<false> exclude='*' add_path=<false><block_start><if_stmt>(os.path.exists(path))<block_start>getFile=[]<try_stmt><block_start>os.chdir(path)<block_end><except_stmt><block_start>print('Invalid dir: {}'.format(path))<block_end><for_stmt>file_name glob.glob(file_type)<block_start><if_stmt>exclude<in>file_name<block_start><continue><block_end><if_stmt>add_path<block_start>file_name=os.path.normpath(('/').join([path file_name]))<block_end><if_stmt>extension<block_start>getFile.append(file_name)<block_end><else_stmt><block_start>getFile.append((file_name.split('.')[0]))<block_end><block_end><return>getFile<block_end><block_end>## # @BRIEF GET ALL subfolders in the path <def_stmt>get_deep_folder_list path add_path=<false><block_start><if_stmt>add_path<block_start>getFile=map(<lambda>x:x[0] os.walk(path))<block_end><else_stmt><block_start>getFile=map(<lambda>x:os.path.basename(x[0]) os.walk(path))<block_end><try_stmt><block_start>getFile.pop(0)<block_end><except_stmt><block_start>print('CANT pop file. Path: {}'.format(path))<block_end><return>getFile<block_end>#********************************************************************* # REPOSITORY <def_stmt>make_github_issue title body=<none> assignee='' milestone=<none> labels=<none><block_start><import_stmt>requests<import_from_stmt>tank Tank<line_sep>REPO_DATA=Tank().user.data_user_path<if_stmt><not>assignee<block_start>assignee=REPO_DATA['username']<block_end># Our url to create issues via POST url='https://api.github.com/repos/%s/%s/issues'%(REPO_DATA['owner'] REPO_DATA['repository'])<line_sep># Create an authenticated session to create the issue session=requests.Session()<line_sep>session.auth=(REPO_DATA['username'] REPO_DATA['password'])<line_sep>issue={'title':title 'body':body 'assignee':assignee 'milestone':milestone 'labels':labels}<line_sep># Add the issue to our repository repo=session.post(url json.dumps(issue))<if_stmt>repo.status_code<eq>201<block_start>LOG.info('Successfully created Issue {}'.format(title))<block_end><else_stmt><block_start>LOG.warning('Could not create Issue {}.\nResponse:{}'.format(title repo.content))<block_end><block_end>#********************************************************************* # TEST # make_github_issue(title='Login Test', body='Body text', milestone=None, labels=['bug'])
<import_stmt>pickle<import_stmt>angr<import_stmt>nose<def_stmt>test_pickle_state <block_start>b=angr.Project("/home/angr/angr/angr/tests/blob/x86_64/fauxware")<line_sep>p=b.path_generator.entry_point()<line_sep>p.state.inspect.make_breakpoint('mem_write')<line_sep>nose.tools.assert_true('inspector'<in>p.state.plugins)<line_sep>s_str=pickle.dumps(p.state)<line_sep>s2=pickle.loads(s_str)<line_sep>nose.tools.assert_is(p.state s2)<del_stmt>p<del_stmt>s2<import_stmt>gc<line_sep>gc.collect()<line_sep>s2=pickle.loads(s_str)<line_sep>nose.tools.assert_true('inspector'<not><in>s2.plugins)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_pickle_state()<block_end>
<import_from_stmt>platypush.message.event Event<class_stmt>SnapcastEvent(Event)<block_start>""" Base class for Snapcast events """<def_stmt>__init__ self host='localhost' *args **kwargs<block_start>super().__init__(host=host *args **kwargs)<block_end><block_end><class_stmt>ClientConnectedEvent(SnapcastEvent)<block_start>""" Event fired upon client connection """<def_stmt>__init__ self client host='localhost' *args **kwargs<block_start>super().__init__(client=client host=host *args **kwargs)<block_end><block_end><class_stmt>ClientDisconnectedEvent(SnapcastEvent)<block_start>""" Event fired upon client disconnection """<def_stmt>__init__ self client host='localhost' *args **kwargs<block_start>super().__init__(client=client host=host *args **kwargs)<block_end><block_end><class_stmt>ClientVolumeChangeEvent(SnapcastEvent)<block_start>""" Event fired upon volume change or mute status change on a client """<def_stmt>__init__ self client volume muted host='localhost' *args **kwargs<block_start>super().__init__(client=client host=host volume=volume muted=muted *args **kwargs)<block_end><block_end><class_stmt>ClientLatencyChangeEvent(SnapcastEvent)<block_start>""" Event fired upon latency change on a client """<def_stmt>__init__ self client latency host='localhost' *args **kwargs<block_start>super().__init__(client=client host=host latency=latency *args **kwargs)<block_end><block_end><class_stmt>ClientNameChangeEvent(SnapcastEvent)<block_start>""" Event fired upon name change of a client """<def_stmt>__init__ self client name host='localhost' *args **kwargs<block_start>super().__init__(client=client host=host name=name *args **kwargs)<block_end><block_end><class_stmt>GroupMuteChangeEvent(SnapcastEvent)<block_start>""" Event fired upon mute status change """<def_stmt>__init__ self group muted host='localhost' *args **kwargs<block_start>super().__init__(group=group host=host muted=muted *args **kwargs)<block_end><block_end><class_stmt>GroupStreamChangeEvent(SnapcastEvent)<block_start>""" Event fired upon group stream change """<def_stmt>__init__ self group stream host='localhost' *args **kwargs<block_start>super().__init__(group=group host=host stream=stream *args **kwargs)<block_end><block_end><class_stmt>StreamUpdateEvent(SnapcastEvent)<block_start>""" Event fired upon stream update """<def_stmt>__init__ self stream_id stream host='localhost' *args **kwargs<block_start>super().__init__(stream_id=stream_id stream=stream host=host *args **kwargs)<block_end><block_end><class_stmt>ServerUpdateEvent(SnapcastEvent)<block_start>""" Event fired upon stream update """<def_stmt>__init__ self server host='localhost' *args **kwargs<block_start>super().__init__(server=server host=host *args **kwargs)<block_end><block_end># vim:sw=4:ts=4:et:
<import_from_stmt>sqlobject *<import_from_stmt>sqlobject.tests.dbtest *<line_sep>######################################## ## Transaction test ######################################## <class_stmt>TestSOTrans(SQLObject)#_cacheValues = False <block_start><class_stmt>sqlmeta<block_start>defaultOrder='name'<block_end>name=StringCol(length=10 alternateID=<true> dbName='name_col')<block_end><def_stmt>test_transaction <block_start><if_stmt><not>supports('transactions')<block_start><return><block_end>setupClass(TestSOTrans)<line_sep>TestSOTrans(name='bob')<line_sep>TestSOTrans(name='tim')<line_sep>trans=TestSOTrans._connection.transaction()<try_stmt><block_start>TestSOTrans._connection.autoCommit='exception'<line_sep>TestSOTrans(name='joe' connection=trans)<line_sep>trans.rollback()<line_sep>trans.begin()<assert_stmt>([n.name<for>n TestSOTrans.select(connection=trans)]<eq>['bob' 'tim'])<line_sep>b=TestSOTrans.byName('bob' connection=trans)<line_sep>b.name='robert'<line_sep>trans.commit()<assert_stmt>b.name<eq>'robert'<line_sep>b.name='bob'<line_sep>trans.rollback()<line_sep>trans.begin()<assert_stmt>b.name<eq>'robert'<block_end><finally_stmt><block_start>TestSOTrans._connection.autoCommit=<true><block_end><block_end><def_stmt>test_transaction_commit_sync <block_start><if_stmt><not>supports('transactions')<block_start><return><block_end>setupClass(TestSOTrans)<line_sep>trans=TestSOTrans._connection.transaction()<try_stmt><block_start>TestSOTrans(name='bob')<line_sep>bOut=TestSOTrans.byName('bob')<line_sep>bIn=TestSOTrans.byName('bob' connection=trans)<line_sep>bIn.name='robert'<assert_stmt>bOut.name<eq>'bob'<line_sep>trans.commit()<assert_stmt>bOut.name<eq>'robert'<block_end><finally_stmt><block_start>TestSOTrans._connection.autoCommit=<true><block_end><block_end><def_stmt>test_transaction_delete close=<false><block_start><if_stmt><not>supports('transactions')<block_start><return><block_end>setupClass(TestSOTrans)<line_sep>trans=TestSOTrans._connection.transaction()<try_stmt><block_start>TestSOTrans(name='bob')<line_sep>bIn=TestSOTrans.byName('bob' connection=trans)<line_sep>bIn.destroySelf()<line_sep>bOut=TestSOTrans.select(TestSOTrans.q.name<eq>'bob')<assert_stmt>bOut.count()<eq>1<line_sep>bOutInst=bOut[0]<line_sep>bOutID=bOutInst.id<line_sep>trans.commit(close=close)<assert_stmt>bOut.count()<eq>0<line_sep>raises(SQLObjectNotFound "TestSOTrans.get(bOutID)")<line_sep>raises(SQLObjectNotFound "bOutInst.name")<block_end><finally_stmt><block_start>trans.rollback()<line_sep>TestSOTrans._connection.autoCommit=<true><block_end><block_end><def_stmt>test_transaction_delete_with_close <block_start>test_transaction_delete(close=<true>)<block_end>
<import_from_future_stmt> print_function<import_stmt>os.path<import_stmt>sys<import_from_stmt>gensim.corpora WikiCorpus<import_stmt>xml.etree.ElementTree<as>etree<import_stmt>warnings<import_stmt>logging<import_stmt>string<import_from_stmt>gensim utils<def_stmt>tokenize_tr content token_min_len=2 token_max_len=50 lower=<true><block_start><if_stmt>lower<block_start>lowerMap={ord(u'A'):u'a' ord(u'A'):u'a' ord(u'B'):u'b' ord(u'C'):u'c' ord(u'Ç'):u'ç' ord(u'D'):u'd' ord(u'E'):u'e' ord(u'F'):u'f' ord(u'G'):u'g' ord(u'Ğ'):u'ğ' ord(u'H'):u'h' ord(u'I'):u'ı' ord(u'İ'):u'i' ord(u'J'):u'j' ord(u'K'):u'k' ord(u'L'):u'l' ord(u'M'):u'm' ord(u'N'):u'n' ord(u'O'):u'o' ord(u'Ö'):u'ö' ord(u'P'):u'p' ord(u'R'):u'r' ord(u'S'):u's' ord(u'Ş'):u'ş' ord(u'T'):u't' ord(u'U'):u'u' ord(u'Ü'):u'ü' ord(u'V'):u'v' ord(u'Y'):u'y' ord(u'Z'):u'z'}<line_sep>content=content.translate(lowerMap)<block_end><return>[utils.to_unicode(token)<for>token utils.tokenize(content lower=<false> errors='ignore')<if>token_min_len<le>len(token)<le>token_max_len<and><not>token.startswith('_')]<block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<l>3<block_start>print("Please provide two arguments, first one is path to the wikipedia dump, second one is path to the output file")<line_sep>print("Example command: python3 preprocess.py trwiki-20180101-pages-articles.xml.bz2 wiki.tr.txt")<line_sep>sys.exit()<block_end>logging.basicConfig(level=logging.INFO format='%(asctime)s %(levelname)s %(message)s')<line_sep>inputFile=sys.argv[1]<line_sep>outputFile=sys.argv[2]<line_sep>wiki=WikiCorpus(inputFile lemmatize=<false> tokenizer_func=tokenize_tr)<line_sep>logging.info("Wikipedia dump is opened.")<line_sep>output=open(outputFile "w" encoding="utf-8")<line_sep>logging.info("Output file is created.")<line_sep>i=0<for_stmt>text wiki.get_texts()<block_start>output.write(" ".join(text)+"\n")<line_sep>i<augadd>1<if_stmt>(i%10000<eq>0)<block_start>logging.info("Saved "+str(i)+" articles.")<block_end><block_end>output.close()<block_end>
<import_stmt>os<import_stmt>boto3<import_stmt>placebo<import_stmt>pytest<line_sep>@pytest.fixture(name="session")<def_stmt>placebo_session request<block_start>session_kwargs={"region_name":os.environ.get("AWS_DEFAULT_REGION" "eu-west-1")}<line_sep>profile_name=os.environ.get("PLACEBO_PROFILE" <none>)<if_stmt>profile_name<block_start>session_kwargs["profile_name"]=profile_name<block_end>session=boto3.Session(**session_kwargs)<line_sep>prefix=request.function.__name__<line_sep>base_dir=os.environ.get("PLACEBO_DIR" os.path.join(os.getcwd() "placebo"))<line_sep>record_dir=os.path.join(base_dir prefix)<if_stmt><not>os.path.exists(record_dir)<block_start>os.makedirs(record_dir)<block_end>pill=placebo.attach(session data_path=record_dir)<if_stmt>os.environ.get("PLACEBO_MODE")<eq>"record"<block_start>pill.record()<block_end><else_stmt><block_start>pill.playback()<block_end><return>session<block_end>@pytest.fixture<def_stmt>ec2 session<block_start><return>session.resource("ec2" region_name="eu-west-1")<block_end>@pytest.fixture<def_stmt>ec2_ic session<block_start><return>session.resource("ec2-instance-connect" region_name="eu-west-1")<block_end>@pytest.fixture<def_stmt>ssm session<block_start><return>session.client("ssm" region_name="eu-west-1")<block_end>@pytest.fixture<def_stmt>ec2_mock mocker<block_start><return>mocker.MagicMock()<block_end>@pytest.fixture<def_stmt>ec2_ic_mock mocker<block_start><return>mocker.MagicMock()<block_end>@pytest.fixture<def_stmt>ssm_mock mocker<block_start>mock=mocker.MagicMock()<line_sep>response={"SessionId":"session-020bf6cd31f912b53" "TokenValue":"randomtokenvalue" }<line_sep>mock.configure_mock(**{"start_session.return_value":response "terminate_session.return_value":response })<line_sep>type(mock.meta).endpoint_url=mocker.PropertyMock(return_value="ssm")<line_sep><return>mock<block_end>@pytest.fixture<def_stmt>instance_id <block_start><return>"i-0c32153096cd68a6d"<block_end>@pytest.fixture<def_stmt>ssh_key mocker<block_start>mock=mocker.MagicMock()<line_sep>mock.configure_mock(**{"public_key.return_value":"ssh-rsa ranodombase64string" "key_path.return_value":"/home/user/.aws-gate/key" })<line_sep><return>mock<block_end>@pytest.fixture<def_stmt>config mocker<block_start>mock=mocker.MagicMock()<line_sep>mock.configure_mock(**{"get_host.return_value":{"alias":"test" "name":"SSM-test" "profile":"default" "region":"eu-west-1" }})<line_sep><return>mock<block_end>@pytest.fixture<def_stmt>empty_config mocker<block_start>mock=mocker.MagicMock()<line_sep>mock.configure_mock(**{"get_host.return_value":{}})<line_sep><return>mock<block_end>@pytest.fixture<def_stmt>get_instance_details_response <block_start><return>{"availability_zone":"eu-west-1a"}<block_end>
<import_stmt>warnings<class_stmt>AuthlibDeprecationWarning(DeprecationWarning)<block_start><pass><block_end>warnings.simplefilter('always' AuthlibDeprecationWarning)<def_stmt>deprecate message version=<none> link_uid=<none> link_file=<none><block_start><if_stmt>version<block_start>message<augadd>'\nIt will be compatible before version {}.'.format(version)<block_end><if_stmt>link_uid<and>link_file<block_start>message<augadd>'\nRead more <https://git.io/{}#file-{}-md>'.format(link_uid link_file)<block_end>warnings.warn(AuthlibDeprecationWarning(message) stacklevel=2)<block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>flask current_app<import_from_stmt>changes.config db<import_from_stmt>changes.models.snapshot SnapshotImage<import_from_stmt>changes.models.command FutureCommand<import_from_stmt>changes.utils.http build_internal_uri<import_from_stmt>changes.buildsteps.base LXCConfig<import_from_stmt>.builder JenkinsBuilder<class_stmt>JenkinsGenericBuilder(JenkinsBuilder)<block_start><def_stmt>__init__ self master_urls=<none> setup_script='' teardown_script='' artifacts=() reset_script='' path='' workspace='' snapshot_script=<none> clean=<true> cluster=<none> *args **kwargs<block_start>"""Builder for JenkinsGenericBuildStep. See JenkinsGenericBuildStep for information on most of these arguments. """<line_sep>self.setup_script=setup_script<line_sep>self.script=kwargs.pop('script')<line_sep>self.teardown_script=teardown_script<line_sep>self.snapshot_script=snapshot_script<line_sep>self.reset_script=reset_script<line_sep>self.path=path<line_sep>self.workspace=workspace<line_sep>self.artifacts=artifacts<line_sep>self.clean=clean<line_sep># See configuration for more details; by default, the default build type is # legacy which sets up no additional configuration. self.build_type=kwargs.pop('build_type' current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE'])<if_stmt>self.build_type<is><none><block_start>self.build_type=current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE']<block_end># If a server url is not provided (default: None), set it to a blank string self.artifact_server_base_url=current_app.config['ARTIFACTS_SERVER']<or>''<line_sep># we do this as early as possible in order to propagate the # error faster. The build description is simply the configuration # key'd by the build_type, documented in config.py self.build_desc=self.load_build_desc(self.build_type)<line_sep>super(JenkinsGenericBuilder self).__init__(master_urls cluster=cluster *args **kwargs)<block_end><def_stmt>load_build_desc self build_type<block_start>build_desc=current_app.config['CHANGES_CLIENT_BUILD_TYPES'][build_type]<line_sep>self.validate_build_desc(build_type build_desc)<line_sep><return>build_desc<block_end># TODO validate configuration at start of application or use a linter to validate # configuration before pushing/deploying <def_stmt>validate_build_desc self build_type build_desc<block_start><if_stmt>build_desc.get('uses_client' <false>)<block_start><if_stmt>'jenkins-command'<not><in>build_desc<block_start><raise>ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: jenkins-command'%build_type)<block_end><if_stmt>'adapter'<not><in>build_desc<block_start><raise>ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: adapter'%build_type)<block_end><block_end><block_end># These three methods all describe which build specification, # setup, and teardown should be used to create a snapshot # build. In the generic builder, this is the same as a normal build, # but sharded builds need to override these with the shard equivalents # in order to create the correct snapshot. <def_stmt>get_snapshot_build_desc self<block_start><return>self.build_desc<block_end><def_stmt>get_snapshot_setup_script self<block_start><return>self.setup_script<block_end><def_stmt>get_snapshot_teardown_script self<block_start><return>self.teardown_script<block_end><def_stmt>get_expected_image self job_id<block_start>""" Get the snapshot-image (filesystem tarball for this jobstep). 
If this returns None, it is a normal build (the more common case), otherwise it returns the id of the snapshot image, which indicates to where the build agent should upload the snapshot onto s3. """<line_sep><return>db.session.query(SnapshotImage.id ).filter(SnapshotImage.job_id<eq>job_id ).scalar()<block_end><def_stmt>_get_build_desc self jobstep<block_start><if_stmt>self.get_expected_image(jobstep.job_id)<block_start><return>self.get_snapshot_build_desc()<block_end><return>self.build_desc<block_end><def_stmt>get_lxc_config self jobstep<block_start>""" Get the LXC configuration, if the LXC adapter should be used. Args: jobstep (JobStep): The JobStep to get the LXC config for. Returns: LXCConfig: The config to use for this jobstep, or None. """<line_sep>build_desc=self._get_build_desc(jobstep)<if_stmt>build_desc.get('uses_client')<and>build_desc.get('adapter')<eq>'lxc'<block_start>app_cfg=current_app.config<line_sep>snapshot_bucket=app_cfg.get('SNAPSHOT_S3_BUCKET' '')<line_sep>default_pre=self.debug_config.get('prelaunch_script')<or>app_cfg.get('LXC_PRE_LAUNCH' '')<line_sep>default_post=app_cfg.get('LXC_POST_LAUNCH' '')<line_sep>default_release=app_cfg.get('LXC_RELEASE' 'trusty')<line_sep><return>LXCConfig(s3_bucket=snapshot_bucket compression='lz4' prelaunch=build_desc.get('pre-launch' default_pre) postlaunch=build_desc.get('post-launch' default_post) release=build_desc.get('release' default_release) template=<none> mirror=<none> security_mirror=<none>)<block_end><return><none><block_end><def_stmt>get_job_parameters self job changes_bid setup_script=<none> script=<none> teardown_script=<none> path=<none><block_start>""" Gets a list containing dictionaries, each with two keys - name and value. These key,value pairs correspond to the input variables in Jenkins. changes_bid is actually the jobstep id, and job is the current job. *_script and path override the corresponding fields of the current builder. """<line_sep>params=super(JenkinsGenericBuilder self).get_job_parameters(job changes_bid=changes_bid)<if_stmt>path<is><none><block_start>path=self.path<block_end><if_stmt>setup_script<is><none><block_start>setup_script=self.setup_script<block_end><if_stmt>script<is><none><block_start>script=self.script<block_end><if_stmt>teardown_script<is><none><block_start>teardown_script=self.teardown_script<block_end>project=job.project<line_sep>repository=project.repository<line_sep>vcs=repository.get_vcs()<if_stmt>vcs<block_start>repo_url=vcs.remote_url<block_end><else_stmt><block_start>repo_url=repository.url<block_end>snapshot_bucket=current_app.config.get('SNAPSHOT_S3_BUCKET' '')<line_sep>default_pre=self.debug_config.get('prelaunch_script')<or>current_app.config.get('LXC_PRE_LAUNCH' '')<line_sep>default_post=current_app.config.get('LXC_POST_LAUNCH' '')<line_sep>default_release=current_app.config.get('LXC_RELEASE' 'trusty')<line_sep>build_desc=self.build_desc<line_sep># This is the image we are expected to produce or None # if this is not a snapshot build. expected_image=self.get_expected_image(job.id)<line_sep># Setting script to be empty essentially forces nothing # but setup/teardown to be run, making a clean snapshot snapshot_id=''<if_stmt>expected_image<block_start>snapshot_id=expected_image.hex<line_sep># this is a no-op command in sh, essentially equivalent # to '' except it tells changes-client that we are # deliberately doing absolutely nothing. However, # if snapshot script is not None, then we just use # that in place of script (so the normal script is # never used). 
script=self.snapshot_script<or>':'<line_sep># sharded builds will have different setup/teardown/build_desc # scripts between shards and collector so we need to # use the shard ones build_desc=self.get_snapshot_build_desc()<line_sep>setup_script=self.get_snapshot_setup_script()<line_sep>teardown_script=self.get_snapshot_teardown_script()<block_end># CHANGES_BID, the jobstep id, is provided by superclass params.update({'CHANGES_PID':project.slug 'PROJECT_CONFIG':project.get_config_path() 'REPO_URL':repo_url 'SETUP_SCRIPT':setup_script 'SCRIPT':script 'TEARDOWN_SCRIPT':teardown_script 'RESET_SCRIPT':self.reset_script 'REPO_VCS':repository.backend.name 'WORK_PATH':path 'C_WORKSPACE':self.workspace 'ARTIFACTS_SERVER_BASE_URL':self.artifact_server_base_url})<if_stmt>'bind_mounts'<in>self.debug_config<block_start>params['bind-mounts']=self.debug_config['bind_mounts']<block_end><if_stmt>build_desc.get('uses_client' <false>)<block_start>params.update({'JENKINS_COMMAND':build_desc['jenkins-command'] 'CHANGES_CLIENT_ADAPTER':build_desc['adapter'] 'CHANGES_CLIENT_SERVER':build_internal_uri('/api/0') 'CHANGES_CLIENT_SNAPSHOT_BUCKET':snapshot_bucket 'CHANGES_CLIENT_SNAPSHOT_ID':snapshot_id 'CHANGES_CLIENT_LXC_PRE_LAUNCH':build_desc.get('pre-launch' default_pre) 'CHANGES_CLIENT_LXC_POST_LAUNCH':build_desc.get('post-launch' default_post) 'CHANGES_CLIENT_LXC_RELEASE':build_desc.get('release' default_release)})<block_end><return>params<block_end><def_stmt>get_future_commands self env commands artifacts<block_start>"""Create future commands which are later created as comands. See models/command.py. """<line_sep><return>map(<lambda>command:FutureCommand(command['script'] artifacts=artifacts env=env) commands)<block_end><def_stmt>create_commands self jobstep env<block_start>""" This seems slightly redundant, but in fact is necessary for changes-client to work. The issue is mainly that the client is designed for the exact flow of information that mesos uses, in which the commands are taken from changes through an api request. We need to tell changes to run what would normally be ran through the Jenkins configuration - so we move this from the Jenkins configuration into the commands of the build type. Arguments: jobstep (JobStep): jobstep to create commands under env (dict): Env variables to supply to all commands. """<line_sep>commands=self.build_desc.get('commands' [])<line_sep>artifacts=self.artifacts_for_jobstep(jobstep)<line_sep>env=env.copy()<if_stmt><not>self.clean<block_start>env['SKIP_GIT_CLEAN']="1"<block_end>index=0<for_stmt>future_command self.get_future_commands(env commands artifacts)<block_start>db.session.add(future_command.as_command(jobstep index))<line_sep>index<augadd>1<block_end><block_end><def_stmt>can_snapshot self<block_start>""" Whether or not this build can snapshot is purely a function of the build type. Right now the only adapter supporting this is the lxc adapter, but in the scenario that another adapter is added (e.g. docker?) then we would need for multiple adapters to support snapshots, so we just encode whether it can or not as a field, defaulting to false as most types don't support this operation. """<line_sep><return>self.build_desc.get('can_snapshot' <false>)<block_end><def_stmt>artifacts_for_jobstep self jobstep<block_start>""" The artifact names/patterns we want to collect for a given jobstep. For example, we may want to collect different artifacts for a collection phase jobstep. 
Arguments: jobstep (JobStep): jobstep in question """<line_sep><return>self.artifacts<block_end><block_end>
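# --- Illustrative sketch (not part of the builder above) ---
# A hedged example of what a single CHANGES_CLIENT_BUILD_TYPES entry might look
# like, based only on the keys the builder reads: validate_build_desc() requires
# 'jenkins-command' and 'adapter' whenever 'uses_client' is set, get_lxc_config()
# and get_job_parameters() consult 'pre-launch', 'post-launch' and 'release',
# create_commands() reads 'commands', and can_snapshot() reads 'can_snapshot'.
# All concrete values here are invented for illustration.
EXAMPLE_BUILD_TYPES = {
    'legacy': {},  # the default build type: no extra configuration
    'lxc': {
        'uses_client': True,
        'adapter': 'lxc',
        'jenkins-command': 'changes-client',
        'can_snapshot': True,
        'release': 'trusty',
        'commands': [
            {'script': '$SETUP_SCRIPT'},
            {'script': '$SCRIPT'},
            {'script': '$TEARDOWN_SCRIPT'},
        ],
    },
}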
<import_stmt>argparse<import_stmt>torch<def_stmt>get_args <block_start>parser=argparse.ArgumentParser(description='Goal-Oriented-Semantic-Exploration')<line_sep># General Arguments parser.add_argument('--seed' type=int default=1 help='random seed (default: 1)')<line_sep>parser.add_argument('--auto_gpu_config' type=int default=1)<line_sep>parser.add_argument('--total_num_scenes' type=str default="auto")<line_sep>parser.add_argument('-n' '--num_processes' type=int default=5 help="""how many training processes to use (default:5) Overridden when auto_gpu_config=1 and training on gpus""")<line_sep>parser.add_argument('--num_processes_per_gpu' type=int default=6)<line_sep>parser.add_argument('--num_processes_on_first_gpu' type=int default=1)<line_sep>parser.add_argument('--eval' type=int default=0 help='0: Train, 1: Evaluate (default: 0)')<line_sep>parser.add_argument('--num_training_frames' type=int default=10000000 help='total number of training frames')<line_sep>parser.add_argument('--num_eval_episodes' type=int default=200 help="number of test episodes per scene")<line_sep>parser.add_argument('--num_train_episodes' type=int default=10000 help="""number of train episodes per scene before loading the next scene""")<line_sep>parser.add_argument('--no_cuda' action='store_true' default=<false> help='disables CUDA training')<line_sep>parser.add_argument("--sim_gpu_id" type=int default=0 help="gpu id on which scenes are loaded")<line_sep>parser.add_argument("--sem_gpu_id" type=int default=-1 help="""gpu id for semantic model, -1: same as sim gpu, -2: cpu""")<line_sep># Logging, loading models, visualization parser.add_argument('--log_interval' type=int default=10 help="""log interval, one log per n updates (default: 10) """)<line_sep>parser.add_argument('--save_interval' type=int default=1 help="""save interval""")<line_sep>parser.add_argument('-d' '--dump_location' type=str default="./tmp/" help='path to dump models and log (default: ./tmp/)')<line_sep>parser.add_argument('--exp_name' type=str default="exp1" help='experiment name (default: exp1)')<line_sep>parser.add_argument('--save_periodic' type=int default=500000 help='Model save frequency in number of updates')<line_sep>parser.add_argument('--load' type=str default="0" help="""model path to load, 0 to not reload (default: 0)""")<line_sep>parser.add_argument('-v' '--visualize' type=int default=0 help="""1: Render the observation and the predicted semantic map, 2: Render the observation with semantic predictions and the predicted semantic map (default: 0)""")<line_sep>parser.add_argument('--print_images' type=int default=0 help='1: save visualization as images')<line_sep># Environment, dataset and episode specifications parser.add_argument('-efw' '--env_frame_width' type=int default=640 help='Frame width (default:640)')<line_sep>parser.add_argument('-efh' '--env_frame_height' type=int default=480 help='Frame height (default:480)')<line_sep>parser.add_argument('-fw' '--frame_width' type=int default=160 help='Frame width (default:160)')<line_sep>parser.add_argument('-fh' '--frame_height' type=int default=120 help='Frame height (default:120)')<line_sep>parser.add_argument('-el' '--max_episode_length' type=int default=500 help="""Maximum episode length""")<line_sep>parser.add_argument("--task_config" type=str default="tasks/objectnav_gibson.yaml" help="path to config yaml containing task information")<line_sep>parser.add_argument("--split" type=str default="train" help="dataset split (train | val | val_mini) 
")<line_sep>parser.add_argument('--camera_height' type=float default=0.88 help="agent camera height in metres")<line_sep>parser.add_argument('--hfov' type=float default=79.0 help="horizontal field of view in degrees")<line_sep>parser.add_argument('--turn_angle' type=float default=30 help="Agent turn angle in degrees")<line_sep>parser.add_argument('--min_depth' type=float default=0.5 help="Minimum depth for depth sensor in meters")<line_sep>parser.add_argument('--max_depth' type=float default=5.0 help="Maximum depth for depth sensor in meters")<line_sep>parser.add_argument('--success_dist' type=float default=1.0 help="success distance threshold in meters")<line_sep>parser.add_argument('--floor_thr' type=int default=50 help="floor threshold in cm")<line_sep>parser.add_argument('--min_d' type=float default=1.5 help="min distance to goal during training in meters")<line_sep>parser.add_argument('--max_d' type=float default=100.0 help="max distance to goal during training in meters")<line_sep>parser.add_argument('--version' type=str default="v1.1" help="dataset version")<line_sep># Model Hyperparameters parser.add_argument('--agent' type=str default="sem_exp")<line_sep>parser.add_argument('--lr' type=float default=2.5e-5 help='learning rate (default: 2.5e-5)')<line_sep>parser.add_argument('--global_hidden_size' type=int default=256 help='global_hidden_size')<line_sep>parser.add_argument('--eps' type=float default=1e-5 help='RL Optimizer epsilon (default: 1e-5)')<line_sep>parser.add_argument('--alpha' type=float default=0.99 help='RL Optimizer alpha (default: 0.99)')<line_sep>parser.add_argument('--gamma' type=float default=0.99 help='discount factor for rewards (default: 0.99)')<line_sep>parser.add_argument('--use_gae' action='store_true' default=<false> help='use generalized advantage estimation')<line_sep>parser.add_argument('--tau' type=float default=0.95 help='gae parameter (default: 0.95)')<line_sep>parser.add_argument('--entropy_coef' type=float default=0.001 help='entropy term coefficient (default: 0.01)')<line_sep>parser.add_argument('--value_loss_coef' type=float default=0.5 help='value loss coefficient (default: 0.5)')<line_sep>parser.add_argument('--max_grad_norm' type=float default=0.5 help='max norm of gradients (default: 0.5)')<line_sep>parser.add_argument('--num_global_steps' type=int default=20 help='number of forward steps in A2C (default: 5)')<line_sep>parser.add_argument('--ppo_epoch' type=int default=4 help='number of ppo epochs (default: 4)')<line_sep>parser.add_argument('--num_mini_batch' type=str default="auto" help='number of batches for ppo (default: 32)')<line_sep>parser.add_argument('--clip_param' type=float default=0.2 help='ppo clip parameter (default: 0.2)')<line_sep>parser.add_argument('--use_recurrent_global' type=int default=0 help='use a recurrent global policy')<line_sep>parser.add_argument('--num_local_steps' type=int default=25 help="""Number of steps the local policy between each global step""")<line_sep>parser.add_argument('--reward_coeff' type=float default=0.1 help="Object goal reward coefficient")<line_sep>parser.add_argument('--intrinsic_rew_coeff' type=float default=0.02 help="intrinsic exploration reward coefficient")<line_sep>parser.add_argument('--num_sem_categories' type=float default=16)<line_sep>parser.add_argument('--sem_pred_prob_thr' type=float default=0.9 help="Semantic prediction confidence threshold")<line_sep># Mapping parser.add_argument('--global_downscaling' type=int default=2)<line_sep>parser.add_argument('--vision_range' type=int 
default=100)<line_sep>parser.add_argument('--map_resolution' type=int default=5)<line_sep>parser.add_argument('--du_scale' type=int default=1)<line_sep>parser.add_argument('--map_size_cm' type=int default=2400)<line_sep>parser.add_argument('--cat_pred_threshold' type=float default=5.0)<line_sep>parser.add_argument('--map_pred_threshold' type=float default=1.0)<line_sep>parser.add_argument('--exp_pred_threshold' type=float default=1.0)<line_sep>parser.add_argument('--collision_threshold' type=float default=0.20)<line_sep># parse arguments args=parser.parse_args()<line_sep>args.cuda=<not>args.no_cuda<and>torch.cuda.is_available()<if_stmt>args.cuda<block_start><if_stmt>args.auto_gpu_config<block_start>num_gpus=torch.cuda.device_count()<if_stmt>args.total_num_scenes<ne>"auto"<block_start>args.total_num_scenes=int(args.total_num_scenes)<block_end><elif_stmt>"objectnav_gibson"<in>args.task_config<and>"train"<in>args.split<block_start>args.total_num_scenes=25<block_end><elif_stmt>"objectnav_gibson"<in>args.task_config<and>"val"<in>args.split<block_start>args.total_num_scenes=5<block_end><else_stmt><block_start><assert_stmt><false> "Unknown task config, please specify"+" total_num_scenes"<block_end># GPU Memory required for the SemExp model: # 0.8 + 0.4 * args.total_num_scenes (GB) # GPU Memory required per thread: 2.6 (GB) min_memory_required=max(0.8+0.4<times>args.total_num_scenes 2.6)<line_sep># Automatically configure number of training threads based on # number of GPUs available and GPU memory size gpu_memory=1000<for_stmt>i range(num_gpus)<block_start>gpu_memory=min(gpu_memory torch.cuda.get_device_properties(i).total_memory/1024/1024/1024)<assert_stmt>gpu_memory<g>min_memory_required """Insufficient GPU memory for GPU {}, gpu memory ({}GB) needs to be greater than {}GB""".format(i gpu_memory min_memory_required)<block_end>num_processes_per_gpu=int(gpu_memory/2.6)<line_sep>num_processes_on_first_gpu=int((gpu_memory-min_memory_required)/2.6)<if_stmt>args.eval<block_start>max_threads=num_processes_per_gpu<times>(num_gpus-1)+num_processes_on_first_gpu<assert_stmt>max_threads<ge>args.total_num_scenes """Insufficient GPU memory for evaluation"""<block_end><if_stmt>num_gpus<eq>1<block_start>args.num_processes_on_first_gpu=num_processes_on_first_gpu<line_sep>args.num_processes_per_gpu=0<line_sep>args.num_processes=num_processes_on_first_gpu<assert_stmt>args.num_processes<g>0 "Insufficient GPU memory"<block_end><else_stmt><block_start>num_threads=num_processes_per_gpu<times>(num_gpus-1)+num_processes_on_first_gpu<line_sep>num_threads=min(num_threads args.total_num_scenes)<line_sep>args.num_processes_per_gpu=num_processes_per_gpu<line_sep>args.num_processes_on_first_gpu=max(0 num_threads-args.num_processes_per_gpu<times>(num_gpus-1))<line_sep>args.num_processes=num_threads<block_end>args.sim_gpu_id=1<line_sep>print("Auto GPU config:")<line_sep>print("Number of processes: {}".format(args.num_processes))<line_sep>print("Number of processes on GPU 0: {}".format(args.num_processes_on_first_gpu))<line_sep>print("Number of processes per GPU: {}".format(args.num_processes_per_gpu))<block_end><block_end><else_stmt><block_start>args.sem_gpu_id=-2<block_end><if_stmt>args.num_mini_batch<eq>"auto"<block_start>args.num_mini_batch=max(args.num_processes<floordiv>2 1)<block_end><else_stmt><block_start>args.num_mini_batch=int(args.num_mini_batch)<block_end><return>args<block_end>
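# --- Illustrative sketch (not part of the original file) ---
# get_args() is normally invoked once at startup; with auto_gpu_config=1 the
# number of worker processes is derived from the memory model documented above
# (0.8 + 0.4 * total_num_scenes GB for the SemExp model, ~2.6 GB per worker
# thread). The entry point below is an assumption added for illustration.
if __name__ == '__main__':
    args = get_args()
    print("processes: {}, processes on GPU 0: {}, scenes: {}".format(
        args.num_processes, args.num_processes_on_first_gpu,
        args.total_num_scenes))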
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## <import_from_future_stmt> with_statement<import_stmt>os<import_stmt>maya.cmds<import_stmt>imath<import_stmt>IECore<import_stmt>IECoreScene<import_stmt>IECoreMaya<class_stmt>FnSceneShapeTest(IECoreMaya.TestCase)<block_start>__testFile="test/test.scc"<def_stmt>setUp self<block_start>scene=IECoreScene.SceneCache(FnSceneShapeTest.__testFile IECore.IndexedIO.OpenMode.Write)<line_sep>sc=scene.createChild(str(1))<line_sep>mesh=IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0) imath.V3f(1)))<line_sep>mesh["Cd"]=IECoreScene.PrimitiveVariable(IECoreScene.PrimitiveVariable.Interpolation.Uniform IECore.V3fVectorData([imath.V3f(1 0 0)]<times>6))<line_sep>sc.writeObject(mesh 0.0)<line_sep>matrix=imath.M44d().translate(imath.V3d(1 0 0))<line_sep>sc.writeTransform(IECore.M44dData(matrix) 0.0)<line_sep>sc=sc.createChild("child")<line_sep>mesh=IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0) imath.V3f(1)))<line_sep>mesh["Cd"]=IECoreScene.PrimitiveVariable(IECoreScene.PrimitiveVariable.Interpolation.Uniform IECore.V3fVectorData([imath.V3f(0 1 0)]<times>6))<line_sep>sc.writeObject(mesh 0.0)<line_sep>matrix=imath.M44d().translate(imath.V3d(2 0 0))<line_sep>sc.writeTransform(IECore.M44dData(matrix) 0.0)<line_sep>sc=sc.createChild(str(3))<line_sep>mesh=IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0) imath.V3f(1)))<line_sep>mesh["Cd"]=IECoreScene.PrimitiveVariable(IECoreScene.PrimitiveVariable.Interpolation.Uniform IECore.V3fVectorData([imath.V3f(0 0 1)]<times>6))<line_sep>sc.writeObject(mesh 0.0)<line_sep>matrix=imath.M44d().translate(imath.V3d(3 0 0))<line_sep>sc.writeTransform(IECore.M44dData(matrix) 0.0)<line_sep><return>scene<block_end><def_stmt>__setupTableProp self<block_start>boxSize=imath.Box3f(imath.V3f(-.5 -.5 -.5) imath.V3f(.5 .5 
.5))<line_sep>table=IECoreScene.SceneCache(FnSceneShapeTest.__testFile IECore.IndexedIO.Write)<line_sep>table.writeAttribute('scene:visible' IECore.BoolData(<true>) 0)<line_sep>table.writeAttribute('user:testBool' IECore.BoolData(<true>) 0)<line_sep>table.writeAttribute('user:testShort' IECore.ShortData(2) 0)<line_sep>table.writeAttribute('user:testInt' IECore.IntData(3) 0)<line_sep>table.writeAttribute('user:testInt64' IECore.Int64Data(4) 0)<line_sep>table.writeAttribute('user:testFloat' IECore.FloatData(5) 0)<line_sep>table.writeAttribute('user:testDouble' IECore.DoubleData(6) 0)<line_sep>table.writeAttribute('user:testString' IECore.StringData('seven') 0)<line_sep>mat=imath.M44d((8 9 10 11) (12 13 14 15) (16 17 18 19) (20 21 22 23))<line_sep>table.writeAttribute('user:testMatrixd' IECore.M44dData(mat) 0)<line_sep>mat=imath.M44f((24 25 26 27) (28 29 30 31) (32 33 34 35) (36 37 38 39))<line_sep>table.writeAttribute('user:testMatrixf' IECore.M44fData(mat) 0)<line_sep>pedestal_GEO=table.createChild('pedestal_GEO')<line_sep>pedestal_GEO.writeObject(IECoreScene.MeshPrimitive.createBox(boxSize) 0)<line_sep>s=imath.V3d(15 1 15)<line_sep>r=imath.Eulerd()<line_sep>t=imath.V3d(0 .5 0)<line_sep>mat=IECore.TransformationMatrixd(s r t)<line_sep>pedestal_GEO.writeTransform(IECore.TransformationMatrixdData(mat) 0)<line_sep>column_GEO=pedestal_GEO.createChild('column_GEO')<line_sep>column_GEO.writeObject(IECoreScene.MeshPrimitive.createBox(boxSize) 0)<line_sep>s=imath.V3d(.25 20 .25)<line_sep>r=imath.Eulerd()<line_sep>t=imath.V3d(0 10.5 0)<line_sep>mat=IECore.TransformationMatrixd(s r t)<line_sep>column_GEO.writeTransform(IECore.TransformationMatrixdData(mat) 0)<line_sep>tableTop_GEO=column_GEO.createChild('tableTop_GEO')<line_sep>tableTop_GEO.writeObject(IECoreScene.MeshPrimitive.createBox(boxSize) 0)<line_sep>s=imath.V3d(10 0.05 10)<line_sep>r=imath.Eulerd()<line_sep>t=imath.V3d(0 .525 0)<line_sep>mat=IECore.TransformationMatrixd(s r t)<line_sep>tableTop_GEO.writeTransform(IECore.TransformationMatrixdData(mat) 0)<block_end><def_stmt>testSceneInterface self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>node=maya.cmds.createNode("ieSceneShape")<line_sep>maya.cmds.setAttr(node+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>fn=IECoreMaya.FnSceneShape(node)<line_sep># Check scene for a wrong path maya.cmds.setAttr(node+'.root' 'blabla' type='string')<line_sep>scene=fn.sceneInterface()<line_sep>self.assertEqual(scene <none>)<line_sep>maya.cmds.setAttr(node+'.root' '/' type='string')<line_sep>scene=fn.sceneInterface()<line_sep>self.assertTrue(isinstance(scene IECoreScene.SceneCache))<line_sep>self.assertEqual(scene.childNames() ['1'])<line_sep>self.assertFalse(scene.hasObject())<line_sep>maya.cmds.setAttr(node+'.root' '/1' type='string')<line_sep>scene=fn.sceneInterface()<line_sep>self.assertTrue(isinstance(scene IECoreScene.SceneCache))<line_sep>self.assertEqual(scene.childNames() ['child'])<line_sep>self.assertTrue(scene.hasObject())<block_end><def_stmt>testCreationName self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("bob")<line_sep>self.assertEqual(fn.fullPathName() u"|bob|bobSceneShape")<line_sep>fn=IECoreMaya.FnSceneShape.create("bob1")<line_sep>self.assertEqual(fn.fullPathName() u"|bob1|bobSceneShape1")<line_sep>fn=IECoreMaya.FnSceneShape.create("bob")<line_sep>self.assertEqual(fn.fullPathName() u"|bob2|bobSceneShape2")<block_end><def_stmt>testCreationSetup self<block_start>maya.cmds.file(new=<true> 
f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>self.assertTrue(maya.cmds.sets(fn.fullPathName() isMember="initialShadingGroup"))<line_sep>self.assertTrue(maya.cmds.getAttr(fn.fullPathName()+".objectOnly" l=<true>))<line_sep>self.assertFalse(maya.cmds.getAttr(fn.fullPathName()+".objectOnly"))<line_sep>self.assertTrue(maya.cmds.isConnected("time1.outTime" fn.fullPathName()+".time"))<block_end><def_stmt>testExpandOnce self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>result=fn.expandOnce()<line_sep>self.assertTrue(maya.cmds.getAttr(fn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".queryPaths[0]") "/1")<line_sep>self.assertTrue(len(result)<eq>1)<line_sep>childFn=result[0]<line_sep>self.assertTrue(isinstance(childFn IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(childFn.fullPathName() "|test|sceneShape_1|sceneShape_SceneShape1")<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".root") "/1")<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outTranslate" "|test|sceneShape_1.translate"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outRotate" "|test|sceneShape_1.rotate"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outScale" "|test|sceneShape_1.scale"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTime" childFn.fullPathName()+".time"))<line_sep>maya.cmds.setAttr(childFn.fullPathName()+".drawGeometry" 1)<line_sep>result=childFn.expandOnce()<line_sep>self.assertTrue(maya.cmds.getAttr(childFn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".queryPaths[0]") "/child")<line_sep>self.assertTrue(len(result)<eq>1)<line_sep>self.assertTrue(isinstance(result[0] IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(result[0].fullPathName() "|test|sceneShape_1|child|childSceneShape")<line_sep>self.assertEqual(maya.cmds.getAttr(result[0].fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(result[0].fullPathName()+".root") "/1/child")<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outTranslate" "|test|sceneShape_1|child.translate"))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outRotate" "|test|sceneShape_1|child.rotate"))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outScale" "|test|sceneShape_1|child.scale"))<line_sep>self.assertEqual(maya.cmds.getAttr(result[0].fullPathName()+".drawGeometry") 1)<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTime" result[0].fullPathName()+".time"))<block_end><def_stmt>testExpandOnceNamespace self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>namespace="INPUT"<if_stmt><not>maya.cmds.namespace(exists=namespace)<block_start>maya.cmds.namespace(addNamespace=namespace)<block_end><def_stmt>addnamespace path<block_start><return>path.replace("|" "|"+namespace+":")<block_end>fn=IECoreMaya.FnSceneShape.create(namespace+":"+"test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile 
type='string')<line_sep>result=fn.expandOnce(preserveNamespace=<true>)<line_sep>self.assertTrue(len(result)<eq>1)<line_sep>childFn=result[0]<line_sep>self.assertTrue(isinstance(childFn IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(childFn.fullPathName() addnamespace("|test|sceneShape_1|sceneShape_SceneShape1"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outTranslate" addnamespace("|test|sceneShape_1.translate")))<block_end><def_stmt>testExpandAll self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>maya.cmds.setAttr(fn.fullPathName()+".drawGeometry" 1)<line_sep>result=fn.expandAll()<line_sep>self.assertTrue(maya.cmds.getAttr(fn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".queryPaths[0]") "/1")<line_sep>self.assertTrue(len(result)<eq>3)<line_sep>childFn=result[0]<line_sep>self.assertTrue(isinstance(childFn IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(childFn.fullPathName() "|test|sceneShape_1|sceneShape_SceneShape1")<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".root") "/1")<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outTranslate" "|test|sceneShape_1.translate"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outRotate" "|test|sceneShape_1.rotate"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outScale" "|test|sceneShape_1.scale"))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTime" childFn.fullPathName()+".time"))<line_sep>self.assertTrue(maya.cmds.getAttr(childFn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".queryPaths[0]") "/child")<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".drawGeometry") 1)<line_sep>self.assertTrue(isinstance(result[1] IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(result[1].fullPathName() "|test|sceneShape_1|child|childSceneShape")<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".root") "/1/child")<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outTranslate" "|test|sceneShape_1|child.translate"))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outRotate" "|test|sceneShape_1|child.rotate"))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outScale" "|test|sceneShape_1|child.scale"))<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".drawGeometry") 1)<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTime" result[1].fullPathName()+".time"))<block_end><def_stmt>testExpandAllNamespace self<block_start>namespace="INPUT"<if_stmt><not>maya.cmds.namespace(exists=namespace)<block_start>maya.cmds.namespace(addNamespace=namespace)<block_end><def_stmt>addnamespace path<block_start><return>path.replace("|" "|"+namespace+":")<block_end>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create(namespace+":"+"test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' 
FnSceneShapeTest.__testFile type='string')<line_sep>maya.cmds.setAttr(fn.fullPathName()+".drawGeometry" 1)<line_sep>result=fn.expandAll(preserveNamespace=<true>)<line_sep>self.assertTrue(maya.cmds.getAttr(fn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".queryPaths[0]") "/1")<line_sep>self.assertTrue(len(result)<eq>3)<line_sep>childFn=result[0]<line_sep>self.assertTrue(isinstance(childFn IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(childFn.fullPathName() addnamespace("|test|sceneShape_1|sceneShape_SceneShape1"))<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".root") "/1")<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outTranslate" addnamespace("|test|sceneShape_1.translate")))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outRotate" addnamespace("|test|sceneShape_1.rotate")))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTransform[0].outScale" addnamespace("|test|sceneShape_1.scale")))<line_sep>self.assertTrue(maya.cmds.isConnected(fn.fullPathName()+".outTime" childFn.fullPathName()+".time"))<line_sep>self.assertTrue(maya.cmds.getAttr(childFn.fullPathName()+".objectOnly"))<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".queryPaths[0]") "/child")<line_sep>self.assertEqual(maya.cmds.getAttr(childFn.fullPathName()+".drawGeometry") 1)<line_sep>self.assertTrue(isinstance(result[1] IECoreMaya.FnSceneShape))<line_sep>self.assertEqual(result[1].fullPathName() addnamespace("|test|sceneShape_1|child|childSceneShape"))<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".file") FnSceneShapeTest.__testFile)<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".root") "/1/child")<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outTranslate" addnamespace("|test|sceneShape_1|child.translate")))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outRotate" addnamespace("|test|sceneShape_1|child.rotate")))<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTransform[0].outScale" addnamespace("|test|sceneShape_1|child.scale")))<line_sep>self.assertEqual(maya.cmds.getAttr(result[1].fullPathName()+".drawGeometry") 1)<line_sep>self.assertTrue(maya.cmds.isConnected(childFn.fullPathName()+".outTime" result[1].fullPathName()+".time"))<block_end><def_stmt>testCollapse self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>result=fn.expandOnce()<line_sep>result[0].expandOnce()<line_sep>children=set(["|test|testSceneShape" "|test|sceneShape_1" "|test|sceneShape_1|sceneShape_SceneShape1" "|test|sceneShape_1|child" "|test|sceneShape_1|child|childSceneShape"])<line_sep>self.assertEqual(set(maya.cmds.listRelatives("|test" ad=<true> f=<true>)) children)<line_sep>fn.collapse()<line_sep>self.assertEqual(maya.cmds.listRelatives("|test" ad=<true> f=<true>) ["|test|testSceneShape"])<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".objectOnly") 0)<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".visibility") 1)<block_end><def_stmt>testConvertAllToGeometry self<block_start>maya.cmds.file(new=<true> 
f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>fn.convertAllToGeometry()<line_sep>children=["|test|testSceneShape" "|test|sceneShape_1"]<line_sep>self.assertEqual(maya.cmds.listRelatives("|test" f=<true>) children)<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".intermediateObject") 0)<line_sep>children=["|test|sceneShape_1|sceneShape_SceneShape1" "|test|sceneShape_1|child" "|test|sceneShape_1|sceneShape_Shape1"]<line_sep>self.assertEqual(maya.cmds.listRelatives("|test|sceneShape_1" f=<true>) children)<line_sep>self.assertEqual(maya.cmds.getAttr("|test|sceneShape_1|sceneShape_SceneShape1.intermediateObject") 1)<line_sep>self.assertEqual(maya.cmds.nodeType("|test|sceneShape_1|sceneShape_Shape1") "mesh")<line_sep>self.assertEqual(maya.cmds.getAttr("|test|sceneShape_1|sceneShape_SceneShape1.queryPaths[1]") "/")<line_sep>self.assertTrue(maya.cmds.isConnected("|test|sceneShape_1|sceneShape_SceneShape1.outObjects[1]" "|test|sceneShape_1|sceneShape_Shape1.inMesh"))<block_end><def_stmt>testComponentNames self<block_start>maya.cmds.file(new=<true> f=<true>)<line_sep>fn=IECoreMaya.FnSceneShape.create("test")<line_sep>maya.cmds.setAttr(fn.fullPathName()+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>maya.cmds.setAttr(fn.fullPathName()+".drawGeometry" 0)<line_sep>self.assertEqual(fn.componentNames() [])<line_sep>maya.cmds.setAttr(fn.fullPathName()+".drawGeometry" 1)<line_sep>self.assertEqual(fn.componentNames() ['/' '/1' '/1/child' '/1/child/3'])<line_sep>fn.selectComponentNames(['/' '/1' '/1/child/3'])<line_sep>self.assertEqual(fn.selectedComponentNames() set(['/' '/1' '/1/child/3']))<block_end><def_stmt>testQuery self<block_start>maya.cmds.file(new=<true> f=<true>)<def_stmt>createSceneFile <block_start>scene=IECoreScene.SceneCache(FnSceneShapeTest.__testFile IECore.IndexedIO.OpenMode.Write)<line_sep>sc=scene.createChild(str(1))<line_sep>curves=IECoreScene.CurvesPrimitive.createBox(imath.Box3f(imath.V3f(0) imath.V3f(1)))# 6 curves. sc.writeObject(curves 0.0)<line_sep>matrix=imath.M44d().translate(imath.V3d(0 0 0))<line_sep>sc.writeTransform(IECore.M44dData(matrix) 0.0)<block_end>createSceneFile()<line_sep>node=maya.cmds.createNode("ieSceneShape")<line_sep>maya.cmds.setAttr(node+'.file' FnSceneShapeTest.__testFile type='string')<line_sep>maya.cmds.setAttr(node+'.root' '/' type='string')<line_sep>fn=IECoreMaya.FnSceneShape(node)<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]" type=<true>) <none>)<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]" type=<true>) <none>)<line_sep>maya.cmds.setAttr(fn.fullPathName()+".queryPaths[0]" "/1" type="string")<line_sep>maya.cmds.setAttr(fn.fullPathName()+".queryPaths[1]" "/1" type="string")<line_sep>maya.cmds.setAttr(fn.fullPathName()+".queryConvertParameters[0]" "-index 0" type="string")# Set it to output 0 th box curve. maya.cmds.setAttr(fn.fullPathName()+".queryConvertParameters[1]" "-index 1" type="string")# Set it to output 1 th box curve. 
self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]" type=<true>) "nurbsCurve")<line_sep>self.assertEqual(maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]" type=<true>) "nurbsCurve")<line_sep>curveShape0=maya.cmds.createNode("nurbsCurve")<line_sep>curveShape1=maya.cmds.createNode("nurbsCurve")<line_sep>maya.cmds.connectAttr(fn.fullPathName()+".outObjects[0]" curveShape0+'.create')<line_sep>maya.cmds.connectAttr(fn.fullPathName()+".outObjects[1]" curveShape1+'.create')<line_sep>self.assertNotEqual(maya.cmds.pointPosition(curveShape0+'.cv[0]') maya.cmds.pointPosition(curveShape1+'.cv[0]'))<line_sep>maya.cmds.setAttr(fn.fullPathName()+".queryConvertParameters[1]" "-index 0" type="string")<line_sep>self.assertEqual(maya.cmds.pointPosition(curveShape0+'.cv[0]') maya.cmds.pointPosition(curveShape1+'.cv[0]'))<block_end><def_stmt>testPromotableAttributeNames self<block_start>maya.cmds.file(new=<true> force=<true>)<line_sep>self.__setupTableProp()<line_sep>sceneShapeFn=IECoreMaya.FnSceneShape.create('table')<line_sep>sceneShapeFn.findPlug('file').setString(FnSceneShapeTest.__testFile)<line_sep>expectedAttrs=['user:testBool' 'user:testShort' 'user:testInt' 'user:testInt64' 'user:testFloat' 'user:testDouble' 'user:testString' 'user:testMatrixd' 'user:testMatrixf' 'scene:visible']<line_sep>self.assertEquals(set(sceneShapeFn.promotableAttributeNames()) set(expectedAttrs))<block_end><def_stmt>testPromoteAttribute self<block_start>maya.cmds.file(new=<true> force=<true>)<line_sep>self.__setupTableProp()<line_sep>sceneShapeFn=IECoreMaya.FnSceneShape.create('table')<line_sep>sceneShapeFn.findPlug('file').setString(FnSceneShapeTest.__testFile)<for_stmt>pAttr sceneShapeFn.promotableAttributeNames()<block_start>sceneShapeFn.promoteAttribute(pAttr)<block_end>sceneShape=sceneShapeFn.fullPathName()<line_sep>table=maya.cmds.listRelatives(sceneShape parent=<true>)[0]<line_sep>testVisibility=maya.cmds.getAttr(table+'.'+str(IECoreMaya.LiveScene.visibilityOverrideName))<line_sep>testBool=maya.cmds.getAttr(table+'.ieAttr_testBool')<line_sep>testShort=maya.cmds.getAttr(table+'.ieAttr_testShort')<line_sep>testInt=maya.cmds.getAttr(table+'.ieAttr_testInt')<line_sep>testInt64=maya.cmds.getAttr(table+'.ieAttr_testInt64')<line_sep>testFloat=maya.cmds.getAttr(table+'.ieAttr_testFloat')<line_sep>testDouble=maya.cmds.getAttr(table+'.ieAttr_testDouble')<line_sep>testString=maya.cmds.getAttr(table+'.ieAttr_testString')<line_sep>testMatrixd=maya.cmds.getAttr(table+'.ieAttr_testMatrixd')<line_sep>testMatrixf=maya.cmds.getAttr(table+'.ieAttr_testMatrixf')<line_sep>self.assertTrue(testVisibility)<line_sep>self.assertTrue(testBool)<line_sep>self.assertEquals(testShort 2)<line_sep>self.assertEquals(testInt 3)<line_sep>self.assertEquals(testInt64 4)<line_sep>self.assertEquals(testFloat 5.)<line_sep>self.assertEquals(testDouble 6.)<line_sep>self.assertEquals(testString 'seven')<line_sep>self.assertEquals(testMatrixd [8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18. 19. 20. 21. 22. 23.])<line_sep>self.assertEquals(testMatrixf [24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35. 36. 37. 38. 39.])<block_end><def_stmt>tearDown self<block_start><if_stmt>os.path.exists(FnSceneShapeTest.__testFile)<block_start>os.remove(FnSceneShapeTest.__testFile)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>IECoreMaya.TestProgram(plugins=["ieCore"])<block_end>
<import_from_future_stmt> unicode_literals<import_from_stmt>moya.expose View<class_stmt>TestView(View)<block_start>name="hello"<def_stmt>get self context<block_start><return>"Hello, World"<block_end><block_end>
<import_from_stmt>absl flags<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string('name' 'rnn-t-v5' help='session name')<line_sep>flags.DEFINE_enum('mode' 'train' ['train' 'resume' 'eval'] help='mode')<line_sep>flags.DEFINE_integer('resume_step' <none> help='model step')<line_sep># dataset flags.DEFINE_string('LibriSpeech_train_100' "../librispeech/LibriSpeech/train-clean-100" help='LibriSpeech train')<line_sep>flags.DEFINE_string('LibriSpeech_train_360' "../librispeech/LibriSpeech/train-clean-360" help='LibriSpeech train')<line_sep>flags.DEFINE_string('LibriSpeech_train_500' "../librispeech/LibriSpeech/train-other-500" help='LibriSpeech train')<line_sep>flags.DEFINE_string('LibriSpeech_test' "../librispeech/LibriSpeech/test-clean" help='LibriSpeech test')<line_sep>flags.DEFINE_string('LibriSpeech_dev' "../librispeech/LibriSpeech/dev-clean" help='LibriSpeech dev')<line_sep>flags.DEFINE_string('TEDLIUM_train' "../speech_data/TEDLIUM/TEDLIUM_release1/train" help='TEDLIUM 1 train')<line_sep>flags.DEFINE_string('TEDLIUM_test' "../speech_data/TEDLIUM/TEDLIUM_release1/test" help='TEDLIUM 1 test')<line_sep>flags.DEFINE_string('CommonVoice' "../speech_data/common_voice" help='common voice')<line_sep>flags.DEFINE_string('YT_bloomberg2' "../speech_data/common_voice" help='common voice')<line_sep>flags.DEFINE_string('YT_life' "../speech_data/common_voice" help='common voice')<line_sep>flags.DEFINE_integer('num_workers' 4 help='dataloader workers')<line_sep># learning flags.DEFINE_bool('use_pretrained' default=<false> help='Use pretrained enncoder')<line_sep>flags.DEFINE_enum('optim' "adam" ['adam' 'sgd' 'sm3'] help='optimizer')<line_sep>flags.DEFINE_float('lr' 1e-4 help='initial lr')<line_sep>flags.DEFINE_bool('sched' <true> help='lr reduce rate on plateau')<line_sep>flags.DEFINE_integer('sched_patience' 1 help='lr reduce rate on plateau')<line_sep>flags.DEFINE_float('sched_factor' 0.5 help='lr reduce rate on plateau')<line_sep>flags.DEFINE_float('sched_min_lr' 1e-6 help='lr reduce rate on plateau')<line_sep>flags.DEFINE_integer('warmup_step' 10000 help='linearly warmup lr')<line_sep>flags.DEFINE_integer('epochs' 30 help='epoch')<line_sep>flags.DEFINE_integer('batch_size' 8 help='batch size')<line_sep>flags.DEFINE_integer('sub_batch_size' 8 help='accumulate batch size')<line_sep>flags.DEFINE_integer('eval_batch_size' 4 help='evaluation batch size')<line_sep>flags.DEFINE_float('gradclip' <none> help='clip norm value')<line_sep># encoder flags.DEFINE_string('enc_type' 'LSTM' help='encoder rnn type')<line_sep>flags.DEFINE_integer('enc_hidden_size' 600 help='encoder hidden dimension')<line_sep>flags.DEFINE_integer('enc_layers' 4 help='encoder layers')<line_sep>flags.DEFINE_integer('enc_proj_size' 600 help='encoder layers')<line_sep>flags.DEFINE_float('enc_dropout' 0 help='encoder dropout')<line_sep># decoder flags.DEFINE_integer('dec_hidden_size' 150 help='decoder hidden dimension')<line_sep>flags.DEFINE_integer('dec_layers' 2 help='decoder layers')<line_sep>flags.DEFINE_integer('dec_proj_size' 150 help='encoder layers')<line_sep>flags.DEFINE_float('dec_dropout' 0. 
help='decoder dropout')<line_sep># joint flags.DEFINE_integer('joint_size' 512 help='Joint hidden dimension')<line_sep># tokenizer flags.DEFINE_enum('tokenizer' 'char' ['char' 'bpe'] help='tokenizer')<line_sep>flags.DEFINE_integer('bpe_size' 256 help='BPE vocabulary size')<line_sep>flags.DEFINE_integer('vocab_embed_size' 16 help='vocabulary embedding size')<line_sep># data preprocess flags.DEFINE_float('audio_max_length' 14 help='max length in seconds')<line_sep>flags.DEFINE_enum('feature' 'mfcc' ['mfcc' 'melspec' 'logfbank'] help='audio feature')<line_sep>flags.DEFINE_integer('feature_size' 80 help='mel_bins')<line_sep>flags.DEFINE_integer('n_fft' 400 help='spectrogram')<line_sep>flags.DEFINE_integer('win_length' 400 help='spectrogram')<line_sep>flags.DEFINE_integer('hop_length' 200 help='spectrogram')<line_sep>flags.DEFINE_bool('delta' <false> help='concat delta and detal of dealt')<line_sep>flags.DEFINE_bool('cmvn' <false> help='normalize spectrogram')<line_sep>flags.DEFINE_integer('downsample' 3 help='downsample audio feature')<line_sep>flags.DEFINE_integer('T_mask' 50 help='downsample audio feature')<line_sep>flags.DEFINE_integer('T_num_mask' 2 help='downsample audio feature')<line_sep>flags.DEFINE_integer('F_mask' 5 help='downsample audio feature')<line_sep>flags.DEFINE_integer('F_num_mask' 1 help='downsample audio feature')<line_sep># apex flags.DEFINE_bool('apex' default=<true> help='fp16 training')<line_sep>flags.DEFINE_string('opt_level' 'O1' help='use mix precision')<line_sep># parallel flags.DEFINE_bool('multi_gpu' <false> help='DataParallel')<line_sep># log flags.DEFINE_integer('loss_step' 5 help='frequency to show loss in pbar')<line_sep>flags.DEFINE_integer('save_step' 10000 help='frequency to save model')<line_sep>flags.DEFINE_integer('eval_step' 10000 help='frequency to save model')<line_sep>flags.DEFINE_integer('sample_size' 20 help='size of visualized examples')<line_sep>
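# --- Illustrative sketch (not part of the original flags module) ---
# absl flags are only populated after app.run() parses argv, so a hypothetical
# entry point that consumes this module would look roughly like the following;
# the print statements stand in for training/evaluation code not defined here.
from absl import app


def main(argv):
    del argv  # unused
    if FLAGS.mode in ('train', 'resume'):
        print('session %s: lr=%g batch_size=%d' %
              (FLAGS.name, FLAGS.lr, FLAGS.batch_size))
    else:
        print('evaluating step %s with batch size %d' %
              (FLAGS.resume_step, FLAGS.eval_batch_size))


if __name__ == '__main__':
    app.run(main)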
<import_from_stmt>.default_icons *<class_stmt>Config(Config)<block_start><def_stmt>__init__ self num_gpus=1<block_start>super().__init__(num_gpus=num_gpus)<line_sep># Dataset self.data_dir="./dataset/fonts_tensor/"<line_sep>self.meta_filepath="./dataset/fonts_meta.csv"<block_end><block_end>
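# --- Illustrative sketch (not part of the original config module) ---
# The subclass above only overrides the dataset paths, so instantiating it
# behaves like the imported base Config with those two attributes swapped in.
# Assumes the base class accepts num_gpus as forwarded by __init__ above.
cfg = Config(num_gpus=1)
print(cfg.data_dir, cfg.meta_filepath)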
# Databricks notebook source # MAGIC %md # MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html) # MAGIC # MAGIC This is a 2019-2021 augmentation and update of [<NAME>](https://www.linkedin.com/in/adbreind)'s initial notebooks. # MAGIC # MAGIC _Thanks to [<NAME>](https://www.linkedin.com/in/christianvonkoch/) and [<NAME>](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._ # COMMAND ---------- # MAGIC %md # MAGIC # Convolutional Neural Networks # MAGIC ## aka CNN, ConvNet # COMMAND ---------- # MAGIC %md # MAGIC As a baseline, let's start a lab running with what we already know. # MAGIC # MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits. # MAGIC # MAGIC The main part of the code looks like the following (full code you can run is in the next cell): # MAGIC # MAGIC ``` # MAGIC # imports, setup, load data sets # MAGIC # MAGIC model = Sequential() # MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu')) # MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu')) # MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax')) # MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy']) # MAGIC # MAGIC categorical_labels = to_categorical(y_train, num_classes=10) # MAGIC # MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100) # MAGIC # MAGIC # print metrics, plot errors # MAGIC ``` # MAGIC # MAGIC Note the changes, which are largely about building a classifier instead of a regression model: # MAGIC * Output layer has one neuron per category, with softmax activation # MAGIC * __Loss function is cross-entropy loss__ # MAGIC * Accuracy metric is categorical accuracy # COMMAND ---------- # MAGIC %md # MAGIC Let's hold pointers into wikipedia for these new concepts. # COMMAND ---------- # MAGIC %scala # MAGIC //This allows easy embedding of publicly available information into any other notebook # MAGIC //Example usage: # MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250)) # MAGIC def frameIt( u:String, h:Int ) : String = { # MAGIC """<iframe # MAGIC src=""""+ u+"""" # MAGIC width="95%" height="""" + h + """" # MAGIC sandbox> # MAGIC <p> # MAGIC <a href="http://spark.apache.org/docs/latest/index.html"> # MAGIC Fallback link for browsers that, unlikely, don't support frames # MAGIC </a> # MAGIC </p> # MAGIC </iframe>""" # MAGIC } # MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression",500)) # COMMAND ---------- # MAGIC %scala # MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Softmax_function",380)) # COMMAND ---------- # MAGIC %md # MAGIC The following is from: [https://www.quora.com/How-does-Keras-calculate-accuracy](https://www.quora.com/How-does-Keras-calculate-accuracy). 
# MAGIC # MAGIC **Categorical accuracy:** # MAGIC # MAGIC ```%python # MAGIC def categorical_accuracy(y_true, y_pred): # MAGIC return K.cast(K.equal(K.argmax(y_true, axis=-1), # MAGIC K.argmax(y_pred, axis=-1)), # MAGIC K.floatx()) # MAGIC ``` # MAGIC # MAGIC > `K.argmax(y_true)` takes the highest value to be the prediction and matches against the comparative set. # COMMAND ---------- # MAGIC %md # MAGIC Watch (1:39) # MAGIC * [![Udacity: Deep Learning by <NAME> - Cross-entropy](http://img.youtube.com/vi/tRsSi_sqXjI/0.jpg)](https://www.youtube.com/watch?v=tRsSi_sqXjI) # MAGIC # MAGIC Watch (1:54) # MAGIC * [![Udacity: Deep Learning by <NAME> - Minimizing Cross-entropy](http://img.youtube.com/vi/x449QQDhMDE/0.jpg)](https://www.youtube.com/watch?v=x449QQDhMDE) # COMMAND ---------- <import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense<import_from_stmt>keras.utils to_categorical<import_stmt>sklearn.datasets<import_stmt>datetime<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<line_sep>train_libsvm="/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"<line_sep>test_libsvm="/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"<line_sep>X_train,y_train=sklearn.datasets.load_svmlight_file(train_libsvm n_features=784)<line_sep>X_train=X_train.toarray()<line_sep>X_test,y_test=sklearn.datasets.load_svmlight_file(test_libsvm n_features=784)<line_sep>X_test=X_test.toarray()<line_sep>model=Sequential()<line_sep>model.add(Dense(20 input_dim=784 kernel_initializer='normal' activation='relu'))<line_sep>model.add(Dense(15 kernel_initializer='normal' activation='relu'))<line_sep>model.add(Dense(10 kernel_initializer='normal' activation='softmax'))<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['categorical_accuracy'])<line_sep>categorical_labels=to_categorical(y_train num_classes=10)<line_sep>start=datetime.datetime.today()<line_sep>history=model.fit(X_train categorical_labels epochs=40 batch_size=100 validation_split=0.1 verbose=2)<line_sep>scores=model.evaluate(X_test to_categorical(y_test num_classes=10))<line_sep>print<for_stmt>i range(len(model.metrics_names))<block_start>print("%s: %f"%(model.metrics_names[i] scores[i]))<block_end>print("Start: "+str(start))<line_sep>end=datetime.datetime.today()<line_sep>print("End: "+str(end))<line_sep>print("Elapse: "+str(end-start))<line_sep># COMMAND ---------- # MAGIC %md # MAGIC after about a minute we have: # MAGIC # MAGIC ``` # MAGIC ... # MAGIC # MAGIC Epoch 40/40 # MAGIC 1s - loss: 0.0610 - categorical_accuracy: 0.9809 - val_loss: 0.1918 - val_categorical_accuracy: 0.9583 # MAGIC # MAGIC ... # MAGIC # MAGIC loss: 0.216120 # MAGIC # MAGIC categorical_accuracy: 0.955000 # MAGIC # MAGIC Start: 2017-12-06 07:35:33.948102 # MAGIC # MAGIC End: 2017-12-06 07:36:27.046130 # MAGIC # MAGIC Elapse: 0:00:53.098028 # MAGIC ``` # COMMAND ---------- <import_stmt>matplotlib.pyplot<as>plt<line_sep>fig,ax=plt.subplots()<line_sep>fig.set_size_inches((5 5))<line_sep>plt.plot(history.history['loss'])<line_sep>plt.plot(history.history['val_loss'])<line_sep>plt.title('model loss')<line_sep>plt.ylabel('loss')<line_sep>plt.xlabel('epoch')<line_sep>plt.legend(['train' 'val'] loc='upper left')<line_sep>display(fig)<line_sep># COMMAND ---------- # MAGIC %md # MAGIC What are the big takeaways from this experiment? # MAGIC # MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20 # MAGIC 2. 
# MAGIC The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC 
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC 
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC 
# MAGIC Try two more experiments (try them separately):
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers.
# MAGIC 
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC 
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# MAGIC 
# MAGIC ... We need a new approach!
# MAGIC 
# MAGIC ---
# MAGIC 
# MAGIC ... let's think about this:
# MAGIC 
# MAGIC ### What is layer 2 learning from layer 1? Combinations of pixels
# MAGIC 
# MAGIC #### Combinations of pixels contain information but...
# MAGIC 
# MAGIC There are a lot of them (combinations) and they are "fragile"
# MAGIC 
# MAGIC In fact, in our last experiment, we basically built a model that memorizes a bunch of "magic" pixel combinations.
# MAGIC 
# MAGIC What might be a better way to build features?
# MAGIC 
# MAGIC * When humans perform this task, we look not at arbitrary pixel combinations, but certain geometric patterns -- lines, curves, loops.
# MAGIC * These features are made up of combinations of pixels, but they are far from arbitrary
# MAGIC * We identify these features regardless of translation, rotation, etc.
# MAGIC 
# MAGIC Is there a way to get the network to do the same thing?
# MAGIC 
# MAGIC I.e., in layer one, identify pixels. Then in layer 2+, identify abstractions over pixels that are translation-invariant 2-D shapes?
# MAGIC 
# MAGIC We could look at where a "filter" that represents one of these features (e.g., an edge) matches the image.
# MAGIC 
# MAGIC How would this work?
# MAGIC 
# MAGIC ### Convolution
# MAGIC 
# MAGIC Convolution in the general mathematical sense is defined as follows:
# MAGIC 
# MAGIC <img src="https://i.imgur.com/lurC2Cx.png" width=300>
# MAGIC 
# MAGIC The convolution we deal with in deep learning is a simplified case. We want to compare two signals.
Here are two visualizations, courtesy of Wikipedia, that help communicate how convolution emphasizes features: # MAGIC # MAGIC <img src="http://i.imgur.com/EDCaMl2.png" width=500> # MAGIC # MAGIC --- # MAGIC # MAGIC #### Here's an animation (where we change \\({\tau}\\)) # MAGIC <img src="http://i.imgur.com/0BFcnaw.gif"> # MAGIC # MAGIC __In one sense, the convolution captures and quantifies the pattern matching over space__ # MAGIC # MAGIC If we perform this in two dimensions, we can achieve effects like highlighting edges: # MAGIC # MAGIC <img src="http://i.imgur.com/DKEXIII.png"> # MAGIC # MAGIC The matrix here, also called a convolution kernel, is one of the functions we are convolving. Other convolution kernels can blur, "sharpen," etc. (A small NumPy sketch of this discrete 2-D convolution, plus max pooling, appears in the appendix cell at the end of this notebook.) # MAGIC # MAGIC ### So we'll drop in a number of convolution kernels, and the network will learn where to use them? Nope. Better than that. # MAGIC # MAGIC ## We'll program in the *idea* of discrete convolution, and the network will learn what kernels extract meaningful features! # MAGIC # MAGIC The values in a (fixed-size) convolution kernel matrix will be variables in our deep learning model. Although intuitively it seems like it would be hard to learn useful params, in fact, since those variables are used repeatedly across the image data, it "focuses" the error on a smallish number of parameters with a lot of influence -- so it should be vastly *less* expensive to train than just a huge fully connected layer like we discussed above. # MAGIC # MAGIC This idea was developed in the late 1980s, and by 1989, <NAME> (at AT&T/Bell Labs) had built a practical high-accuracy system (used in the 1990s for processing handwritten checks and mail). # MAGIC # MAGIC __How do we hook this into our neural networks?__ # MAGIC # MAGIC * First, we can preserve the geometric properties of our data by "shaping" the vectors as 2D instead of 1D. # MAGIC # MAGIC * Then we'll create a layer whose value is not just an activation applied to a weighted sum of inputs, but instead it's the result of a dot-product (element-wise multiply and sum) between the kernel and a patch of the input vector (image). # MAGIC * This value will be our "pre-activation" and optionally feed into an activation function (or "detector") # MAGIC # MAGIC <img src="http://i.imgur.com/ECyi9lL.png"> # MAGIC # MAGIC # MAGIC If we perform this operation at lots of positions over the image, we'll get lots of outputs, as many as one for every input pixel. # MAGIC # MAGIC # MAGIC <img src="http://i.imgur.com/WhOrJ0Y.jpg"> # MAGIC # MAGIC * So we'll add another layer that "picks" the highest convolution pattern match from nearby pixels, which # MAGIC * makes our pattern match a little bit translation invariant (a fuzzy location match) # MAGIC * reduces the number of outputs significantly # MAGIC * This layer is commonly called a pooling layer, and if we pick the "maximum match" then it's a "max pooling" layer. # MAGIC # MAGIC <img src="http://i.imgur.com/9iPpfpb.png"> # MAGIC # MAGIC __The end result is that the kernel or filter together with max pooling creates a value in a subsequent layer which represents the appearance of a pattern in a local area in a prior layer.__ # MAGIC # MAGIC __Again, the network will be given a number of "slots" for these filters and will learn (by minimizing error) what filter values produce meaningful features.
This is the key insight into how modern image-recognition networks are able to generalize -- i.e., learn to tell 6s from 7s or cats from dogs.__ # MAGIC # MAGIC <img src="http://i.imgur.com/F8eH3vj.png"> # MAGIC # MAGIC ## Ok, let's build our first ConvNet: # MAGIC # MAGIC First, we want to explicitly shape our data into a 2-D configuration. We'll end up with a 4-D tensor where the first dimension is the training examples, then each example is 28x28 pixels, and we'll explicitly say it's 1-layer deep. (Why? With color images, we typically process over 3 or 4 channels in this last dimension) # MAGIC # MAGIC A step by step animation follows: # MAGIC * http://cs231n.github.io/assets/conv-demo/index.html # COMMAND ---------- train_libsvm="/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"<line_sep>test_libsvm="/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"<line_sep>X_train,y_train=sklearn.datasets.load_svmlight_file(train_libsvm n_features=784)<line_sep>X_train=X_train.toarray()<line_sep>X_test,y_test=sklearn.datasets.load_svmlight_file(test_libsvm n_features=784)<line_sep>X_test=X_test.toarray()<line_sep>X_train=X_train.reshape((X_train.shape[0] 28 28 1))<line_sep>X_train=X_train.astype('float32')<line_sep>X_train<augdiv>255<line_sep>y_train=to_categorical(y_train num_classes=10)<line_sep>X_test=X_test.reshape((X_test.shape[0] 28 28 1))<line_sep>X_test=X_test.astype('float32')<line_sep>X_test<augdiv>255<line_sep>y_test=to_categorical(y_test num_classes=10)<line_sep># COMMAND ---------- # MAGIC %md # MAGIC Now the model: # COMMAND ---------- <import_from_stmt>keras.layers Dense Dropout Activation Flatten Conv2D MaxPooling2D<line_sep>model=Sequential()<line_sep>model.add(Conv2D(8 # number of kernels (4 4) # kernel size padding='valid' # no padding; output will be smaller than input input_shape=(28 28 1)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(128))<line_sep>model.add(Activation('relu'))# alternative syntax for applying activation model.add(Dense(10))<line_sep>model.add(Activation('softmax'))<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep># COMMAND ---------- # MAGIC %md # MAGIC ... and the training loop and output: # COMMAND ---------- start=datetime.datetime.today()<line_sep>history=model.fit(X_train y_train batch_size=128 epochs=8 verbose=2 validation_split=0.1)<line_sep>scores=model.evaluate(X_test y_test verbose=1)<line_sep>print<for_stmt>i range(len(model.metrics_names))<block_start>print("%s: %f"%(model.metrics_names[i] scores[i]))<block_end># COMMAND ---------- fig,ax=plt.subplots()<line_sep>fig.set_size_inches((5 5))<line_sep>plt.plot(history.history['loss'])<line_sep>plt.plot(history.history['val_loss'])<line_sep>plt.title('model loss')<line_sep>plt.ylabel('loss')<line_sep>plt.xlabel('epoch')<line_sep>plt.legend(['train' 'val'] loc='upper left')<line_sep>display(fig)<line_sep># COMMAND ---------- # MAGIC %md # MAGIC ### Our MNIST ConvNet # MAGIC # MAGIC In our first convolutional MNIST experiment, we get to almost 99% validation accuracy in just a few epochs (a minute or so on CPU)! # MAGIC # MAGIC The training accuracy is effectively 100%, though, so we've almost completely overfit (i.e., memorized the training data) by this point and need to do a little work if we want to keep learning.
# MAGIC # MAGIC Let's add another convolutional layer: # COMMAND ---------- model=Sequential()<line_sep>model.add(Conv2D(8 # number of kernels (4 4) # kernel size padding='valid' input_shape=(28 28 1)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Conv2D(8 (4 4)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(128))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Dense(10))<line_sep>model.add(Activation('softmax'))<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>history=model.fit(X_train y_train batch_size=128 epochs=15 verbose=2 validation_split=0.1)<line_sep>scores=model.evaluate(X_test y_test verbose=1)<line_sep>print<for_stmt>i range(len(model.metrics_names))<block_start>print("%s: %f"%(model.metrics_names[i] scores[i]))<block_end># COMMAND ---------- # MAGIC %md # MAGIC While that's running, let's look at a number of "famous" convolutional networks! # MAGIC # MAGIC ### LeNet (<NAME>, 1998) # MAGIC # MAGIC <img src="http://i.imgur.com/k5hMtMK.png"> # MAGIC # MAGIC <img src="http://i.imgur.com/ERV9pHW.gif"> # COMMAND ---------- # MAGIC %md <img src="http://i.imgur.com/TCN9C4P.png"> # COMMAND ---------- # MAGIC %md # MAGIC ### AlexNet (2012) # MAGIC # MAGIC <img src="http://i.imgur.com/CpokDKV.jpg"> # MAGIC # MAGIC <img src="http://i.imgur.com/Ld2QhXr.jpg"> # COMMAND ---------- # MAGIC %md # MAGIC ### Back to our labs: Still Overfitting # MAGIC # MAGIC We're making progress on our test error -- about 99% -- but just a bit for all the additional time, due to the network overfitting the data. # MAGIC # MAGIC There are a variety of techniques we can use to counter this -- forms of regularization. # MAGIC # MAGIC Let's try a relatively simple solution that works surprisingly well: add a pair of `Dropout` filters, a layer that randomly omits a fraction of neurons from each training batch (thus exposing each neuron to only part of the training data). # MAGIC # MAGIC We'll add more convolution kernels but shrink them to 3x3 as well.
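# MAGIC
# MAGIC A quick note on the `Dropout` layer we are about to use (this is standard Keras 2 behavior, not something specific to this notebook): `rate` is the fraction of incoming activations that gets *dropped* on each training batch, and the layer is an identity pass-through at inference time. A minimal sketch:
# MAGIC
# MAGIC ```%python
# MAGIC from keras.layers import Dropout
# MAGIC
# MAGIC drop = Dropout(rate=0.25)   # zeroes 25% of the incoming activations on each training batch
# MAGIC ```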
# COMMAND ---------- model=Sequential()<line_sep>model.add(Conv2D(32 # number of kernels (3 3) # kernel size padding='valid' input_shape=(28 28 1)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Conv2D(32 (3 3)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Dropout(rate=1-0.25))# <- regularize, new parameter rate added (rate=1-keep_prob) model.add(Flatten())<line_sep>model.add(Dense(128))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Dropout(rate=1-0.5))# <-regularize, new parameter rate added (rate=1-keep_prob) model.add(Dense(10))<line_sep>model.add(Activation('softmax'))<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>history=model.fit(X_train y_train batch_size=128 epochs=15 verbose=2)<line_sep>scores=model.evaluate(X_test y_test verbose=2)<line_sep>print<for_stmt>i range(len(model.metrics_names))<block_start>print("%s: %f"%(model.metrics_names[i] scores[i]))<block_end># COMMAND ---------- # MAGIC %md # MAGIC While that's running, let's look at some more recent ConvNet architectures: # MAGIC # MAGIC ### VGG16 (2014) # MAGIC # MAGIC <img src="http://i.imgur.com/gl4kZDf.png"> # COMMAND ---------- # MAGIC %md # MAGIC ### GoogLeNet (2014) # MAGIC # MAGIC <img src="http://i.imgur.com/hvmtDqN.png"> # MAGIC # MAGIC *"Inception" layer: parallel convolutions at different resolutions* # MAGIC # MAGIC ### Residual Networks (2015-) # MAGIC # MAGIC Skip layers to improve training (error propagation). Residual layers learn from details at multiple previous layers. # MAGIC # MAGIC <img src="http://i.imgur.com/32g8Ykl.png"> # COMMAND ---------- # MAGIC %md # MAGIC --- # MAGIC # MAGIC > __ASIDE: Atrous / Dilated Convolutions__ # MAGIC # MAGIC > An atrous or dilated convolution is a convolution filter with "holes" in it. Effectively, it is a way to enlarge the filter spatially while not adding as many parameters or attending to every element in the input. # MAGIC # MAGIC > Why? Covering a larger input volume allows recognizing coarser-grained patterns; restricting the number of parameters is a way of regularizing or constraining the capacity of the model, making training easier. # MAGIC # MAGIC --- # COMMAND ---------- # MAGIC %md # MAGIC ## *Lab Wrapup* # MAGIC # MAGIC From the last lab, you should have a test accuracy of over 99.1% # MAGIC # MAGIC For one more activity, try changing the optimizer to old-school "sgd" -- just to see how far we've come with these modern gradient descent techniques in the last few years. # MAGIC # MAGIC Accuracy will end up noticeably worse ... about 96-97% test accuracy. Two key takeaways: # MAGIC # MAGIC * Without a good optimizer, even a very powerful network design may not achieve results # MAGIC * In fact, we could replace the word "optimizer" there with # MAGIC * initialization # MAGIC * activation # MAGIC * regularization # MAGIC * (etc.) 
# MAGIC * All of these elements we've been working with operate together in a complex way to determine final performance # COMMAND ---------- # MAGIC %md # MAGIC Of course this world evolves fast - see the new kid in the CNN block -- **capsule networks** # MAGIC # MAGIC > Hinton: “The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.” # MAGIC # MAGIC Well worth the 8 minute read: # MAGIC * [https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b](https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b) # MAGIC # MAGIC To understand deeper: # MAGIC * original paper: [https://arxiv.org/abs/1710.09829](https://arxiv.org/abs/1710.09829) # MAGIC # MAGIC [Keras capsule network example](https://keras.io/examples/cifar10_cnn_capsule/) # COMMAND ---------- # MAGIC %md # MAGIC # More resources # MAGIC # MAGIC - http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/ # MAGIC - https://openai.com/ # COMMAND ----------
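# MAGIC %md
# MAGIC #### Appendix: a NumPy sketch of convolution + max pooling
# MAGIC
# MAGIC This cell illustrates the "convolve, then max-pool" idea discussed earlier in this notebook; it is not part of the Keras pipeline above. The kernel values are made up (a Sobel-like vertical-edge filter) -- in a real ConvNet they are learned -- and, like the convolution layers in deep learning frameworks, the sketch computes cross-correlation (no kernel flip).
# MAGIC
# MAGIC ```%python
# MAGIC import numpy as np
# MAGIC
# MAGIC def conv2d_valid(image, kernel):
# MAGIC     kh, kw = kernel.shape
# MAGIC     oh, ow = image.shape[0] - kh + 1, image.shape[1] - kw + 1
# MAGIC     out = np.zeros((oh, ow))
# MAGIC     for i in range(oh):
# MAGIC         for j in range(ow):
# MAGIC             out[i, j] = np.sum(image[i:i+kh, j:j+kw] * kernel)  # dot product with one patch
# MAGIC     return out
# MAGIC
# MAGIC def max_pool_2x2(fm):
# MAGIC     h, w = (fm.shape[0] // 2) * 2, (fm.shape[1] // 2) * 2
# MAGIC     return fm[:h, :w].reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))  # best match per 2x2 block
# MAGIC
# MAGIC image = np.random.rand(28, 28)                    # stand-in for one MNIST digit
# MAGIC kernel = np.array([[1., 0., -1.],
# MAGIC                    [2., 0., -2.],
# MAGIC                    [1., 0., -1.]])                # responds strongly to vertical edges
# MAGIC features = conv2d_valid(image, kernel)            # shape (26, 26)
# MAGIC pooled = max_pool_2x2(features)                   # shape (13, 13)
# MAGIC ```

# COMMAND ----------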
# Copyright 2020-2021 Dolthub, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_stmt>pymysql.cursors<class_stmt>TestMySQL(unittest.TestCase)<block_start><def_stmt>test_connect self<block_start>connection=pymysql.connect(host='127.0.0.1' user='root' password='' db='' cursorclass=pymysql.cursors.DictCursor)<try_stmt><block_start><with_stmt>connection.cursor()<as>cursor<block_start>sql="SELECT name, email FROM mytable ORDER BY name, email"<line_sep>cursor.execute(sql)<line_sep>rows=cursor.fetchall()<line_sep>expected=[{"name":"<NAME>" "email":"<EMAIL>"} {"name":"<NAME>" "email":"<EMAIL>"} {"name":"<NAME>" "email":"<EMAIL>"} {"name":"<NAME>" "email":"<EMAIL>"}]<line_sep>self.assertEqual(expected rows)<block_end><block_end><finally_stmt><block_start>connection.close()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<def_stmt>test_main mocker<block_start>app=mocker.patch("openapi_python_client.cli.app")<line_sep># noinspection PyUnresolvedReferences <import_from_stmt>openapi_python_client __main__<line_sep>app.assert_called_once()<block_end>
<import_stmt>base64<import_stmt>json<import_stmt>socket<import_from_stmt>typing Optional Union<import_from_stmt>platypush.plugins Plugin action<class_stmt>TcpPlugin(Plugin)<block_start>""" Plugin for raw TCP communications. """<def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<line_sep>self._sockets={}<block_end><def_stmt>_connect self host:str port:int timeout:Optional[float]=<none><arrow>socket.socket<block_start>sd=self._sockets.get((host port))<if_stmt>sd<block_start><return>sd<block_end>sd=socket.socket(socket.AF_INET socket.SOCK_STREAM)<if_stmt>timeout<block_start>sd.settimeout(timeout)<block_end>sd.connect((host port))<line_sep>self._sockets[(host port)]=sd<line_sep><return>sd<block_end>@action<def_stmt>connect self host:str port:int timeout:Optional[float]=<none><block_start>""" Open a TCP connection. :param host: Host IP/name. :param port: TCP port. :param timeout: Connection timeout in seconds (default: None). """<line_sep>self._connect(host port timeout)<block_end>@action<def_stmt>close self host:str port:int<block_start>""" Close an active TCP connection. :param host: Host IP/name. :param port: TCP port. """<line_sep>sd=self._sockets.get((host port))<if_stmt><not>sd<block_start>self.logger.warning('Not connected to ({}, {})'.format(host port))<line_sep><return><block_end>sd.close()<block_end>@action<def_stmt>send self data:Union[bytes str] host:str port:int binary:bool=<false> timeout:Optional[float]=<none> recv_response:bool=<false> **recv_opts<block_start>""" Send data over a TCP connection. If the connection isn't active it will be created. :param data: Data to be sent, as bytes or string. :param host: Host IP/name. :param port: TCP port. :param binary: If set to True and ``data`` is a string then will be treated as base64-encoded binary input. :param timeout: Connection timeout in seconds (default: None). :param recv_response: If True then the action will wait for a response from the server before closing the connection. Note that ``recv_opts`` must be specified in this case - at least ``length``. """<if_stmt>isinstance(data list)<or>isinstance(data dict)<block_start>data=json.dumps(data)<block_end><if_stmt>isinstance(data str)<block_start>data=data.encode()<if_stmt>binary<block_start>data=base64.decodebytes(data)<block_end><block_end>sd=self._connect(host port timeout)<try_stmt><block_start>sd.send(data)<if_stmt>recv_response<block_start>recv_opts.update({'host':host 'port':port 'timeout':timeout 'binary':binary })<line_sep><return>self.recv(**recv_opts)<block_end><block_end><finally_stmt><block_start>self.close(host port)<block_end><block_end>@action<def_stmt>recv self length:int host:str port:int binary:bool=<false> timeout:Optional[float]=<none><arrow>str<block_start>""" Receive data from a TCP connection. If the connection isn't active it will be created. :param length: Maximum number of bytes to be received. :param host: Host IP/name. :param port: TCP port. :param binary: If set to True then the output will be base64-encoded, otherwise decoded as string. :param timeout: Connection timeout in seconds (default: None). """<line_sep>sd=self._connect(host port timeout)<try_stmt><block_start>data=sd.recv(length)<if_stmt>binary<block_start>data=base64.encodebytes(data).decode()<block_end><else_stmt><block_start>data=data.decode()<block_end><return>data<block_end><finally_stmt><block_start>self.close(host port)<block_end><block_end><block_end># vim:sw=4:ts=4:et:
"""mBuild bulk materials library."""<import_from_stmt>mbuild.lib.bulk_materials.amorphous_silica_bulk AmorphousSilicaBulk<line_sep>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. <import_stmt>os<import_stmt>glob<import_stmt>re<import_from_stmt>petridish.philly.container is_philly<import_from_stmt>petridish.app.multi_proc has_stopped<line_sep>""" Dir structures """<def_stmt>_updir d n=1<block_start><for_stmt>_ range(n)<block_start>d=os.path.dirname(d)<block_end><return>d<block_end>""" Philly specific dir structures regarding multiple trials of the same experiment """<def_stmt>previous_trial_log_root log_root<block_start><if_stmt><not>is_philly()<block_start><return><none><block_end># e.g., xx/application_xx-xx/logs/2/petridish_main log_root=os.path.normpath(log_root)<line_sep>triali=int(os.path.basename(_updir(log_root 1)))<if_stmt>triali<eq>1<block_start><return><none><block_end><return>os.path.join(_updir(log_root 2) str(triali-1) os.path.basename(log_root))<block_end><def_stmt>previous_trial_model_root model_root<block_start><if_stmt><not>is_philly()<block_start><return><none><block_end># e.g., xxx/application_xx-xx/models <return>os.path.normpath(model_root)<line_sep>#model_root = os.path.normpath(model_root) #triali = int(os.path.basename(model_root)) #if triali == 1: # return None #return os.path.join(_updir(model_root, 1), str(triali - 1)) <block_end>""" Helper functions to create names for communication over file-system. Direct connections are not available. """<def_stmt>_auto_script_fn i prefix=<none><block_start><if_stmt>prefix<is><not><none><block_start><return>'{}_{}.sh'.format(prefix i)<block_end><return>'{}.sh'.format(i)<block_end><def_stmt>_auto_script_dir log_dir is_critic is_log_dir_root=<false><block_start>n_updir=1+int(bool(is_critic))-int(bool(is_log_dir_root))#+ 2 * is_philly() <return>os.path.join(_updir(log_dir n_updir) 'auto_scripts')<block_end><def_stmt>_all_mi dir_root<block_start>all_mi=[]<for_stmt>dn os.listdir(dir_root)<block_start><try_stmt><block_start>mi=int(os.path.basename(dn.strip()))<line_sep>all_mi.append(mi)<block_end><except_stmt><block_start><continue><block_end><block_end><return>all_mi<block_end><def_stmt>_dn_to_mi dn<block_start><try_stmt><block_start>mi=int(os.path.basename(os.path.normpath(dn)))<line_sep><return>mi<block_end><except_stmt><block_start><return><none><block_end><block_end><def_stmt>_mi_to_dn dir_root model_iter<block_start><return>os.path.join(dir_root str(model_iter))<block_end><def_stmt>_dn_to_ci dn<block_start><try_stmt><block_start>ci=int(os.path.basename(os.path.normpath(dn)))<line_sep><return>ci<block_end><except_stmt><block_start><return><none><block_end><block_end><def_stmt>_ci_to_dn dir_root critic_iter queue_name<block_start><if_stmt>critic_iter<is><none><block_start><return>os.path.join(dir_root queue_name)<block_end><return>os.path.join(dir_root queue_name str(critic_iter))<block_end><def_stmt>_all_critic_dn dir_root queue_name<block_start><return>glob.glob(os.path.join(dir_root queue_name '*'))<block_end><def_stmt>_latest_ci log_dir_root model_dir_root queue_name<block_start>l_dns=_all_critic_dn(log_dir_root queue_name)<line_sep>max_ci=<none><for_stmt>dn l_dns<block_start>dn=os.path.normpath(dn.strip())<try_stmt># make sure the dirname is an int so it is actually a dir for critic <block_start>ci=int(os.path.basename(dn))<block_end><except_stmt><block_start><continue><block_end><if_stmt><not>has_stopped(dn)# make sure model is mark finished. 
<block_start><continue><block_end><if_stmt><not>os.path.exists(_ci_to_dn(model_dir_root ci queue_name))# make sure model exists <block_start><continue><block_end><if_stmt>max_ci<is><none><or>max_ci<l>ci<block_start>max_ci=ci<block_end><block_end><return>max_ci<block_end><def_stmt>_mi_info_save_fn log_dir_root<block_start><return>os.path.join(log_dir_root 'mi_info.npz')<block_end>
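# A minimal sketch of how the naming helpers above round-trip an iteration id
# (the dir_root value here is only a placeholder):
if __name__ == '__main__':
    demo_root = '/tmp/petridish_models'
    dn = _mi_to_dn(demo_root, 17)                 # '/tmp/petridish_models/17'
    assert _dn_to_mi(dn) == 17                    # basename parsed back into the int id
    assert _dn_to_mi('/tmp/not_an_int') is None   # non-numeric basenames yield None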
# -*- python -*- # Copyright (c) 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>gdb_test<class_stmt>CompleteTest(gdb_test.GdbTest)<block_start><def_stmt>test_complete self# Test that continue causes the debugged program to run to completion. <block_start>self.gdb.ResumeCommand('continue')<block_end><def_stmt>tearDown self# Test program should run to completion and return a special value. # Intentionally bypass superclass's tearDown as it assumes gdb exits first. <block_start>self.AssertSelLdrExits(expected_returncode=123)<line_sep>self.gdb.Quit()<line_sep>self.gdb.Wait()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>gdb_test.Main()<block_end>
"""Airwatch Collect Device information using API Key, Host, and CMSURL Authentication """<import_from_stmt>runners.helpers log<import_from_stmt>runners.helpers db<import_from_stmt>runners.helpers.dbconfig ROLE<as>SA_ROLE<import_from_stmt>datetime datetime<import_stmt>requests<import_from_stmt>urllib.error HTTPError<import_from_stmt>.utils yaml_dump<line_sep>PAGE_SIZE=500<line_sep>CONNECTION_OPTIONS=[{'name':'api_key' 'title':"Airwatch API Key" 'prompt':"Your Airwatch API Key" 'type':'str' 'secret':<true> 'required':<true> } {'name':'host_airwatch' 'title':"Airwatch Host" 'prompt':"Your Airwatch Host" 'type':'str' 'secret':<true> 'required':<true> } {'name':'device_auth' 'title':"Device URL" 'prompt':"Your Airwatch CMS Auth for Device URL" 'type':'str' 'secret':<true> 'required':<true> } {'name':'custom_attributes_auth' 'title':"Custom Attributes URL" 'prompt':"Your Airwatch CMS Auth for Custom Attributes URL" 'type':'str' 'secret':<true> 'required':<true> } ]<line_sep>LANDING_TABLE_COLUMNS_DEVICE=[('INSERT_ID' 'NUMBER IDENTITY START 1 INCREMENT 1') ('SNAPSHOT_AT' 'TIMESTAMP_LTZ(9)') ('RAW' 'VARIANT') ('EAS_IDS' 'VARIANT') ('UDID' 'VARCHAR(256)') ('SERIAL_NUMBER' 'VARCHAR(256)') ('MAC_ADDRESS' 'VARCHAR(256)') ('IMEI' 'VARCHAR(256)') ('EAS_ID' 'VARCHAR(256)') ('ASSET_NUMBER' 'VARCHAR(256)') ('DEVICE_FRIENDLY_NAME' 'VARCHAR(256)') ('LOCATION_GROUP_ID' 'VARIANT') ('LOCATION_GROUP_NAME' 'VARCHAR(256)') ('USER_ID' 'VARIANT') ('USER_NAME' 'VARCHAR(256)') ('DATA_PROTECTION_STATUS' 'NUMBER(38,0)') ('USER_EMAIL_ADDRESS' 'VARCHAR(256)') ('OWNERSHIP' 'VARCHAR(256)') ('PLATFORM_ID' 'VARIANT') ('PLATFORM' 'VARCHAR(256)') ('MODEL_ID' 'VARIANT') ('MODEL' 'VARCHAR(256)') ('OPERATING_SYSTEM' 'VARCHAR(256)') ('PHONE_NUMBER' 'VARCHAR(256)') ('LAST_SEEN' 'TIMESTAMP_LTZ(9)') ('ENROLLMENT_STATUS' 'VARCHAR(256)') ('COMPLIANCE_STATUS' 'VARCHAR(256)') ('COMPROMISED_STATUS' 'BOOLEAN') ('LAST_ENROLLED_ON' 'TIMESTAMP_LTZ(9)') ('LAST_COMPLIANCE_CHECK_ON' 'TIMESTAMP_LTZ(9)') ('LAST_COMPROMISED_CHECK_ON' 'TIMESTAMP_LTZ(9)') ('IS_SUPERVISED' 'BOOLEAN') ('VIRTUAL_MEMORY' 'NUMBER(38,0)') ('DEVICE_CAPACITY' 'FLOAT') ('AVAILABLE_DEVICE_CAPACITY' 'FLOAT') ('IS_DEVICE_DND_ENABLED' 'BOOLEAN') ('IS_DEVICE_LOCATOR_ENABLED' 'BOOLEAN') ('IS_CLOUD_BACKUP_ENABLED' 'BOOLEAN') ('IS_ACTIVATION_LOCK_ENABLED' 'BOOLEAN') ('IS_NETWORKTETHERED' 'BOOLEAN') ('BATTERY_LEVEL' 'VARCHAR(256)') ('IS_ROAMING' 'BOOLEAN') ('SYSTEM_INTEGRITY_PROTECTION_ENABLED' 'BOOLEAN') ('PROCESSOR_ARCHITECTURE' 'NUMBER(38,0)') ('TOTAL_PHYSICAL_MEMORY' 'NUMBER(38,0)') ('AVAILABLE_PHYSICAL_MEMORY' 'NUMBER(38,0)') ('DEVICE_CELLULAR_NETWORK_INFO' 'VARIANT') ('ENROLLMENT_USER_UUID' 'VARCHAR(256)') ('ID' 'VARIANT') ('UUID' 'VARCHAR(256)') ]<line_sep>LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES=[('INSERT_ID' 'NUMBER IDENTITY START 1 INCREMENT 1') ('SNAPSHOT_AT' 'TIMESTAMP_LTZ(9)') ('RAW' 'VARIANT') ('DEVICE_ID' 'INT') ('UDID' 'VARCHAR(256)') ('SERIAL_NUMBER' 'VARCHAR(256)') ('ENROLLMENT_USER_NAME' 'VARCHAR(256)') ('ASSET_NUMBER' 'VARCHAR(256)') ('CUSTOM_ATTRIBUTES' 'VARIANT') ]<def_stmt>get_data url:str cms_auth:str api_key:str params:dict={}<arrow>dict<block_start>headers:dict={'Content-Type':'application/json' 'aw-tenant-code':api_key 'Accept':'application/json' 'Authorization':cms_auth }<try_stmt><block_start>log.debug(f"Preparing GET: url={url} with params={params}")<line_sep>req=requests.get(url params=params headers=headers)<line_sep>req.raise_for_status()<block_end><except_stmt>HTTPError<as>http_err<block_start>log.error(f"Error GET: url={url}")<line_sep>log.error(f"HTTP 
error occurred: {http_err}")<line_sep><raise><block_end><return>req.json()<block_end><def_stmt>connect connection_name options<block_start>landing_table_device=f'data.airwatch_devices_{connection_name}_device_connection'<line_sep>landing_table_custom_attributes=(f'data.airwatch_devices_{connection_name}_custom_attributes_connection')<line_sep>comment=yaml_dump(module='airwatch_devices' **options)<line_sep>db.create_table(name=landing_table_device cols=LANDING_TABLE_COLUMNS_DEVICE comment=comment rw_role=SA_ROLE)<line_sep>db.create_table(name=landing_table_custom_attributes cols=LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES comment=comment rw_role=SA_ROLE)<line_sep><return>{'newStage':'finalized' 'newMessage':"Airwatch ingestion tables created!"}<block_end><def_stmt>ingest table_name options<block_start>host_airwatch=options['host_airwatch']<line_sep>api_key=options['api_key']<line_sep>device_auth=options['device_auth']<line_sep>custom_attributes_auth=options['custom_attributes_auth']<line_sep>ingest_type=('device'<if>table_name.endswith('_DEVICE_CONNECTION')<else>'custom_attributes')<line_sep>timestamp=datetime.utcnow()<line_sep>landing_table=f'data.{table_name}'<if_stmt>ingest_type<eq>'device'<block_start>device_params:dict={'PageSize':PAGE_SIZE 'Page':0}<line_sep>url=f'https://{host_airwatch}/api/mdm/devices/search'<while_stmt>1<block_start>result:dict=get_data(url device_auth api_key device_params)<line_sep>devices=result['Devices']<line_sep>db.insert(landing_table values=[(timestamp device device.get('EasIds') device.get('Udid') device.get('SerialNumber') device.get('MacAddress') device.get('Imei') device.get('EasId') device.get('AssetNumber') device.get('DeviceFriendlyName') device.get('LocationGroupId') device.get('LocationGroupName') device.get('UserId') device.get('UserName') device.get('DataProtectionStatus') device.get('UserEmailAddress') device.get('Ownership') device.get('PlatformId') device.get('Platform') device.get('ModelId') device.get('Model') device.get('OperatingSystem') device.get('PhoneNumber') device.get('LastSeen') device.get('EnrollmentStatus') device.get('ComplianceStatus') device.get('CompromisedStatus') device.get('LastEnrolledOn') device.get('LastComplianceCheckOn') device.get('LastCompromisedCheckOn') device.get('IsSupervised') device.get('VirtualMemory') device.get('DeviceCapacity') device.get('AvailableDeviceCapacity') device.get('IsDeviceDNDEnabled') device.get('IsDeviceLocatorEnabled') device.get('IsCloudBackupEnabled') device.get('IsActivationLockEnabled') device.get('IsNetworkTethered') device.get('BatteryLevel') device.get('IsRoaming') device.get('SystemIntegrityProtectionEnabled') device.get('ProcessorArchitecture') device.get('TotalPhysicalMemory') device.get('AvailablePhysicalMemory') device.get('DeviceCellularNetworkInfo') device.get('EnrollmentUserUuid') device.get('Id') device.get('Uuid') )<for>device devices] select=db.derive_insert_select(LANDING_TABLE_COLUMNS_DEVICE) columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS_DEVICE) )<line_sep>log.info(f'Inserted {len(devices)} rows ({landing_table}).')<line_sep><yield>len(devices)<line_sep>processed_total=(result['Page']+1)<times>result['PageSize']<if_stmt>processed_total<ge>result['Total']<block_start><break><block_end>device_params['Page']<augadd>1<block_end><block_end><else_stmt><block_start>custom_device_params:dict={'PageSize':PAGE_SIZE 'Page':0}<line_sep>url=f'https://{host_airwatch}/api/mdm/devices/customattribute/search'<while_stmt>1<block_start>result:dict=get_data(url custom_attributes_auth 
api_key custom_device_params)<line_sep>device_attributes=result['Devices']<line_sep>db.insert(landing_table values=[(timestamp device_attr device_attr.get('DeviceId') device_attr.get('Udid') device_attr.get('SerialNumber') device_attr.get('EnrollmentUserName') device_attr.get('AssetNumber') device_attr.get('CustomAttributes') )<for>device_attr device_attributes] select=db.derive_insert_select(LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES) columns=db.derive_insert_columns(LANDING_TABLE_COLUMNS_CUSTOM_ATTRIBUTES) )<line_sep>log.info(f'Inserted {len(device_attributes)} rows ({landing_table}).')<line_sep><yield>len(device_attributes)<line_sep>processed_total=(result['Page']+1)<times>result['PageSize']<if_stmt>processed_total<ge>result['Total']<block_start><break><block_end>custom_device_params['Page']<augadd>1<block_end><block_end><block_end>
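# Both ingest branches above page through the Airwatch API the same way: each response
# reports Page, PageSize and Total, and the loop stops once (Page + 1) * PageSize covers
# Total. A distilled sketch of that pattern (fetch_page stands in for get_data called
# with a 'Page' parameter; the keys match the responses handled above):
def paged_records(fetch_page, record_key='Devices'):
    page = 0
    while True:
        result = fetch_page(page)
        yield from result[record_key]
        if (result['Page'] + 1) * result['PageSize'] >= result['Total']:
            break
        page += 1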
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for glazier.lib.logs."""<import_stmt>os<import_stmt>zipfile<import_from_stmt>absl.testing absltest<import_from_stmt>glazier.lib constants<import_from_stmt>glazier.lib file_util<import_from_stmt>glazier.lib logs<import_stmt>mock<import_from_stmt>pyfakefs.fake_filesystem_unittest Patcher<line_sep>TEST_ID='1A19SEL90000R90DZN7A-1234567'<class_stmt>LoggingTest(absltest.TestCase)<block_start><def_stmt>testCollect self<block_start><with_stmt>Patcher()<as>patcher<block_start>files=[os.path.join(constants.SYS_LOGS_PATH 'log1.log') os.path.join(constants.SYS_LOGS_PATH 'log2.log') ]<line_sep>patcher.fs.create_dir(constants.SYS_LOGS_PATH)<line_sep>patcher.fs.create_file(files[0] contents='log1 content')<line_sep>patcher.fs.create_file(files[1] contents='log2 content')<line_sep>logs.Collect(r'C:\glazier.zip')<with_stmt>zipfile.ZipFile(r'C:\glazier.zip' 'r')<as>out<block_start><with_stmt>out.open(files[1].lstrip('/'))<as>f2<block_start>self.assertEqual(f2.read() b'log2 content')<block_end><block_end><block_end><block_end><def_stmt>testCollectIOErr self<block_start><with_stmt>Patcher()<as>patcher<block_start>patcher.fs.create_dir(constants.SYS_LOGS_PATH)<with_stmt>self.assertRaises(logs.LogError)<block_start>logs.Collect(constants.SYS_LOGS_PATH)<block_end><block_end><block_end>@mock.patch.object(zipfile.ZipFile 'write' autospec=<true>)<def_stmt>testCollectValueErr self wr<block_start>wr.side_effect=ValueError('ZIP does not support timestamps before 1980')<with_stmt>Patcher()<as>patcher<block_start>patcher.fs.create_dir(constants.SYS_LOGS_PATH)<line_sep>patcher.fs.create_file(os.path.join(constants.SYS_LOGS_PATH 'log1.log'))<with_stmt>self.assertRaises(logs.LogError)<block_start>logs.Collect(r'C:\glazier.zip')<block_end><block_end><block_end>@mock.patch.object(logs.winpe 'check_winpe' autospec=<true>)<def_stmt>testGetLogsPath self wpe# WinPE <block_start>wpe.return_value=<true><line_sep>self.assertEqual(logs.GetLogsPath() logs.constants.WINPE_LOGS_PATH)<line_sep># Host wpe.return_value=<false><line_sep>self.assertEqual(logs.GetLogsPath() logs.constants.SYS_LOGS_PATH)<block_end>@mock.patch.object(file_util 'CreateDirectories')@mock.patch.object(logs.buildinfo.BuildInfo 'ImageID' autospec=<true>)@mock.patch.object(logs.winpe 'check_winpe' autospec=<true>)@mock.patch.object(logs.logging 'FileHandler')<def_stmt>testSetup self fh wpe ii create_dir<block_start>ii.return_value=TEST_ID<line_sep>wpe.return_value=<false><line_sep>logs.Setup()<line_sep>create_dir.assert_called_with(r'%s\glazier.log'%logs.constants.SYS_LOGS_PATH)<line_sep>fh.assert_called_with(r'%s\glazier.log'%logs.constants.SYS_LOGS_PATH)<block_end>@mock.patch.object(file_util 'CreateDirectories')@mock.patch.object(logs.buildinfo.BuildInfo 'ImageID' autospec=<true>)@mock.patch.object(logs.winpe 'check_winpe' autospec=<true>)@mock.patch.object(logs.logging 'FileHandler')<def_stmt>testSetupError self fh wpe ii 
create_dir<block_start>ii.return_value=TEST_ID<line_sep>wpe.return_value=<false><line_sep>fh.side_effect=IOError<with_stmt>self.assertRaises(logs.LogError)<block_start>logs.Setup()<block_end>self.assertTrue(create_dir.called)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_from_stmt>numpy.testing._private.utils assert_allclose<import_from_stmt>sysidentpy.polynomial_basis PolynomialNarmax<import_from_stmt>sysidentpy.utils.generate_data get_miso_data get_siso_data<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_almost_equal assert_array_equal<import_from_stmt>numpy.testing assert_raises<import_from_stmt>sysidentpy.polynomial_basis SimulatePolynomialNarmax<def_stmt>test_get_index_from_regressor_code <block_start>s=SimulatePolynomialNarmax()<line_sep>model=np.array([[1001 0] # y(k-1) [2001 1001] # x1(k-1)y(k-1) [2002 0] # x1(k-2) ])<line_sep>regressor_space=np.array([[0 0] [1001 0] [2001 0] [2002 0] [1001 1001] [2001 1001] [2002 1001] [2001 2001] [2002 2001] [2002 2002] ])<line_sep>index=s._get_index_from_regressor_code(regressor_code=regressor_space model_code=model)<assert_stmt>(index<eq>np.array([1 3 5])).all()<block_end><def_stmt>test_list_output_regressor <block_start>s=SimulatePolynomialNarmax()<line_sep>model=np.array([[1001 0] # y(k-1) [2001 1001] # x1(k-1)y(k-1) [2002 0] # x1(k-2) ])<line_sep>y_code=s._list_output_regressor_code(model)<assert_stmt>(y_code<eq>np.array([1001 1001])).all()<block_end><def_stmt>test_list_input_regressor <block_start>s=SimulatePolynomialNarmax()<line_sep>model=np.array([[1001 0] # y(k-1) [2001 1001] # x1(k-1)y(k-1) [2002 0] # x1(k-2) ])<line_sep>x_code=s._list_input_regressor_code(model)<assert_stmt>(x_code<eq>np.array([2001 2002])).all()<block_end><def_stmt>test_get_lag_from_regressor_code <block_start>s=SimulatePolynomialNarmax()<line_sep>list_regressor1=np.array([2001 2002])<line_sep>list_regressor2=np.array([1004 1002])<line_sep>max_lag1=s._get_lag_from_regressor_code(list_regressor1)<line_sep>max_lag2=s._get_lag_from_regressor_code(list_regressor2)<assert_stmt>max_lag1<eq>2<assert_stmt>max_lag2<eq>4<block_end><def_stmt>test_simulate <block_start>x_train,x_valid,y_train,y_valid=get_siso_data(n=1000 colored_noise=<false> sigma=0.001 train_percentage=90)<line_sep>s=SimulatePolynomialNarmax()<line_sep># the model must be a numpy array model=np.array([[1001 0] # y(k-1) [2001 1001] # x1(k-1)y(k-1) [2002 0] # x1(k-2) ])<line_sep># theta must be a numpy array of shape (n, 1) where n is the number of regressors theta=np.array([[0.2 0.9 0.1]]).T<line_sep>yhat,results=s.simulate(X_test=x_valid y_test=y_valid model_code=model theta=theta plot=<false>)<assert_stmt>yhat.shape<eq>(100 1)<assert_stmt>len(results)<eq>3<block_end><def_stmt>test_simulate_theta <block_start>x_train,x_valid,y_train,y_valid=get_siso_data(n=1000 colored_noise=<false> sigma=0.001 train_percentage=90)<line_sep>s=SimulatePolynomialNarmax(estimate_parameter=<true>)<line_sep># the model must be a numpy array model=np.array([[1001 0] # y(k-1) [2001 1001] # x1(k-1)y(k-1) [2002 0] # x1(k-2) ])<line_sep>yhat,results=s.simulate(X_train=x_train y_train=y_train X_test=x_valid y_test=y_valid model_code=model plot=<false> )<line_sep>theta=np.array([[0.2 0.9 0.1]]).T<line_sep>assert_almost_equal(s.theta theta decimal=1)<block_end><def_stmt>test_estimate_parameter <block_start>assert_raises(TypeError SimulatePolynomialNarmax estimmate_parameter=1)<block_end>
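# The integer regressor codes used throughout these tests pack a variable id and a lag
# together: the comments above read 1001 as y(k-1), 2001 as x1(k-1) and 2002 as x1(k-2),
# so code // 1000 selects the variable (1 = output y, 2 = first input) and code % 1000
# is the lag, with 0 standing for "no regressor" in a code pair. A small decoder written
# against that convention, for readability only (it is not used by the tests):
def decode_regressor(code):
    if code == 0:
        return 'const'
    var_id, lag = divmod(int(code), 1000)
    name = 'y' if var_id == 1 else 'x{}'.format(var_id - 1)
    return '{}(k-{})'.format(name, lag)

# decode_regressor(1001) -> 'y(k-1)', decode_regressor(2002) -> 'x1(k-2)'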
<import_from_stmt>office365.planner.tasks.check_list_item PlannerChecklistItem<import_from_stmt>office365.runtime.client_value_collection ClientValueCollection<class_stmt>PlannerChecklistItems(ClientValueCollection)<block_start>"""The plannerChecklistItemCollection resource represents the collection of checklist items on a task. It is an Open Type. It is part of the task details object. The value in the property-value pair is the checklistItem object. """<def_stmt>__init__ self initial_values=<none><block_start>super(PlannerChecklistItems self).__init__(PlannerChecklistItem initial_values)<block_end><block_end>
<import_stmt>os<import_stmt>random<import_stmt>string<import_stmt>time<import_from_stmt>flask_testing TestCase<import_from_stmt>cellphonedb.src.app.cellphonedb_app cellphonedb_app<import_from_stmt>cellphonedb.src.local_launchers.local_collector_launcher LocalCollectorLauncher<import_from_stmt>cellphonedb.utils utils<class_stmt>CellphoneFlaskTestCase(TestCase)<block_start>@staticmethod<def_stmt>fixtures_dir <block_start>current_dir=os.path.dirname(os.path.realpath(__file__))<line_sep>fixtures_dir='{}/fixtures'.format(current_dir)<line_sep><return>fixtures_dir<block_end>@staticmethod<def_stmt>reset_db <block_start>cellphonedb_app.cellphonedb.database_manager.database.drop_everything()<line_sep>cellphonedb_app.cellphonedb.database_manager.database.create_all()<block_end><def_stmt>populate_db self<block_start>LocalCollectorLauncher().all('collect_protein.csv' 'collect_gene.csv' 'collect_complex.csv' 'collect_interaction.csv' self.fixtures_dir())<block_end>@staticmethod<def_stmt>remove_file file<block_start>os.remove(file)<block_end>@staticmethod<def_stmt>rand_string digits=5<block_start><return>''.join(random.choice(string.ascii_uppercase+string.digits)<for>_ range(digits))<block_end>@staticmethod<def_stmt>get_test_filename original_namefile extension prefix='TESTING'<block_start>namefile='{}_{}_{}_{}.{}'.format(prefix original_namefile int(time.time()) CellphoneFlaskTestCase.rand_string() extension)<line_sep><return>namefile<block_end><def_stmt>assert_file_not_empty self file message=''<block_start><if_stmt><not>message<block_start>message='File {} is empty'.format(file)<block_end>read_data=utils.read_data_table_from_file(file)<line_sep>self.assertFalse(read_data.empty message)<block_end><def_stmt>assert_file_exist self path_file message=''<block_start><if_stmt><not>message<block_start>message='File {} didnt exist'.format(path_file)<block_end>self.assertTrue(os.path.isfile(path_file) message)<block_end><block_end>
""" 爬虫的用途:12306抢票,短信轰炸,数据获取 分类:通用爬虫:是搜索引擎抓取系统的重要部分,主要是把互联网上的页面下载到本地作为一个镜像备份 聚焦爬虫:对特定需求进行数据获取,会对页面的内容进行筛选,保证只抓取和需求相关的网页信息 Http:端口号80 Https: 端口号443 使用第三方的requests进行请求:支持python2和3,在urllib中2和3的语法有些不一样 """<import_stmt>requests<line_sep>kw={'wd':'长城'}<line_sep># headers伪装成一个浏览器进行的请求 # 不加这个的话,网页会识别出请求来自一个python而不是浏览器的正常请求 headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}<line_sep>response=requests.get("https://www.baidu.com/s?" params=kw headers=headers)<line_sep># 返回的是unicode格式解码的str的数据 print(response.text)<line_sep># 返回字节流的二进制数据,并根据unicode进行解码 print(response.content)<line_sep>print(response.content.decode())<line_sep># 返回完整的url地址 print(response.url)<line_sep># 返回字符编码 print(response.encoding)<line_sep># 返回状态吗 print(response.status_code)<line_sep># 保存响应结果 <with_stmt>open('baidu.html' 'wb')<as>f<block_start>f.write(response.content)<block_end>
# -*- coding: utf-8 -*- <import_stmt>explainaboard.error_analysis<as>ea<import_stmt>numpy<import_stmt>os<def_stmt>get_aspect_value sample_list dict_aspect_func<block_start>dict_span2aspect_val={}<line_sep>dict_span2aspect_val_pred={}<for_stmt>aspect,fun dict_aspect_func.items()<block_start>dict_span2aspect_val[aspect]={}<line_sep>dict_span2aspect_val_pred[aspect]={}<block_end># maintain it for print error case dict_sid2sent={}<line_sep>sample_id=0<for_stmt>info_list sample_list# # # # word_list = word_segment(sent).split(" ") # Sentence Entities Paragraph True Relation Label Predicted Relation Label # Sentence Length Paragraph Length Number of Entities in Ground Truth Relation Average Distance of Entities <block_start>sent,entities,paragraph,true_label,pred_label,sent_length,para_length,n_entity,avg_distance=info_list<line_sep>dict_sid2sent[str(sample_id)]=ea.format4json2(entities+"|||"+sent)<line_sep>sent_pos=ea.tuple2str((sample_id true_label))<line_sep>sent_pos_pred=ea.tuple2str((sample_id pred_label))<line_sep># Sentence Length: sentALen aspect="sLen"<if_stmt>aspect<in>dict_aspect_func.keys()<block_start>dict_span2aspect_val[aspect][sent_pos]=float(sent_length)<line_sep>dict_span2aspect_val_pred[aspect][sent_pos_pred]=float(sent_length)<block_end># Paragraph Length: pLen aspect="pLen"<if_stmt>aspect<in>dict_aspect_func.keys()<block_start>dict_span2aspect_val[aspect][sent_pos]=float(para_length)<line_sep>dict_span2aspect_val_pred[aspect][sent_pos_pred]=float(para_length)<block_end># Number of Entity: nEnt aspect="nEnt"<if_stmt>aspect<in>dict_aspect_func.keys()<block_start>dict_span2aspect_val[aspect][sent_pos]=float(n_entity)<line_sep>dict_span2aspect_val_pred[aspect][sent_pos_pred]=float(n_entity)<block_end># Average Distance: avgDist aspect="avgDist"<if_stmt>aspect<in>dict_aspect_func.keys()<block_start>dict_span2aspect_val[aspect][sent_pos]=float(avg_distance)<line_sep>dict_span2aspect_val_pred[aspect][sent_pos_pred]=float(avg_distance)<block_end># Tag: tag aspect="tag"############## MUST Be Gold Tag for text classification task <if_stmt>aspect<in>dict_aspect_func.keys()<block_start>dict_span2aspect_val[aspect][sent_pos]=true_label<line_sep>dict_span2aspect_val_pred[aspect][sent_pos_pred]=true_label<block_end>sample_id<augadd>1<block_end># print(dict_span2aspect_val["bleu"]) <return>dict_span2aspect_val dict_span2aspect_val_pred dict_sid2sent<block_end><def_stmt>evaluate task_type="ner" analysis_type="single" systems=[] dataset_name='dataset_name' model_name='model_name' output_filename="./output.json" is_print_ci=<false> is_print_case=<false> is_print_ece=<false><block_start>path_text=systems[0]<if>analysis_type<eq>"single"<else>""<line_sep>path_comb_output="model_name"+"/"+path_text.split("/")[-1]<line_sep>dict_aspect_func,dict_precomputed_path,obj_json=ea.load_task_conf(task_dir=os.path.dirname(__file__))<line_sep>sample_list,sent_list,entity_list,true_list,pred_list=file_to_list(path_text)<line_sep>error_case_list=[]<if_stmt>is_print_case<block_start>error_case_list=get_error_case(sent_list entity_list true_list pred_list)<line_sep>print(" -*-*-*- the number of error casse:\t" len(error_case_list))<block_end>dict_span2aspect_val,dict_span2aspect_val_pred,dict_sid2sent=get_aspect_value(sample_list dict_aspect_func)<line_sep>holistic_performance=ea.accuracy(true_list pred_list)<line_sep>holistic_performance=format(holistic_performance '.3g')<line_sep># Confidence Interval of Holistic Performance confidence_low,confidence_up=0 
0<if_stmt>is_print_ci<block_start>confidence_low,confidence_up=ea.compute_confidence_interval_acc(true_list pred_list n_times=1000)<block_end>dict_span2aspect_val,dict_span2aspect_val_pred,dict_sid2sent=get_aspect_value(sample_list dict_aspect_func)<line_sep>print("------------------ Holistic Result----------------------")<line_sep>print(holistic_performance)<line_sep># print(f1(list_true_tags_token, list_pred_tags_token)["f1"]) dict_bucket2span={}<line_sep>dict_bucket2span_pred={}<line_sep>dict_bucket2f1={}<line_sep>aspect_names=[]<for_stmt>aspect,func dict_aspect_func.items()# print(aspect, dict_span2aspect_val[aspect]) <block_start>dict_bucket2span[aspect]=ea.select_bucketing_func(func[0] func[1] dict_span2aspect_val[aspect])<line_sep># print(aspect, dict_bucket2span[aspect]) # exit() dict_bucket2span_pred[aspect]=ea.bucket_attribute_specified_bucket_interval(dict_span2aspect_val_pred[aspect] dict_bucket2span[aspect].keys())<line_sep># dict_bucket2span_pred[aspect] = __select_bucketing_func(func[0], func[1], dict_span2aspect_val_pred[aspect]) dict_bucket2f1[aspect]=get_bucket_acc_with_error_case(dict_bucket2span[aspect] dict_bucket2span_pred[aspect] dict_sid2sent is_print_ci is_print_case)<line_sep>aspect_names.append(aspect)<block_end>print("aspect_names: " aspect_names)<line_sep>print("------------------ Breakdown Performance")<for_stmt>aspect dict_aspect_func.keys()<block_start>ea.print_dict(dict_bucket2f1[aspect] aspect)<block_end>print("")<line_sep># Calculate databias w.r.t numeric attributes dict_aspect2bias={}<for_stmt>aspect,aspect2Val dict_span2aspect_val.items()<block_start><if_stmt>type(list(aspect2Val.values())[0])<ne>type("string")<block_start>dict_aspect2bias[aspect]=numpy.average(list(aspect2Val.values()))<block_end><block_end>print("------------------ Dataset Bias")<for_stmt>k,v dict_aspect2bias.items()<block_start>print(k+":\t"+str(v))<block_end>print("")<line_sep>dict_fine_grained={}<for_stmt>aspect,metadata dict_bucket2f1.items()<block_start>dict_fine_grained[aspect]=[]<for_stmt>bucket_name,v metadata.items()# print("---------debug--bucket name old---") # print(bucket_name) <block_start>bucket_name=ea.beautify_interval(bucket_name)<line_sep># print("---------debug--bucket name new---") # print(bucket_name) # bucket_value = format(v[0]*100,'.4g') bucket_value=format(v[0] '.4g')<line_sep>n_sample=v[1]<line_sep>confidence_low_bucket=format(v[2] '.4g')<line_sep>confidence_up_bucket=format(v[3] '.4g')<line_sep>bucket_error_case=v[4]<line_sep># instantiation dict_fine_grained[aspect].append({"bucket_name":bucket_name "bucket_value":bucket_value "num":n_sample "confidence_low":confidence_low_bucket "confidence_up":confidence_up_bucket "bucket_error_case":bucket_error_case})<block_end><block_end>obj_json["task"]=task_type<line_sep>obj_json["data"]["language"]="English"<line_sep>obj_json["data"]["name"]=dataset_name<line_sep>obj_json["data"]["bias"]=dict_aspect2bias<line_sep>obj_json["data"]["output"]=path_comb_output<line_sep>obj_json["model"]["name"]=model_name<line_sep>obj_json["model"]["results"]["overall"]["error_case"]=error_case_list<line_sep>obj_json["model"]["results"]["overall"]["performance"]=holistic_performance<line_sep>obj_json["model"]["results"]["overall"]["confidence_low"]=confidence_low<line_sep>obj_json["model"]["results"]["overall"]["confidence_up"]=confidence_up<line_sep>obj_json["model"]["results"]["fine_grained"]=dict_fine_grained<line_sep><raise>NotImplementedError('RE is not fully implemented yet, see below')<line_sep># ece = 0 # dic_calibration = 
None # if is_print_ece: # ece, dic_calibration = process_all(path_text, # size_of_bin=10, dataset=corpus_type, model=model_name) # obj_json["model"]["results"]["calibration"] = dic_calibration # # print(dic_calibration) # ea.save_json(obj_json, output_filename) <block_end># # def main(): # # parser = argparse.ArgumentParser(description='Interpretable Evaluation for NLP') # # # parser.add_argument('--task', type=str, required=True, # help="absa") # # parser.add_argument('--ci', type=str, required=False, default= False, # help="True|False") # # parser.add_argument('--case', type=str, required=False, default= False, # help="True|False") # # parser.add_argument('--ece', type=str, required=False, default= False, # help="True|False") # # # parser.add_argument('--type', type=str, required=False, default="single", # help="analysis type: single|pair|combine") # parser.add_argument('--systems', type=str, required=True, # help="the directories of system outputs. Multiple one should be separated by comma, for example, system1,system2 (no space)") # # parser.add_argument('--output', type=str, required=True, # help="analysis output file") # args = parser.parse_args() # # # is_print_ci = args.ci # is_print_case = args.case # is_print_ece = args.ece # # task = args.task # analysis_type = args.type # systems = args.systems.split(",") # output = args.output # # # print("task", task) # print("type", analysis_type) # print("systems", systems) # # sample_list = file_to_list_re(systems[0]) # # print(sample_list[0]) # evaluate(task_type=task, analysis_type=analysis_type, systems=systems, output=output, is_print_ci = is_print_ci, is_print_case = is_print_case, is_print_ece = is_print_ece) # # # python eval_spec.py --task re --systems ./test_re.tsv --output ./a.json # if __name__ == '__main__': # main() <def_stmt>get_bucket_acc_with_error_case dict_bucket2span dict_bucket2span_pred dict_sid2sent is_print_ci is_print_case# The structure of span_true or span_pred # 2345|||Positive # 2345 represents sentence id # Positive represents the "label" of this instance <block_start>dict_bucket2f1={}<for_stmt>bucket_interval,spans_true dict_bucket2span.items()<block_start>spans_pred=[]<if_stmt>bucket_interval<not><in>dict_bucket2span_pred.keys()<block_start><raise>ValueError("Predict Label Bucketing Errors")<block_end><else_stmt><block_start>spans_pred=dict_bucket2span_pred[bucket_interval]<block_end># loop over samples from a given bucket error_case_bucket_list=[]<if_stmt>is_print_case<block_start><for_stmt>info_true,info_pred zip(spans_true spans_pred)<block_start>sid_true,label_true=info_true.split("|||")<line_sep>sid_pred,label_pred=info_pred.split("|||")<if_stmt>sid_true<ne>sid_pred<block_start><continue><block_end>sent_entities=dict_sid2sent[sid_true]<if_stmt>label_true<ne>label_pred<block_start>error_case_info=label_true+"|||"+label_pred+"|||"+sent_entities<line_sep>error_case_bucket_list.append(error_case_info)<block_end><block_end><block_end>accuracy_each_bucket=ea.accuracy(spans_pred spans_true)<line_sep>confidence_low,confidence_up=0 0<if_stmt>is_print_ci<block_start>confidence_low,confidence_up=ea.compute_confidence_interval_acc(spans_pred spans_true)<block_end>dict_bucket2f1[bucket_interval]=[accuracy_each_bucket len(spans_true) confidence_low confidence_up error_case_bucket_list]<block_end><return>ea.sort_dict(dict_bucket2f1)<block_end><def_stmt>get_error_case sent_list entity_list true_label_list pred_label_list<block_start>error_case_list=[]<for_stmt>sent,entities,true_label,pred_label zip(sent_list 
entity_list true_label_list pred_label_list)<block_start><if_stmt>true_label<ne>pred_label<block_start>error_case_list.append(true_label+"|||"+pred_label+"|||"+entities+"|||"+ea.format4json2(sent))<block_end><block_end><return>error_case_list<block_end><def_stmt>file_to_list file_path<block_start>sample_list=[]<line_sep>fin=open(file_path "r")<line_sep>true_list=[]<line_sep>pred_list=[]<line_sep>sent_list=[]<line_sep>entity_list=[]<for_stmt>idx,line enumerate(fin)<block_start><if_stmt>idx<eq>0<block_start><continue><block_end>info_list=line.rstrip("\n").split("\t")<line_sep>sample_list.append([info<for>info info_list])<line_sep>true_list.append(info_list[3])<line_sep>pred_list.append(info_list[4])<line_sep>sent_list.append(info_list[0])<line_sep>entity_list.append(info_list[1])<block_end><return>sample_list sent_list entity_list true_list pred_list<block_end>
"""Shows how you can implement a simple WebSocket echo server using the wsproto library. """<import_from_stmt>werkzeug.exceptions InternalServerError<import_from_stmt>werkzeug.serving run_simple<import_from_stmt>werkzeug.wrappers Request<import_from_stmt>werkzeug.wrappers Response<import_from_stmt>wsproto ConnectionType<import_from_stmt>wsproto WSConnection<import_from_stmt>wsproto.events AcceptConnection<import_from_stmt>wsproto.events CloseConnection<import_from_stmt>wsproto.events Message<import_from_stmt>wsproto.events Ping<import_from_stmt>wsproto.events Request<as>WSRequest<import_from_stmt>wsproto.events TextMessage<import_from_stmt>wsproto.frame_protocol CloseReason<line_sep>@Request.application<def_stmt>websocket request# The underlying socket must be provided by the server. Gunicorn and # Werkzeug's dev server are known to support this. <block_start>stream=request.environ.get("werkzeug.socket")<if_stmt>stream<is><none><block_start>stream=request.environ.get("gunicorn.socket")<block_end><if_stmt>stream<is><none><block_start><raise>InternalServerError()<block_end># Initialize the wsproto connection. Need to recreate the request # data that was read by the WSGI server already. ws=WSConnection(ConnectionType.SERVER)<line_sep>in_data=b"GET %s HTTP/1.1\r\n"%request.path.encode("utf8")<for_stmt>header,value request.headers.items()<block_start>in_data<augadd>f"{header}: {value}\r\n".encode()<block_end>in_data<augadd>b"\r\n"<line_sep>ws.receive_data(in_data)<line_sep>running=<true><while_stmt><true><block_start>out_data=b""<for_stmt>event ws.events()<block_start><if_stmt>isinstance(event WSRequest)<block_start>out_data<augadd>ws.send(AcceptConnection())<block_end><elif_stmt>isinstance(event CloseConnection)<block_start>out_data<augadd>ws.send(event.response())<line_sep>running=<false><block_end><elif_stmt>isinstance(event Ping)<block_start>out_data<augadd>ws.send(event.response())<block_end><elif_stmt>isinstance(event TextMessage)# echo the incoming message back to the client <block_start><if_stmt>event.data<eq>"quit"<block_start>out_data<augadd>ws.send(CloseConnection(CloseReason.NORMAL_CLOSURE "bye"))<line_sep>running=<false><block_end><else_stmt><block_start>out_data<augadd>ws.send(Message(data=event.data))<block_end><block_end><block_end><if_stmt>out_data<block_start>stream.send(out_data)<block_end><if_stmt><not>running<block_start><break><block_end>in_data=stream.recv(4096)<line_sep>ws.receive_data(in_data)<block_end># The connection will be closed at this point, but WSGI still # requires a response. <return>Response("" status=204)<block_end><if_stmt>__name__<eq>"__main__"<block_start>run_simple("localhost" 5000 websocket)<block_end>
# -*- coding: utf-8 -*- """ Python-Future Documentation Extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for automatically documenting filters and tests. Based on the Jinja2 documentation extensions. :copyright: Copyright 2008 by <NAME>. :license: BSD. """<import_stmt>collections<import_stmt>os<import_stmt>re<import_stmt>inspect<import_from_stmt>itertools islice<import_from_stmt>types BuiltinFunctionType<import_from_stmt>docutils nodes<import_from_stmt>docutils.statemachine ViewList<import_from_stmt>sphinx.ext.autodoc prepare_docstring<import_from_stmt>sphinx.application TemplateBridge<import_from_stmt>pygments.style Style<import_from_stmt>pygments.token Keyword Name Comment String Error Number Operator Generic<def_stmt>parse_rst state content_offset doc<block_start>node=nodes.section()<line_sep># hack around title style bookkeeping surrounding_title_styles=state.memo.title_styles<line_sep>surrounding_section_level=state.memo.section_level<line_sep>state.memo.title_styles=[]<line_sep>state.memo.section_level=0<line_sep>state.nested_parse(doc content_offset node match_titles=1)<line_sep>state.memo.title_styles=surrounding_title_styles<line_sep>state.memo.section_level=surrounding_section_level<line_sep><return>node.children<block_end><class_stmt>FutureStyle(Style)<block_start>title='Future Style'<line_sep>default_style=""<line_sep>styles={Comment:'italic #0B6A94' # was: #0066ff', Comment.Preproc:'noitalic #B11414' Comment.Special:'italic #505050' Keyword:'bold #D15E27' Keyword.Type:'#D15E27' Operator.Word:'bold #B80000' Name.Builtin:'#333333' Name.Function:'#333333' Name.Class:'bold #333333' Name.Namespace:'bold #333333' Name.Entity:'bold #363636' Name.Attribute:'#686868' Name.Tag:'bold #686868' Name.Decorator:'#686868' String:'#AA891C' Number:'#444444' Generic.Heading:'bold #000080' Generic.Subheading:'bold #800080' Generic.Deleted:'#aa0000' Generic.Inserted:'#00aa00' Generic.Error:'#aa0000' Generic.Emph:'italic' Generic.Strong:'bold' Generic.Prompt:'#555555' Generic.Output:'#888888' Generic.Traceback:'#aa0000' Error:'#F00 bg:#FAA'}<block_end><def_stmt>setup app<block_start><pass><line_sep># uncomment for inline toc. links are broken unfortunately ##app.connect('doctree-resolved', inject_toc) <block_end>
__author__='<NAME>'<line_sep>__all__=['invdx' 'parse' 'query' 'rank']<line_sep>
<import_from_stmt>concurrent futures<import_stmt>sys<import_stmt>requests<import_stmt>countryflags<as>cf<import_stmt>time<import_from_stmt>getsequential fetch<line_sep>DEFAULT_NUM_THREADS=100<line_sep>GLOBAL_TIMEOUT=300# seconds times={}<def_stmt>main source num_threads<block_start>pool=futures.ThreadPoolExecutor(num_threads)<line_sep>pending={}<line_sep>t0=time.time()<line_sep># submit all jobs <for_stmt>iso_cc sorted(cf.cc2name)<block_start>print('get:' iso_cc)<line_sep>times[iso_cc]=[time.time()-t0]<line_sep>job=pool.submit(fetch iso_cc source)<line_sep>pending[job]=iso_cc<block_end>to_download=len(pending)<line_sep>downloaded=0<line_sep># get results as jobs are done <for_stmt>job futures.as_completed(pending timeout=GLOBAL_TIMEOUT)<block_start><try_stmt><block_start>octets,file_name=job.result()<line_sep>times[pending[job]].append(time.time()-t0)<line_sep>downloaded<augadd>1<line_sep>print('\t--> {}: {:5d} bytes'.format(file_name octets))<block_end><except_stmt>Exception<as>exc<block_start>print('\t***' pending[job] 'generated an exception:' exc)<block_end><block_end>ratio=downloaded/to_download<line_sep>print('{} of {} downloaded ({:.1%})'.format(downloaded to_download ratio))<for_stmt>iso_cc sorted(times)<block_start>start,end=times[iso_cc]<line_sep>print('{}\t{:.6g}\t{:.6g}'.format(iso_cc start end))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>source_names=', '.join(sorted(cf.SOURCE_URLS))<line_sep>parser=argparse.ArgumentParser(description='Download flag images.')<line_sep>parser.add_argument('source' help='one of: '+source_names)<line_sep>parser.add_argument('-t' '--threads' type=int default=DEFAULT_NUM_THREADS help='number of threads (default: %s)'%DEFAULT_NUM_THREADS)<line_sep>args=parser.parse_args()<line_sep>main(args.source args.threads)<block_end>""" From CIA, 1 thread: real 2m0.832s user 0m4.685s sys 0m0.366s """<line_sep>
# pylint: disable=invalid-name,protected-access <import_from_stmt>copy deepcopy<import_from_stmt>unittest TestCase<import_stmt>codecs<import_stmt>gzip<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_from_stmt>keras backend<as>K<import_stmt>numpy<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>deep_qa.common.checks log_keras_version_info<import_from_stmt>deep_qa.data.instances.instance TextInstance<import_from_stmt>deep_qa.data.tokenizers tokenizers<import_from_stmt>deep_qa.common.params Params<class_stmt>DeepQaTestCase(TestCase)# pylint: disable=too-many-public-methods <block_start>TEST_DIR='./TMP_TEST/'<line_sep>TRAIN_FILE=TEST_DIR+'train_file'<line_sep>VALIDATION_FILE=TEST_DIR+'validation_file'<line_sep>TEST_FILE=TEST_DIR+'test_file'<line_sep>TRAIN_BACKGROUND=TEST_DIR+'train_background'<line_sep>VALIDATION_BACKGROUND=TEST_DIR+'validation_background'<line_sep>SNLI_FILE=TEST_DIR+'snli_file'<line_sep>PRETRAINED_VECTORS_FILE=TEST_DIR+'pretrained_glove_vectors_file'<line_sep>PRETRAINED_VECTORS_GZIP=TEST_DIR+'pretrained_glove_vectors_file.gz'<def_stmt>setUp self<block_start>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' level=logging.DEBUG)<line_sep>log_keras_version_info()<line_sep>os.makedirs(self.TEST_DIR exist_ok=<true>)<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.TEST_DIR)<line_sep>TextInstance.tokenizer=tokenizers["words"](Params({}))<line_sep>K.clear_session()<block_end><def_stmt>get_model_params self additional_arguments=<none><block_start>params=Params({})<line_sep>params['save_models']=<false><line_sep>params['model_serialization_prefix']=self.TEST_DIR<line_sep>params['train_files']=[self.TRAIN_FILE]<line_sep>params['validation_files']=[self.VALIDATION_FILE]<line_sep>params['embeddings']={'words':{'dimension':6} 'characters':{'dimension':2}}<line_sep>params['encoder']={"default":{'type':'bow'}}<line_sep>params['num_epochs']=1<line_sep>params['validation_split']=0.0<if_stmt>additional_arguments<block_start><for_stmt>key,value additional_arguments.items()<block_start>params[key]=deepcopy(value)<block_end><block_end><return>params<block_end><def_stmt>get_model self model_class additional_arguments=<none><block_start>params=self.get_model_params(additional_arguments)<line_sep><return>model_class(params)<block_end><def_stmt>ensure_model_trains_and_loads self model_class args:Params<block_start>args['save_models']=<true><line_sep># Our loading tests work better if you're not using data generators. Unless you # specifically request it in your test, we'll avoid using them here, and if you _do_ use # them, we'll skip some of the stuff below that isn't compatible. args.setdefault('data_generator' <none>)<line_sep>model=self.get_model(model_class args)<line_sep>model.train()<line_sep># load the model that we serialized loaded_model=self.get_model(model_class args)<line_sep>loaded_model.load_model()<line_sep># verify that original model and the loaded model predict the same outputs <if_stmt>model._uses_data_generators()# We shuffle the data in the data generator. Instead of making that logic more # complicated, we'll just pass on the loading tests here. See comment above. 
<block_start><pass><block_end><else_stmt><block_start>model_predictions=model.model.predict(model.validation_arrays[0])<line_sep>loaded_model_predictions=loaded_model.model.predict(model.validation_arrays[0])<for_stmt>model_prediction,loaded_prediction zip(model_predictions loaded_model_predictions)<block_start>assert_allclose(model_prediction loaded_prediction)<block_end><block_end># We should get the same result if we index the data from the original model and the loaded # model. _,indexed_validation_arrays=loaded_model.load_data_arrays(model.validation_files)<if_stmt>model._uses_data_generators()# As above, we'll just pass on this. <block_start><pass><block_end><else_stmt><block_start>model_predictions=model.model.predict(model.validation_arrays[0])<line_sep>loaded_model_predictions=loaded_model.model.predict(indexed_validation_arrays[0])<for_stmt>model_prediction,loaded_prediction zip(model_predictions loaded_model_predictions)<block_start>assert_allclose(model_prediction loaded_prediction)<block_end><block_end><return>model loaded_model<block_end>@staticmethod<def_stmt>one_hot index length<block_start>vector=numpy.zeros(length)<line_sep>vector[index]=1<line_sep><return>vector<block_end><def_stmt>write_snli_files self<block_start><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('1\ttext 1\thypothesis1\tentails\n')<line_sep>train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')<line_sep>train_file.write('3\ttext3\thypothesis3\tentails\n')<line_sep>train_file.write('4\ttext 4\thypothesis4\tneutral\n')<line_sep>train_file.write('5\ttext5\thypothesis 5\tentails\n')<line_sep>train_file.write('6\ttext6\thypothesis6\tcontradicts\n')<block_end><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')<line_sep>validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')<line_sep>validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')<block_end><block_end><def_stmt>write_sequence_tagging_files self<block_start><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')<line_sep>train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')<line_sep>train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')<line_sep>train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')<block_end><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')<line_sep>validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')<line_sep>validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')<line_sep>validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')<block_end><block_end><def_stmt>write_verb_semantics_files self<block_start><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')<line_sep>train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'<concat>'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')<line_sep>train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')<block_end><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 
'utf-8')<as>validation_file<block_start>validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')<line_sep>validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'<concat>'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')<line_sep>validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')<block_end><block_end><def_stmt>write_true_false_model_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tq1a1\t0\n')<line_sep>validation_file.write('2\tq1a2\t1\n')<line_sep>validation_file.write('3\tq1a3\t0\n')<line_sep>validation_file.write('4\tq1a4\t0\n')<line_sep>validation_file.write('5\tq2a1\t0\n')<line_sep>validation_file.write('6\tq2a2\t0\n')<line_sep>validation_file.write('7\tq2a3\t1\n')<line_sep>validation_file.write('8\tq2a4\t0\n')<line_sep>validation_file.write('9\tq3a1\t0\n')<line_sep>validation_file.write('10\tq3a2\t0\n')<line_sep>validation_file.write('11\tq3a3\t0\n')<line_sep>validation_file.write('12\tq3a4\t1\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('1\tsentence1\t0\n')<line_sep>train_file.write('2\tsentence2 word2 word3\t1\n')<line_sep>train_file.write('3\tsentence3 word2\t0\n')<line_sep>train_file.write('4\tsentence4\t1\n')<line_sep>train_file.write('5\tsentence5\t0\n')<line_sep>train_file.write('6\tsentence6\t0\n')<block_end><with_stmt>codecs.open(self.TEST_FILE 'w' 'utf-8')<as>test_file<block_start>test_file.write('1\ttestsentence1\t0\n')<line_sep>test_file.write('2\ttestsentence2 word2 word3\t1\n')<line_sep>test_file.write('3\ttestsentence3 word2\t0\n')<line_sep>test_file.write('4\ttestsentence4\t1\n')<line_sep>test_file.write('5\ttestsentence5 word4\t0\n')<line_sep>test_file.write('6\ttestsentence6\t0\n')<block_end><block_end><def_stmt>write_additional_true_false_model_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tq4a1\t0\n')<line_sep>validation_file.write('2\tq4a2\t1\n')<line_sep>validation_file.write('3\tq4a3\t0\n')<line_sep>validation_file.write('4\tq4a4\t0\n')<line_sep>validation_file.write('5\tq5a1\t0\n')<line_sep>validation_file.write('6\tq5a2\t0\n')<line_sep>validation_file.write('7\tq5a3\t1\n')<line_sep>validation_file.write('8\tq5a4\t0\n')<line_sep>validation_file.write('9\tq6a1\t0\n')<line_sep>validation_file.write('10\tq6a2\t0\n')<line_sep>validation_file.write('11\tq6a3\t0\n')<line_sep>validation_file.write('12\tq6a4\t1\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('1\tsentence7\t0\n')<line_sep>train_file.write('2\tsentence8 word4 word5\t1\n')<line_sep>train_file.write('3\tsentence9 word4\t0\n')<line_sep>train_file.write('4\tsentence10\t1\n')<line_sep>train_file.write('5\tsentence11 word3 word2\t0\n')<line_sep>train_file.write('6\tsentence12\t0\n')<block_end><block_end><def_stmt>write_question_answer_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')<block_end><with_stmt>codecs.open(self.VALIDATION_BACKGROUND 'w' 'utf-8')<as>validation_background<block_start>validation_background.write('1\tvb1\tvb2\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')<line_sep>train_file.write('2\ta b c 
d\tanswer3###answer4\t1\n')<line_sep>train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')<line_sep>train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')<block_end><with_stmt>codecs.open(self.TRAIN_BACKGROUND 'w' 'utf-8')<as>train_background<block_start>train_background.write('1\tsb1\tsb2\n')<line_sep>train_background.write('2\tsb3\n')<line_sep>train_background.write('3\tsb4\n')<line_sep>train_background.write('4\tsb5\tsb6\n')<block_end><block_end><def_stmt>write_who_did_what_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'<concat>'\tHe bought xxxxx\tgoods###store\t0\n')<line_sep>validation_file.write('1\tShe hiking on the weekend with her friend.'<concat>'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file# document, question, answers <block_start>train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')<line_sep>train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')<line_sep>train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')<line_sep>train_file.write('1\tThe pup at the bone but not the biscuit.\tThe pup ate the xxxxx\t'<concat>'bone###biscuit\t0\n')<block_end><block_end><def_stmt>write_tuple_inference_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'<concat>'ss ss<>ve gg<>o sd<>ccs\t0\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file# document, question, answers <block_start>train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'<concat>'ss<>ve gg<>o sd<>ccs\t0\n')<line_sep>train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'<concat>'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')<line_sep>train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'<concat>'ss ss<>v g<>o sd<>ccs\t0\n')<line_sep>train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'<concat>'ss ss<>ve gg<>o sd<>ccs\t1\n')<block_end><block_end><def_stmt>write_span_prediction_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tquestion 1 with extra words\t'<concat>'passage with answer and a reallylongword\t13,18\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')<line_sep>train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')<line_sep>train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')<line_sep>train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')<block_end><block_end><def_stmt>write_sentence_selection_files self<block_start><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file<block_start>validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '<concat>'is by the Seine.###It is quite old###this is a '<concat>'very long sentence meant to test that loading '<concat>'and padding works properly in the model.\t1\n')<block_end><with_stmt>codecs.open(self.TRAIN_FILE 'w' 
'utf-8')<as>train_file<block_start>train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '<concat>'Clara.###The Patriots beat the Broncos.\t1\n')<line_sep>train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '<concat>'of the Pilgrims celebrating the holiday.###Many '<concat>'people eat a lot.###It is in November.\t2\n')<line_sep>train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '<concat>'abacuses.###Alan Turing cracked Enigma.###It is hard to '<concat>'pinpoint an inventor of the computer.\t2\n')<block_end><block_end><def_stmt>write_pretrained_vector_files self# write the file <block_start><with_stmt>codecs.open(self.PRETRAINED_VECTORS_FILE 'w' 'utf-8')<as>vector_file<block_start>vector_file.write('word2 0.21 0.57 0.51 0.31\n')<line_sep>vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')<block_end># compress the file <with_stmt>open(self.PRETRAINED_VECTORS_FILE 'rb')<as>f_in<block_start><with_stmt>gzip.open(self.PRETRAINED_VECTORS_GZIP 'wb')<as>f_out<block_start>shutil.copyfileobj(f_in f_out)<block_end><block_end><block_end><def_stmt>write_sentence_data self<block_start><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file<block_start>train_file.write("This is a sentence for language modelling.\n")<line_sep>train_file.write("Here's another one for language modelling.\n")<block_end><block_end><def_stmt>write_original_snli_data self<block_start><with_stmt>codecs.open(self.TRAIN_FILE 'w' 'utf-8')<as>train_file# pylint: disable=line-too-long <block_start>train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")<line_sep>train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. 
.)))"}\n""")<line_sep>train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")<line_sep># pylint: enable=line-too-long <block_end><with_stmt>codecs.open(self.VALIDATION_FILE 'w' 'utf-8')<as>validation_file# pylint: disable=line-too-long <block_start>validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")<line_sep>validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")<line_sep>validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . 
) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")<line_sep># pylint: enable=line-too-long <block_end><block_end><block_end>
<import_stmt>re<def_stmt>count word<block_start>""" Simple syllable counting """<line_sep>word=word<if>type(word)<is>str<else>str(word)<line_sep>word=word.lower()<if_stmt>len(word)<le>3<block_start><return>1<block_end>word=re.sub('(?:[^laeiouy]es|[^laeiouy]e)$' '' word)# removed ed| word=re.sub('^y' '' word)<line_sep>matches=re.findall('[aeiouy]{1,2}' word)<line_sep><return>len(matches)<block_end>
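A minimal sanity check of the heuristic above; the expected values come from tracing the regexes by hand, and real syllable counts can disagree for irregular words.

assert count("cat") == 1       # words of three letters or fewer count as one syllable
assert count("python") == 2    # vowel groups found: 'y', 'o'
assert count("syllable") == 3  # final 'e' is kept because 'l' precedes it; groups: 'y', 'a', 'e'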
# Copyright 2016 <NAME> Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>sys<import_stmt>select<import_stmt>time<import_stmt>sys tty termios<import_from_stmt>op_cpu_package.python_l0_module OpCPUData<import_from_stmt>op_cpu_package.op_cpu_module OpCPU<line_sep># A Linux interface to using the python implementation of the One Page CPU emulator <def_stmt>main_loop <block_start>loader_data=OpCPUData()<line_sep>op_cpu=OpCPU(loader_data)<line_sep>fd=sys.stdin.fileno()<line_sep>old_settings=termios.tcgetattr(fd)<line_sep>tty.setraw(sys.stdin.fileno())<line_sep>input_buffer=[]# Characters to be sent to emulator <while_stmt><not>op_cpu.is_halted()<block_start>in_chrs=select.select([sys.stdin] [] [] 0.0001)[0]<if_stmt><not>in_chrs<block_start><for_stmt>x range(0 10000)<block_start>r=op_cpu.vm_getc()<if_stmt>'chr'<in>r<block_start>sys.stdout.write(chr(r['chr']))<if_stmt>r['chr']<eq>10<block_start>sys.stdout.write('\r')<block_end>sys.stdout.flush()<block_end><if_stmt>len(input_buffer)<block_start>inchr=input_buffer.pop()<if_stmt>op_cpu.vm_putc(inchr)# Not able to input chr <block_start>input_buffer=[inchr]+input_buffer<block_end><block_end>op_cpu.step()<block_end><block_end><else_stmt><block_start>dobreak=<false><for_stmt>file in_chrs<block_start>c=file.read(1)<line_sep>input_buffer=input_buffer+[ord(c)]<if_stmt>ord(c)<eq>3<block_start>dobreak=<true><block_end><block_end><if_stmt>dobreak<block_start><break><block_end><block_end><block_end>termios.tcsetattr(fd termios.TCSADRAIN old_settings)<block_end>main_loop()<line_sep>
<import_from_stmt>typing List<import_from_stmt>fractions Fraction<import_from_stmt>abc ABC abstractmethod<import_stmt>spacy<import_stmt>string<import_stmt>random<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>diskcache<import_stmt>sys<import_from_stmt>somajo SoMaJo<import_from_stmt>spacy.lang.tr Turkish<import_from_stmt>spacy.lang.sv Swedish<import_from_stmt>spacy.lang.uk Ukrainian<line_sep>NO_MODEL_LANGUAGE_LOOKUP={"turkish":Turkish "swedish":Swedish "ukrainian":Ukrainian }<def_stmt>noise text insert_chance delete_chance repeat_chance<block_start><assert_stmt>insert_chance<eq>delete_chance<eq>repeat_chance<line_sep>chances=np.random.random(len(text)<times>3)<if_stmt>(chances<l>insert_chance).all()<block_start><return>text<block_end>out=""<for_stmt>i,char enumerate(text)<block_start><if_stmt>chances[i<times>3]<ge>delete_chance<block_start>out<augadd>char<block_end><if_stmt>chances[(i<times>3)+1]<l>repeat_chance<block_start>out<augadd>char<block_end><if_stmt>chances[(i<times>3)+2]<l>insert_chance<block_start>out<augadd>random.choice(string.ascii_letters)<block_end><block_end><return>out<block_end><def_stmt>get_model name<block_start><try_stmt><block_start>nlp=spacy.load(name disable=["tagger" "parser" "ner"])<block_end><except_stmt>OSError<block_start>nlp=NO_MODEL_LANGUAGE_LOOKUP[name]()<block_end><return>nlp<block_end><def_stmt>has_space text:str<arrow>bool<block_start><return>any(x.isspace()<for>x text)<block_end><class_stmt>Tokenizer(ABC)<block_start><def_stmt>__init__ self<block_start>self.training=<true><block_end><def_stmt>train self mode=<true><block_start>self.training=mode<block_end><def_stmt>eval self<block_start>self.train(<false>)<block_end>@abstractmethod<def_stmt>tokenize self text:str<arrow>List[str]<block_start><pass><block_end><block_end><def_stmt>remove_last_punct text:str punctuation<arrow>str<block_start><for_stmt>i range(len(text))[::-1]<block_start><if_stmt>text[i]<in>punctuation<block_start><return>text[:i]+text[i+1:]<block_end><elif_stmt><not>text[i].isspace()<block_start><return>text<block_end><block_end><return>text<block_end><class_stmt>SpacySentenceTokenizer(Tokenizer)<block_start><def_stmt>__init__ self model_name:str lower_start_prob:Fraction remove_end_punct_prob:Fraction punctuation:str <block_start>super().__init__()<line_sep>self.nlp=get_model(model_name)<line_sep>self.nlp.add_pipe("sentencizer")<line_sep>self.lower_start_prob=lower_start_prob<line_sep>self.remove_end_punct_prob=remove_end_punct_prob<line_sep>self.punctuation=punctuation<block_end><def_stmt>tokenize self text:str<arrow>List[str]<block_start>out_sentences=[]<line_sep>current_sentence=""<line_sep>end_sentence=<false><for_stmt>token self.nlp(text)<block_start>text=token.text<line_sep>whitespace=token.whitespace_<if_stmt>token.is_sent_start<block_start>end_sentence=<true><block_end><if_stmt>end_sentence<and><not>text.isspace()<block_start><if_stmt>self.training<and>random.random()<l>self.remove_end_punct_prob<block_start>current_sentence=remove_last_punct(current_sentence self.punctuation)<block_end>out_sentences.append(current_sentence)<line_sep>current_sentence=""<line_sep>end_sentence=<false><block_end><if_stmt>(self.training<and>len(current_sentence)<eq>0<and>random.random()<l>self.lower_start_prob)<block_start>text=text.lower()<block_end>current_sentence<augadd>text+whitespace<block_end>out_sentences.append(current_sentence)<line_sep><return>[x<for>x out_sentences<if>len(x)<g>0]<block_end><block_end><class_stmt>SpacyWordTokenizer(Tokenizer)<block_start><def_stmt>__init__ self 
model_name:str<block_start>super().__init__()<line_sep>self.tokenizer=get_model(model_name).tokenizer<block_end><def_stmt>tokenize self text:str<arrow>List[str]<block_start>out_tokens=[]<line_sep>current_token=""<for_stmt>token self.tokenizer(text)<block_start><if_stmt><not>token.text.isspace()<block_start>out_tokens.append(current_token)<line_sep>current_token=""<block_end>current_token<augadd>token.text+token.whitespace_<block_end>out_tokens.append(current_token)<line_sep><return>[x<for>x out_tokens<if>len(x)<g>0]<block_end><block_end><class_stmt>SoMaJoSentenceTokenizer(Tokenizer)<block_start><def_stmt>__init__ self model_name:str<block_start>super().__init__()<line_sep>self.tokenizer=SoMaJo(model_name)<block_end><def_stmt>tokenize self text:str<arrow>List[str]<block_start>out_sentences=[]<line_sep>sentences=list(self.tokenizer.tokenize_text([text]))<for_stmt>i,sentence enumerate(sentences)<block_start>text=""<for_stmt>token sentence<block_start><if_stmt>"SpaceAfter=No"<in>token.extra_info<block_start>whitespace=""<block_end><else_stmt><block_start>whitespace=" "<block_end>text<augadd>token.text+whitespace<block_end><if_stmt>i<eq>len(sentences)-1<block_start>text=text.rstrip()<block_end>out_sentences.append(text)<block_end><return>out_sentences<block_end><block_end><class_stmt>SoMaJoWordTokenizer(Tokenizer)<block_start><def_stmt>__init__ self model_name:str<block_start>super().__init__()<line_sep>self.tokenizer=SoMaJo(model_name split_sentences=<false>)<block_end><def_stmt>tokenize self text:str<arrow>List[str]<block_start>out_tokens=[]<line_sep>tokens=next(self.tokenizer.tokenize_text([text]))<for_stmt>i,token enumerate(tokens)<block_start><if_stmt>"SpaceAfter=No"<in>token.extra_info<or>i<eq>len(tokens)-1<block_start>whitespace=""<block_end><else_stmt><block_start>whitespace=" "<block_end># sometimes sample more spaces than one space so the model learns to deal with it <while_stmt>random.random()<l>0.05<block_start>whitespace<augadd>" "<block_end>out_tokens.append(token.text+whitespace)<block_end><return>[x<for>x out_tokens<if>len(x)<g>0]<block_end><block_end><class_stmt>WhitespaceTokenizer(Tokenizer)<block_start><def_stmt>tokenize self text:str<arrow>List[str]<block_start>out=<none><for_stmt>i range(len(text))[::-1]<block_start><if_stmt><not>text[i].isspace()<block_start>out=[text[:i+1] text[i+1:]]<line_sep><break><block_end><block_end><if_stmt>out<is><none><block_start>out=[text ""]<block_end><return>out<block_end><block_end><class_stmt>SECOSCompoundTokenizer(Tokenizer)<block_start><def_stmt>__init__ self secos_path:str<block_start>super().__init__()<line_sep>sys.path.append(secos_path)<import_stmt>decompound_server<line_sep>self.decompound=decompound_server.make_decompounder(["decompound_server.py" f"{secos_path}data/denews70M_trigram__candidates" f"{secos_path}data/denews70M_trigram__WordCount" "50" "3" "3" "5" "3" "upper" "0.01" "2020" ])<line_sep>self.disk_cache=diskcache.Index("secos_cache")<line_sep>self.cache={}<for_stmt>key self.disk_cache<block_start>self.cache[key]=self.disk_cache[key]<block_end><block_end><def_stmt>tokenize self text:str<arrow>List[str]<block_start><if_stmt>text.isspace()<block_start><return>[text]<block_end>text_bytes=text.encode("utf-8")<line_sep>compounds=self.cache.get(text_bytes)<if_stmt>compounds<is><none><block_start><assert_stmt><not>has_space(text) 
text<line_sep>compounds=self.decompound(text)<if_stmt>len(compounds)<eq>0<block_start>compounds=text<block_end>compound_bytes=compounds.encode("utf-8")<line_sep>self.disk_cache[text_bytes]=compound_bytes<line_sep>self.cache[text_bytes]=compound_bytes<block_end><else_stmt><block_start>compounds=compounds.decode("utf-8")<block_end>compounds=compounds.split()<line_sep>compounds=[noise(x 0.001 0.001 0.001)<for>x compounds]<line_sep><return>compounds<if>len(compounds)<g>0<else>[noise(text 0.001 0.001 0.001)]<block_end><block_end><class_stmt>Labeler<block_start><def_stmt>__init__ self tokenizers<block_start>self.tokenizers=tokenizers<block_end><def_stmt>_annotate self text:str tok_index=0<block_start><if_stmt>tok_index<ge>len(self.tokenizers)<block_start><return>[(text set())]<block_end>out=[]<for_stmt>token self.tokenizers[tok_index].tokenize(text)<block_start>out<augadd>self._annotate(token tok_index=tok_index+1)<line_sep>out[-1][1].add(tok_index)<block_end><return>out<block_end><def_stmt>_to_dense_label self annotations<block_start>input_bytes=[]<line_sep>label=[]<line_sep>all_zeros=[0]<times>len(self.tokenizers)<for_stmt>(token annotation) annotations<block_start>token_bytes=token.encode("utf-8")<line_sep>input_bytes<augadd>token_bytes<line_sep>label<augadd>[all_zeros.copy()<for>_ range(len(token_bytes))]<if_stmt>len(label)<g>0<block_start><for_stmt>idx annotation<block_start>label[-1][idx]=1<block_end><block_end><block_end><return>input_bytes label<block_end><def_stmt>label self text<block_start><return>self._to_dense_label(self._annotate(text))<block_end><def_stmt>visualize self text<block_start>text,label=self.label(text)<line_sep>data=[]<for_stmt>char,label_col zip(text label)<block_start>data.append([char *label_col])<block_end>df=pd.DataFrame(data columns=["byte" *[x.__class__.__name__<for>x self.tokenizers]]).T<line_sep>df.columns=[""<for>_ range(len(df.columns))]<with_stmt>pd.option_context("display.max_columns" len(text) )<block_start>print(df)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>labeler=Labeler([SpacySentenceTokenizer("de_core_news_sm" lower_start_prob=0.7 remove_end_punct_prob=0.7 punctuation=".?!") SpacyWordTokenizer("de_core_news_sm") WhitespaceTokenizer() SECOSCompoundTokenizer("../../../Experiments/SECOS/") ])<line_sep>labeler.visualize("KNN (ANN).")<block_end>
<import_stmt>unittest<import_from_stmt>unittest.mock patch<import_stmt>pytest<import_stmt>re<import_from_stmt>fds.version __version__<import_from_stmt>fds.services.fds_service FdsService<import_from_stmt>fds.run HooksRunner<line_sep>BOOLS=[<true> <false>]<line_sep># NOTE unittest.mock:_Call backport <def_stmt>patch_unittest_mock_call_cls <block_start><import_stmt>sys<if_stmt>sys.version_info.minor<ge>8<block_start><return><block_end><import_stmt>unittest.mock<def_stmt>_get_call_arguments self<block_start><if_stmt>len(self)<eq>2<block_start>args,kwargs=self<block_end><else_stmt><block_start>name,args,kwargs=self<block_end><return>args kwargs<block_end>@property<def_stmt>args self<block_start><return>self._get_call_arguments()[0]<block_end>@property<def_stmt>kwargs self<block_start><return>self._get_call_arguments()[1]<block_end>unittest.mock._Call._get_call_arguments=_get_call_arguments<line_sep>unittest.mock._Call.args=args<line_sep>unittest.mock._Call.kwargs=kwargs<block_end>patch_unittest_mock_call_cls()<class_stmt>TestFds(unittest.TestCase)<block_start>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_init_success self mock_git_service mock_dvc_service<block_start>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>fds_service.init()<assert_stmt>mock_git_service.init.called<assert_stmt>mock_dvc_service.init.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_status_success self mock_git_service mock_dvc_service<block_start>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>fds_service.status()<assert_stmt>mock_git_service.status.called<assert_stmt>mock_dvc_service.status.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_status_git_failure self mock_git_service mock_dvc_service<block_start>mock_git_service.status.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>self.assertRaises(Exception mock_git_service.status)<line_sep>self.assertRaises(Exception fds_service.status)<assert_stmt>mock_git_service.status.called<assert_stmt>mock_dvc_service.status.notcalled<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_status_dvc_failure self mock_git_service mock_dvc_service<block_start>mock_dvc_service.status.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>self.assertRaises(Exception fds_service.status)<line_sep>self.assertRaises(Exception mock_dvc_service.status)<assert_stmt>mock_git_service.status.called<assert_stmt>mock_dvc_service.status.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_add_success self mock_git_service mock_dvc_service<block_start>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>fds_service.add(".")<assert_stmt>mock_git_service.add.called<assert_stmt>mock_dvc_service.add.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_add_git_failure self mock_git_service mock_dvc_service<block_start>mock_git_service.add.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>self.assertRaises(Exception 
mock_git_service.add)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.add(".")<block_end><assert_stmt>mock_git_service.add.called<assert_stmt>mock_dvc_service.add.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_add_dvc_failure self mock_git_service mock_dvc_service<block_start>mock_dvc_service.add.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.add(".")<block_end>self.assertRaises(Exception mock_dvc_service.add)<assert_stmt>mock_dvc_service.add.called<assert_stmt>mock_git_service.add.notcalled<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_commit_success self mock_git_service mock_dvc_service<block_start>fds_service=FdsService(mock_git_service mock_dvc_service)<line_sep>fds_service.commit("some commit message" <true>)<assert_stmt>mock_git_service.commit.called<assert_stmt>mock_dvc_service.commit.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_commit_git_failure self mock_git_service mock_dvc_service<block_start>mock_git_service.commit.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.commit("some commit message" <true>)<block_end>self.assertRaises(Exception mock_git_service.commit)<assert_stmt>mock_git_service.commit.called<assert_stmt>mock_dvc_service.commit.called<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_commit_dvc_failure self mock_git_service mock_dvc_service<block_start>mock_dvc_service.commit.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.commit("some commit message" <false>)<block_end>self.assertRaises(Exception mock_dvc_service.commit)<assert_stmt>mock_dvc_service.commit.called<assert_stmt>mock_git_service.commit.notcalled<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_clone_dvc_failure self mock_git_service mock_dvc_service<block_start>mock_dvc_service.pull.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.clone("https://github.com/dagshub/fds.git" <none> <none>)<block_end>self.assertRaises(Exception mock_dvc_service.pull)<line_sep>mock_git_service.clone.assert_called_with("https://github.com/dagshub/fds.git" <none>)<block_end>@patch('fds.services.dvc_service.DVCService')@patch('fds.services.git_service.GitService')<def_stmt>test_clone_git_failure self mock_git_service mock_dvc_service<block_start>mock_git_service.clone.side_effect=Exception<line_sep>fds_service=FdsService(mock_git_service mock_dvc_service)<with_stmt>self.assertRaises(Exception)<block_start>fds_service.clone("https://github.com/dagshub/fds.git" <none> <none>)<block_end>self.assertRaises(Exception mock_git_service.clone)<assert_stmt>mock_dvc_service.pull.notcalled<block_end><block_end><class_stmt>TestFdsHooks<block_start>@pytest.mark.parametrize("dvc_preinstalled" BOOLS)@pytest.mark.parametrize("install_prompt_accept" 
BOOLS)@patch('fds.run.execute_command')@patch('fds.run.get_confirm_from_user')@patch('fds.services.fds_service.FdsService')@patch('fds.run.which')<def_stmt>test_dvc_installed self mock_which mock_fds_service mock_prompt mock_execute_command dvc_preinstalled:bool install_prompt_accept:bool<block_start>mock_which.return_value=dvc_preinstalled<or><none><line_sep>mock_prompt.return_value=install_prompt_accept<line_sep>hooks_runner=HooksRunner(mock_fds_service.service mock_fds_service.printer mock_fds_service.logger )<line_sep>ret=hooks_runner._ensure_dvc_installed()<line_sep>mock_which.assert_called_with("dvc")<if_stmt>dvc_preinstalled<block_start><return><block_end><assert_stmt>mock_prompt.call_count<eq>1<if_stmt><not>install_prompt_accept<block_start><assert_stmt>ret<ne>0<line_sep># TODO validate printer containing "install dvc manually" <return><block_end><assert_stmt>ret<eq>0<assert_stmt>mock_execute_command.call_count<eq>1<line_sep>args=mock_execute_command.call_args_list[0].args[0]<assert_stmt>re.findall(r"^pip3 install .*'dvc" args[0])<block_end>@pytest.mark.parametrize("git_preinstalled" BOOLS)@patch('fds.run.sys.exit')@patch('fds.services.fds_service.FdsService')@patch('fds.run.which')<def_stmt>test_git_installed self mock_which mock_fds_service mock_sys_exit git_preinstalled:bool <block_start>mock_which.return_value=git_preinstalled<or><none><line_sep>hooks_runner=HooksRunner(mock_fds_service.service mock_fds_service.printer mock_fds_service.logger )<line_sep>ret=hooks_runner._ensure_git_installed()<line_sep>mock_which.assert_called_with("git")<if_stmt>git_preinstalled<block_start><assert_stmt>ret<eq>0<line_sep><return><block_end><assert_stmt>mock_sys_exit.call_count<eq>1<assert_stmt>0<not><in>mock_sys_exit.called_with<block_end>@pytest.mark.parametrize("is_latest" BOOLS)@pytest.mark.parametrize("install_prompt_accept" BOOLS)@patch('fds.run.rerun_in_new_shell_and_exit')@patch('fds.run.execute_command')@patch('fds.run.get_confirm_from_user')@patch('fds.services.fds_service.FdsService')@patch('fds.run.requests.get')<def_stmt>test_fds_update self mock_requests_get mock_fds_service mock_prompt mock_execute_command mock_rerun is_latest:bool install_prompt_accept:bool<block_start>mock_requests_get.return_value=type("Response" () {"json":<lambda>self:{"info":{"version":__version__+("b3"<if><not>is_latest<else>"")}}})()<line_sep>mock_prompt.return_value=install_prompt_accept<line_sep>hooks_runner=HooksRunner(mock_fds_service.service mock_fds_service.printer mock_fds_service.logger )<line_sep>ret=hooks_runner._ensure_fds_updated()<line_sep>mock_requests_get.assert_called_with("https://pypi.python.org/pypi/fastds/json")<assert_stmt>ret<eq>0<if_stmt>is_latest<block_start><return><block_end><assert_stmt>mock_prompt.call_count<eq>1<line_sep># # TODO validate stdout contains "Should we upgrade..." 
<if_stmt><not>install_prompt_accept<block_start><return><block_end><assert_stmt>mock_execute_command.call_count<eq>1<line_sep>lst=mock_execute_command.call_args_list[0]<assert_stmt>re.findall(r"^pip3 install .*fastds.*--upgrade" lst.args[0][0])<assert_stmt>mock_rerun.call_count<eq>1<line_sep>mock_rerun.assert_called_with()<block_end>@pytest.mark.parametrize("raise_on_reject" BOOLS)@pytest.mark.parametrize("service_preinitialized" BOOLS)@pytest.mark.parametrize("initialize_prompt_accept" BOOLS)@pytest.mark.parametrize("service_name" ["git" "dvc"])@patch('fds.run.sys.exit')@patch('fds.run.get_confirm_from_user')@patch('fds.services.fds_service.FdsService')<def_stmt>test_service_initialized self mock_fds_service mock_prompt mock_sys_exit raise_on_reject:bool service_preinitialized:bool initialize_prompt_accept:bool service_name:str tmpdir <block_start>attr_name=f"{service_name}_service"<line_sep>svc=getattr(mock_fds_service.service attr_name)<line_sep>fut_name=f"_ensure_{service_name}_initialized"<line_sep>hooks_runner=HooksRunner(mock_fds_service.service mock_fds_service.printer mock_fds_service.logger )<line_sep>fut=getattr(hooks_runner fut_name)<line_sep>mock_prompt.return_value=initialize_prompt_accept<with_stmt>patch.object(svc "repo_path" tmpdir.strpath ) patch.object(svc "is_initialized" return_value=service_preinitialized ) patch.object(svc "init" )<block_start>ret=fut()<assert_stmt>svc.is_initialized.call_count<eq>1<if_stmt>service_preinitialized<block_start><assert_stmt>ret<eq>0<line_sep><return><block_end><assert_stmt>mock_prompt.call_count<eq>1<if_stmt>initialize_prompt_accept<block_start><assert_stmt>svc.init.call_count<eq>1<assert_stmt>ret<eq>0<line_sep><return><block_end><assert_stmt>re.findall(r"You can initialize.*{}.*manually by running".format(service_name) mock_fds_service.printer.warn.call_args_list[0].args[0])<if_stmt>raise_on_reject<block_start><assert_stmt>mock_sys_exit.call_count<eq>1<block_end><else_stmt><block_start><assert_stmt>0<not><in>mock_sys_exit.called_with<block_end><block_end><block_end><block_end>
<import_stmt>re<import_stmt>ply.lex<as>lex<line_sep>states=(('instring' 'exclusive') )<line_sep>tokens=('COMMENT' 'HEXSTRING' 'INT' 'FLOAT' 'LITERAL' 'KEYWORD' 'STRING' 'OPERATOR')<line_sep>delimiter=r'\(\)\<\>\[\]\{\}\/\%\s'<line_sep>delimiter_end=r'(?=[%s]|$)'%delimiter<def_stmt>t_COMMENT t# r'^%!.+\n' <block_start>r'%.*\n'<line_sep><pass><block_end>RE_SPC=re.compile(r'\s')<line_sep>RE_HEX_PAIR=re.compile(r'[0-9a-fA-F]{2}|.')<line_sep>@lex.TOKEN(r'<[0-9A-Fa-f\s]*>')<def_stmt>t_HEXSTRING t<block_start>cleaned=RE_SPC.sub('' t.value[1:-1])<line_sep>pairs=RE_HEX_PAIR.findall(cleaned)<line_sep>token_bytes=bytes([int(pair 16)<for>pair pairs])<try_stmt><block_start>t.value=token_bytes.decode('ascii')<block_end><except_stmt>UnicodeDecodeError# should be kept as bytes <block_start>t.value=token_bytes<block_end><return>t<block_end>@lex.TOKEN(r'(\-|\+)?[0-9]+'+delimiter_end)<def_stmt>t_INT t<block_start>t.value=int(t.value)<line_sep><return>t<block_end>@lex.TOKEN(r'(\-|\+)?([0-9]+\.|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'+delimiter_end)<def_stmt>t_FLOAT t<block_start>t.value=float(t.value)<line_sep><return>t<block_end>RE_LITERAL_HEX=re.compile(r'#[0-9A-Fa-f]{2}')<line_sep>@lex.TOKEN(r'/.+?'+delimiter_end)<def_stmt>t_LITERAL t<block_start>newvalue=t.value[1:]<line_sep># If there's '#' chars in the literal, we much de-hex it <def_stmt>re_sub m# convert any hex str to int (without the # char) and the convert that <block_start><return>bytes.fromhex(m.group(0)[1:]).decode('latin-1')<block_end>newvalue=RE_LITERAL_HEX.sub(re_sub newvalue)<line_sep># If there's any lone # char left, remove them newvalue=newvalue.replace('#' '')<line_sep>t.value=newvalue<line_sep><return>t<block_end><def_stmt>t_OPERATOR t<block_start>r'{|}|<<|>>|\[|\]'<line_sep><return>t<block_end>t_KEYWORD=r'.+?'+delimiter_end<def_stmt>t_instring t<block_start>r'\('<line_sep>t.lexer.value_buffer=[]<line_sep>t.lexer.string_startpos=t.lexpos<line_sep>t.lexer.level=1<line_sep>t.lexer.begin('instring')<block_end># The parens situation: it's complicated. We can have both escaped parens and unescaped parens. # If they're escaped, there's nothing special, we unescape them and add them to the string. If # they're not escaped, we have to count how many of them there are, to know when a rparen is the # end of the string. The regular expression for this is messed up, so what we do is when we hit # a paren, we look if the previous buffer ended up with a backslash. If it did, we don't to paren # balancing. 
<def_stmt>t_instring_lparen t<block_start>r'\('<line_sep>is_escaped=t.lexer.value_buffer<and>t.lexer.value_buffer[-1].endswith('\\')<if_stmt>is_escaped<block_start>t.lexer.value_buffer[-1]=t.lexer.value_buffer[-1][:-1]<block_end><else_stmt><block_start>t.lexer.level<augadd>1<block_end>t.lexer.value_buffer.append('(')<block_end><def_stmt>t_instring_rparen t<block_start>r'\)'<line_sep>is_escaped=t.lexer.value_buffer<and>t.lexer.value_buffer[-1].endswith('\\')<if_stmt>is_escaped<block_start>t.lexer.value_buffer[-1]=t.lexer.value_buffer[-1][:-1]<block_end><else_stmt><block_start>t.lexer.level<augsub>1<block_end><if_stmt>t.lexer.level<eq>0<block_start>t.value=''.join(t.lexer.value_buffer)<if_stmt>any(ord(c)<g>0x7f<for>c t.value)<block_start>t.value=t.value.encode('latin-1')<block_end>t.type="STRING"<line_sep>t.lexpos=t.lexer.string_startpos<line_sep>t.lexer.begin('INITIAL')<line_sep><return>t<block_end><else_stmt><block_start>t.lexer.value_buffer.append(')')<block_end><block_end>RE_STRING_ESCAPE=re.compile(r'\\[btnfr\\]')<line_sep>RE_STRING_OCTAL=re.compile(r'\\[0-7]{1,3}')<line_sep>RE_STRING_LINE_CONT=re.compile(r'\\\n|\\\r|\\\r\n')<line_sep>ESC_STRING={'b':'\b' 't':'\t' 'n':'\n' 'f':'\f' 'r':'\r' '\\':'\\'}<def_stmt>repl_string_escape m<block_start><return>ESC_STRING[m.group(0)[1]]<block_end><def_stmt>repl_string_octal m<block_start>i=int(m.group(0)[1:] 8)<if_stmt>i<l>0xff# we never want to go above 256 because it's unencodable <block_start><return>chr(i)<block_end><else_stmt><block_start><return>m.group(0)<block_end><block_end><def_stmt>t_instring_contents t<block_start>r'[^()]+'<line_sep>s=t.value<line_sep>s=RE_STRING_ESCAPE.sub(repl_string_escape s)<line_sep>s=RE_STRING_OCTAL.sub(repl_string_octal s)<line_sep>s=RE_STRING_LINE_CONT.sub('' s)<line_sep>t.lexer.value_buffer.append(s)<block_end>t_instring_ignore=''<line_sep>t_ignore=' \t\r\n'<line_sep># Error handling rule <def_stmt>t_error t<block_start>print("Illegal character '%r'"%t.value[0])<line_sep>t.lexer.skip(1)<block_end>t_instring_error=t_error<line_sep>lexer=lex.lex()<line_sep>
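A rough usage sketch for the lexer above: ply lexers are driven by input() and iteration. The token stream noted in the comment is my expectation from reading the rules, not verified output.

lexer.input("1 0 obj << /Type /Page >> endobj")
for tok in lexer:
    # Roughly expected: INT 1, INT 0, KEYWORD 'obj', OPERATOR '<<',
    # LITERAL 'Type', LITERAL 'Page', OPERATOR '>>', KEYWORD 'endobj'
    print(tok.type, repr(tok.value))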
""" Sorts a sequence of strings from standard input using merge sort. % more tiny.txt S O R T E X A M P L E % python merge.py < tiny.txt A E E L M O P R S T X [ one string per line ] % more words3.txt bed bug dad yes zoo ... all bad yet % python merge.py < words3.txt all bad bed bug dad ... yes yet zoo [ one string per line ] """<class_stmt>Merge<block_start>@classmethod<def_stmt>merge cls arr lo mid hi<block_start>aux=list(arr)# copy to aux i=lo<line_sep>j=mid+1<line_sep>k=lo<while_stmt>k<le>hi<block_start><if_stmt>i<g>mid<block_start>arr[k]=aux[j]<line_sep>j<augadd>1<block_end><elif_stmt>j<g>hi<block_start>arr[k]=aux[i]<line_sep>i<augadd>1<block_end><elif_stmt>aux[i]<l>aux[j]<block_start>arr[k]=aux[i]<line_sep>i<augadd>1<block_end><else_stmt><block_start>arr[k]=aux[j]<line_sep>j<augadd>1<block_end>k<augadd>1<block_end><block_end>@classmethod<def_stmt>mergesort cls arr lo hi<block_start><if_stmt>lo<ge>hi<block_start><return><block_end>mid=(lo+hi)<floordiv>2<line_sep>cls.mergesort(arr lo mid)<line_sep>cls.mergesort(arr mid+1 hi)<line_sep>cls.merge(arr lo mid hi)<line_sep><return>arr<block_end>@classmethod<def_stmt>sort cls arr<block_start><return>cls.mergesort(arr 0 len(arr)-1)<block_end>@classmethod<def_stmt>is_sorted cls arr<block_start><for_stmt>i range(1 len(arr))<block_start><if_stmt>arr[i]<l>arr[i-1]<block_start><return><false><block_end><block_end><return><true><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<line_sep>items=[]<for_stmt>line sys.stdin<block_start>items.extend(line.split())<block_end>print(' items: ' items)<line_sep>print('sort items: ' Merge.sort(items))<assert_stmt>Merge.is_sorted(items)<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>capm path<block_start>"""Stock Market Data monthly observations from 1960–01 to 2002–12 *number of observations* : 516 A time serie containing : rfood excess returns food industry rdur excess returns durables industry rcon excess returns construction industry rmrf excess returns market portfolio rf riskfree return most of the above data are from Kenneth French's data library at http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `capm.csv`. Returns: Tuple of np.ndarray `x_train` with 516 rows and 5 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='capm.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/Ecdat/Capm.csv'<line_sep>maybe_download_and_extract(path url save_file_name='capm.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
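Hypothetical usage of the loader above, assuming the CSV can be downloaded (or is already cached) under the chosen directory:

x_train, metadata = capm("~/data")
print(x_train.shape)         # (516, 5) according to the docstring
print(metadata["columns"])   # rfood, rdur, rcon, rmrf, rf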
""" 741. Cherry Pickup """<class_stmt>Solution<block_start><def_stmt>cherryPickup self grid<block_start>""" :type grid: List[List[int]] :rtype: int """<line_sep>n=len(grid)<if_stmt>grid[0][0]<eq>-1<or>grid[n-1][n-1]<eq>-1<block_start><return>0<block_end>dp=[[-1 ]<times>n<for>_ range(n)]# 很重要, """ 因为比如[[1,-1,1],[-1,1,1],[1,1,1]], 如果初始化为0,k=1 后 , dp = [[-1,-1,0],[-1,-1,0],[0, 0,0]] 然后k = 2, i = 2, j = 2, 只能从(1,0) (1,0) 过来, dp[1][1] = -1,但是因为初始化为0,通过比较max,所以dp最后应该为-1,但是结果为0 """<line_sep>dp[0][0]=grid[0][0]<for_stmt>k range(1 2<times>n-1)<block_start><for_stmt>i range(min(k n-1) max(-1 k-n) -1)<block_start><for_stmt>j range(min(k n-1) max(-1 k-n) -1)<block_start><if_stmt>grid[i][k-i]<eq>-1<or>grid[j][k-j]<eq>-1<block_start>dp[i][j]=-1<line_sep><continue><block_end><if_stmt>i<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i-1][j])#向下向右 <block_end><if_stmt>j<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i][j-1])#向右向下 <block_end><if_stmt>i<g>0<and>j<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i-1][j-1])#向下向下 <block_end><if_stmt>dp[i][j]<l>0<block_start><continue><block_end>dp[i][j]<augadd>grid[i][k-i]<if_stmt>i<ne>j<block_start>dp[i][j]<augadd>grid[j][k-j]<block_end><block_end><block_end><block_end><return>max(dp[-1][-1] 0)<block_end><block_end><class_stmt>Solution<block_start><def_stmt>cherryPickup self grid:List[List[int]]<arrow>int<block_start>n=len(grid)<if_stmt>grid[0][0]<eq>-1<or>grid[n-1][n-1]<eq>-1<block_start><return>0<block_end>dp=[[-1 ]<times>n<for>_ range(n)]<line_sep>dp[0][0]=grid[0][0]<for_stmt>k range(1 2<times>n-1)<block_start><for_stmt>i range(n-1 -1 -1)<block_start><for_stmt>j range(n-1 -1 -1)<block_start>p,q=k-i k-j<if_stmt>p<l>0<or>p<ge>n<or>q<l>0<or>q<ge>n<or>grid[i][p]<eq>-1<or>grid[j][q]<eq>-1<block_start>dp[i][j]=-1<line_sep><continue><block_end><if_stmt>i<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i-1][j])#向下向右 <block_end><if_stmt>j<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i][j-1])#向右向下 <block_end><if_stmt>i<g>0<and>j<g>0<block_start>dp[i][j]=max(dp[i][j] dp[i-1][j-1])#向下向下 <block_end><if_stmt>dp[i][j]<l>0<block_start><continue><block_end>dp[i][j]<augadd>grid[i][p]<if_stmt>i<ne>j<block_start>dp[i][j]<augadd>grid[j][q]<block_end><block_end><block_end><block_end><return>max(dp[-1][-1] 0)<block_end><block_end># Top-Down <class_stmt>Solution<block_start><def_stmt>cherryPickup self grid:List[List[int]]<arrow>int<block_start>N=len(grid)<line_sep>lookup={}<def_stmt>solve x1 y1 x2 y2# check if we reached bottom right corner <block_start><if_stmt>x1<eq>N-1<and>y1<eq>N-1<block_start><return>grid[x1][y1]<if>grid[x1][y1]<ne>-1<else>float("-inf")<block_end># out of the grid and thorn check <if_stmt>x1<eq>N<or>y1<eq>N<or>x2<eq>N<or>y2<eq>N<or>grid[x1][y1]<eq>-1<or>grid[x2][y2]<eq>-1<block_start><return>float("-inf")<line_sep># memorization check <block_end>lookup_key=(x1 y1 x2 y2)<if_stmt>lookup_key<in>lookup<block_start><return>lookup[lookup_key]<block_end># pick your cherries <if_stmt>x1<eq>x2<and>y1<eq>y2<block_start>cherries=grid[x1][y1]<block_end><else_stmt><block_start>cherries=grid[x1][y1]+grid[x2][y2]<block_end>res=cherries+max(solve(x1+1 y1 x2+1 y2) # right, right solve(x1 y1+1 x2 y2+1) # down, down solve(x1+1 y1 x2 y2+1) # right, down solve(x1 y1+1 x2+1 y2) # down, right )<line_sep>lookup[lookup_key]=res<line_sep><return>res<block_end>res=solve(0 0 0 0)<line_sep><return>res<if>res<g>0<else>0<block_end><block_end>
<import_stmt>random<import_from_stmt>torch.utils.data.sampler Sampler<import_from_stmt>torchnlp.utils identity<def_stmt>_uniform_noise _<block_start><return>random.uniform(-1 1)<block_end><class_stmt>NoisySortedSampler(Sampler)<block_start>""" Samples elements sequentially with noise. **Background** ``NoisySortedSampler`` is similar to a ``BucketIterator`` found in popular libraries like `AllenNLP` and `torchtext`. A ``BucketIterator`` pools together examples with a similar size length to reduce the padding required for each batch. ``BucketIterator`` also includes the ability to add noise to the pooling. **AllenNLP Implementation:** https://github.com/allenai/allennlp/blob/e125a490b71b21e914af01e70e9b00b165d64dcd/allennlp/data/iterators/bucket_iterator.py **torchtext Implementation:** https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L225 Args: data (iterable): Data to sample from. sort_key (callable): Specifies a function of one argument that is used to extract a numerical comparison key from each list element. get_noise (callable): Noise added to each numerical ``sort_key``. Example: >>> from torchnlp.random import set_seed >>> set_seed(123) >>> >>> import random >>> get_noise = lambda i: round(random.uniform(-1, 1)) >>> list(NoisySortedSampler(range(10), sort_key=lambda i: i, get_noise=get_noise)) [0, 1, 2, 3, 5, 4, 6, 7, 9, 8] """<def_stmt>__init__ self data sort_key=identity get_noise=_uniform_noise<block_start>super().__init__(data)<line_sep>self.data=data<line_sep>self.sort_key=sort_key<line_sep>self.get_noise=get_noise<block_end><def_stmt>__iter__ self<block_start>zip_=[]<for_stmt>i,row enumerate(self.data)<block_start>value=self.get_noise(row)+self.sort_key(row)<line_sep>zip_.append(tuple([i value]))<block_end>zip_=sorted(zip_ key=<lambda>r:r[1])<line_sep><return>iter([item[0]<for>item zip_])<block_end><def_stmt>__len__ self<block_start><return>len(self.data)<block_end><block_end>
# Generated by Django 2.2.11 on 2020-10-01 06:04 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('facility' '0189_auto_20200929_1258') ]<line_sep>operations=[migrations.AlterField(model_name='shiftingrequest' name='status' field=models.IntegerField(choices=[(10 'PENDING') (15 'ON HOLD') (20 'APPROVED') (30 'REJECTED') (40 'DESTINATION APPROVED') (50 'DESTINATION REJECTED') (60 'AWAITING TRANSPORTATION') (70 'TRANSFER IN PROGRESS') (80 'COMPLETED')] default=10) ) ]<block_end>
<import_from_stmt>typing List<import_from_stmt>nuplan.planning.metrics.evaluation_metrics.base.within_bound_metric_base WithinBoundMetricBase<import_from_stmt>nuplan.planning.metrics.metric_result MetricStatistics<import_from_stmt>nuplan.planning.metrics.utils.state_extractors extract_ego_jerk<import_from_stmt>nuplan.planning.scenario_builder.abstract_scenario AbstractScenario<import_from_stmt>nuplan.planning.simulation.history.simulation_history SimulationHistory<class_stmt>EgoLatJerkStatistics(WithinBoundMetricBase)<block_start>"""Ego lateral jerk metric."""<def_stmt>__init__ self name:str category:str<arrow><none><block_start>""" Initializes the EgoLatJerkStatistics class :param name: Metric name :param category: Metric category. """<line_sep>super().__init__(name=name category=category)<block_end><def_stmt>compute self history:SimulationHistory scenario:AbstractScenario<arrow>List[MetricStatistics]<block_start>""" Returns the lateral jerk metric :param history: History from a simulation engine :param scenario: Scenario running this metric :return the estimated lateral jerk metric. """<line_sep><return>self._compute_statistics(# type: ignore history=history scenario=scenario statistic_unit_name='meters_per_second_cubed' extract_function=extract_ego_jerk extract_function_params={'acceleration_coordinate':'y'} )<block_end><block_end>
"""Defines a command message that sets FAILED status for job models"""<import_from_future_stmt> unicode_literals<import_stmt>logging<import_from_stmt>collections namedtuple<import_from_stmt>django.db transaction<import_from_stmt>error.models get_error<import_from_stmt>job.models Job<import_from_stmt>messaging.messages.message CommandMessage<import_from_stmt>util.parse datetime_to_string parse_datetime<import_from_stmt>util.retry retry_database_query<line_sep># This is the maximum number of job models that can fit in one message. This maximum ensures that every message of this # type is less than 25 KiB long. MAX_NUM=100<line_sep>FailedJob=namedtuple('FailedJob' ['job_id' 'exe_num' 'error_id'])<line_sep>logger=logging.getLogger(__name__)<def_stmt>create_failed_jobs_messages failed_jobs when<block_start>"""Creates messages to fail the given jobs :param failed_jobs: The failed jobs :type failed_jobs: :func:`list` :param when: When the jobs failed :type when: :class:`datetime.datetime` :return: The list of messages :rtype: :func:`list` """<line_sep>messages=[]<line_sep>message=<none><for_stmt>failed_job failed_jobs<block_start><if_stmt><not>message<block_start>message=FailedJobs()<line_sep>message.ended=when<block_end><elif_stmt><not>message.can_fit_more()<block_start>messages.append(message)<line_sep>message=FailedJobs()<line_sep>message.ended=when<block_end>message.add_failed_job(failed_job)<block_end><if_stmt>message<block_start>messages.append(message)<block_end><return>messages<block_end><class_stmt>FailedJobs(CommandMessage)<block_start>"""Command message that sets FAILED status for job models """<def_stmt>__init__ self<block_start>"""Constructor """<line_sep>super(FailedJobs self).__init__('failed_jobs')<line_sep>self._count=0<line_sep>self._failed_jobs={}# {Error ID: [FailedJob]} self.ended=<none><block_end><def_stmt>add_failed_job self failed_job<block_start>"""Adds the given failed job to this message :param failed_job: The failed job :type failed_job: :class:`job.messages.failed_jobs.FailedJob` """<line_sep>self._count<augadd>1<if_stmt>failed_job.error_id<in>self._failed_jobs<block_start>self._failed_jobs[failed_job.error_id].append(failed_job)<block_end><else_stmt><block_start>self._failed_jobs[failed_job.error_id]=[failed_job]<block_end><block_end><def_stmt>can_fit_more self<block_start>"""Indicates whether more failed jobs can fit in this message :return: True if more failed jobs can fit, False otherwise :rtype: bool """<line_sep><return>self._count<l>MAX_NUM<block_end><def_stmt>to_json self<block_start>"""See :meth:`messaging.messages.message.CommandMessage.to_json` """<line_sep>error_list=[]<for_stmt>error_id,job_list self._failed_jobs.items()<block_start>jobs_list=[]<for_stmt>failed_job job_list<block_start>jobs_list.append({'id':failed_job.job_id 'exe_num':failed_job.exe_num})<block_end>error_list.append({'id':error_id 'jobs':jobs_list})<block_end><return>{'ended':datetime_to_string(self.ended) 'errors':error_list}<block_end>@staticmethod<def_stmt>from_json json_dict<block_start>"""See :meth:`messaging.messages.message.CommandMessage.from_json` """<line_sep>message=FailedJobs()<line_sep>message.ended=parse_datetime(json_dict['ended'])<for_stmt>error_dict json_dict['errors']<block_start>error_id=error_dict['id']<for_stmt>job_dict error_dict['jobs']<block_start>job_id=job_dict['id']<line_sep>exe_num=job_dict['exe_num']<line_sep>message.add_failed_job(FailedJob(job_id exe_num error_id))<block_end><block_end><return>message<block_end>@retry_database_query(max_tries=5 
base_ms_delay=1000 max_ms_delay=5000)<def_stmt>execute self<block_start>"""See :meth:`messaging.messages.message.CommandMessage.execute` """<import_from_stmt>queue.messages.queued_jobs create_queued_jobs_messages QueuedJob<line_sep>job_ids=[]<for_stmt>job_list self._failed_jobs.values()<block_start><for_stmt>failed_job job_list<block_start>job_ids.append(failed_job.job_id)<block_end><block_end>root_recipe_ids=set()<with_stmt>transaction.atomic()# Retrieve locked job models <block_start>job_models={}<for_stmt>job Job.objects.get_locked_jobs(job_ids)<block_start>job_models[job.id]=job<if_stmt>job.root_recipe_id<block_start>root_recipe_ids.add(job.root_recipe_id)<block_end><block_end># Get job models with related fields # TODO: once long running job types are gone, the related fields are not needed <for_stmt>job Job.objects.get_jobs_with_related(job_ids)<block_start>job_models[job.id]=job<block_end>jobs_to_retry=[]<line_sep>all_failed_job_ids=[]<for_stmt>error_id,job_list self._failed_jobs.items()<block_start>error=get_error(error_id)<line_sep>jobs_to_fail=[]<for_stmt>failed_job job_list<block_start>job_model=job_models[failed_job.job_id]<line_sep># If job cannot be failed or execution number does not match, then this update is obsolete <if_stmt><not>job_model.can_be_failed()<or>job_model.num_exes<ne>failed_job.exe_num# Ignore this job <block_start><continue><block_end># Re-try job if error supports re-try and there are more tries left retry=error.should_be_retried<and>job_model.num_exes<l>job_model.max_tries<line_sep># Also re-try long running jobs retry=retry<or>job_model.job_type.is_long_running<line_sep># Do not re-try superseded jobs retry=retry<and><not>job_model.is_superseded<if_stmt>retry<block_start>jobs_to_retry.append(QueuedJob(job_model.id job_model.num_exes))<block_end><else_stmt><block_start>jobs_to_fail.append(job_model)<block_end><block_end># Update jobs that failed with this error <if_stmt>jobs_to_fail<block_start>failed_job_ids=Job.objects.update_jobs_to_failed(jobs_to_fail error_id self.ended)<line_sep>logger.info('Set %d job(s) to FAILED status with error %s' len(failed_job_ids) error.name)<line_sep>all_failed_job_ids.extend(failed_job_ids)<block_end><block_end># Need to update recipes of failed jobs so that dependent jobs are BLOCKED <if_stmt>root_recipe_ids<block_start><import_from_stmt>recipe.messages.update_recipe create_update_recipe_messages_from_node<line_sep>self.new_messages.extend(create_update_recipe_messages_from_node(root_recipe_ids))<block_end># Place jobs to retry back onto the queue <if_stmt>jobs_to_retry<block_start>self.new_messages.extend(create_queued_jobs_messages(jobs_to_retry requeue=<true>))<block_end><block_end># Send messages to update recipe metrics <import_from_stmt>recipe.messages.update_recipe_metrics create_update_recipe_metrics_messages_from_jobs<line_sep>self.new_messages.extend(create_update_recipe_metrics_messages_from_jobs(job_ids))<line_sep><return><true><block_end><block_end>
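A hedged sketch of how the helpers above fit together; it assumes a configured Scale/Django environment (so `timezone.now()` and the model imports work), and the job ids, execution numbers and error id below are made up.

from django.utils import timezone

failed = [FailedJob(job_id=101, exe_num=1, error_id=7),
          FailedJob(job_id=102, exe_num=2, error_id=7)]
for message in create_failed_jobs_messages(failed, timezone.now()):
    payload = message.to_json()              # dict with 'ended' and per-error job lists
    rebuilt = FailedJobs.from_json(payload)  # round-trips back into a command message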
<import_from_stmt>.ball *<class_stmt>TeamHandler(BaseActorHandler)<block_start>@classmethod<def_stmt>can_handle cls actor:dict<arrow>bool<block_start><return>actor['ClassName']<eq>'TAGame.Team_Soccar_TA'<block_end><def_stmt>update self actor:dict frame_number:int time:float delta:float<arrow><none><block_start>self.parser.team_dicts[actor['Id']]=actor<line_sep>self.parser.team_dicts[actor['Id']]['colour']='blue'<if>actor["TypeName"]<eq>"Archetypes.Teams.Team0"<else>'orange'<block_end><block_end>
<import_stmt>os.path<import_from_stmt>pathlib Path<line_sep>file_name=Path(os.path.expanduser("~/Desktop")).resolve()/"README_YOU_WERE_HACKED.txt"<line_sep>file_name.touch(exist_ok=<true>)<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>shallowGainCalibration=cms.EDProducer("ShallowGainCalibration" Tracks=cms.InputTag("generalTracks" "") Prefix=cms.string("GainCalibration") Suffix=cms.string(""))<line_sep>
# -- Path setup -------------------------------------------------------------- <import_stmt>os<import_stmt>sys<line_sep>sys.path.insert(0 os.path.abspath("../../src"))<import_stmt>sphinx_gallery<line_sep># -- Project information ----------------------------------------------------- project="SPFlow"<line_sep>copyright="2020, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"<line_sep>author="<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"<line_sep># Get __version__ from _meta <import_from_stmt>spn._meta __version__<line_sep>version=__version__<line_sep>release=__version__<line_sep>extensions=["sphinx.ext.linkcode" "sphinx.ext.autodoc" "sphinx.ext.autosummary" "sphinx.ext.doctest" "sphinx.ext.intersphinx" "sphinx.ext.todo" "sphinx.ext.coverage" "sphinx.ext.mathjax" "sphinx.ext.githubpages" "sphinx.ext.napoleon" "sphinx_gallery.gen_gallery" ]<line_sep>templates_path=["_templates"]<line_sep>source_suffix=".rst"<line_sep>master_doc="index"<line_sep>exclude_patterns=["build" "Thumbs.db" ".DS_Store" "env"]<line_sep>pygments_style="sphinx"<line_sep>html_theme="sphinx_rtd_theme"<line_sep>html_static_path=["_static"]<line_sep>html_logo="../../Documentation/logo/spflow_logoSquare.png"<line_sep># -- Extension configuration ------------------------------------------------- autosummary_generate=<true><line_sep>autodoc_default_options={"undoc-members":<none>}<line_sep># -- Options for intersphinx extension --------------------------------------- intersphinx_mapping={"python":("https://docs.python.org/3/" <none>) "numpy":("https://numpy.org/doc/stable/" <none>) "sklearn":("https://scikit-learn.org/stable" <none>) }<line_sep># -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos=<true><line_sep># Linkcode extension <def_stmt>linkcode_resolve domain info<block_start><if_stmt>domain<ne>"py"<block_start><return><none><block_end><if_stmt><not>info["module"]<block_start><return><none><block_end>filename=info["module"].replace("." "/")<line_sep><return>"https://github.com/SPFlow/SPFlow/blob/master/src/%s.py"%filename<block_end># If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names=<false><line_sep># Napoleon settings napoleon_google_docstring=<false><line_sep>napoleon_numpy_docstring=<true><line_sep>napoleon_include_init_with_doc=<false><line_sep>napoleon_include_private_with_doc=<false><line_sep>napoleon_include_special_with_doc=<true><line_sep>napoleon_use_admonition_for_examples=<false><line_sep>napoleon_use_admonition_for_notes=<false><line_sep>napoleon_use_admonition_for_references=<false><line_sep>napoleon_use_ivar=<false><line_sep>napoleon_use_param=<true><line_sep>napoleon_use_rtype=<true><line_sep># sphinx_gallery.gen_gallery settings sphinx_gallery_conf={"doc_module":"spn" "backreferences_dir":os.path.join("generated") "reference_url":{"spn":<none>} "remove_config_comments":<true> }<line_sep>
{"includes":["node/global.gypi" "node/configs.gypi" "node/dicts.gypi" "node/node_opencc.gypi" ]}<line_sep>
""" Adadelta algorithm implementation """<import_stmt>numpy<as>np<import_stmt>theano<import_stmt>theano.tensor<as>T<def_stmt>build_adadelta_updates params param_shapes param_grads rho=0.95 epsilon=0.001# AdaDelta parameter update # E[g^2] # initialized to zero <block_start>egs=[theano.shared(value=np.zeros(param_shape dtype=theano.config.floatX) borrow=<true> name="Eg:"+param.name)<for>param_shape,param zip(param_shapes params)]<line_sep># E[\delta x^2], initialized to zero exs=[theano.shared(value=np.zeros(param_shape dtype=theano.config.floatX) borrow=<true> name="Ex:"+param.name)<for>param_shape,param zip(param_shapes params)]<line_sep>new_egs=[rho<times>eg+(1-rho)<times>g<power>2<for>eg,g zip(egs param_grads)]<line_sep>delta_x=[-(T.sqrt(ex+epsilon)/T.sqrt(new_eg+epsilon))<times>g<for>new_eg,ex,g zip(new_egs exs param_grads)]<line_sep>new_exs=[rho<times>ex+(1-rho)<times>(dx<power>2)<for>ex,dx zip(exs delta_x)]<line_sep>egs_updates=zip(egs new_egs)<line_sep>exs_updates=zip(exs new_exs)<line_sep>param_updates=[(p p+dx)<for>dx,g,p zip(delta_x param_grads params)]<line_sep>updates=egs_updates+exs_updates+param_updates<line_sep><return>updates<block_end>
<import_from_stmt>qtrader.simulation.tests.arbitrage Arbitrage<import_from_stmt>qtrader.simulation.tests.moments Moments<line_sep>
# -*- coding: utf-8 -*- """Tests :func:`orion.analysis.lpi`"""<import_stmt>copy<import_stmt>numpy<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>orion.analysis.base to_numpy train_regressor<import_from_stmt>orion.analysis.lpi_utils compute_variances lpi make_grid<import_from_stmt>orion.core.io.space_builder SpaceBuilder<line_sep>data=pd.DataFrame(data={"id":["a" "b" "c" "d"] "x":[0 1 2 3] "y":[1 2 0 3] "objective":[0.1 0.2 0.3 0.5] })<line_sep>space=SpaceBuilder().build({"x":"uniform(0, 6)" "y":"uniform(0, 3)"})<def_stmt>test_accept_empty <block_start>"""Tests an empty dataframe is returned if you give an empty dataframe"""<line_sep>empty_frame=pd.DataFrame()<line_sep>results=lpi(empty_frame space)<assert_stmt>results.columns.tolist()<eq>["LPI"]<assert_stmt>results.index.tolist()<eq>list(space.keys())<assert_stmt>results["LPI"].tolist()<eq>[0 0]<line_sep>empty_frame=pd.DataFrame(columns=["x" "y" "objective"])<line_sep>results=lpi(empty_frame space)<assert_stmt>results.columns.tolist()<eq>["LPI"]<assert_stmt>results.index.tolist()<eq>list(space.keys())<assert_stmt>results["LPI"].tolist()<eq>[0 0]<block_end><def_stmt>test_parameter_not_modified <block_start>"""Tests the original dataframe is not modified"""<line_sep>original=copy.deepcopy(data)<line_sep>lpi(data space)<line_sep>pd.testing.assert_frame_equal(data original)<block_end><def_stmt>test_make_grid <block_start>"""Test grid has correct format"""<line_sep>trials=to_numpy(data space)<line_sep>model=train_regressor("RandomForestRegressor" trials)<line_sep>best_point=trials[numpy.argmin(trials[: -1])]<line_sep>grid=make_grid(best_point space model 4)<line_sep># Are fixed to anchor value numpy.testing.assert_equal(grid[0][: 1] best_point[1])<line_sep>numpy.testing.assert_equal(grid[1][: 0] best_point[0])<line_sep># Is a grid in search space numpy.testing.assert_equal(grid[0][: 0] [0 2 4 6])<line_sep>numpy.testing.assert_equal(grid[1][: 1] [0 1 2 3])<block_end><def_stmt>test_make_grid_predictor monkeypatch<block_start>"""Test grid contains corresponding predictions from the model"""<line_sep>trials=to_numpy(data space)<line_sep>model=train_regressor("RandomForestRegressor" trials)<line_sep>best_point=trials[numpy.argmin(trials[: -1])]<line_sep># Make sure model is not predicting exactly the original objective <with_stmt>numpy.testing.assert_raises(AssertionError)<block_start>numpy.testing.assert_equal(best_point[-1] model.predict(best_point[:-1].reshape(1 -1)))<block_end>grid=make_grid(best_point space model 4)<line_sep># Verify that grid predictions are those of the model numpy.testing.assert_equal(grid[0][: -1] model.predict(grid[0][: :-1]))<line_sep>numpy.testing.assert_equal(grid[1][: -1] model.predict(grid[1][: :-1]))<line_sep># Verify model predictions differ on different points <with_stmt>numpy.testing.assert_raises(AssertionError)<block_start>numpy.testing.assert_equal(grid[0][: -1] grid[1][: -1])<block_end><block_end><def_stmt>test_compute_variance <block_start>"""Test variance computation over the grid"""<line_sep>grid=numpy.arange(3<times>5<times>4).reshape(3 5 4)<line_sep>grid[0 : -1]=10<line_sep>grid[1 : -1]=[0 1 2 3 4]<line_sep>grid[2 : -1]=[0 10 20 30 40]<line_sep>variances=compute_variances(grid)<assert_stmt>variances.shape<eq>(3 )<assert_stmt>variances[0]<eq>0<assert_stmt>variances[1]<eq>numpy.var([0 1 2 3 4])<assert_stmt>variances[2]<eq>numpy.var([0 10 20 30 40])<block_end><def_stmt>test_lpi_results <block_start>"""Verify LPI results in DataFrame"""<line_sep>results=lpi(data space 
random_state=1)<assert_stmt>results.columns.tolist()<eq>["LPI" "STD"]<assert_stmt>results.index.tolist()<eq>list(space.keys())<line_sep># The data is made such that x correlates more strongly with objective than y <assert_stmt>results["LPI"].loc["x"]<g>results["LPI"].loc["y"]<block_end><def_stmt>test_lpi_with_categorical_data <block_start>"""Verify LPI can be computed on categorical dimensions"""<line_sep>data=pd.DataFrame(data={"id":["a" "b" "c" "d"] "x":[0 1 2 3] "y":["b" "c" "a" "d"] "objective":[0.1 0.2 0.3 0.5] })<line_sep>space=SpaceBuilder().build({"x":"uniform(0, 6)" "y":'choices(["a", "b", "c", "d"])'})<line_sep>results=lpi(data space random_state=1)<assert_stmt>results.columns.tolist()<eq>["LPI" "STD"]<assert_stmt>results.index.tolist()<eq>["x" "y"]<line_sep># The data is made such that x correlates more strongly with objective than y <assert_stmt>results["LPI"].loc["x"]<g>results["LPI"].loc["y"]<block_end><def_stmt>test_lpi_with_multidim_data <block_start>"""Verify LPI can be computed on categorical dimensions"""<line_sep>data=pd.DataFrame(data={"id":["a" "b" "c" "d"] "x":[[0 2 4] [1 1 3] [2 2 2] [3 0 3]] "y":[["b" "b"] ["c" "b"] ["a" "a"] ["d" "c"]] "objective":[0.1 0.2 0.3 0.5] })<line_sep>space=SpaceBuilder().build({"x":"uniform(0, 6, shape=3)" "y":'choices(["a", "b", "c", "d"], shape=2)'})<line_sep>results=lpi(data space random_state=1)<assert_stmt>results.columns.tolist()<eq>["LPI" "STD"]<assert_stmt>results.index.tolist()<eq>["x[0]" "x[1]" "x[2]" "y[0]" "y[1]"]<line_sep># The data is made such some x correlates more strongly with objective than other x and most y <assert_stmt>results["LPI"].loc["x[0]"]<g>results["LPI"].loc["x[1]"]<assert_stmt>results["LPI"].loc["x[1]"]<g>results["LPI"].loc["x[2]"]<assert_stmt>results["LPI"].loc["x[0]"]<g>results["LPI"].loc["y[0]"]<assert_stmt>results["LPI"].loc["x[0]"]<g>results["LPI"].loc["y[1]"]<block_end><def_stmt>test_lpi_n_points monkeypatch<block_start>"""Verify given number of points is used"""<line_sep>N_POINTS=numpy.random.randint(2 50)<def_stmt>mock_make_grid *args **kwargs<block_start>grid=make_grid(*args **kwargs)<assert_stmt>grid.shape<eq>(len(space) N_POINTS len(space)+1)<line_sep><return>grid<block_end>monkeypatch.setattr("orion.analysis.lpi_utils.make_grid" mock_make_grid)<line_sep>lpi(data space random_state=1 n_points=N_POINTS)<block_end><def_stmt>test_lpi_n_runs monkeypatch<block_start>"""Verify number of runs"""<line_sep>N_RUNS=5<line_sep>seeds=set()<line_sep>n_runs=0<def_stmt>mock_train_regressor *args **kwargs<block_start><nonlocal>n_runs<line_sep>n_runs<augadd>1<line_sep>seeds.add(kwargs["random_state"])<line_sep><return>train_regressor(*args **kwargs)<block_end>monkeypatch.setattr("orion.analysis.lpi_utils.train_regressor" mock_train_regressor)<line_sep>lpi(data space random_state=1 n_runs=N_RUNS)<assert_stmt>n_runs<eq>N_RUNS<assert_stmt>len(seeds)<g>0<block_end>
<import_stmt>unittest<import_stmt>libpysal<import_from_stmt>libpysal.common pandas RTOL ATOL<import_from_stmt>esda.geary_local_mv Geary_Local_MV<import_stmt>numpy<as>np<line_sep>PANDAS_EXTINCT=pandas<is><none><class_stmt>Geary_Local_MV_Tester(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>np.random.seed(100)<line_sep>self.w=libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()<line_sep>f=libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))<line_sep>self.y1=np.array(f.by_col['HR8893'])<line_sep>self.y2=np.array(f.by_col['HC8488'])<block_end><def_stmt>test_local_geary_mv self<block_start>lG_mv=Geary_Local_MV(connectivity=self.w).fit([self.y1 self.y2])<line_sep>print(lG_mv.p_sim[0])<line_sep>self.assertAlmostEqual(lG_mv.localG[0] 0.4096931479581422)<line_sep>self.assertAlmostEqual(lG_mv.p_sim[0] 0.211)<block_end><block_end>suite=unittest.TestSuite()<line_sep>test_classes=[Geary_Local_MV_Tester]<for_stmt>i test_classes<block_start>a=unittest.TestLoader().loadTestsFromTestCase(i)<line_sep>suite.addTest(a)<block_end><if_stmt>__name__<eq>"__main__"<block_start>runner=unittest.TextTestRunner()<line_sep>runner.run(suite)<block_end>
<import_stmt>json<import_stmt>os<import_stmt>pathlib<import_from_stmt>typing Any Dict List Union cast<line_sep>JSONData=Union[List[Any] Dict[str Any]]<line_sep># Splitting this out for testing with no side effects <def_stmt>mkdir directory:str<arrow><none><block_start><return>pathlib.Path(directory).mkdir(parents=<true> exist_ok=<true>)<block_end># Splitting this out for testing with no side effects <def_stmt>remove filename:str<arrow><none><block_start><return>os.remove(filename)<block_end><def_stmt>safe_jsonify directory:str filename:str data:JSONData<arrow><none><block_start>mkdir(directory)<line_sep>fname=os.path.join(directory filename)<with_stmt>open(fname 'w')<as>json_file<block_start>json.dump(data json_file)<block_end><block_end><def_stmt>load_json filename:str<arrow>JSONData<block_start><with_stmt>open(filename)<as>json_file<block_start><return>cast(JSONData json.load(json_file))<block_end><block_end>
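A small usage example, added for illustration; the 'out' directory and file name are arbitrary choices.

if __name__ == '__main__':
    payload = {'runs': [1, 2, 3], 'status': 'ok'}
    safe_jsonify('out', 'result.json', payload)   # creates out/ if it does not exist
    assert load_json(os.path.join('out', 'result.json')) == payload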
<import_stmt>micropython<as>micropython<line_sep># check we can get and set the level micropython.opt_level(0)<line_sep>print(micropython.opt_level())<line_sep>micropython.opt_level(1)<line_sep>print(micropython.opt_level())<line_sep># check that the optimisation levels actually differ micropython.opt_level(0)<line_sep>exec('print(__debug__)')<line_sep>micropython.opt_level(1)<line_sep>exec('print(__debug__)')<line_sep>exec('assert 0')<line_sep>
<import_from_stmt>vergeml.img INPUT_PATTERNS open_image fixext ImageType<import_from_stmt>vergeml.io source SourcePlugin Sample<import_from_stmt>vergeml.data Labels<import_from_stmt>vergeml.utils VergeMLError<import_from_stmt>vergeml.sources.labeled_image LabeledImageSource<import_stmt>random<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>os.path<import_stmt>json<import_from_stmt>operator methodcaller<import_stmt>io<import_from_stmt>typing List<import_stmt>gzip<import_stmt>hashlib<line_sep>_FILES=("train-images-idx3-ubyte.gz" "train-labels-idx1-ubyte.gz" "t10k-images-idx3-ubyte.gz" "t10k-labels-idx1-ubyte.gz")<line_sep>_MNIST_LABELS=("0" "1" "2" "3" "4" "5" "6" "7" "8" "9")<line_sep>_FASHION_MNIST_LABELS=("tshirt_top" "trouser" "pullover" "dress" "coat" "sandal" "shirt" "sneaker" "sag" "ankle_boot")<line_sep># we use the md5 to check for fashion mnist, so we can provide the labels # automatically _MD5_FASHION="8d4fb7e6c68d591d4c3dfef9ec88bf0d"<def_stmt>_md5 fname<block_start>hash_md5=hashlib.md5()<with_stmt>open(fname "rb")<as>f<block_start><for_stmt>chunk iter(<lambda>:f.read(4096) b"")<block_start>hash_md5.update(chunk)<block_end><block_end><return>hash_md5.hexdigest()<block_end>@source('image' descr="Load images in MNIST format.")<class_stmt>InputMnist(SourcePlugin)<block_start>data=<none><def_stmt>num_samples self split:str<arrow>int<block_start><return>len(self.data[split])<block_end><def_stmt>read_sample self split:str index:int<block_start><return>self.data[split][index]<block_end><def_stmt>_check_files self<block_start>self.data=dict(train=[] val=[] test=[])<line_sep>samples_dir=self.config["samples_dir"]<line_sep>files=[os.path.join(samples_dir file)<for>file _FILES]<for_stmt>path files<block_start><if_stmt><not>os.path.exists(path)<block_start><raise>VergeMLError("File not found in samples_dir: {}".format(os.path.basename(path)))<block_end><block_end><if_stmt>_md5(files[0])<eq>_MD5_FASHION<block_start>self.meta['labels']=_FASHION_MNIST_LABELS<block_end><else_stmt><block_start>self.meta['labels']=_MNIST_LABELS<block_end># preload <for_stmt>split,images,labels (('train' files[0] files[1]) ('test' files[2] files[3]))<block_start><with_stmt>gzip.open(images)<as>f# First 16 bytes are magic_number, n_imgs, n_rows, n_cols <block_start>pixels=np.frombuffer(f.read() 'B' offset=16)<line_sep>pixels=pixels.reshape(-1 28 28)<block_end><with_stmt>gzip.open(labels)<as>f# First 8 bytes are magic_number, n_labels <block_start>integer_labels=np.frombuffer(f.read() 'B' offset=8)<block_end>n_cols=integer_labels.max()+1<for_stmt>ix,imagearr enumerate(pixels)<block_start>label=integer_labels[ix]<line_sep>onehot=np.zeros((n_cols) dtype='float32')<line_sep>onehot[label]=1.0<line_sep>self.data[split].append((Image.fromarray(imagearr) onehot dict(labels=self.meta['labels'] filename=images split=split types=('pil' 'labels'))))<block_end><if_stmt>split<eq>'train'<block_start>n=self.config['val_num']<if_stmt>self.config['val_perc']<is><not><none><block_start>n=int(len(self.data['train'])<times>self.config['val_perc']<floordiv>100)<block_end><if_stmt>n<is><not><none><block_start><if_stmt>n<g>len(self.data['train'])<block_start><raise>VergeMLError("number of test samples is greater than number of available samples.")<block_end>rng=random.Random(self.config['random_seed'])<line_sep>count=len(self.data[split])<line_sep>indices=rng.sample(range(count) count)<line_sep>self.data['val']=[self.data['train'][i]<for>i indices[:n]]<line_sep>self.data['train']=[self.data['train'][i]<for>i 
indices[n:]]<block_end><block_end><else_stmt><block_start><if_stmt>self.config['test_num']<block_start><if_stmt>self.config['test_num']<g>len(self.data['test'])<block_start><raise>VergeMLError("number of test samples is greater than number of available samples.")<block_end>rng=random.Random(self.config['random_seed'])<line_sep>indices=rng.sample(range(len(self.data[split])) len(pixels))<line_sep>self.data['test']=[self.data['test'][i]<for>i indices[:n]]<block_end><block_end><block_end><block_end><block_end>plugin=InputMnist<line_sep>
#! /usr/bin/env python # -*- coding: utf-8 -*- <import_stmt>sys<import_from_stmt>pycket impersonators<as>imp<import_from_stmt>pycket values values_string<import_from_stmt>pycket.hash.base W_HashTable W_ImmutableHashTable w_missing<import_from_stmt>pycket.hash.simple W_EqvMutableHashTable W_EqMutableHashTable W_EqvImmutableHashTable W_EqImmutableHashTable make_simple_mutable_table make_simple_mutable_table_assocs make_simple_immutable_table make_simple_immutable_table_assocs <import_from_stmt>pycket.hash.equal W_EqualHashTable<import_from_stmt>pycket.impersonators.baseline W_ImpHashTable W_ChpHashTable<import_from_stmt>pycket.cont continuation loop_label<import_from_stmt>pycket.error SchemeException<import_from_stmt>pycket.prims.expose default expose procedure define_nyi<import_from_stmt>rpython.rlib jit objectmodel<line_sep>_KEY=0<line_sep>_VALUE=1<line_sep>_KEY_AND_VALUE=2<line_sep>_PAIR=3<line_sep>PREFIXES=["unsafe-mutable" "unsafe-immutable"]<def_stmt>prefix_hash_names base<block_start>result=[base]<for_stmt>pre PREFIXES<block_start>result.append("%s-%s"%(pre base))<block_end><return>result<block_end>@expose(prefix_hash_names("hash-iterate-first") [W_HashTable])<def_stmt>hash_iterate_first ht<block_start><if_stmt>ht.length()<eq>0<block_start><return>values.w_false<block_end><return>values.W_Fixnum.ZERO<block_end>@expose(prefix_hash_names("hash-iterate-next") [W_HashTable values.W_Fixnum])<def_stmt>hash_iterate_next ht pos<block_start><return>ht.hash_iterate_next(pos)<block_end>@objectmodel.specialize.arg(4)<def_stmt>hash_iter_ref ht n env cont returns<block_start><import_from_stmt>pycket.interpreter return_value return_multi_vals<try_stmt><block_start>w_key,w_val=ht.get_item(n)<if_stmt>returns<eq>_KEY<block_start><return>return_value(w_key env cont)<block_end><if_stmt>returns<eq>_VALUE<block_start><return>return_value(w_val env cont)<block_end><if_stmt>returns<eq>_KEY_AND_VALUE<block_start>vals=values.Values._make2(w_key w_val)<line_sep><return>return_multi_vals(vals env cont)<block_end><if_stmt>returns<eq>_PAIR<block_start>vals=values.W_Cons.make(w_key w_val)<line_sep><return>return_value(vals env cont)<block_end><assert_stmt><false> "unknown return code"<block_end><except_stmt>KeyError<block_start><raise>SchemeException("hash-iterate-key: invalid position")<block_end><except_stmt>IndexError<block_start><raise>SchemeException("hash-iterate-key: invalid position")<block_end><block_end>@expose(prefix_hash_names("hash-iterate-key") [W_HashTable values.W_Fixnum] simple=<false>)<def_stmt>hash_iterate_key ht pos env cont<block_start><return>hash_iter_ref(ht pos.value env cont returns=_KEY)<block_end>@expose(prefix_hash_names("hash-iterate-value") [W_HashTable values.W_Fixnum] simple=<false>)<def_stmt>hash_iterate_value ht pos env cont<block_start><return>hash_iter_ref(ht pos.value env cont returns=_VALUE)<block_end>@expose(prefix_hash_names("hash-iterate-key+value") [W_HashTable values.W_Fixnum] simple=<false>)<def_stmt>hash_iterate_key_value ht pos env cont<block_start><return>hash_iter_ref(ht pos.value env cont returns=_KEY_AND_VALUE)<block_end>@expose(prefix_hash_names("hash-iterate-pair") [W_HashTable values.W_Fixnum] simple=<false>)<def_stmt>hash_iterate_pair ht pos env cont<block_start><return>hash_iter_ref(ht pos.value env cont returns=_PAIR)<block_end>@expose("hash-for-each" [W_HashTable procedure default(values.W_Object values.w_false)] simple=<false>)<def_stmt>hash_for_each ht f try_order env cont# FIXME: implmeent try-order? 
-- see hash-map <block_start><return>hash_for_each_loop(ht f 0 env cont)<block_end>@loop_label<def_stmt>hash_for_each_loop ht f index env cont<block_start><import_from_stmt>pycket.interpreter return_value<try_stmt><block_start>w_key,w_value=ht.get_item(index)<block_end><except_stmt>KeyError<block_start><return>hash_for_each_loop(ht f index+1 env cont)<block_end><except_stmt>IndexError<block_start><return>return_value(values.w_void env cont)<block_end><return>f.call([w_key w_value] env hash_for_each_cont(ht f index env cont))<block_end>@continuation<def_stmt>hash_for_each_cont ht f index env cont _vals<block_start><return>hash_for_each_loop(ht f index+1 env cont)<block_end>@expose("hash-map" [W_HashTable procedure default(values.W_Object values.w_false)] simple=<false>)<def_stmt>hash_map h f try_order env cont# FIXME : If try-order? is true, then the order of keys and values # passed to proc is normalized under certain circumstances, such # as when the keys are all symbols and hash is not an # impersonator. <block_start><import_from_stmt>pycket.interpreter return_value<line_sep>acc=values.w_null<line_sep><return>hash_map_loop(f h 0 acc env cont)<line_sep># f.enable_jitting() # return return_value(w_missing, env, # hash_map_cont(f, h, 0, acc, env, cont)) <block_end>@loop_label<def_stmt>hash_map_loop f ht index w_acc env cont<block_start><import_from_stmt>pycket.interpreter return_value<try_stmt><block_start>w_key,w_value=ht.get_item(index)<block_end><except_stmt>KeyError<block_start><return>hash_map_loop(f ht index+1 w_acc env cont)<block_end><except_stmt>IndexError<block_start><return>return_value(w_acc env cont)<block_end>after=hash_map_cont(f ht index w_acc env cont)<line_sep><return>f.call([w_key w_value] env after)<block_end>@continuation<def_stmt>hash_map_cont f ht index w_acc env cont _vals<block_start><import_from_stmt>pycket.interpreter check_one_val<line_sep>w_val=check_one_val(_vals)<line_sep>w_acc=values.W_Cons.make(w_val w_acc)<line_sep><return>hash_map_loop(f ht index+1 w_acc env cont)<block_end>@jit.elidable<def_stmt>from_assocs assocs fname<block_start><if_stmt><not>assocs.is_proper_list()<block_start><raise>SchemeException("%s: expected proper list"%fname)<block_end>keys=[]<line_sep>vals=[]<while_stmt>isinstance(assocs values.W_Cons)<block_start>val,assocs=assocs.car() assocs.cdr()<if_stmt><not>isinstance(val values.W_Cons)<block_start><raise>SchemeException("%s: expected list of pairs"%fname)<block_end>keys.append(val.car())<line_sep>vals.append(val.cdr())<block_end><return>keys[:] vals[:]<block_end>@expose("make-weak-hasheq" [default(values.W_List values.w_null)])<def_stmt>make_weak_hasheq assocs# FIXME: not actually weak <block_start><return>make_simple_mutable_table_assocs(W_EqMutableHashTable assocs "make-weak-hasheq")<block_end>@expose("make-weak-hasheqv" [default(values.W_List values.w_null)])<def_stmt>make_weak_hasheqv assocs# FIXME: not actually weak <block_start><return>make_simple_mutable_table_assocs(W_EqvMutableHashTable assocs "make-weak-hasheqv")<block_end>@expose(["make-weak-hash" "make-late-weak-hasheq"] [default(values.W_List <none>)])<def_stmt>make_weak_hash assocs<block_start><if_stmt>assocs<is><none><block_start><return>W_EqualHashTable([] [] immutable=<false>)<block_end><return>W_EqualHashTable(*from_assocs(assocs "make-weak-hash") immutable=<false>)<block_end>@expose("make-immutable-hash" [default(values.W_List values.w_null)])<def_stmt>make_immutable_hash assocs<block_start>keys,vals=from_assocs(assocs 
"make-immutable-hash")<line_sep><return>W_EqualHashTable(keys vals immutable=<true>)<block_end>@expose("make-immutable-hasheq" [default(values.W_List values.w_null)])<def_stmt>make_immutable_hasheq assocs<block_start><return>make_simple_immutable_table_assocs(W_EqImmutableHashTable assocs "make-immutable-hasheq")<block_end>@expose("make-immutable-hasheqv" [default(values.W_List values.w_null)])<def_stmt>make_immutable_hasheqv assocs<block_start><return>make_simple_immutable_table_assocs(W_EqvImmutableHashTable assocs "make-immutable-hasheq")<block_end>@expose("hash")<def_stmt>hash args<block_start><if_stmt>len(args)%2<ne>0<block_start><raise>SchemeException("hash: key does not have a corresponding value")<block_end>keys=[args[i]<for>i range(0 len(args) 2)]<line_sep>vals=[args[i]<for>i range(1 len(args) 2)]<line_sep><return>W_EqualHashTable(keys vals immutable=<true>)<block_end>@expose("hasheq")<def_stmt>hasheq args<block_start><if_stmt>len(args)%2<ne>0<block_start><raise>SchemeException("hasheq: key does not have a corresponding value")<block_end>keys=[args[i]<for>i range(0 len(args) 2)]<line_sep>vals=[args[i]<for>i range(1 len(args) 2)]<line_sep><return>make_simple_immutable_table(W_EqImmutableHashTable keys vals)<block_end>@expose("hasheqv")<def_stmt>hasheqv args<block_start><if_stmt>len(args)%2<ne>0<block_start><raise>SchemeException("hasheqv: key does not have a corresponding value")<block_end>keys=[args[i]<for>i range(0 len(args) 2)]<line_sep>vals=[args[i]<for>i range(1 len(args) 2)]<line_sep><return>make_simple_immutable_table(W_EqvImmutableHashTable keys vals)<block_end>@expose("make-hash" [default(values.W_List values.w_null)])<def_stmt>make_hash pairs<block_start><return>W_EqualHashTable(*from_assocs(pairs "make-hash"))<block_end>@expose("make-hasheq" [default(values.W_List values.w_null)])<def_stmt>make_hasheq pairs<block_start><return>make_simple_mutable_table_assocs(W_EqMutableHashTable pairs "make-hasheq")<block_end>@expose("make-hasheqv" [default(values.W_List values.w_null)])<def_stmt>make_hasheqv pairs<block_start><return>make_simple_mutable_table_assocs(W_EqvMutableHashTable pairs "make-hasheqv")<block_end>@expose("hash-set!" 
[W_HashTable values.W_Object values.W_Object] simple=<false>)<def_stmt>hash_set_bang ht k v env cont<block_start><if_stmt>ht.immutable()<block_start><raise>SchemeException("hash-set!: given immutable table")<block_end><return>ht.hash_set(k v env cont)<block_end>@continuation<def_stmt>hash_set_cont key val env cont _vals<block_start><import_from_stmt>pycket.interpreter check_one_val<line_sep>table=check_one_val(_vals)<line_sep><return>table.hash_set(key val env return_table_cont(table env cont))<block_end>@continuation<def_stmt>return_table_cont table env cont _vals<block_start><import_from_stmt>pycket.interpreter return_value<line_sep><return>return_value(table env cont)<block_end>@expose("hash-set" [W_HashTable values.W_Object values.W_Object] simple=<false>)<def_stmt>hash_set table key val env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt><not>table.immutable()<block_start><raise>SchemeException("hash-set: not given an immutable table")<block_end># Fast path <if_stmt>isinstance(table W_ImmutableHashTable)<block_start>new_table=table.assoc(key val)<line_sep><return>return_value(new_table env cont)<block_end><return>hash_copy(table env hash_set_cont(key val env cont))<block_end>@continuation<def_stmt>hash_ref_cont default k env cont _vals<block_start><import_from_stmt>pycket.interpreter return_value check_one_val<line_sep>val=check_one_val(_vals)<if_stmt>val<is><not>w_missing<block_start><return>return_value(val env cont)<block_end><if_stmt>default<is><none><block_start><raise>SchemeException("key %s not found"%k.tostring())<block_end><if_stmt>default.iscallable()<block_start><return>default.call([] env cont)<block_end><return>return_value(default env cont)<block_end>@expose("hash-ref" [W_HashTable values.W_Object default(values.W_Object <none>)] simple=<false>)<def_stmt>hash_ref ht k default env cont<block_start><return>ht.hash_ref(k env hash_ref_cont(default k env cont))<block_end>@expose("hash-remove!" [W_HashTable values.W_Object] simple=<false>)<def_stmt>hash_remove_bang ht k env cont<block_start><if_stmt>ht.immutable()<block_start><raise>SchemeException("hash-remove!: expected mutable hash table")<block_end><return>ht.hash_remove_inplace(k env cont)<block_end>@expose("hash-remove" [W_HashTable values.W_Object] simple=<false>)<def_stmt>hash_remove ht k env cont<block_start><if_stmt><not>ht.immutable()<block_start><raise>SchemeException("hash-remove: expected immutable hash table")<block_end><return>ht.hash_remove(k env cont)<block_end>@continuation<def_stmt>hash_clear_cont ht env cont _vals<block_start><return>hash_clear_loop(ht env cont)<block_end><def_stmt>hash_clear_loop ht env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt>ht.length()<eq>0<block_start><return>return_value(values.w_void env cont)<block_end>w_k,w_v=ht.get_item(0)<line_sep><return>ht.hash_remove_inplace(w_k env hash_clear_cont(ht env cont))<block_end>@expose("hash-clear!" 
[W_HashTable] simple=<false>)<def_stmt>hash_clear_bang ht env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt>ht.is_impersonator()<block_start>ht.hash_clear_proc(env cont)<line_sep><return>hash_clear_loop(ht env cont)<block_end><else_stmt><block_start>ht.hash_empty()<line_sep><return>return_value(values.w_void env cont)<block_end><block_end>define_nyi("hash-clear" [W_HashTable])<line_sep>@expose("hash-count" [W_HashTable])<def_stmt>hash_count hash<block_start><return>values.W_Fixnum(hash.length())<block_end>@continuation<def_stmt>hash_keys_subset_huh_cont keys_vals hash_2 idx env cont _vals<block_start><import_from_stmt>pycket.interpreter return_value check_one_val<line_sep>val=check_one_val(_vals)<if_stmt>val<is>values.w_false<block_start><return>return_value(values.w_false env cont)<block_end><else_stmt><block_start><return>hash_keys_subset_huh_loop(keys_vals hash_2 idx+1 env cont)<block_end><block_end>@loop_label<def_stmt>hash_keys_subset_huh_loop keys_vals hash_2 idx env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt>idx<ge>len(keys_vals)<block_start><return>return_value(values.w_true env cont)<block_end><else_stmt><block_start><return>hash_ref([hash_2 keys_vals[idx][0] values.w_false] env hash_keys_subset_huh_cont(keys_vals hash_2 idx env cont))<block_end><block_end>@jit.elidable<def_stmt>uses_same_eq_comparison hash_1 hash_2<block_start>h_1=hash_1<line_sep>h_2=hash_2<if_stmt>hash_1.is_impersonator()<or>hash_1.is_chaperone()<block_start>h_1=hash_1.get_proxied()<block_end><if_stmt>hash_2.is_impersonator()<or>hash_2.is_chaperone()<block_start>h_2=hash_2.get_proxied()<block_end><if_stmt>isinstance(h_1 W_EqualHashTable)<block_start><return>isinstance(h_2 W_EqualHashTable)<block_end><elif_stmt>isinstance(h_1 W_EqMutableHashTable)<or>isinstance(h_1 W_EqImmutableHashTable)<block_start><return>isinstance(h_2 W_EqMutableHashTable)<or>isinstance(h_2 W_EqImmutableHashTable)<block_end><elif_stmt>isinstance(h_1 W_EqvMutableHashTable)<or>isinstance(h_1 W_EqvImmutableHashTable)<block_start><return>isinstance(h_2 W_EqvMutableHashTable)<or>isinstance(h_2 W_EqvImmutableHashTable)<block_end><else_stmt><block_start><return><false><block_end><block_end>@expose("hash-keys-subset?" 
[W_HashTable W_HashTable] simple=<false>)<def_stmt>hash_keys_subset_huh hash_1 hash_2 env cont<block_start><if_stmt><not>uses_same_eq_comparison(hash_1 hash_2)<block_start><raise>SchemeException("hash-keys-subset?: given hash tables do not use the same key comparison -- first table : %s - second table: %s"%(hash_1.tostring() hash_2.tostring()))<block_end><return>hash_keys_subset_huh_loop(hash_1.hash_items() hash_2 0 env cont)<block_end>@continuation<def_stmt>hash_copy_ref_cont keys idx src new env cont _vals<block_start><import_from_stmt>pycket.interpreter check_one_val<line_sep>val=check_one_val(_vals)<line_sep><return>new.hash_set(keys[idx][0] val env hash_copy_set_cont(keys idx src new env cont))<block_end>@continuation<def_stmt>hash_copy_set_cont keys idx src new env cont _vals<block_start><return>hash_copy_loop(keys idx+1 src new env cont)<block_end>@loop_label<def_stmt>hash_copy_loop keys idx src new env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt>idx<ge>len(keys)<block_start><return>return_value(new env cont)<block_end><return>src.hash_ref(keys[idx][0] env hash_copy_ref_cont(keys idx src new env cont))<block_end><def_stmt>hash_copy src env cont<block_start><import_from_stmt>pycket.interpreter return_value<if_stmt>isinstance(src W_ImmutableHashTable)<block_start>new=src.make_copy()<line_sep><return>return_value(new env cont)<block_end>new=src.make_empty()<if_stmt>src.length()<eq>0<block_start><return>return_value(new env cont)<block_end><return>hash_copy_loop(src.hash_items() 0 src new env cont)<block_end>expose("hash-copy" [W_HashTable] simple=<false>)(hash_copy)<line_sep># FIXME: not implemented @expose("equal-hash-code" [values.W_Object])<def_stmt>equal_hash_code v# only for improper path cache entries <block_start><if_stmt>isinstance(v values.W_Cons)<block_start><if_stmt>v.is_proper_list()<block_start><return>values.W_Fixnum.ZERO<block_end>nm=v.car()<line_sep>p=v.cdr()<if_stmt>isinstance(nm values_string.W_String)<and>isinstance(p values.W_Path)<and>isinstance(p.path str)<block_start><return>values.W_Fixnum(objectmodel.compute_hash((nm.tostring() p.path)))<block_end><block_end><return>values.W_Fixnum.ZERO<block_end>@expose("equal-secondary-hash-code" [values.W_Object])<def_stmt>equal_secondary_hash_code v<block_start><return>values.W_Fixnum.ZERO<block_end>@expose("eq-hash-code" [values.W_Object])<def_stmt>eq_hash_code v<block_start>t=type(v)<if_stmt>t<is>values.W_Fixnum<block_start><return>v<block_end><if_stmt>t<is>values.W_Flonum<block_start>hash=objectmodel.compute_hash(v.value)<block_end><elif_stmt>t<is>values.W_Character<block_start>hash=objectmodel.compute_hash(v.value)<block_end><else_stmt><block_start>hash=objectmodel.compute_hash(v)<block_end><return>values.W_Fixnum(hash)<block_end>@expose("eqv-hash-code" [values.W_Object])<def_stmt>eqv_hash_code v<block_start>hash=v.hash_eqv()<line_sep><return>values.W_Fixnum(hash)<block_end>
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>numpy<def_stmt>linearCombo a b c<block_start>'''This function is for visualizing linear combination of standard basis in 3D. Function syntax: linearCombo(a, b, c), where a, b, c are the scalar multiplier, also the elements of the vector. '''<line_sep>fig=plt.figure(figsize=(10 10))<line_sep>ax=fig.add_subplot(projection='3d')<line_sep>######################## Standard basis and Scalar Multiplid Vectors######################### vec=np.array([[[0 0 0 1 0 0]] # e1 [[0 0 0 0 1 0]] # e2 [[0 0 0 0 0 1]] # e3 [[0 0 0 a 0 0]] # a* e1 [[0 0 0 0 b 0]] # b* e2 [[0 0 0 0 0 c]] # c* e3 [[0 0 0 a b c]]])<line_sep># ae1 + be2 + ce3 colors=['b' 'b' 'b' 'r' 'r' 'r' 'g']<for_stmt>i range(vec.shape[0])<block_start>X,Y,Z,U,V,W=zip(*vec[i : :])<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color=colors[i] arrow_length_ratio=.08 pivot='tail' linestyles='solid' linewidths=3 alpha=.6)<block_end>#################################Plot Rectangle Boxes############################## dlines=np.array([[[a 0 0] [a b 0]] [[0 b 0] [a b 0]] [[0 0 c] [a b c]] [[0 0 c] [a 0 c]] [[a 0 c] [a b c]] [[0 0 c] [0 b c]] [[0 b c] [a b c]] [[a 0 0] [a 0 c]] [[0 b 0] [0 b c]] [[a b 0] [a b c]]])<line_sep>colors=['k' 'k' 'g' 'k' 'k' 'k' 'k' 'k' 'k']<for_stmt>i range(dlines.shape[0])<block_start>ax.plot(dlines[i : 0] dlines[i : 1] dlines[i : 2] lw=3 ls='--' color='black' alpha=0.5)<block_end>#################################Annotation######################################## ax.text(x=a y=b z=c s=' $(%0.d, %0.d, %.0d)$'%(a b c) size=18)<line_sep>ax.text(x=a y=0 z=0 s=' $%0.d e_1 = (%0.d, 0, 0)$'%(a a) size=15)<line_sep>ax.text(x=0 y=b z=0 s=' $%0.d e_2 = (0, %0.d, 0)$'%(b b) size=15)<line_sep>ax.text(x=0 y=0 z=c s=' $%0.d e_3 = (0, 0, %0.d)$'%(c c) size=15)<line_sep>#################################Axis Setting###################################### ax.grid()<line_sep>ax.set_xlim([0 a+1])<line_sep>ax.set_ylim([0 b+1])<line_sep>ax.set_zlim([0 c+1])<line_sep>ax.set_xlabel('x-axis' size=18)<line_sep>ax.set_ylabel('y-axis' size=18)<line_sep>ax.set_zlabel('z-axis' size=18)<line_sep>ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization'%(a b c) size=20)<line_sep>ax.view_init(elev=20. azim=15)<block_end><if_stmt>__name__<eq>'__main__'<block_start>a=7<line_sep>b=4<line_sep>c=9<line_sep>linearCombo(a b c)<block_end><def_stmt>linearComboNonStd a b c vec1 vec2 vec3<block_start>'''This function is for visualizing linear combination of non-standard basis in 3D. Function syntax: linearCombo(a, b, c, vec1, vec2, vec3), where a, b, c are the scalar multiplier, ve1, vec2 and vec3 are the basis. 
'''<line_sep>fig=plt.figure(figsize=(10 10))<line_sep>ax=fig.add_subplot(projection='3d')<line_sep>########################Plot basis############################## vec1=np.array([[0 0 0 vec1[0] vec1[1] vec1[2]]])<line_sep>X,Y,Z,U,V,W=zip(*vec1)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='blue' arrow_length_ratio=.08 pivot='tail' linestyles='solid' linewidths=3)<line_sep>vec2=np.array([[0 0 0 vec2[0] vec2[1] vec2[2]]])<line_sep>X,Y,Z,U,V,W=zip(*vec2)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='blue' arrow_length_ratio=.08 pivot='tail' linestyles='solid' linewidths=3)<line_sep>vec3=np.array([[0 0 0 vec3[0] vec3[1] vec3[2]]])<line_sep>X,Y,Z,U,V,W=zip(*vec3)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='blue' arrow_length_ratio=.08 pivot='tail' linestyles='solid' linewidths=3)<line_sep>###########################Plot Scalar Muliplied Vectors#################### avec1=a<times>vec1<line_sep>X,Y,Z,U,V,W=zip(*avec1)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='red' alpha=.6 arrow_length_ratio=a/100 pivot='tail' linestyles='solid' linewidths=3)<line_sep>bvec2=b<times>vec2<line_sep>X,Y,Z,U,V,W=zip(*bvec2)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='red' alpha=.6 arrow_length_ratio=b/100 pivot='tail' linestyles='solid' linewidths=3)<line_sep>cvec3=c<times>vec3<line_sep>X,Y,Z,U,V,W=zip(*cvec3)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='red' alpha=.6 arrow_length_ratio=c/100 pivot='tail' linestyles='solid' linewidths=3)<line_sep>combo=avec1+bvec2+cvec3<line_sep>X,Y,Z,U,V,W=zip(*combo)<line_sep>ax.quiver(X Y Z U V W length=1 normalize=<false> color='green' alpha=.7 arrow_length_ratio=np.linalg.norm(combo)/300 pivot='tail' linestyles='solid' linewidths=3)<line_sep>#################################Plot Rectangle Boxes############################## point1=[avec1[0 3] avec1[0 4] avec1[0 5]]<line_sep>point2=[avec1[0 3]+bvec2[0 3] avec1[0 4]+bvec2[0 4] avec1[0 5]+bvec2[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>point1=[bvec2[0 3] bvec2[0 4] bvec2[0 5]]<line_sep>point2=[avec1[0 3]+bvec2[0 3] avec1[0 4]+bvec2[0 4] avec1[0 5]+bvec2[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>point1=[bvec2[0 3] bvec2[0 4] bvec2[0 5]]<line_sep>point2=[cvec3[0 3]+bvec2[0 3] cvec3[0 4]+bvec2[0 4] cvec3[0 5]+bvec2[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>point1=[cvec3[0 3] cvec3[0 4] cvec3[0 5]]<line_sep>point2=[cvec3[0 3]+bvec2[0 3] cvec3[0 4]+bvec2[0 4] cvec3[0 5]+bvec2[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>point1=[cvec3[0 3] cvec3[0 4] cvec3[0 5]]<line_sep>point2=[cvec3[0 3]+avec1[0 3] cvec3[0 4]+avec1[0 4] cvec3[0 5]+avec1[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>point1=[avec1[0 3] avec1[0 4] avec1[0 5]]<line_sep>point2=[cvec3[0 3]+avec1[0 3] cvec3[0 4]+avec1[0 4] cvec3[0 5]+avec1[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>## point1=[avec1[0 3]+bvec2[0 3]+cvec3[0 3] avec1[0 4]+bvec2[0 4]+cvec3[0 4] avec1[0 
5]+bvec2[0 5]+cvec3[0 5]]<line_sep>point2=[cvec3[0 3]+avec1[0 3] cvec3[0 4]+avec1[0 4] cvec3[0 5]+avec1[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>## point1=[avec1[0 3]+bvec2[0 3]+cvec3[0 3] avec1[0 4]+bvec2[0 4]+cvec3[0 4] avec1[0 5]+bvec2[0 5]+cvec3[0 5]]<line_sep>point2=[cvec3[0 3]+bvec2[0 3] cvec3[0 4]+bvec2[0 4] cvec3[0 5]+bvec2[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>## point1=[avec1[0 3]+bvec2[0 3]+cvec3[0 3] avec1[0 4]+bvec2[0 4]+cvec3[0 4] avec1[0 5]+bvec2[0 5]+cvec3[0 5]]<line_sep>point2=[bvec2[0 3]+avec1[0 3] bvec2[0 4]+avec1[0 4] bvec2[0 5]+avec1[0 5]]<line_sep>line1=np.array([point1 point2])<line_sep>ax.plot(line1[: 0] line1[: 1] line1[: 2] lw=3 ls='--' color='black' alpha=0.5)<line_sep>#################################Annotation######################################## ax.text(x=vec1[0 3] y=vec1[0 4] z=vec1[0 5] s=' $v_1 =(%0.d, %0.d, %.0d)$'%(vec1[0 3] vec1[0 4] vec1[0 4]) size=8)<line_sep>ax.text(x=vec2[0 3] y=vec2[0 4] z=vec2[0 5] s=' $v_2 =(%0.d, %0.d, %.0d)$'%(vec2[0 3] vec2[0 4] vec2[0 4]) size=8)<line_sep>ax.text(x=vec3[0 3] y=vec3[0 4] z=vec3[0 5] s=' $v_3= (%0.d, %0.d, %.0d)$'%(vec3[0 3] vec3[0 4] vec3[0 4]) size=8)<line_sep>ax.text(x=avec1[0 3] y=avec1[0 4] z=avec1[0 5] s=' $%.0d v_1 =(%0.d, %0.d, %.0d)$'%(a avec1[0 3] avec1[0 4] avec1[0 4]) size=8)<line_sep>ax.text(x=bvec2[0 3] y=bvec2[0 4] z=bvec2[0 5] s=' $%.0d v_2 =(%0.d, %0.d, %.0d)$'%(b bvec2[0 3] bvec2[0 4] bvec2[0 4]) size=8)<line_sep>ax.text(x=cvec3[0 3] y=cvec3[0 4] z=cvec3[0 5] s=' $%.0d v_3= (%0.d, %0.d, %.0d)$'%(c cvec3[0 3] cvec3[0 4] cvec3[0 4]) size=8)<line_sep># ax.text(x = 0, y = b, z = 0, s= ' $%0.d e_2 = (0, %0.d, 0)$'% (b, b), size = 15) # ax.text(x = 0, y = 0, z = c, s= ' $%0.d e_3 = (0, 0, %0.d)$' %(c, c), size = 15) #################################Axis Setting###################################### ax.grid()<line_sep>ax.set_xlim([0 15])<line_sep>ax.set_ylim([0 15])<line_sep>ax.set_zlim([0 15])<line_sep>ax.set_xlabel('x-axis' size=18)<line_sep>ax.set_ylabel('y-axis' size=18)<line_sep>ax.set_zlabel('z-axis' size=18)<line_sep>#ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization' %(a, b, c), size = 20) ax.view_init(elev=20. azim=15)<block_end><if_stmt>__name__<eq>'__main__'<block_start>a=2<line_sep>b=3<line_sep>c=4<line_sep>vec1=np.array([2 1 0])<line_sep>vec2=np.array([0 3 1])<line_sep>vec3=np.array([1 2 3])<line_sep>linearComboNonStd(a b c vec1 vec2 vec3)<block_end>
<import_stmt>numpy<as>np<import_from_stmt>multiprocessing Pool<import_from_stmt>..bbox bbox_overlaps<line_sep># https://zhuanlan.zhihu.com/p/34655990 <def_stmt>calc_PR_curve pred label<block_start>pos=label[label<eq>1]# 正样本 threshold=np.sort(pred)[::-1]# pred是每个样本的正例预测概率值,逆序 label=label[pred.argsort()[::-1]]<line_sep>precision=[]<line_sep>recall=[]<line_sep>tp=0<line_sep>fp=0<line_sep>ap=0# 平均精度 <for_stmt>i range(len(threshold))<block_start><if_stmt>label[i]<eq>1<block_start>tp<augadd>1<line_sep>recall.append(tp/len(pos))<line_sep>precision.append(tp/(tp+fp))<line_sep># 近似曲线下面积 ap<augadd>(recall[i]-recall[i-1])<times>precision[i]<block_end><else_stmt><block_start>fp<augadd>1<line_sep>recall.append(tp/len(pos))<line_sep>precision.append(tp/(tp+fp))<block_end><block_end><return>precision recall ap<block_end><def_stmt>tpfp_voc det_bboxes gt_bboxes iou_thr=0.5<block_start>num_dets=det_bboxes.shape[0]<line_sep>num_gts=gt_bboxes.shape[0]<line_sep># tp和fp都是针对预测个数而言,不是gt个数 tp=np.zeros(num_dets dtype=np.float32)<line_sep>fp=np.zeros(num_dets dtype=np.float32)<line_sep># 如果gt=0,那么所有预测框都算误报,所有预测bbox位置的fp都设置为1 <if_stmt>gt_bboxes.shape[0]<eq>0<block_start>fp[<ellipsis>]=1<line_sep><return>tp fp<block_end><if_stmt>num_dets<eq>0<block_start><return>tp fp<block_end>ious=bbox_overlaps(det_bboxes[: :4] gt_bboxes).numpy()<line_sep># print(ious) # 对于每个预测框,找到最匹配的gt iou ious_max=ious.max(axis=1)<line_sep># 对于每个预测框,找到最匹配gt的索引 ious_argmax=ious.argmax(axis=1)<line_sep># 按照预测概率分支降序排列 sort_inds=np.argsort(-det_bboxes[: -1])<line_sep>gt_covered=np.zeros(num_gts dtype=bool)<line_sep># 多对一情况下,除了概率分值最大且大于阈值的预测框算tp外,其他框全部算fp <for_stmt>i sort_inds# 如果大于iou,则表示匹配 <block_start><if_stmt>ious_max[i]<ge>iou_thr<block_start>matched_gt=ious_argmax[i]<line_sep># 每个gt bbox只匹配一次,且是和预测概率最大的匹配,不是按照iou <if_stmt><not>gt_covered[matched_gt]<block_start>gt_covered[matched_gt]=<true><line_sep>tp[i]=1<block_end><else_stmt><block_start>fp[i]=1<block_end><block_end><else_stmt><block_start>fp[i]=1<block_end><block_end><return>tp fp<block_end><def_stmt>_average_precision recalls precisions mode='voc2007'<block_start>recalls=recalls[np.newaxis :]<line_sep>precisions=precisions[np.newaxis :]<assert_stmt>recalls.shape<eq>precisions.shape<and>recalls.ndim<eq>2<line_sep>num_scales=recalls.shape[0]<line_sep>ap=np.zeros(num_scales dtype=np.float32)<if_stmt>mode<eq>'voc2012'# 平滑后就是标准的PR曲线算法 <block_start>zeros=np.zeros((num_scales 1) dtype=recalls.dtype)<line_sep>ones=np.ones((num_scales 1) dtype=recalls.dtype)<line_sep>mrec=np.hstack((zeros recalls ones))<line_sep>mpre=np.hstack((zeros precisions zeros))<line_sep># 写法比较高级,高效 <for_stmt>i range(mpre.shape[1]-1 0 -1)<block_start>mpre[: i-1]=np.maximum(mpre[: i-1] mpre[: i])# 每段区间内,精度都是取最大值,也就是水平线 <block_end><for_stmt>i range(num_scales)<block_start>ind=np.where(mrec[i 1:]<ne>mrec[i :-1])[0]# 找到召回率转折点,表示x轴移动区间索引 ap[i]=np.sum((mrec[i ind+1]-mrec[i ind])<times>mpre[i ind+1])<line_sep># 每段面积和 <block_end><block_end><elif_stmt>mode<eq>'voc2007'# 11点法,需要平平滑处理 <block_start><for_stmt>i range(num_scales)<block_start><for_stmt>thr np.arange(0 1+1e-3 0.1)<block_start>precs=precisions[i recalls[i :]<ge>thr]<line_sep>prec=precs.max()<if>precs.size<g>0<else>0<line_sep>ap[i]<augadd>prec<block_end>ap<augdiv>11<block_end><block_end><else_stmt><block_start><raise>ValueError('Unrecognized mode, only "area" and "11points" are supported')<block_end><return>ap<block_end># code ref from mmdetection <def_stmt>voc_eval_map results annotations iou_thr=0.5 name='voc2007' nproc=4<block_start>""" :param results: 
list[list]; the outer list is indexed by image and the inner list by class. With 20 classes the inner list has length 20, and each entry is an n x 5 numpy array holding that image's detections for that class in xyxy+conf format :param annotations: same layout as results, but with ground-truth boxes :param iou_thr: IoU threshold for counting a TP, 0.5 by default for VOC :param name: which evaluation protocol to use; voc2007 is the 11-point method, voc2012 is the standard PR-curve area computation :return: """<assert_stmt>len(results)<eq>len(annotations)<line_sep>num_imgs=len(results)# number of images num_classes=len(results[0])# positive class num pool=Pool(nproc)<line_sep>eval_results=[]<for_stmt>i range(num_classes)<block_start>cls_dets=[img_res[i]<for>img_res results]<line_sep>cls_gts=[img_res[i]<for>img_res annotations]<line_sep>tpfp=pool.starmap(tpfp_voc zip(cls_dets cls_gts [iou_thr<for>_ range(num_imgs)]))<line_sep># tp/fp flags for every predicted bbox tp,fp=tuple(zip(*tpfp))<line_sep># count gt bboxes num_gts=0<for_stmt>j,bbox enumerate(cls_gts)<block_start>num_gts<augadd>bbox.shape[0]<block_end># concatenate the predicted bboxes of all images cls_dets=np.vstack(cls_dets)<line_sep>num_dets=cls_dets.shape[0]# number of detected bboxes # tp/fp of every predicted bbox is known at this point # now compute precision and recall in a vectorized way sort_inds=np.argsort(-cls_dets[: -1])# sort by predicted confidence score, descending # this is a fast vectorized version of calc_PR_curve in c3_pr_roc.py tp=np.hstack(tp)[sort_inds][<none>]<line_sep>fp=np.hstack(fp)[sort_inds][<none>]<line_sep>tp=np.cumsum(tp axis=1)<line_sep>fp=np.cumsum(fp axis=1)<line_sep>eps=np.finfo(np.float32).eps<line_sep>recalls=tp/np.maximum(num_gts eps)<line_sep>precisions=tp/np.maximum((tp+fp) eps)<line_sep>recalls=recalls[0 :]<line_sep>precisions=precisions[0 :]<line_sep># print('recalls', recalls, 'precisions', precisions) ap=_average_precision(recalls precisions name)[0]<line_sep>eval_results.append({'num_gts':num_gts 'num_dets':num_dets 'recall':recalls 'precision':precisions 'ap':ap})<block_end>pool.close()<line_sep>aps=[]<for_stmt>cls_result eval_results<block_start><if_stmt>cls_result['num_gts']<g>0<block_start>aps.append(cls_result['ap'])<block_end><block_end>mean_ap=np.array(aps).mean().item()<if>aps<else>0.0<line_sep><return>mean_ap<block_end>
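A minimal usage sketch of voc_eval_map with made-up detections for two images and a single class, following the layout described in the docstring above. It assumes the imported bbox_overlaps accepts these plain numpy arrays; in the real project it may expect tensors, since its result is converted with .numpy() in tpfp_voc.

import numpy as np

# Hypothetical toy data: 2 images, 1 class; each entry is an (n, 5) array of [x1, y1, x2, y2, conf].
results = [
    [np.array([[10., 10., 50., 50., 0.9], [60., 60., 90., 90., 0.3]])],  # image 0
    [np.array([[15., 20., 55., 60., 0.8]])],                             # image 1
]
annotations = [
    [np.array([[12., 11., 48., 52.]])],  # ground-truth boxes, xyxy
    [np.array([[14., 18., 56., 61.]])],
]
mean_ap = voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007', nproc=2)
print('mAP@0.5:', mean_ap)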
<import_stmt>torch<import_stmt>pyro<import_from_stmt>pyro.nn pyro_method<import_from_stmt>pyro.distributions Normal Bernoulli TransformedDistribution<import_from_stmt>pyro.distributions.conditional ConditionalTransformedDistribution<import_from_stmt>deepscm.distributions.transforms.affine ConditionalAffineTransform<import_from_stmt>pyro.nn DenseNN<import_from_stmt>deepscm.experiments.medical.ukbb.sem_vi.base_sem_experiment BaseVISEM MODEL_REGISTRY<class_stmt>ConditionalVISEM(BaseVISEM)<block_start>context_dim=2<def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<line_sep># ventricle_volume flow ventricle_volume_net=DenseNN(2 [8 16] param_dims=[1 1] nonlinearity=torch.nn.LeakyReLU(.1))<line_sep>self.ventricle_volume_flow_components=ConditionalAffineTransform(context_nn=ventricle_volume_net event_dim=0)<line_sep>self.ventricle_volume_flow_transforms=[self.ventricle_volume_flow_components self.ventricle_volume_flow_constraint_transforms]<line_sep># brain_volume flow brain_volume_net=DenseNN(2 [8 16] param_dims=[1 1] nonlinearity=torch.nn.LeakyReLU(.1))<line_sep>self.brain_volume_flow_components=ConditionalAffineTransform(context_nn=brain_volume_net event_dim=0)<line_sep>self.brain_volume_flow_transforms=[self.brain_volume_flow_components self.brain_volume_flow_constraint_transforms]<block_end>@pyro_method<def_stmt>pgm_model self<block_start>sex_dist=Bernoulli(logits=self.sex_logits).to_event(1)<line_sep>_=self.sex_logits<line_sep>sex=pyro.sample('sex' sex_dist)<line_sep>age_base_dist=Normal(self.age_base_loc self.age_base_scale).to_event(1)<line_sep>age_dist=TransformedDistribution(age_base_dist self.age_flow_transforms)<line_sep>age=pyro.sample('age' age_dist)<line_sep>age_=self.age_flow_constraint_transforms.inv(age)<line_sep># pseudo call to thickness_flow_transforms to register with pyro _=self.age_flow_components<line_sep>brain_context=torch.cat([sex age_] 1)<line_sep>brain_volume_base_dist=Normal(self.brain_volume_base_loc self.brain_volume_base_scale).to_event(1)<line_sep>brain_volume_dist=ConditionalTransformedDistribution(brain_volume_base_dist self.brain_volume_flow_transforms).condition(brain_context)<line_sep>brain_volume=pyro.sample('brain_volume' brain_volume_dist)<line_sep># pseudo call to intensity_flow_transforms to register with pyro _=self.brain_volume_flow_components<line_sep>brain_volume_=self.brain_volume_flow_constraint_transforms.inv(brain_volume)<line_sep>ventricle_context=torch.cat([age_ brain_volume_] 1)<line_sep>ventricle_volume_base_dist=Normal(self.ventricle_volume_base_loc self.ventricle_volume_base_scale).to_event(1)<line_sep>ventricle_volume_dist=ConditionalTransformedDistribution(ventricle_volume_base_dist self.ventricle_volume_flow_transforms).condition(ventricle_context)# noqa: E501 ventricle_volume=pyro.sample('ventricle_volume' ventricle_volume_dist)<line_sep># pseudo call to intensity_flow_transforms to register with pyro _=self.ventricle_volume_flow_components<line_sep><return>age sex ventricle_volume brain_volume<block_end>@pyro_method<def_stmt>model self<block_start>age,sex,ventricle_volume,brain_volume=self.pgm_model()<line_sep>ventricle_volume_=self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)<line_sep>brain_volume_=self.brain_volume_flow_constraint_transforms.inv(brain_volume)<line_sep>z=pyro.sample('z' Normal(self.z_loc self.z_scale).to_event(1))<line_sep>latent=torch.cat([z ventricle_volume_ brain_volume_] 1)<line_sep>x_dist=self._get_transformed_x_dist(latent)<line_sep>x=pyro.sample('x' 
x_dist)<line_sep><return>x z age sex ventricle_volume brain_volume<block_end>@pyro_method<def_stmt>guide self x age sex ventricle_volume brain_volume<block_start><with_stmt>pyro.plate('observations' x.shape[0])<block_start>hidden=self.encoder(x)<line_sep>ventricle_volume_=self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)<line_sep>brain_volume_=self.brain_volume_flow_constraint_transforms.inv(brain_volume)<line_sep>hidden=torch.cat([hidden ventricle_volume_ brain_volume_] 1)<line_sep>latent_dist=self.latent_encoder.predict(hidden)<line_sep>z=pyro.sample('z' latent_dist)<block_end><return>z<block_end><block_end>MODEL_REGISTRY[ConditionalVISEM.__name__]=ConditionalVISEM<line_sep>
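Since the final line registers the class in MODEL_REGISTRY under its own name, downstream code can resolve it by string. A minimal sketch; the constructor kwargs come from BaseVISEM and are not shown here:

model_cls = MODEL_REGISTRY['ConditionalVISEM']
assert model_cls is ConditionalVISEM
# model = model_cls(**experiment_kwargs)  # kwargs are defined by BaseVISEM, omitted here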
""" Copyright 2017-2018 Fizyr (https://fizyr.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>numpy<as>np<def_stmt>compute_overlap a b<block_start>""" Args a: (N, H, W) ndarray of float b: (K, H, W) ndarray of float Returns overlaps: (N, K) ndarray of overlap between boxes and query_boxes """<line_sep>intersection=np.zeros((a.shape[0] b.shape[0]))<line_sep>union=np.zeros((a.shape[0] b.shape[0]))<for_stmt>index,mask enumerate(a)<block_start>intersection[index :]=np.sum(np.count_nonzero(b&mask axis=1) axis=1)<line_sep>union[index :]=np.sum(np.count_nonzero(b+mask axis=1) axis=1)<block_end><return>intersection/union<block_end>
""" Leetcode's Medium challege #43 - Multiply Strings (Solution) <https://leetcode.com/problems/multiply-strings/> Description: Given two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2, also represented as a string. EXAMPLE: Input: num1 = "2", num2 = "3" Output: "6" Author: <Curiouspaul1> github: https://github.com/Curiouspaul1 """<def_stmt>int_ s<block_start>""" Converts strings to int, raises exception for non-int literals """<line_sep>reslt=0<for_stmt>i s<block_start><if_stmt>ord(i)<in>range(48 58)# checks that string character is something in [0-9] <block_start>reslt=reslt<times>10+(ord(i)-ord('0'))<block_end><else_stmt><block_start><raise>ValueError<block_end><block_end><return>reslt<block_end><class_stmt>Solution<block_start><def_stmt>multiply self num1:str num2:str<arrow>str<block_start><if_stmt>len(num1)<ge>110<or>len(num2)<ge>110# constraints from leetcode <block_start><return>0<block_end><try_stmt><block_start>num1,num2=int_(num1) int_(num2)<line_sep>result=num1<times>num2<line_sep><return>str(result)<block_end><except_stmt>ValueError<block_start>print("Invalid Entry")<line_sep><return>0<block_end><block_end><block_end>
<import_from_stmt>saboteur.agent SaboteurWebApp<import_stmt>json<import_stmt>unittest<import_from_stmt>test_utils MockShell<import_from_stmt>saboteur.apicommands FAULT_TYPES alphabetical_keys<def_stmt>post_request params<block_start><return>request('POST' params)<block_end><def_stmt>delete_request <block_start><return>{'path':'/' 'method':'DELETE'}<block_end><def_stmt>request method params<block_start><return>{'path':'/' 'method':method 'body':json.dumps(params)}<block_end><def_stmt>http_request method params_json<block_start><return>{'path':'/' 'method':method 'body':params_json}<block_end><class_stmt>TestAgent(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.shell=MockShell()<line_sep>self.app=SaboteurWebApp(self.shell)<block_end><def_stmt>test_successful_iptables_based_fault_returns_200_and_executes_correct_command self<block_start>params=json.dumps({'name':'isolate-web-server' 'type':'NETWORK_FAILURE' 'direction':'IN' 'to_port':80 'protocol':'TCP'})<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(response['status'] 200)<line_sep>self.assertEqual(self.shell.last_command 'sudo /sbin/iptables -A INPUT -p TCP -j DROP --dport 80')<block_end><def_stmt>test_invalid_json_returns_400 self<block_start>params='{ "name": }'<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(400 response['status'])<line_sep>self.assertEqual(json.dumps('Not valid JSON') response['body'])<block_end><def_stmt>test_invalid_fault_type self<block_start>params=json.dumps({'name':'isolate-web-server' 'type':'WORMS'})<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(400 response['status'])<line_sep>self.assertEqual(json.dumps({"errors":{"type":"must be present and one of "+str(alphabetical_keys(FAULT_TYPES))}}) response['body'])<block_end><def_stmt>test_fault_with_single_invalid_field_returns_400 self<block_start>params=json.dumps({'name':'isolate-web-server' 'type':'NETWORK_FAILURE' 'to_port':7871})<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(400 response['status'])<line_sep>self.assertEqual(json.dumps({"errors":{"direction":"required key not provided"}}) response['body'])<block_end><def_stmt>test_fault_with_multiple_invalid_fields_returns_400 self<block_start>params=json.dumps({'name':'isolate-web-server' 'type':'DELAY' 'direction':'IN' 'to_port':7871 'delay':'bad' 'probability':'worse'})<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(400 response['status'])<line_sep>self.assertEqual(json.dumps({"errors":{"delay":"expected int" "probability":"expected float"}}) response['body'])<block_end><def_stmt>test_reset self<block_start>self.shell.next_result='eth1'<line_sep>response=self.app.handle(delete_request())<line_sep>self.assertEqual(response['status'] 200)<line_sep>self.assertEqual(self.shell.commands ['sudo /sbin/iptables -F' "netstat -i | tail -n+3 | cut -f1 -d ' '" 'sudo /sbin/tc qdisc del dev eth1 root'])<block_end><def_stmt>test_returns_500_when_shell_command_exits_with_non_zero self<block_start>params=json.dumps({'name':'whatever' 'type':'NETWORK_FAILURE' 'direction':'IN' 'to_port':80 'protocol':'TCP'})<line_sep>self.shell.next_exit_code=1<line_sep>response=self.app.handle(http_request('POST' params))<line_sep>self.assertEqual(500 response['status'])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
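These tests depend on a MockShell helper from test_utils that is not included here. Judging only from the attributes exercised above (last_command, commands, next_result, next_exit_code), a plausible sketch could look like the following; the method names execute and execute_and_return_output are assumptions, not the project's actual API:

class MockShell:
    def __init__(self):
        self.commands = []        # every command passed in, in order
        self.last_command = None
        self.next_result = ''     # canned output for the next call
        self.next_exit_code = 0   # canned exit status for the next call

    def execute(self, command):
        self.commands.append(command)
        self.last_command = command
        return self.next_exit_code

    def execute_and_return_output(self, command):
        self.commands.append(command)
        self.last_command = command
        return self.next_result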
<import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>PositionalEncoder(nn.Module)<block_start><def_stmt>__init__ self embed_dim:int max_len:int=512<arrow><none><block_start>super(PositionalEncoder self).__init__()<line_sep>self._embed_dim=embed_dim<line_sep>self._max_len=max_len<line_sep>self._embed_matrix=torch.tensor([[pos/pow(1.0e4 2.0<times>(i<floordiv>2)/self._embed_dim)<for>i range(self._embed_dim)]<for>pos range(self._max_len)])<line_sep>self._embed_matrix[: 0::2]=torch.sin(self._embed_matrix[: 0::2])<line_sep>self._embed_matrix[: 1::2]=torch.cos(self._embed_matrix[: 1::2])<line_sep>self._embedder=nn.Embedding(self._max_len self._embed_dim)<line_sep>self._embedder.weight=nn.Parameter(self._embed_matrix requires_grad=<false>)<block_end><def_stmt>forward self embed:torch.Tensor<arrow>torch.Tensor<block_start>token_len=embed.size()[1]<if_stmt>embed.is_cuda<block_start>ids=torch.cuda.LongTensor([l<for>l range(token_len)])<block_end><else_stmt><block_start>ids=torch.LongTensor([l<for>l range(token_len)])<block_end>embed<augadd>self._embedder(ids)<line_sep><return>embed<block_end><block_end>
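A short usage sketch: the module adds sinusoidal position information to a batch of already-embedded tokens (the sizes below are arbitrary):

import torch

encoder = PositionalEncoder(embed_dim=16, max_len=128)
token_embeddings = torch.zeros(2, 10, 16)  # (batch, seq_len, embed_dim)
encoded = encoder(token_embeddings)
print(encoded.shape)                       # torch.Size([2, 10, 16])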
<import_stmt>gdbremote_testcase<import_stmt>lldbgdbserverutils<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<class_stmt>TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>test_qProcessInfo_returns_running_process self<block_start>self.build()<line_sep>procs=self.prep_debug_monitor_and_inferior()<line_sep>self.add_process_info_collection_packets()<line_sep># Run the stream context=self.expect_gdbremote_sequence()<line_sep>self.assertIsNotNone(context)<line_sep># Gather process info response process_info=self.parse_process_info_response(context)<line_sep>self.assertIsNotNone(process_info)<line_sep># Ensure the process id looks reasonable. pid_text=process_info.get("pid")<line_sep>self.assertIsNotNone(pid_text)<line_sep>pid=int(pid_text base=16)<line_sep>self.assertNotEqual(0 pid)<line_sep># If possible, verify that the process is running. self.assertTrue(lldbgdbserverutils.process_is_running(pid <true>))<block_end><def_stmt>test_attach_commandline_qProcessInfo_reports_correct_pid self<block_start>self.build()<line_sep>self.set_inferior_startup_attach()<line_sep>procs=self.prep_debug_monitor_and_inferior()<line_sep>self.assertIsNotNone(procs)<line_sep>self.add_process_info_collection_packets()<line_sep># Run the stream context=self.expect_gdbremote_sequence()<line_sep>self.assertIsNotNone(context)<line_sep># Gather process info response process_info=self.parse_process_info_response(context)<line_sep>self.assertIsNotNone(process_info)<line_sep># Ensure the process id matches what we expected. pid_text=process_info.get('pid' <none>)<line_sep>self.assertIsNotNone(pid_text)<line_sep>reported_pid=int(pid_text base=16)<line_sep>self.assertEqual(reported_pid procs["inferior"].pid)<block_end><def_stmt>test_qProcessInfo_reports_valid_endian self<block_start>self.build()<line_sep>procs=self.prep_debug_monitor_and_inferior()<line_sep>self.add_process_info_collection_packets()<line_sep># Run the stream context=self.expect_gdbremote_sequence()<line_sep>self.assertIsNotNone(context)<line_sep># Gather process info response process_info=self.parse_process_info_response(context)<line_sep>self.assertIsNotNone(process_info)<line_sep># Ensure the process id looks reasonable. endian=process_info.get("endian")<line_sep>self.assertIsNotNone(endian)<line_sep>self.assertIn(endian ["little" "big" "pdp"])<block_end><def_stmt>qProcessInfo_contains_keys self expected_key_set<block_start>procs=self.prep_debug_monitor_and_inferior()<line_sep>self.add_process_info_collection_packets()<line_sep># Run the stream context=self.expect_gdbremote_sequence()<line_sep>self.assertIsNotNone(context)<line_sep># Gather process info response process_info=self.parse_process_info_response(context)<line_sep>self.assertIsNotNone(process_info)<line_sep># Ensure the expected keys are present and non-None within the process # info. 
missing_key_set=set()<for_stmt>expected_key expected_key_set<block_start><if_stmt>expected_key<not><in>process_info<block_start>missing_key_set.add(expected_key)<block_end><block_end>self.assertEqual(missing_key_set set() "the listed keys are missing in the qProcessInfo result")<block_end><def_stmt>qProcessInfo_does_not_contain_keys self absent_key_set<block_start>procs=self.prep_debug_monitor_and_inferior()<line_sep>self.add_process_info_collection_packets()<line_sep># Run the stream context=self.expect_gdbremote_sequence()<line_sep>self.assertIsNotNone(context)<line_sep># Gather process info response process_info=self.parse_process_info_response(context)<line_sep>self.assertIsNotNone(process_info)<line_sep># Ensure the unexpected keys are not present unexpected_key_set=set()<for_stmt>unexpected_key absent_key_set<block_start><if_stmt>unexpected_key<in>process_info<block_start>unexpected_key_set.add(unexpected_key)<block_end><block_end>self.assertEqual(unexpected_key_set set() "the listed keys were present but unexpected in qProcessInfo result")<block_end>@add_test_categories(["debugserver"])<def_stmt>test_qProcessInfo_contains_cputype_cpusubtype self<block_start>self.build()<line_sep>self.qProcessInfo_contains_keys(set(['cputype' 'cpusubtype']))<block_end>@add_test_categories(["llgs"])<def_stmt>test_qProcessInfo_contains_triple_ppid self<block_start>self.build()<line_sep>self.qProcessInfo_contains_keys(set(['triple' 'parent-pid']))<block_end>@add_test_categories(["debugserver"])<def_stmt>test_qProcessInfo_does_not_contain_triple self<block_start>self.build()<line_sep># We don't expect to see triple on darwin. If we do, we'll prefer triple # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup # for the remote Host and Process. self.qProcessInfo_does_not_contain_keys(set(['triple']))<block_end>@add_test_categories(["llgs"])<def_stmt>test_qProcessInfo_does_not_contain_cputype_cpusubtype self<block_start>self.build()<line_sep>self.qProcessInfo_does_not_contain_keys(set(['cputype' 'cpusubtype']))<block_end><block_end>
# The MIT License # # Copyright (C) 2007 <NAME> # # Copyright (C) 2008-2009 <NAME> # # Copyright (C) 2008-2009 Abilisoft Ltd. # # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """psi._version This is used so that this information can stay easily in sync in both psi and setup.py. """<line_sep>version='0.3b2'<line_sep>author='<NAME>, <NAME>, <NAME>'<line_sep>copyright="""\ Copyright (C) 2007-2009 <NAME> Copyright (C) 2008, 2009 <NAME> Copyright (C) 2008, 2009 Abilisoft Ltd. Copyright (C) 2009 <NAME>"""<line_sep>license='MIT'<line_sep>
"""original source: https://github.com/chainer/chainerrl/pull/480 MIT License Copyright (c) Preferred Networks, Inc. """<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> unicode_literals<import_from_future_stmt> absolute_import<import_from_stmt>builtins *<import_from_stmt>future standard_library<line_sep>standard_library.install_aliases()<import_stmt>argparse<import_from_stmt>inspect getsourcefile<import_stmt>os<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>chainer<import_stmt>minerl# noqa: register MineRL envs as Gym envs. <import_stmt>gym<import_stmt>chainerrl<import_from_stmt>chainerrl experiments explorers<import_from_stmt>chainerrl.experiments.evaluator Evaluator<import_from_stmt>dqfd DQfD PrioritizedDemoReplayBuffer<import_from_stmt>q_functions CNNBranchingQFunction<import_from_stmt>env_wrappers BranchedRandomizedAction BranchedActionWrapper MoveAxisWrapper FrameSkip FrameStack ObtainPoVWrapper PoVWithCompassAngleWrapper FullObservationSpaceWrapper <import_from_stmt>expert_converter choose_top_experts fill_buffer<class_stmt>ScaleGradHook(object)<block_start>name='ScaleGrad'<line_sep>call_for_each_param=<true><line_sep>timing='pre'<def_stmt>__init__ self scale<block_start>self.scale=scale<block_end><def_stmt>__call__ self rule param<block_start><if_stmt>getattr(param 'scale_param' <false>)<block_start>param.grad<augmul>self.scale<block_end><block_end><block_end><def_stmt>main <block_start>"""Parses arguments and runs the example """<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--env' type=str default='MineRLTreechop-v0' choices=['MineRLTreechop-v0' 'MineRLNavigate-v0' 'MineRLNavigateDense-v0' 'MineRLNavigateExtreme-v0' 'MineRLNavigateExtremeDense-v0' 'MineRLObtainIronPickaxe-v0' 'MineRLObtainIronPickaxeDense-v0' 'MineRLObtainDiamond-v0' 'MineRLObtainDiamondDense-v0' 'MineRLNavigateDenseFixed-v0'# for debug use ] help='MineRL environment identifier')<line_sep>parser.add_argument('--outdir' type=str default='results' help='Directory path to save output files.'<concat>' If it does not exist, it will be created.')<line_sep>parser.add_argument('--seed' type=int default=0 help='Random seed [0, 2 ** 31)')<line_sep>parser.add_argument('--gpu' type=int default=-1 help='GPU to use, set to -1 if no GPU.')<line_sep>parser.add_argument('--final-exploration-frames' type=int default=10<power>6 help='Timesteps after which we stop '+'annealing exploration rate')<line_sep>parser.add_argument('--final-epsilon' type=float default=0.01 help='Final value of epsilon during training.')<line_sep>parser.add_argument('--eval-epsilon' type=float default=0.001 help='Exploration epsilon used during eval episodes.')<line_sep>parser.add_argument('--replay-start-size' type=int default=1000 help='Minimum replay buffer size before '+'performing gradient updates.')<line_sep>parser.add_argument('--target-update-interval' type=int default=10<power>4 help='Frequency (in timesteps) at which '+'the target network is updated.')<line_sep>parser.add_argument('--update-interval' type=int default=4 help='Frequency (in timesteps) of network updates.')<line_sep>parser.add_argument('--eval-n-runs' type=int default=10)<line_sep>parser.add_argument('--no-clip-delta' dest='clip_delta' action='store_false')<line_sep>parser.add_argument('--error-max' type=float default=1.0)<line_sep>parser.add_argument('--num-step-return' type=int default=10)<line_sep>parser.set_defaults(clip_delta=<true>)<line_sep>parser.add_argument('--logging-level' type=int default=20 
help='Logging level. 10:DEBUG, 20:INFO etc.')<line_sep>parser.add_argument('--logging-filename' type=str default=<none>)<line_sep>parser.add_argument('--monitor' action='store_true' default=<false> help='Monitor env. Videos and additional information are saved as output files when evaluation')<line_sep># parser.add_argument('--render', action='store_true', default=False, # help='Render env states in a GUI window.') parser.add_argument('--optimizer' type=str default='rmsprop' choices=['rmsprop' 'adam'])<line_sep>parser.add_argument('--lr' type=float default=2.5e-4 help='Learning rate')<line_sep>parser.add_argument("--replay-buffer-size" type=int default=10<power>6 help="Size of replay buffer (Excluding demonstrations)")<line_sep>parser.add_argument("--minibatch-size" type=int default=32)<line_sep>parser.add_argument('--batch-accumulator' type=str default="sum")<line_sep>parser.add_argument('--demo' action='store_true' default=<false>)<line_sep>parser.add_argument('--load' type=str default=<none>)<line_sep>parser.add_argument("--save-demo-trajectories" action="store_true" default=<false>)<line_sep># DQfD specific parameters for loading and pretraining. parser.add_argument('--n-experts' type=int default=10)<line_sep>parser.add_argument('--expert-demo-path' type=str default=<none>)<line_sep>parser.add_argument('--n-pretrain-steps' type=int default=750000)<line_sep>parser.add_argument('--demo-supervised-margin' type=float default=0.8)<line_sep>parser.add_argument('--loss-coeff-l2' type=float default=1e-5)<line_sep>parser.add_argument('--loss-coeff-nstep' type=float default=1.0)<line_sep>parser.add_argument('--loss-coeff-supervised' type=float default=1.0)<line_sep>parser.add_argument('--bonus-priority-agent' type=float default=0.001)<line_sep>parser.add_argument('--bonus-priority-demo' type=float default=1.0)<line_sep># Action branching architecture parser.add_argument('--gradient-clipping' action='store_true' default=<false>)<line_sep>parser.add_argument('--gradient-rescaling' action='store_true' default=<false>)<line_sep># NoisyNet parameters parser.add_argument('--use-noisy-net' type=str default=<none> choices=['before-pretraining' 'after-pretraining'])<line_sep>parser.add_argument('--noisy-net-sigma' type=float default=0.5)<line_sep># Parameters for state/action handling parser.add_argument('--frame-stack' type=int default=<none> help='Number of frames stacked (None for disable).')<line_sep>parser.add_argument('--frame-skip' type=int default=<none> help='Number of frames skipped (None for disable).')<line_sep>parser.add_argument('--camera-atomic-actions' type=int default=10)<line_sep>parser.add_argument('--max-range-of-camera' type=float default=10.)<line_sep>parser.add_argument('--use-full-observation' action='store_true' default=<false>)<line_sep>args=parser.parse_args()<assert_stmt>args.expert_demo_path<is><not><none> "DQfD needs collected \ expert demonstrations"<import_stmt>logging<if_stmt>args.logging_filename<is><not><none><block_start>logging.basicConfig(filename=args.logging_filename filemode='w' level=args.logging_level)<block_end><else_stmt><block_start>logging.basicConfig(level=args.logging_level)<block_end>logger=logging.getLogger(__name__)<line_sep>train_seed=args.seed<line_sep>test_seed=2<power>31-1-args.seed<line_sep>chainerrl.misc.set_random_seed(args.seed gpus=(args.gpu ))<line_sep>args.outdir=experiments.prepare_output_dir(args args.outdir)<line_sep>logger.info('Output files are saved in 
{}'.format(args.outdir))<if_stmt>args.env<eq>'MineRLTreechop-v0'<block_start>branch_sizes=[9 16 args.camera_atomic_actions args.camera_atomic_actions]<block_end><elif_stmt>args.env<in>['MineRLNavigate-v0' 'MineRLNavigateDense-v0' 'MineRLNavigateExtreme-v0' 'MineRLNavigateExtremeDense-v0']<block_start>branch_sizes=[9 16 args.camera_atomic_actions args.camera_atomic_actions 2]<block_end><elif_stmt>args.env<in>['MineRLObtainIronPickaxe-v0' 'MineRLObtainIronPickaxeDense-v0' 'MineRLObtainDiamond-v0' 'MineRLObtainDiamondDense-v0']<block_start>branch_sizes=[9 16 args.camera_atomic_actions args.camera_atomic_actions 32]<block_end><else_stmt><block_start><raise>Exception("Unknown environment")<block_end><def_stmt>make_env env test# wrap env: observation... # NOTE: wrapping order matters! <block_start><if_stmt>args.use_full_observation<block_start>env=FullObservationSpaceWrapper(env)<block_end><elif_stmt>args.env.startswith('MineRLNavigate')<block_start>env=PoVWithCompassAngleWrapper(env)<block_end><else_stmt><block_start>env=ObtainPoVWrapper(env)<block_end><if_stmt>test<and>args.monitor<block_start>env=gym.wrappers.Monitor(env os.path.join(args.outdir 'monitor') mode='evaluation'<if>test<else>'training' video_callable=<lambda>episode_id:<true>)<block_end><if_stmt>args.frame_skip<is><not><none><block_start>env=FrameSkip(env skip=args.frame_skip)<block_end># convert hwc -> chw as Chainer requires env=MoveAxisWrapper(env source=-1 destination=0 use_tuple=args.use_full_observation)<line_sep>#env = ScaledFloatFrame(env) <if_stmt>args.frame_stack<is><not><none><block_start>env=FrameStack(env args.frame_stack channel_order='chw' use_tuple=args.use_full_observation)<block_end># wrap env: action... env=BranchedActionWrapper(env branch_sizes args.camera_atomic_actions args.max_range_of_camera)<if_stmt>test<block_start>env=BranchedRandomizedAction(env branch_sizes args.eval_epsilon)<block_end>env_seed=test_seed<if>test<else>train_seed<line_sep>env.seed(int(env_seed))<line_sep><return>env<block_end>core_env=gym.make(args.env)<line_sep>env=make_env(core_env test=<false>)<line_sep>eval_env=make_env(core_env test=<true>)<line_sep># Q function <if_stmt>args.env.startswith('MineRLNavigate')<block_start><if_stmt>args.use_full_observation<block_start>base_channels=3# RGB <block_end><else_stmt><block_start>base_channels=4# RGB + compass <block_end><block_end><elif_stmt>args.env.startswith('MineRLObtain')<block_start>base_channels=3# RGB <block_end><else_stmt><block_start>base_channels=3<block_end># RGB <if_stmt>args.frame_stack<is><none><block_start>n_input_channels=base_channels<block_end><else_stmt><block_start>n_input_channels=base_channels<times>args.frame_stack<block_end>q_func=CNNBranchingQFunction(branch_sizes n_input_channels=n_input_channels gradient_rescaling=args.gradient_rescaling use_tuple=args.use_full_observation)<def_stmt>phi x# observation -> NN input <block_start><if_stmt>args.use_full_observation<block_start>pov=np.asarray(x[0] dtype=np.float32)<line_sep>others=np.asarray(x[1] dtype=np.float32)<line_sep><return>(pov/255 others)<block_end><else_stmt><block_start><return>np.asarray(x dtype=np.float32)/255<block_end><block_end>explorer=explorers.LinearDecayEpsilonGreedy(1.0 args.final_epsilon args.final_exploration_frames <lambda>:np.array([np.random.randint(n)<for>n branch_sizes]))<line_sep># Draw the computational graph and save it in the output directory. 
<if_stmt>args.use_full_observation<block_start>sample_obs=tuple([x[<none>]<for>x env.observation_space.sample()])<block_end><else_stmt><block_start>sample_obs=env.observation_space.sample()[<none>]<block_end>chainerrl.misc.draw_computational_graph([q_func(phi(sample_obs))] os.path.join(args.outdir 'model'))<if_stmt>args.optimizer<eq>'rmsprop'<block_start>opt=chainer.optimizers.RMSpropGraves(args.lr alpha=0.95 momentum=0.0 eps=1e-2)<block_end><elif_stmt>args.optimizer<eq>'adam'<block_start>opt=chainer.optimizers.Adam(args.lr)<block_end><if_stmt>args.use_noisy_net<is><none><block_start>opt.setup(q_func)<block_end><if_stmt>args.gradient_rescaling<block_start>opt.add_hook(ScaleGradHook(1/(1+len(q_func.branch_sizes))))<block_end><if_stmt>args.gradient_clipping<block_start>opt.add_hook(chainer.optimizer_hooks.GradientClipping(10.0))<block_end># calculate corresponding `steps` and `eval_interval` according to frameskip maximum_frames=8640000# = 1440 episodes if we count an episode as 6000 frames. <if_stmt>args.frame_skip<is><none><block_start>steps=maximum_frames<line_sep>eval_interval=6000<times>100# (approx.) every 100 episode (counts "1 episode = 6000 steps") <block_end><else_stmt><block_start>steps=maximum_frames<floordiv>args.frame_skip<line_sep>eval_interval=6000<times>100<floordiv>args.frame_skip<block_end># (approx.) every 100 episode (counts "1 episode = 6000 steps") # Anneal beta from beta0 to 1 throughout training betasteps=steps/args.update_interval<line_sep>replay_buffer=PrioritizedDemoReplayBuffer(args.replay_buffer_size alpha=0.4 beta0=0.6 betasteps=betasteps error_max=args.error_max num_steps=args.num_step_return)<line_sep># Fill the demo buffer with expert transitions <if_stmt><not>args.demo<block_start>chosen_dirs=choose_top_experts(args.expert_demo_path args.n_experts logger=logger)<line_sep>fill_buffer(args.env chosen_dirs replay_buffer args.frame_skip args.frame_stack args.camera_atomic_actions args.max_range_of_camera args.use_full_observation logger=logger)<line_sep>logger.info("Demo buffer loaded with {} transitions".format(len(replay_buffer)))<block_end><def_stmt>reward_transform x<block_start><return>np.sign(x)<times>np.log(1+np.abs(x))<block_end><if_stmt>args.use_noisy_net<is><not><none><and>args.use_noisy_net<eq>'before-pretraining'<block_start>chainerrl.links.to_factorized_noisy(q_func sigma_scale=args.noisy_net_sigma)<line_sep>explorer=explorers.Greedy()<line_sep>opt.setup(q_func)<block_end>agent=DQfD(q_func opt replay_buffer gamma=0.99 explorer=explorer n_pretrain_steps=args.n_pretrain_steps demo_supervised_margin=args.demo_supervised_margin bonus_priority_agent=args.bonus_priority_agent bonus_priority_demo=args.bonus_priority_demo loss_coeff_nstep=args.loss_coeff_nstep loss_coeff_supervised=args.loss_coeff_supervised loss_coeff_l2=args.loss_coeff_l2 gpu=args.gpu replay_start_size=args.replay_start_size target_update_interval=args.target_update_interval clip_delta=args.clip_delta update_interval=args.update_interval batch_accumulator=args.batch_accumulator phi=phi reward_transform=reward_transform minibatch_size=args.minibatch_size)<if_stmt>args.use_noisy_net<is><not><none><and>args.use_noisy_net<eq>'after-pretraining'<block_start>chainerrl.links.to_factorized_noisy(q_func sigma_scale=args.noisy_net_sigma)<line_sep>explorer=explorers.Greedy()<if_stmt>args.optimizer<eq>'rmsprop'<block_start>opt=chainer.optimizers.RMSpropGraves(args.lr alpha=0.95 momentum=0.0 
eps=1e-2)<block_end><elif_stmt>args.optimizer<eq>'adam'<block_start>opt=chainer.optimizers.Adam(args.lr)<block_end>opt.setup(q_func)<line_sep>opt.add_hook(chainer.optimizer_hooks.WeightDecay(args.loss_coeff_l2))<line_sep>agent.optimizer=opt<line_sep>agent.target_model=<none><line_sep>agent.sync_target_network()<block_end><if_stmt>args.load<block_start>agent.load(args.load)<block_end><if_stmt>args.demo<block_start>eval_stats=experiments.eval_performance(env=eval_env agent=agent n_steps=<none> n_episodes=args.eval_n_runs)<line_sep>logger.info('n_runs: {} mean: {} median: {} stdev: {}'.format(args.eval_n_runs eval_stats['mean'] eval_stats['median'] eval_stats['stdev']))<block_end><else_stmt><block_start>agent.pretrain()<line_sep>evaluator=Evaluator(agent=agent n_steps=<none> n_episodes=args.eval_n_runs eval_interval=eval_interval outdir=args.outdir max_episode_len=<none> env=eval_env step_offset=0 save_best_so_far_agent=<true> logger=logger)<line_sep># Evaluate the agent BEFORE training begins evaluator.evaluate_and_update_max_score(t=0 episodes=0)<line_sep>experiments.train_agent(agent=agent env=env steps=steps outdir=args.outdir max_episode_len=<none> step_offset=0 evaluator=evaluator successful_score=<none> step_hooks=[])<block_end>env.close()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#------------------------------------------------ #AlCaReco filtering for HCAL isotrk: #------------------------------------------------ <import_from_stmt>Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkProducer_cfi *<import_from_stmt>Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkFilter_cfi *<line_sep>seqALCARECOHcalCalIsoTrkProducerFilter=cms.Sequence(alcaHcalIsotrkProducer<times>alcaHcalIsotrkFilter)<line_sep>
<import_stmt>os<def_stmt>getenv_boolean var_name default_value=<false><block_start>result=default_value<line_sep>env_value=os.getenv(var_name)<if_stmt>env_value<is><not><none><block_start>result=env_value.upper()<in>("TRUE" "1")<block_end><return>result<block_end>API_V1_STR="/api/v1"<line_sep>SECRET_KEY=os.getenvb(b"SECRET_KEY")<if_stmt><not>SECRET_KEY<block_start>SECRET_KEY=os.urandom(32)<block_end>ACCESS_TOKEN_EXPIRE_MINUTES=60<times>24<times>8# 60 minutes * 24 hours * 8 days SERVER_NAME=os.getenv("SERVER_NAME")<line_sep>BACKEND_CORS_ORIGINS=os.getenv("BACKEND_CORS_ORIGINS")<line_sep>PROJECT_NAME=os.getenv("PROJECT_NAME")<line_sep>SENTRY_DSN=os.getenv("SENTRY_DSN")<line_sep>POSTGRES_SERVER=os.getenv("POSTGRES_SERVER")<line_sep>POSTGRES_USER=os.getenv("POSTGRES_USER")<line_sep>POSTGRES_PASSWORD=os.getenv("POSTGRES_PASSWORD")<line_sep>POSTGRES_DB=os.getenv("POSTGRES_DB")<line_sep>SQLALCHEMY_DATABASE_URI=(f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_SERVER}/{POSTGRES_DB}")<line_sep>FIRST_SUPERUSER=os.getenv("FIRST_SUPERUSER")<line_sep>FIRST_SUPERUSER_PASSWORD=os.getenv("FIRST_SUPERUSER_PASSWORD")<line_sep>USERS_OPEN_REGISTRATION=getenv_boolean("USERS_OPEN_REGISTRATION")<line_sep>
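For example, getenv_boolean returns True only when the variable is set to "true" or "1" (comparison is case-insensitive via upper()); the flag name below is made up:

# export MY_FEATURE_FLAG=true  -> True
# export MY_FEATURE_FLAG=0     -> False
# unset                        -> default_value
print(getenv_boolean("MY_FEATURE_FLAG"))                      # False if unset
print(getenv_boolean("MY_FEATURE_FLAG", default_value=True))  # True if unset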
# terrascript/provider/hashicorp/googleworkspace.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:17:22 UTC) <import_stmt>terrascript<class_stmt>googleworkspace(terrascript.Provider)<block_start>"""terraform-provider-googleworkspace"""<line_sep>__description__="terraform-provider-googleworkspace"<line_sep>__namespace__="hashicorp"<line_sep>__name__="googleworkspace"<line_sep>__source__="https://github.com/hashicorp/terraform-provider-googleworkspace"<line_sep>__version__="0.4.1"<line_sep>__published__="2021-08-16T19:18:13Z"<line_sep>__tier__="official"<block_end>__all__=["googleworkspace"]<line_sep>
# Copyright 2022 Google. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for preprocessors."""<import_stmt>os<import_stmt>textwrap<import_stmt>unittest.mock<as>mock<import_from_stmt>absl.testing parameterized<import_stmt>numpy<as>np<import_from_stmt>prompt_tuning.data preprocessors<import_stmt>seqio<import_from_stmt>seqio test_utils<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_datasets<as>tfds<line_sep>TEST_DATA=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) "test_data")<line_sep>INPUTS_SIZE=10<line_sep>TARGETS_SIZE=5<line_sep>TEXT_SIZE=10<line_sep>TEST_T5_FEATURES={"inputs":seqio.Feature(vocabulary=seqio.SentencePieceVocabulary(os.path.join(TEST_DATA "t5_vocab") 100) add_eos=<true> required=<false>) "targets":seqio.Feature(vocabulary=seqio.SentencePieceVocabulary(os.path.join(TEST_DATA "t5_vocab") 100) add_eos=<true>)}<def_stmt>create_fake_text_dataset examples:int=10 text_size:int=TEXT_SIZE<block_start>text=np.reshape(# Start at 2 so we skip EOS=1 which could be a problem on any tests that # actually decode the fake inputs. np.arange(2 examples<times>text_size+2) (-1 text_size)).astype(np.int32)<line_sep><return>tf.data.Dataset.from_tensor_slices({"targets":text})<block_end><class_stmt>PreprocessorsTest(tf.test.TestCase)<block_start><def_stmt>test_remove_first_text_token self<block_start>input_strings=["This is my first example" "The second"]<line_sep>gold_strings=[" ".join(s.split()[1:])<for>s input_strings]<line_sep>ds=tf.data.Dataset.from_tensor_slices({"inputs":input_strings})<line_sep>processed_ds=preprocessors.remove_first_text_token(ds)<for_stmt>res,gold zip(processed_ds gold_strings)<block_start>self.assertEqual(res["inputs"].numpy().decode("utf-8") gold)<block_end><block_end><def_stmt>test_add_sentinel_to_beginning self<block_start>vocab_size=100<line_sep>offset=0<line_sep>field="targets"<line_sep>ds=tf.data.Dataset.from_tensor_slices({field:tf.zeros([3 4] dtype=tf.int32) })<line_sep>output_features={field:mock.MagicMock(vocabulary=mock.MagicMock(vocab_size=vocab_size))}<line_sep>processed_ds=preprocessors.add_sentinel_to_beginning(ds output_features field offset)<for_stmt>ex processed_ds<block_start>self.assertEqual(ex[field][0].numpy().item() vocab_size-(offset+1))<block_end><block_end><def_stmt>test_tsv_to_qa self<block_start>fake_data=textwrap.dedent(""" id\tcontext\tquestion\tanswer\tanswers 0\tThe capital of France is Paris\tWhat is the capital of France?\tParis\tParis|||paris 1\tAn ant can carry many times it's body weight making it v strong.\tAre ants strong?\tYes\tYes """.strip("\n"))<line_sep>ds=tf.data.Dataset.from_tensor_slices(fake_data.split("\n")[1:-1])<line_sep>ds=preprocessors.preprocess_tsv_to_qa(ds)<line_sep>gold_data=[{"id":"0" "question":"What is the capital of France ? " "answer":"Paris" "answers":["Paris" "paris"] "context":"The capital of France is Paris" "inputs":"question: What is the capital of France ? context: The capital of"<concat>" France is Paris" "targets":"Paris"} {"id":"1" "question":"Are ants strong ? 
" "answer":"Yes" "answers":["Yes"] "context":"An ant can carry many times it ' s body weight making it v strong . " "inputs":"question: Are ants strong ? context: An ant can carry many times "<concat>"it ' s body weight making it v strong . " "targets":"Yes"}]<for_stmt>ex,gold zip(ds gold_data)<block_start>self.assertEqual(ex["id"].numpy().decode("utf-8") gold["id"])<line_sep>self.assertEqual(ex["question"].numpy().decode("utf-8") gold["question"])<line_sep>self.assertEqual(ex["answer"].numpy().decode("utf-8") gold["answer"])<line_sep>self.assertEqual(ex["context"].numpy().decode("utf-8") gold["context"])<line_sep>self.assertEqual(ex["targets"].numpy().decode("utf-8") gold["targets"])<for_stmt>answer,gold_answer zip(ex["answers"].numpy() gold["answers"])<block_start>self.assertEqual(answer.decode("utf-8") gold_answer)<block_end><block_end><block_end><def_stmt>test_preprocess_text_generation self<block_start>example=tf.data.Dataset.from_tensor_slices({"source_aligned":{"en":["english input"] "es":["spanish input"]} "target_aligned":{"en":["english target"] "es":["spanish target"]}})<line_sep>processed_example=preprocessors.preprocess_text_generation(example source_key="source_aligned" target_key="target_aligned" task_name=<none> prefix="summarize:" source_nested_key="en" target_nested_key="es" )<line_sep>test_utils.assert_dataset(processed_example {"inputs":"summarize: english input" "targets":"spanish target"})<block_end><block_end><class_stmt>BARTTaskTest(parameterized.TestCase)<block_start>@parameterized.named_parameters(dict(testcase_name="text_infilling" preprocessor=preprocessors.text_infilling) dict(testcase_name="token_deletion" preprocessor=preprocessors.token_deletion))<def_stmt>test_inputs_shorter_than_targets self preprocessor<block_start>ds=create_fake_text_dataset()<line_sep>ds=preprocessor(ds {"inputs":INPUTS_SIZE+1 "targets":TARGETS_SIZE+1} TEST_T5_FEATURES noise_density=0.5)<for_stmt>ex tfds.as_numpy(ds)<block_start>self.assertLess(ex["inputs"].shape[0] ex["targets"].shape[0])<block_end><block_end>@parameterized.named_parameters(dict(testcase_name="text_infilling" preprocessor=preprocessors.text_infilling) dict(testcase_name="token_deletion" preprocessor=preprocessors.token_deletion))<def_stmt>test_extra_id_not_in_targets self preprocessor<block_start>ds=create_fake_text_dataset()<line_sep>ds=preprocessor(ds {"inputs":INPUTS_SIZE+1 "targets":TARGETS_SIZE+1} TEST_T5_FEATURES noise_density=0.5)<line_sep>vocab=TEST_T5_FEATURES["targets"].vocabulary<for_stmt>ex tfds.as_numpy(ds)<block_start>targets_text=vocab.decode(ex["targets"].tolist())<line_sep>self.assertNotIn("extra_id" targets_text)<block_end><block_end>@parameterized.named_parameters(dict(testcase_name="text_infilling" preprocessor=preprocessors.text_infilling) dict(testcase_name="token_deletion" preprocessor=preprocessors.token_deletion))<def_stmt>test_target_tokens_match_original_tokens self preprocessor<block_start>ds=create_fake_text_dataset()<line_sep>processed_ds=preprocessor(ds {"inputs":INPUTS_SIZE+1 "targets":TARGETS_SIZE+1} TEST_T5_FEATURES noise_density=0.5)<for_stmt>processed_ex,ex zip(tfds.as_numpy(processed_ds) tfds.as_numpy(ds))<block_start>np.testing.assert_array_equal(processed_ex["targets"] ex["targets"])<block_end><block_end><def_stmt>test_extra_id_not_in_token_deletion_inputs self<block_start>ds=create_fake_text_dataset()<line_sep>ds=preprocessors.token_deletion(ds {"inputs":INPUTS_SIZE+1 "targets":TARGETS_SIZE+1} TEST_T5_FEATURES 
noise_density=0.5)<line_sep>vocab=TEST_T5_FEATURES["inputs"].vocabulary<for_stmt>ex tfds.as_numpy(ds)<block_start>inputs_text=vocab.decode(ex["inputs"].tolist())<line_sep>self.assertNotIn("extra_id" inputs_text)<block_end><block_end><def_stmt>test_extra_id_in_text_infilling_inputs self<block_start>ds=create_fake_text_dataset()<line_sep>ds=preprocessors.text_infilling(ds {"inputs":INPUTS_SIZE+1 "targets":TARGETS_SIZE+1} TEST_T5_FEATURES noise_density=0.5)<line_sep>vocab=TEST_T5_FEATURES["inputs"].vocabulary<for_stmt>ex tfds.as_numpy(ds)<block_start>inputs_text=vocab.decode(ex["inputs"].tolist())<line_sep>self.assertIn("extra_id" inputs_text)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
# -*- coding: utf-8 -*- # Copyright (c) 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data shape class."""<import_from_stmt>typing Any Dict List Optional Union<import_from_stmt>neural_compressor.ux.utils.json_serializer JsonSerializer<class_stmt>Shape(JsonSerializer)<block_start>"""Data shape definition."""<def_stmt>__init__ self shape:Optional[str]="" trusted:bool=<false><arrow><none><block_start>"""Object construction."""<line_sep>super().__init__()<line_sep>self.shape=shape<line_sep>self.trusted=trusted<block_end><def_stmt>serialize self serialization_type:str="default" <arrow>Union[Dict[str Any] List[Dict[str Any]]]<block_start>"""Serialize Shape class to dict."""<line_sep>result={}<for_stmt>key,value self.__dict__.items()<block_start><if_stmt>key<in>self._skip<block_start><continue><block_end>result.update({key:value})<block_end><return>result<block_end><block_end>
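For instance, a shape can be serialized to a plain dict (the exact keys also depend on what the JsonSerializer base class stores on the instance):

shape = Shape(shape="[1, 3, 224, 224]", trusted=True)
print(shape.serialize())  # expected to contain {'shape': '[1, 3, 224, 224]', 'trusted': True}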
<import_stmt>math<import_from_stmt>functools partial<import_from_stmt>pytest approx<import_from_stmt>allrank.data.dataset_loading PADDED_Y_VALUE<import_from_stmt>tests.losses.utils neuralNDCG_wrap ndcg_wrap<line_sep>test_cases=[{"stochastic":<false> "transposed":<false>} {"stochastic":<true> "transposed":<false>} {"stochastic":<false> "transposed":<true>} {"stochastic":<true> "transposed":<true>}]<def_stmt>test_neuralNDCG_simple <block_start><for_stmt>tc test_cases<block_start>neuralNDCG_simple(partial(neuralNDCG_wrap **tc))<block_end><block_end><def_stmt>neuralNDCG_simple fun<block_start>y_pred=[0.5 0.2]<line_sep>y_true=[1.0 0.0]<line_sep>result=fun(y_pred y_true)<line_sep>expected=ndcg_wrap(y_pred y_true)<assert_stmt>math.isfinite(result)<assert_stmt>(-1<times>result<eq>approx(expected))<block_end><def_stmt>test_neuralNDCG_longer <block_start><for_stmt>tc test_cases<block_start>neuralNDCG_longer(partial(neuralNDCG_wrap **tc))<block_end><block_end><def_stmt>neuralNDCG_longer fun<block_start>y_pred=[0.5 0.2 0.1 0.4 1.0 -1.0 0.63]<line_sep>y_true=[1.0 2.0 2.0 4.0 1.0 4.0 3.0]<line_sep>result=fun(y_pred y_true)<line_sep>expected=ndcg_wrap(y_pred y_true)<assert_stmt>math.isfinite(result)<assert_stmt>(-1<times>result<eq>approx(expected))<block_end><def_stmt>test_neuralNDCG_stable_for_very_small_prediction <block_start><for_stmt>tc test_cases<block_start>neuralNDCG_stable_for_very_small_prediction(partial(neuralNDCG_wrap **tc))<block_end><block_end><def_stmt>neuralNDCG_stable_for_very_small_prediction fun<block_start>y_pred=[0.5 -1e30]<line_sep>y_true=[1.0 0.0]<line_sep>result=fun(y_pred y_true)<line_sep>expected=ndcg_wrap(y_pred y_true)<assert_stmt>math.isfinite(result)<assert_stmt>(-1<times>result<eq>approx(expected))<block_end><def_stmt>test_neuralNDCG_ignores_padded_value <block_start><for_stmt>tc test_cases<block_start>neuralNDCG_ignores_padded_value(partial(neuralNDCG_wrap **tc))<block_end><block_end><def_stmt>neuralNDCG_ignores_padded_value fun<block_start>y_pred=[0.5 0.2 0.1 0.4 1.0 -1.0 0.63 1. 0.5 0.3]<line_sep>y_true=[1.0 2.0 2.0 4.0 1.0 4.0 3.0 PADDED_Y_VALUE PADDED_Y_VALUE PADDED_Y_VALUE]<line_sep>result=fun(y_pred y_true temperature=0.001)<line_sep>expected=ndcg_wrap(y_pred y_true)<assert_stmt>math.isfinite(result)<assert_stmt>(-1<times>result<eq>approx(expected))<block_end><def_stmt>test_neuralNDCG_at_3 <block_start><for_stmt>tc test_cases<block_start>neuralNDCG_at_3(partial(neuralNDCG_wrap **tc))<block_end><block_end><def_stmt>neuralNDCG_at_3 fun<block_start>y_pred=[0.5 0.2 0.1 0.4 1.0 -1.0 0.63]<line_sep>y_true=[1.0 2.0 2.0 4.0 1.0 4.0 3.0]<line_sep>ats=3<line_sep>result=fun(y_pred y_true k=ats)<line_sep>expected=ndcg_wrap(y_pred y_true ats=[ats])<assert_stmt>math.isfinite(result)<assert_stmt>(-1<times>result<eq>approx(expected))<block_end>
<import_from_stmt>PyObjCTools.TestSupport *<import_stmt>objc<import_stmt>array<import_stmt>sys<import_from_stmt>objc YES NO<import_from_stmt>AppKit *<try_stmt><block_start>unicode<block_end><except_stmt>NameError<block_start>unicode=str<block_end><try_stmt><block_start>long<block_end><except_stmt>NameError<block_start>long=int<block_end><class_stmt>TestNSBitmapImageRep(TestCase)<block_start><def_stmt>testInstantiation self# widthxheight RGB 24bpp image <block_start>width=256<line_sep>height=256<line_sep>dataPlanes=(<none> <none> <none> <none> <none>)<line_sep>dataPlanes=<none><line_sep>i1=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes width height 8 3 NO NO NSDeviceRGBColorSpace 0 0)<line_sep>self.assertTrue(i1)<line_sep>i2=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(<none> width height 8 3 NO NO NSDeviceRGBColorSpace 0 0)<line_sep>self.assertTrue(i2)<block_end><def_stmt>testPixelFormat self<block_start>width=16<line_sep>height=16<line_sep>i1=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(<none> width height 8 3 NO NO NSDeviceRGBColorSpace NSAlphaFirstBitmapFormat 0 0)<line_sep>self.assertIsInstance(i1 NSBitmapImageRep)<line_sep>singlePlane=objc.allocateBuffer(width<times>height<times>4)<for_stmt>i range(0 width<times>height)<block_start>si=i<times>4<line_sep>singlePlane[si]=1<line_sep>singlePlane[si+1]=2<line_sep>singlePlane[si+2]=3<line_sep>singlePlane[si+3]=4<block_end>dataPlanes=(singlePlane <none> <none> <none> <none>)<line_sep># test non-planar, premade buffer i2=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(dataPlanes width height 8 3 NO NO NSDeviceRGBColorSpace NSAlphaFirstBitmapFormat 0 0)<line_sep>self.assertIsInstance(i2 NSBitmapImageRep)<line_sep>bitmapData=i2.bitmapData()<line_sep>self.assertEqual(len(bitmapData) width<times>height<times>4)<block_end><def_stmt>testImageData self<block_start>width=256<line_sep>height=256<line_sep>rPlane=array.array('B')<line_sep>rPlane.fromlist([y%256<for>y range(0 height)<for>x range(0 width)])<if_stmt>sys.version_info[0]<eq>3<block_start>buffer=memoryview<block_end><else_stmt><block_start><import_from_stmt>__builtin__ buffer<block_end>rPlane=buffer(rPlane)<line_sep>gPlane=array.array('B')<line_sep>gPlane.fromlist([y%256<for>y range(0 height)<for>x range(width 0 -1)])<line_sep>gPlane=buffer(gPlane)<line_sep>bPlane=array.array('B')<line_sep>bPlane.fromlist([x%256<for>y range(0 height)<for>x range(0 width)])<line_sep>bPlane=buffer(bPlane)<line_sep>dataPlanes=(rPlane gPlane bPlane <none> <none>)<line_sep># test planar, pre-made buffer i1=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes width height 8 3 NO YES NSDeviceRGBColorSpace 0 0)<line_sep>self.assertTrue(i1)<line_sep>singlePlane=objc.allocateBuffer(width<times>height<times>3)<for_stmt>i range(0 
width<times>height)<block_start>si=i<times>3<if_stmt>sys.version_info[0]<eq>2<block_start>singlePlane[si]=rPlane[i]<line_sep>singlePlane[si+1]=gPlane[i]<line_sep>singlePlane[si+2]=bPlane[i]<block_end><else_stmt><block_start><def_stmt>as_byte v<block_start><if_stmt>isinstance(v int)<block_start><return>v<block_end><else_stmt><block_start><return>ord(v)<block_end><block_end>singlePlane[si]=as_byte(rPlane[i])<line_sep>singlePlane[si+1]=as_byte(gPlane[i])<line_sep>singlePlane[si+2]=as_byte(bPlane[i])<block_end><block_end>dataPlanes=(singlePlane <none> <none> <none> <none>)<line_sep># test non-planar, premade buffer i2=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes width height 8 3 NO NO NSDeviceRGBColorSpace 0 0)<line_sep># test grey scale greyPlane=array.array('B')<line_sep>greyPlane.fromlist([x%256<for>x range(0 height)<for>x range(0 width)])<line_sep>greyPlanes=(greyPlane <none> <none> <none> <none>)<line_sep>greyImage=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes width height 8 1 NO YES NSCalibratedWhiteColorSpace width 8)<line_sep># test planar, NSBIR allocated buffer i3=NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(<none> width height 8 3 NO YES NSDeviceRGBColorSpace 0 0)<line_sep>r,g,b,a,o=i3.getBitmapDataPlanes_()<line_sep>self.assertTrue(r)<line_sep>self.assertTrue(g)<line_sep>self.assertTrue(b)<line_sep>self.assertTrue(<not>a)<line_sep>self.assertTrue(<not>o)<line_sep>self.assertEqual(len(r) len(rPlane))<line_sep>self.assertEqual(len(g) len(gPlane))<line_sep>self.assertEqual(len(b) len(bPlane))<line_sep>r[0:len(r)]=rPlane[0:len(rPlane)]<line_sep>g[0:len(g)]=gPlane[0:len(gPlane)]<line_sep>b[0:len(b)]=bPlane[0:len(bPlane)]<line_sep>bitmapData=i2.bitmapData()<line_sep>self.assertEqual(len(bitmapData) len(singlePlane))<try_stmt><block_start>memoryview<block_end><except_stmt>NameError<block_start>self.assertEqual(bitmapData singlePlane)<block_end><else_stmt><block_start>self.assertEqual(bitmapData.tobytes() singlePlane)<block_end>a=array.array('L' [255]<times>4)<line_sep>self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_ 0)<line_sep>d=i2.getPixel_atX_y_(a 1 1)<line_sep>self.assertIs(a d)<block_end><block_end><class_stmt>TestBadCreation(TestCase)# Redirect stderr to /dev/null for the duration of this test, # NSBitmapImageRep will write an error message to stderr. 
<block_start><def_stmt>setUp self<block_start><import_stmt>os<line_sep>self.duppedStderr=os.dup(2)<line_sep>fp=os.open('/dev/null' os.O_RDWR)<line_sep>os.dup2(fp 2)<line_sep>os.close(fp)<block_end><def_stmt>tearDown self<block_start><import_stmt>os<line_sep>os.dup2(self.duppedStderr 2)<block_end><def_stmt>test_AllocInit self<block_start>y=NSBitmapImageRep.alloc()<try_stmt><block_start>self.assertRaises(ValueError y.init)<block_end><finally_stmt><block_start>width=256<line_sep>height=256<line_sep>dataPlanes=(<none> <none> <none> <none> <none>)<line_sep>y=y.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes width height 8 3 NO NO NSDeviceRGBColorSpace 0 0)<block_end><block_end><def_stmt>testConstants self<block_start>self.assertEqual(NSTIFFCompressionNone 1)<line_sep>self.assertEqual(NSTIFFCompressionCCITTFAX3 3)<line_sep>self.assertEqual(NSTIFFCompressionCCITTFAX4 4)<line_sep>self.assertEqual(NSTIFFCompressionLZW 5)<line_sep>self.assertEqual(NSTIFFCompressionJPEG 6)<line_sep>self.assertEqual(NSTIFFCompressionNEXT 32766)<line_sep>self.assertEqual(NSTIFFCompressionPackBits 32773)<line_sep>self.assertEqual(NSTIFFCompressionOldJPEG 32865)<line_sep>self.assertEqual(NSTIFFFileType 0)<line_sep>self.assertEqual(NSBMPFileType 1)<line_sep>self.assertEqual(NSGIFFileType 2)<line_sep>self.assertEqual(NSJPEGFileType 3)<line_sep>self.assertEqual(NSPNGFileType 4)<line_sep>self.assertEqual(NSJPEG2000FileType 5)<line_sep>self.assertEqual(NSImageRepLoadStatusUnknownType -1)<line_sep>self.assertEqual(NSImageRepLoadStatusReadingHeader -2)<line_sep>self.assertEqual(NSImageRepLoadStatusWillNeedAllData -3)<line_sep>self.assertEqual(NSImageRepLoadStatusInvalidData -4)<line_sep>self.assertEqual(NSImageRepLoadStatusUnexpectedEOF -5)<line_sep>self.assertEqual(NSImageRepLoadStatusCompleted -6)<line_sep>self.assertEqual(NSAlphaFirstBitmapFormat 1<lshift>0)<line_sep>self.assertEqual(NSAlphaNonpremultipliedBitmapFormat 1<lshift>1)<line_sep>self.assertEqual(NSFloatingPointSamplesBitmapFormat 1<lshift>2)<line_sep>self.assertIsInstance(NSImageCompressionMethod unicode)<line_sep>self.assertIsInstance(NSImageCompressionFactor unicode)<line_sep>self.assertIsInstance(NSImageDitherTransparency unicode)<line_sep>self.assertIsInstance(NSImageRGBColorTable unicode)<line_sep>self.assertIsInstance(NSImageInterlaced unicode)<line_sep>self.assertIsInstance(NSImageColorSyncProfileData unicode)<line_sep>self.assertIsInstance(NSImageFrameCount unicode)<line_sep>self.assertIsInstance(NSImageCurrentFrame unicode)<line_sep>self.assertIsInstance(NSImageCurrentFrameDuration unicode)<line_sep>self.assertIsInstance(NSImageLoopCount unicode)<line_sep>self.assertIsInstance(NSImageGamma unicode)<line_sep>self.assertIsInstance(NSImageProgressive unicode)<line_sep>self.assertIsInstance(NSImageEXIFData unicode)<line_sep>self.assertIsInstance(NSImageFallbackBackgroundColor unicode)<block_end><def_stmt>testTiffCompression self<block_start>lst,nr=NSBitmapImageRep.getTIFFCompressionTypes_count_(<none> <none>)<line_sep>self.assertIsInstance(lst tuple)<line_sep>self.assertIsInstance(nr (int long))<line_sep>self.assertEqual(len(lst) nr)<line_sep>self.assertNotEqual(len(lst) 0)<line_sep>self.assertIsInstance(lst[0] (int long))<block_end><def_stmt>testMethods 
self<block_start>self.assertResultIsBOOL(NSBitmapImageRep.isPlanar)<line_sep>self.assertResultIsBOOL(NSBitmapImageRep.canBeCompressedUsing_)<line_sep>self.assertArgIsBOOL(NSBitmapImageRep.incrementalLoadFromData_complete_ 1)<line_sep>self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_ 0)<line_sep>self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_ 1)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# Python Standard Library Imports <import_stmt>base64<import_stmt>json<def_stmt>build_async_task_result content content_type filename<block_start>"""Builds an Async Task result as a JSON string. This is necessary if we want to return multiple values, as the result by default is just a plain string. """<line_sep># decode the base64 payload to str so json.dumps also works on Python 3 payload={'content':base64.b64encode(content).decode('utf-8') 'content_type':content_type 'filename':filename }<line_sep>result=json.dumps(payload)<line_sep><return>result<block_end><def_stmt>extract_async_task_result_json_values result_data<block_start>"""Companion function to perform the inverse of `build_async_task_result()`. """<line_sep>payload=json.loads(result_data)<line_sep>content=base64.b64decode(payload['content'])<line_sep>content_type=payload['content_type']<line_sep>filename=payload['filename']<line_sep><return>(content content_type filename )<block_end>
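# A minimal usage sketch, assuming the two helpers above are in scope: round-trip a small payload and check the values survive. <if_stmt>__name__<eq>'__main__'<block_start>result=build_async_task_result(b'hello world' 'text/plain' 'hello.txt')<line_sep>content,content_type,filename=extract_async_task_result_json_values(result)<line_sep><assert_stmt>content<eq>b'hello world'<line_sep><assert_stmt>content_type<eq>'text/plain'<line_sep><assert_stmt>filename<eq>'hello.txt'<block_end>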
<import_stmt>re<import_from_stmt>logging Logger<import_from_stmt>typing Dict<import_stmt>yaml<line_sep>OptionValue=int<or>float<or>bool<or>str<class_stmt>BasePreprocessor()<block_start>'''Base preprocessor. All preprocessors must inherit from this one.'''<line_sep># pylint: disable=too-many-instance-attributes defaults={}<line_sep>tags=()<line_sep>@staticmethod<def_stmt>get_options options_string:str<arrow>Dict[str OptionValue]<block_start>'''Get a dictionary of typed options from a string with XML attributes. :param options_string: String of XML attributes :returns: Dictionary with options '''<if_stmt><not>options_string<block_start><return>{}<block_end>option_pattern=re.compile(r'(?P<key>[A-Za-z_:][0-9A-Za-z_:\-\.]*)=(\'|")(?P<value>.+?)\2' flags=re.DOTALL)<line_sep><return>{option.group('key'):yaml.load(option.group('value') yaml.Loader)<for>option option_pattern.finditer(options_string)}<block_end><def_stmt>__init__ self context:dict logger:Logger quiet=<false> debug=<false> options={}# pylint: disable=dangerous-default-value # pylint: disable=too-many-arguments <block_start>self.project_path=context['project_path']<line_sep>self.config=context['config']<line_sep>self.context=context<line_sep>self.logger=logger<line_sep>self.quiet=quiet<line_sep>self.debug=debug<line_sep>self.options={**self.defaults **options}<line_sep>self.working_dir=self.project_path/self.config['tmp_dir']<if_stmt>self.tags<block_start>self.pattern=re.compile(rf'(?<!\<)\<(?P<tag>{"|".join(self.tags)})'+r'(\s(?P<options>[^\<\>]*))?\>'+r'(?P<body>.*?)\<\/(?P=tag)\>' flags=re.DOTALL)<block_end><block_end><def_stmt>apply self<block_start>'''Run the preprocessor against the project directory. Must be implemented by every preprocessor. '''<line_sep><raise>NotImplementedError<block_end><block_end>
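# A minimal usage sketch, assuming this module is importable: get_options turns XML-attribute-style strings into typed values via YAML. <if_stmt>__name__<eq>'__main__'<block_start>opts=BasePreprocessor.get_options('src="docs/index.md" lines="10" draft="true"')<line_sep><assert_stmt>opts<eq>{'src':'docs/index.md' 'lines':10 'draft':<true>}<block_end>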
# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """operator dsl function: fake_quant_with_min_max_args"""<import_stmt>akg<import_from_stmt>akg tvm topi<import_from_stmt>akg.utils.format_transform get_shape<import_stmt>akg.utils<as>utils<import_from_stmt>akg.ops.math.ascend Floor<def_stmt>nudge_min_max min max num_bits narrow_range<block_start>""" Calculate the maximum and minimum values of the quantization Args: min: scalar, input min max: input max num_bits: scalar Defaults to 8. num_bits is the bitwidth of the quantization, range [2,16] narrow_range: bool Returns: nudged_min, nudged_max, scale """<line_sep>quant_max=(2<power>num_bits)-1<if_stmt>narrow_range<is><false><block_start>quant_min=0.00<block_end><else_stmt><block_start>quant_min=1.00<block_end>scale=(max-min)/(float(quant_max)-quant_min)<line_sep>zero_point_from_min=quant_min-min/scale<line_sep># Calculate the maximum and minimum values of the quantization <if_stmt>zero_point_from_min<l>quant_min<block_start>nudged_zero_point=quant_min<block_end><elif_stmt>zero_point_from_min<g>quant_max<block_start>nudged_zero_point=quant_max<block_end><else_stmt><block_start>nudged_zero_point=(zero_point_from_min+0.5)<floordiv>1<block_end>nudged_min=(quant_min-nudged_zero_point)<times>scale<line_sep>nudged_max=(quant_max-nudged_zero_point)<times>scale<line_sep><return>nudged_min nudged_max scale<block_end>@utils.check_input_type(tvm.tensor.Tensor (float int type(<none>)) (float int type(<none>)) (int type(<none>)) (bool type(<none>)))<def_stmt>fake_quant_with_min_max_args input_data min=-6 max=6 num_bits=8 narrow_range=<false><block_start>""" Computes Fake-quantize the 'input_data' tensor, type float32 to 'output_data' tensor of same type output_data = (floor(clamped_shifted * inv_nudged_scale + 0.5f))) * scale + nudged_min scale = (max-min) / (quant_max-quant_min) Args: data_x1 (tvm.tensor.Tensor): Tensor of dtype "float32" min ([float, int]): scalar, defaults to -6 max ([float, int]): scalar, defaults to 6. [min; max] define the clamping range for the input_data data num_bits ([float, int]): Defaults to 8. 
num_bits is the bitwidth of the quantization,between 2 and 16 narrow_range ([bool]): True, quantized into the quantization range [1; 2^num_bits - 1] False,quantized into the quantization range [0; 2^num_bits - 1] Returns: tvm.tensor.Tensor """<line_sep>shape=get_shape(input_data)<line_sep>utils.check_shape(shape)<line_sep>dtype=input_data.dtype<line_sep>utils.ops_dtype_check(dtype utils.DtypeForDavinci.FLOAT32)<line_sep>nudged_min,nudged_max,scale=nudge_min_max(min max num_bits narrow_range)<line_sep>zero_tensor=tvm.compute(input_data.shape <lambda>*i:tvm.const(0 dtype="float32") name="zero_tensor")<line_sep>nudged_max_tensor=topi.add(zero_tensor nudged_max)<line_sep>nudged_min_tensor=topi.add(zero_tensor nudged_min)<line_sep>inv_nudged_scale=1.00/scale<line_sep># Transform the input between nudged_max and nudged_min clamped_vmin=topi.minimum(input_data nudged_max_tensor)<line_sep>clamped=topi.maximum(clamped_vmin nudged_min_tensor)<line_sep># Calculate the quantized and dequantized results clamped_shifted=topi.subtract(clamped nudged_min_tensor)<line_sep>vmul_shifted=topi.multiply(clamped_shifted inv_nudged_scale)<line_sep>vadds_shifted=topi.add(vmul_shifted 0.5)<line_sep>floor_vadds_shifted=Floor(vadds_shifted)<line_sep>floor_cast=akg.lang.ascend.cast_to(floor_vadds_shifted dtype)<line_sep>res_scale=topi.multiply(floor_cast scale)<line_sep>res=topi.add(res_scale nudged_min_tensor)<line_sep><return>res<block_end>
""" Unit tests """<import_from_stmt>django.test TestCase<import_from_stmt>django.conf settings<class_stmt>BasicTests(TestCase)<block_start><def_stmt>test_configuration self<block_start>""" Test that the configuration is sane. """<line_sep>self.assertTrue('ROLLBAR'<in>dir(settings) msg='The ROLLBAR setting is not present.')<line_sep>self.assertTrue(settings.ROLLBAR.get('access_token') msg='The ROLLBAR["access_token"] setting is blank.')<block_end><block_end>
# # Copyright 2015 <NAME>. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>unittest<import_stmt>testbase<import_stmt>util<import_stmt>time<import_stmt>gateway_mgmt<import_stmt>redis_mgmt<import_stmt>smr_mgmt<import_stmt>default_cluster<import_stmt>config<import_stmt>load_generator<import_stmt>telnet<import_stmt>json<import_stmt>constant<as>c<class_stmt>TestHeartbeatChecker(unittest.TestCase)<block_start>cluster=config.clusters[0]<line_sep>leader_cm=config.clusters[0]['servers'][0]<line_sep>max_load_generator=1<line_sep>load_gen_thrd_list={}<line_sep>key_base='key_thbc'<line_sep>@classmethod<def_stmt>setUpClass cls<block_start><return>0<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><return>0<block_end><def_stmt>setUp self<block_start>util.set_process_logfile_prefix('TestHeartbeatChecker_%s'%self._testMethodName)<line_sep>self.conf_checker=default_cluster.initialize_starting_up_smr_before_redis(self.cluster)<line_sep>self.assertIsNotNone(self.conf_checker 'failed to initialize cluster')<block_end><def_stmt>tearDown self<block_start>testbase.defaultTearDown(self)<block_end><def_stmt>getseq_log self s<block_start>smr=smr_mgmt.SMR(s['id'])<try_stmt><block_start>ret=smr.connect(s['ip'] s['smr_mgmt_port'])<if_stmt>ret<ne>0<block_start><return><block_end>smr.write('getseq log\r\n')<line_sep>response=smr.read_until('\r\n' 1)<line_sep>util.log('getseq log (pgs%d) = %s'%(s['id'] response[:-2]))<line_sep>smr.disconnect()<block_end><except_stmt>IOError<block_start><pass><block_end><block_end><def_stmt>get_expected_smr_state self server expected max_try=60<block_start><for_stmt>i range(0 max_try)<block_start>state=util.get_smr_state(server self.leader_cm)<if_stmt>state<eq>expected<block_start><break><line_sep><block_end>time.sleep(1)<block_end><return>state<block_end><def_stmt>state_transition self<block_start>server=util.get_server_by_role(self.cluster['servers'] 'slave')<line_sep>self.assertNotEquals(server <none> 'failed to get_server_by_role-slave')<line_sep># get gateway info ip,port=util.get_rand_gateway(self.cluster)<line_sep>gw=gateway_mgmt.Gateway(self.cluster['servers'][0]['id'])<line_sep># check initial state state=self.get_expected_smr_state(server 'N')<line_sep>role=util.get_role_of_server(server)<line_sep>self.assertEquals('N' state 'server%d - state:%s, role:%s, expected:N'%(server['id'] state role))<line_sep># shutdown ret=testbase.request_to_shutdown_smr(server)<line_sep>self.assertEquals(ret 0 'failed to shutdown smr')<line_sep>ret=testbase.request_to_shutdown_redis(server)<line_sep>self.assertEquals(ret 0 'failed to shutdown redis')<line_sep>time.sleep(3)<line_sep># check state F expected='F'<line_sep>state=self.get_expected_smr_state(server expected)<line_sep>self.assertEquals(expected state 'server%d - state:%s, but expected:%s'%(server['id'] state expected))<line_sep># set value ret=gw.connect(ip port)<line_sep>self.assertEquals(ret 0 'failed to connect to gateway, %s:%d'%(ip port))<line_sep>timestamp=0.0<for_stmt>i range(0 
100)<block_start>timestamp=time.time()<line_sep>key='new_key_haha'<line_sep>cmd='set %s %f\r\n'%(key timestamp)<line_sep>gw.write(cmd)<line_sep>res=gw.read_until('\r\n')<line_sep>self.assertEquals(res '+OK\r\n')<block_end>gw.disconnect()<line_sep># recovery ret=testbase.request_to_start_smr(server)<line_sep>self.assertEquals(ret 0 'failed to start smr')<line_sep>ret=testbase.request_to_start_redis(server)<line_sep>self.assertEquals(ret 0 'failed to start redis')<line_sep>ret=testbase.wait_until_finished_to_set_up_role(server 10)<line_sep>self.assertEquals(ret 0 'failed to role change. smr_id:%d'%(server['id']))<line_sep>time.sleep(5)<line_sep>redis=redis_mgmt.Redis(server['id'])<line_sep>ret=redis.connect(server['ip'] server['redis_port'])<line_sep>self.assertEquals(ret 0 'failed to connect to redis')<line_sep># check state N expected='N'<line_sep>max_try=20<for_stmt>i range(0 max_try)<block_start>state=self.get_expected_smr_state(server expected)<if_stmt>state<eq>expected<block_start><break><block_end>time.sleep(1)<block_end>role=util.get_role_of_server(server)<line_sep>self.assertEquals(expected state 'server%d - state:%s, role:%s, but expected:%s'%(server['id'] state role expected))<block_end><def_stmt>test_1_state_transition self<block_start>util.print_frame()<line_sep>self.state_transition()<block_end><def_stmt>get_mss self# get master, slave1, and slave2 <block_start>master=util.get_server_by_role(self.cluster['servers'] 'master')<line_sep>self.assertNotEquals(master <none> 'failed to get master')<line_sep>slave1=util.get_server_by_role(self.cluster['servers'] 'slave')<line_sep>self.assertNotEquals(slave1 <none> 'failed to get slave1')<line_sep>slave2=<none><for_stmt>server self.cluster['servers']<block_start>id=server['id']<if_stmt>id<ne>master['id']<and>id<ne>slave1['id']<block_start>slave2=server<line_sep><break><block_end><block_end>self.assertNotEquals(slave2 <none> 'failed to get slave2')<line_sep><return>master slave1 slave2<block_end><def_stmt>test_2_consistent_after_failover self<block_start>util.print_frame()<for_stmt>i range(3)<block_start>util.log('loop %d'%i)<line_sep>self.consistent_after_failover()<block_end><block_end><def_stmt>consistent_after_failover self<block_start>max=10000<line_sep>wait_count=15<line_sep>key='caf'<line_sep># get master, slave1, and slave2 master,slave1,slave2=self.get_mss()<line_sep># set value ip,port=util.get_rand_gateway(self.cluster)<line_sep>gw=gateway_mgmt.Gateway(ip)<line_sep>gw.connect(ip port)<for_stmt>i range(0 max)<block_start>cmd='set %s%d %d\r\n'%(key i i)<line_sep>gw.write(cmd)<line_sep>res=gw.read_until('\r\n')<line_sep>self.assertEquals(res '+OK\r\n')<block_end>time.sleep(5)<line_sep># shutdown servers=[master slave1 slave2]<for_stmt>server servers<block_start>util.log('before shutdown pgs%d'%server['id'])<for_stmt>s servers<block_start>self.getseq_log(s)<block_end>ret=testbase.request_to_shutdown_smr(server)<line_sep>self.assertEqual(ret 0 'failed to shutdown smr, server:%d'%server['id'])<line_sep>ret=testbase.request_to_shutdown_redis(server)<line_sep>self.assertEquals(ret 0 'failed to shutdown redis')<block_end>time.sleep(5)<line_sep># check state F <for_stmt>server servers<block_start>state=self.get_expected_smr_state(server 'F')<line_sep>self.assertEquals('F' state 'server%d - state:%s'%(server['id'] state))<block_end># recovery <for_stmt>server servers<block_start>ret=testbase.request_to_start_smr(server)<line_sep>self.assertEqual(ret 0 'failed to start smr, 
server:%d'%server['id'])<line_sep>ret=testbase.request_to_start_redis(server <false>)<line_sep>self.assertEqual(ret 0 'failed to start redis, server:%d'%server['id'])<line_sep>util.log('after restart pgs%d'%server['id'])<for_stmt>s servers<block_start>self.getseq_log(s)<block_end><block_end>time.sleep(5)<line_sep># wait for master election <for_stmt>i xrange(10)<block_start>ret=util.check_cluster(self.cluster['cluster_name'] self.leader_cm['ip'] self.leader_cm['cm_port'])<if_stmt>ret<block_start><break><block_end>time.sleep(1)<block_end># check state <for_stmt>server servers<block_start>ret=testbase.wait_until_finished_to_set_up_role(server wait_count)<line_sep>self.assertEquals(ret 0 'failed to role change. server:%d'%(server['id']))<line_sep>state=self.get_expected_smr_state(server 'N')<line_sep>role=util.get_role_of_server(server)<line_sep>self.assertEquals('N' state 'server%d - state:%s, role:%s'%(server['id'] state role))<block_end>the_number_of_master=0<line_sep>the_number_of_slave=0<for_stmt>server servers<block_start>role=util.get_role_of_server(server)<if_stmt>role<eq>c.ROLE_MASTER<block_start>the_number_of_master=the_number_of_master+1<block_end><elif_stmt>role<eq>c.ROLE_SLAVE<block_start>the_number_of_slave=the_number_of_slave+1<block_end><block_end>self.assertTrue(1<eq>the_number_of_master<and>2<eq>the_number_of_slave 'failed to set roles, the number of master:%d, the number of slave:%d'%(the_number_of_master the_number_of_slave))<line_sep># get master, slave1, and slave2 master,slave1,slave2=self.get_mss()<line_sep># connect to a master`s redis and set data redis=redis_mgmt.Redis(master['id'])<line_sep>ret=redis.connect(master['ip'] master['redis_port'])<line_sep>self.assertEquals(ret 0 'failed to connect to redis, server:%d'%master['id'])<for_stmt>i range(max max<times>2)<block_start>cmd='set %s%d %d\r\n'%(key i i)<line_sep>redis.write(cmd)<line_sep>res=redis.read_until('\r\n')<line_sep>self.assertEquals(res '+OK\r\n' 'failed to get response, server:%d'%master['id'])<block_end>redis.disconnect()<line_sep># check slaves`s data slaves=[slave1 slave2]<for_stmt>slave slaves<block_start>slave_redis=redis_mgmt.Redis(slave['id'])<line_sep>ret=slave_redis.connect(slave['ip'] slave['redis_port'])<line_sep>self.assertEquals(ret 0 'failed to connect to redis, server:%d'%slave['id'])<for_stmt>i range(0 max<times>2)<block_start>cmd='get %s%d\r\n'%(key i)<line_sep>slave_redis.write(cmd)<line_sep>trash=slave_redis.read_until('\r\n')<line_sep>res=slave_redis.read_until('\r\n')<line_sep>self.assertEquals(res '%d\r\n'%i 'inconsistent, server:%d, expected %d but %s'%(slave['id'] i res))<block_end>slave_redis.disconnect()<block_end><block_end><def_stmt>test_3_heartbeat_target_connection_count self<block_start>util.print_frame()<line_sep>util.log('wait until all connections are established')<for_stmt>i range(1 8)<block_start>time.sleep(1)<line_sep>util.log('%d sec'%i)<block_end># check pgs <for_stmt>server self.cluster['servers']<block_start>before_cnt_redis=util.get_clients_count_of_redis(server['ip'] server['redis_port'])<line_sep>before_cnt_smr=util.get_clients_count_of_smr(server['smr_mgmt_port'])<line_sep>cmd='pgs_leave %s %d forced'%(self.cluster['cluster_name'] server['id'])<line_sep>ret=util.cm_command(self.leader_cm['ip'] self.leader_cm['cm_port'] cmd)<line_sep>jobj=json.loads(ret)<line_sep>self.assertEqual(jobj['state'] 'success' 'failed : cmd="%s", reply="%s"'%(cmd ret[:-2]))<line_sep>util.log('succeeded : cmd="%s", reply="%s"'%(cmd ret[:-2]))<line_sep># check redis 
success=<false><for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_redis(server['ip'] server['redis_port'])<if_stmt>after_cnt<le>2<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to redis%d(%s:%d) is %d, exptected:n<=2, before=%d'%(server['id'] server['ip'] server['redis_port'] after_cnt before_cnt_redis))<line_sep>util.log('succeeded : the number of connections to redis%d(%s:%d) is %d, exptected=n<=2, before=%d'%(server['id'] server['ip'] server['redis_port'] after_cnt before_cnt_redis))<line_sep># check smr success=<false><line_sep>expected=1<for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_smr(server['smr_mgmt_port'])<if_stmt>after_cnt<eq>expected<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to smr%d(%s:%d) is %d, exptected=%d, before=%d'%(server['id'] server['ip'] server['smr_mgmt_port'] after_cnt expected before_cnt_smr))<line_sep>util.log('succeeded : the number of connections to smr%d(%s:%d) is %d, exptected=%d, before=%d'%(server['id'] server['ip'] server['smr_mgmt_port'] after_cnt expected before_cnt_smr))<line_sep># Go back to initial configuration self.assertTrue(util.pgs_join(self.leader_cm['ip'] self.leader_cm['cm_port'] server['cluster_name'] server['id']) 'failed to join pgs %d'%server['id'])<block_end># check gateway <for_stmt>server self.cluster['servers']<block_start>before_cnt=util.get_clients_count_of_gw(server['ip'] server['gateway_port'])<line_sep>cmd='gw_del %s %d'%(self.cluster['cluster_name'] server['id'])<line_sep>ret=util.cm_command(self.leader_cm['ip'] self.leader_cm['cm_port'] cmd)<line_sep>jobj=json.loads(ret)<line_sep>self.assertEqual(jobj['state'] 'success' 'failed : cmd="%s", reply="%s"'%(cmd ret[:-2]))<line_sep>util.log('succeeded : cmd="%s", reply="%s"'%(cmd ret[:-2]))<line_sep>success=<false><line_sep>expected=1<for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_gw(server['ip'] server['gateway_port'])<if_stmt>after_cnt<eq>expected<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to gateway%d(%s:%d) is %d, exptected=%d.'%(server['id'] server['ip'] server['gateway_port'] after_cnt expected))<line_sep>util.log('succeeded : the number of connections to gateway%d(%s:%d) is %d, exptected=%d.'%(server['id'] server['ip'] server['gateway_port'] after_cnt expected))<line_sep># Go back to initial configuration self.assertTrue(util.gw_add(server['cluster_name'] server['id'] server['pm_name'] server['ip'] server['gateway_port'] self.leader_cm['ip'] self.leader_cm['cm_port']) 'failed to add gw %d'%server['id'])<block_end><block_end><def_stmt>test_4_elect_master_randomly self<block_start>util.print_frame()<for_stmt>i range(1)<block_start>self.elect_master_randomly()<block_end><block_end><def_stmt>elect_master_randomly self# set data <block_start>ip,port=util.get_rand_gateway(self.cluster)<line_sep>gw=gateway_mgmt.Gateway('0')<line_sep>gw.connect(ip port)<for_stmt>i range(0 1000)<block_start>cmd='set %s%d %d\r\n'%(self.key_base i i)<line_sep>gw.write(cmd)<line_sep>res=gw.read_until('\r\n')<line_sep>self.assertEqual(res '+OK\r\n' 'failed to set values to gw(%s:%d). 
cmd:%s, res:%s'%(ip port cmd[:-2] res[:-2]))<block_end>server_ids=[]<for_stmt>server self.cluster['servers']<block_start>server_ids.append(server['id'])<block_end><for_stmt>try_cnt range(30)# get master, slave1, slave2 <block_start>m,s1,s2=util.get_mss(self.cluster)<line_sep>self.assertNotEqual(m <none> 'master is None.')<line_sep>self.assertNotEqual(s1 <none> 'slave1 is None.')<line_sep>self.assertNotEqual(s2 <none> 'slave2 is None.')<line_sep>util.log('master id : %d'%m['id'])<if_stmt>try_cnt<ne>0<block_start><if_stmt>m['id']<in>server_ids<block_start>server_ids.remove(m['id'])<block_end><block_end>smr=smr_mgmt.SMR(m['id'])<line_sep>ret=smr.connect(m['ip'] m['smr_mgmt_port'])<line_sep>self.assertEqual(ret 0 'failed to connect to master. %s:%d'%(m['ip'] m['smr_mgmt_port']))<line_sep>cmd='role lconn\r\n'<line_sep>smr.write(cmd)<line_sep>reply=smr.read_until('\r\n')<line_sep>self.assertEqual(reply '+OK\r\n' 'failed : cmd="%s", reply="%s"'%(cmd[:-2] reply[:-2]))<line_sep>util.log('succeeded : cmd="%s", reply="%s"'%(cmd[:-2] reply[:-2]))<line_sep># wait until role-change is finished <for_stmt>role_change_try_cnt range(5)<block_start>count_master=0<line_sep>count_slave=0<for_stmt>server self.cluster['servers']<block_start>real_role=util.get_role_of_server(server)<line_sep>real_role=util.roleNumberToChar(real_role)<if_stmt>real_role<eq>'M'<block_start>count_master=count_master+1<block_end><elif_stmt>real_role<eq>'S'<block_start>count_slave=count_slave+1<block_end><block_end><if_stmt>count_master<eq>1<and>count_slave<eq>2<block_start><break><line_sep><block_end>time.sleep(1)<block_end># check the number of master and slave self.assertEqual(count_master 1 'failed : the number of master is not 1, count_master=%d, count_slave=%d'%(count_master count_slave))<line_sep>self.assertEqual(count_slave 2 'failed : the number of slave is not 2, count_master=%d, count_slave=%d'%(count_master count_slave))<line_sep>util.log('succeeded : the number of master is 1 and the number of slave is 2')<line_sep># check states of all pgs in pg <for_stmt>try_cnt range(3)<block_start>ok=<true><for_stmt>s self.cluster['servers']<block_start>real_role=util.get_role_of_server(s)<line_sep>real_role=util.roleNumberToChar(real_role)<line_sep>smr_info=util.get_smr_info(s self.leader_cm)<line_sep>cc_role=smr_info['smr_Role']<line_sep>cc_hb=smr_info['hb']<if_stmt>cc_hb<ne>'Y'<block_start>ok=<false><block_end><if_stmt>real_role<ne>cc_role<block_start>ok=<false><block_end><if_stmt>ok<block_start>util.log('succeeded : a role of real pgs is the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s'%(s['id'] real_role cc_role cc_hb))<block_end><else_stmt><block_start>util.log('\n\n**********************************************************\n\nretry: a role of real pgs is not the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s'%(s['id'] real_role cc_role cc_hb))<block_end><block_end><if_stmt>ok<eq><false><block_start>time.sleep(0.5)<block_end><else_stmt><block_start><break><block_end><block_end>self.assertTrue(ok 'failed : role check')<if_stmt>len(server_ids)<eq>0<block_start>util.log('succeeded : all smrs have been as a master')<line_sep><return>0<block_end><block_end>self.assertEqual(0 len(server_ids) 'failed : remains server ids=[%s]'%(','.join('%d'%id<for>id server_ids)))<line_sep><return>0<block_end><def_stmt>test_5_from_n_to_1_heartbeat_checkers self<block_start>util.print_frame()<for_stmt>i range(0 len(self.cluster['servers'])-1)<block_start>util.log('loop 
%d'%i)<line_sep>server=self.cluster['servers'][i]<line_sep>self.assertEquals(0 testbase.request_to_shutdown_cm(server) 'failed to request_to_shutdown_cm, server:%d'%server['id'])<line_sep>time.sleep(20)<line_sep>self.leader_cm=self.cluster['servers'][i+1]<line_sep>self.match_cluster_info(self.leader_cm['ip'] self.leader_cm['cm_port'] self.cluster)<line_sep>self.state_transition()<block_end># Go back to initial configuration self.assertTrue(util.recover_confmaster(self.cluster [0 1] 0) 'failed to recover confmaster.')<block_end><def_stmt>test_6_from_3_to_6_heartbeat_checkers self<block_start>util.print_frame()<line_sep>hbc_svr_list=[]<line_sep>i=5000+len(self.cluster['servers'])<for_stmt>server self.cluster['servers']<block_start>i=i+1<line_sep>hbc_svr={}<line_sep>hbc_svr['id']=i<line_sep>hbc_svr['ip']=server['ip']<line_sep>hbc_svr['zk_port']=server['zk_port']<line_sep>hbc_svr_list.append(hbc_svr)<line_sep>ret=testbase.setup_cm(i)<line_sep>self.assertEquals(0 ret 'failed to copy heartbeat checker, server:%d'%hbc_svr['id'])<line_sep>ret=testbase.request_to_start_cm(i i)<line_sep>self.assertEquals(0 ret 'failed to request_to_start_cm, server:%d'%hbc_svr['id'])<line_sep>self.state_transition()<block_end># Go back to initial configuration <for_stmt>hbc_svr hbc_svr_list<block_start>self.assertEqual(0 testbase.request_to_shutdown_cm(hbc_svr) 'failed to shutdown confmaster')<block_end><block_end><def_stmt>test_7_remaining_hbc_connection self<block_start>util.print_frame()<line_sep># check pgs <for_stmt>server self.cluster['servers']<block_start>before_cnt_redis=util.get_clients_count_of_redis(server['ip'] server['redis_port'])<line_sep>before_cnt_smr=util.get_clients_count_of_smr(server['smr_mgmt_port'])<line_sep>cmd='pgs_leave %s %d forced\r\npgs_del %s %d'%(self.cluster['cluster_name'] server['id'] self.cluster['cluster_name'] server['id'])<line_sep>util.cm_command(self.leader_cm['ip'] self.leader_cm['cm_port'] cmd)<block_end><for_stmt>server self.cluster['servers']# check redis <block_start>success=<false><for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_redis(server['ip'] server['redis_port'])<if_stmt>after_cnt<le>2<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to redis%d(%s:%d) is %d, exptected=n<=2, before=%d'%(server['id'] server['ip'] server['redis_port'] after_cnt before_cnt_redis))<line_sep>util.log('succeeded : the number of connections to redis%d(%s:%d) is %d, exptected=n<=2, before=%d'%(server['id'] server['ip'] server['redis_port'] after_cnt before_cnt_redis))<line_sep># check smr success=<false><line_sep>expected=0<for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_smr(server['smr_mgmt_port'])<if_stmt>after_cnt<eq>expected<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to smr%d(%s:%d) is %d, exptected=%d, before=%d'%(server['id'] server['ip'] server['smr_mgmt_port'] after_cnt expected before_cnt_smr))<line_sep>util.log('succeeded : the number of connections to smr%d(%s:%d) is %d, exptected=%d, before=%d'%(server['id'] server['ip'] server['smr_mgmt_port'] after_cnt expected before_cnt_smr))<block_end># check gateway <for_stmt>server self.cluster['servers']<block_start>before_cnt=util.get_clients_count_of_gw(server['ip'] server['gateway_port'])<line_sep>cmd='gw_del %s %d'%(self.cluster['cluster_name'] 
server['id'])<line_sep>util.cm_command(self.leader_cm['ip'] self.leader_cm['cm_port'] cmd)<block_end><for_stmt>server self.cluster['servers']<block_start>success=<false><line_sep>expected=1<for_stmt>i range(5)<block_start>after_cnt=util.get_clients_count_of_gw(server['ip'] server['gateway_port'])<if_stmt>after_cnt<eq>expected<block_start>success=<true><line_sep><break><block_end>time.sleep(1)<block_end>self.assertEquals(success <true> 'failed : the number of connections to gateway%d(%s:%d) is %d, exptected=%d.'%(server['id'] server['ip'] server['gateway_port'] after_cnt expected))<line_sep>util.log('succeeded : the number of connections to gateway%d(%s:%d) is %d, exptected=%d.'%(server['id'] server['ip'] server['gateway_port'] after_cnt expected))<block_end># Go back to initial configuration # Cleanup PG self.assertTrue(util.cm_success(util.cm_command(self.leader_cm['ip'] self.leader_cm['cm_port'] 'pg_del %s %d'%(self.cluster['cluster_name'] self.cluster['servers'][0]['pg_id'])))[0])<line_sep># Cleanup processes of PGS and GW <for_stmt>s self.cluster['servers']<block_start>self.assertEqual(0 util.shutdown_redis(s['id'] s['redis_port']) 'failed to kill redis %d process'%s['id'])<line_sep>self.assertEqual(0 util.shutdown_smr(s['id'] s['ip'] s['smr_base_port']) 'failed to kill smr %d process'%s['id'])<line_sep>self.assertEqual(0 util.shutdown_gateway(s['id'] s['gateway_port']) 'failed to kill gw %d process'%s['id'])<block_end># Recover PG self.assertTrue(util.install_pg(self.cluster self.cluster['servers'] self.cluster['servers'][0] start_gw=<true>) 'failed to recover PGS and GW in a PM')<block_end><def_stmt>match_cluster_info self cm_ip cm_port cluster# Cluster <block_start>cluster_info=util.cluster_info(cm_ip cm_port cluster['cluster_name'])['cluster_info']<line_sep>self.assertEquals(cluster_info['PN_PG_Map'] '0 8192')<line_sep>self.assertEquals(cluster_info['Key_Space_Size'] 8192)<line_sep># PG <for_stmt>pg_id cluster['pg_id_list']<block_start>pg=util.pg_info(cm_ip cm_port cluster['cluster_name'] pg_id)<line_sep>self.assertIsNotNone(pg)<block_end><for_stmt>s self.cluster['servers']# GW <block_start>gw_info=util.get_gw_info(cm_ip cm_port cluster['cluster_name'] s['id'])<line_sep>self.assertEquals(gw_info['port'] s['gateway_port'])<line_sep>self.assertEquals(gw_info['state'] 'N')<line_sep>self.assertEquals(gw_info['hb'] 'Y')<line_sep>self.assertEquals(gw_info['pm_Name'] s['pm_name'])<line_sep>self.assertEquals(gw_info['pm_IP'] s['ip'])<line_sep># PGS pgs_info=util.get_pgs_info(cm_ip cm_port cluster['cluster_name'] s['id'])<line_sep>self.assertEquals(pgs_info['pg_ID'] s['pg_id'])<line_sep>self.assertEquals(pgs_info['pm_Name'] s['pm_name'])<line_sep>self.assertEquals(pgs_info['pm_IP'] s['ip'])<line_sep>self.assertEquals(pgs_info['backend_Port_Of_Redis'] s['redis_port'])<line_sep>self.assertEquals(pgs_info['replicator_Port_Of_SMR'] s['smr_base_port'])<line_sep>self.assertEquals(pgs_info['management_Port_Of_SMR'] s['smr_mgmt_port'])<line_sep>self.assertEquals(pgs_info['state'] 'N')<line_sep>self.assertEquals(pgs_info['hb'] 'Y')<line_sep>self.assertEquals(pgs_info['color'] 'GREEN')<line_sep>self.assertTrue(pgs_info['smr_Role']<eq>'M'<or>pgs_info['smr_Role']<eq>'S')<line_sep>self.assertEquals(pgs_info['old_master_version'] '201')<block_end><block_end><block_end>
<import_from_stmt>pyxtal pyxtal<import_from_stmt>ase.io read<import_from_stmt>ase.spacegroup.symmetrize prep_symmetry<import_from_stmt>spglib get_symmetry_dataset<line_sep>#ans1 = get_symmetry_dataset(s, symprec=1e-2) #print(ans1) s=pyxtal()<line_sep>s.from_seed('bug.vasp' tol=1e-2)<line_sep>print(s)<line_sep>#s1=s.subgroup(eps=0.1, group_type='t+k', max_cell=4) #for a in s1: # print(a) #s1=s.subgroup(eps=0.1, group_type='k', max_cell=4) #for a in s1: # print(a) #permutation = {"C":"Si", "Si":"C"} #for i in range(100): # struc = s.subgroup_once(0.01, None, permutation, max_cell=1) # print(struc.group.number, struc.formula) <for_stmt>i range(100)<block_start>struc=s.subgroup_once(0.2 <none> <none> 't+k' max_cell=2)<line_sep>print(struc.group.number struc.formula)<block_end>#for i in range(1000): # struc = s.subgroup_with_substitution(permutation, once=True, max_cell=4) # print(struc)
""" ********************************************************************* This file is part of: The Acorn Project https://wwww.twistedfields.com/research ********************************************************************* Copyright (c) 2019-2021 <NAME>, Twisted Fields LLC Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ********************************************************************* """<line_sep># Modified from example file # Paranoid Pirate Worker by <NAME> <dln(at)eintr(dot)org> <import_from_stmt>random randint<import_stmt>time<import_stmt>zmq<import_stmt>redis<import_stmt>zmq_server<line_sep># keep the two imported to keep pickle working # TODO: avoid this by moving the class defs to a separate module. <import_from_stmt>master_process Robot RobotCommand<line_sep>REDIS_PORT=6379<line_sep>HEARTBEAT_LIVENESS=3<line_sep>HEARTBEAT_INTERVAL=1<line_sep>INTERVAL_INIT=1<line_sep>INTERVAL_MAX=32<line_sep># Paranoid Pirate Protocol constants PPP_READY=b"\x01"# Signals worker is ready PPP_HEARTBEAT=b"\x02"# Signals worker heartbeat <def_stmt>worker_socket context poller<block_start>"""Helper function that returns a new configured socket connected to the Paranoid Pirate queue"""<line_sep>worker=context.socket(zmq.DEALER)# DEALER identity=b"%04X-%04X"%(randint(0 0x10000) randint(0 0x10000))<line_sep>worker.setsockopt(zmq.IDENTITY identity)<line_sep>poller.register(worker zmq.POLLIN)<line_sep>worker.connect("tcp://localhost:5569")<line_sep>worker.send(PPP_READY)<line_sep><return>worker<block_end><def_stmt>main <block_start>r=redis.Redis(host='localhost' port=REDIS_PORT)<line_sep>context=zmq.Context(1)<line_sep>poller=zmq.Poller()<line_sep>liveness=HEARTBEAT_LIVENESS<line_sep>interval=INTERVAL_INIT<line_sep>heartbeat_at=time.time()+HEARTBEAT_INTERVAL<line_sep>worker=worker_socket(context poller)<line_sep>cycles=0<while_stmt><true><block_start>socks=dict(poller.poll(HEARTBEAT_INTERVAL<times>1000))<line_sep># Handle worker activity on backend <if_stmt>socks.get(worker)<eq>zmq.POLLIN# Get message # - 3-part envelope + content -> request # - 1-part HEARTBEAT -> heartbeat <block_start>frames=worker.recv_multipart()<if_stmt><not>frames<block_start><break><block_end># Interrupted <if_stmt>len(frames)<ge>5<block_start>cycles<augadd>1<line_sep>print("I: Normal reply")<line_sep># print(len(frames)) # print(frames) ident,zero_frame,idx,command,key,msg=frames<line_sep>return_command,reply=zmq_server.handle_command(r command key msg)<line_sep>worker.send_multipart([ident zero_frame idx return_command reply])<line_sep># worker.send_multipart(frames) liveness=HEARTBEAT_LIVENESS<block_end><elif_stmt>len(frames)<eq>1<and>frames[0]<eq>PPP_HEARTBEAT<block_start>print("I: Queue heartbeat")<line_sep>liveness=HEARTBEAT_LIVENESS<block_end><else_stmt><block_start>print("E: Invalid message: %s"%frames)<block_end>interval=INTERVAL_INIT<block_end><else_stmt><block_start>liveness<augsub>1<if_stmt>liveness<eq>0<block_start>print("W: Heartbeat failure, can't reach queue")<line_sep>print("W: 
Reconnecting in %0.2fs..."%interval)<line_sep>time.sleep(interval)<if_stmt>interval<l>INTERVAL_MAX<block_start>interval<augmul>2<block_end>poller.unregister(worker)<line_sep>worker.setsockopt(zmq.LINGER 0)<line_sep>worker.close()<line_sep>worker=worker_socket(context poller)<line_sep>liveness=HEARTBEAT_LIVENESS<block_end><block_end><if_stmt>time.time()<g>heartbeat_at<block_start>heartbeat_at=time.time()+HEARTBEAT_INTERVAL<line_sep>print("I: Worker heartbeat")<line_sep>worker.send(PPP_HEARTBEAT)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>os<import_from_stmt>tqdm tqdm<import_from_stmt>fastmri_recon.config *<import_from_stmt>fastmri_recon.data.datasets.fastmri_pyfunc train_masked_kspace_dataset_from_indexable<as>singlecoil_dataset<import_from_stmt>fastmri_recon.evaluate.metrics.np_metrics METRIC_FUNCS Metrics<import_from_stmt>fastmri_recon.models.subclassed_models.denoisers.proposed_params build_model_from_specs<import_from_stmt>fastmri_recon.models.subclassed_models.multiscale_complex MultiscaleComplex<def_stmt>evaluate_xpdnet_dealiasing model_fun model_kwargs run_id n_scales=0 n_epochs=200 contrast='CORPD_FBK' af=4 n_samples=<none> cuda_visible_devices='0123' <block_start>val_path=f'{FASTMRI_DATA_DIR}singlecoil_val/'<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]=','.join(cuda_visible_devices)<line_sep>val_set=singlecoil_dataset(val_path AF=af contrast=contrast inner_slices=<none> rand=<false> scale_factor=1e6 )<if_stmt>n_samples<is><not><none><block_start>val_set=val_set.take(n_samples)<block_end><else_stmt><block_start>val_set=val_set.take(199)<block_end>model=MultiscaleComplex(model_fun=model_fun model_kwargs=model_kwargs res=<false> n_scales=n_scales fastmri_format=<true> )<line_sep>model(next(iter(val_set))[0])<line_sep>model.load_weights(f'{CHECKPOINTS_DIR}checkpoints/{run_id}-{n_epochs:02d}.hdf5')<line_sep>m=Metrics(METRIC_FUNCS)<for_stmt>x,y_true tqdm(val_set.as_numpy_iterator() total=199<if>n_samples<is><none><else>n_samples)<block_start>y_pred=model.predict(x batch_size=1)<line_sep>m.push(y_true[<ellipsis> 0] y_pred[<ellipsis> 0])<block_end><return>['PSNR' 'SSIM'] list(m.means().values())<block_end>
<import_from_stmt>conans ConanFile CMake tools<import_stmt>os<line_sep>required_conan_version=">=1.33.0"<class_stmt>OpenColorIOConan(ConanFile)<block_start>name="opencolorio"<line_sep>description="A color management framework for visual effects and animation."<line_sep>license="BSD-3-Clause"<line_sep>homepage="https://opencolorio.org/"<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>settings="os" "compiler" "build_type" "arch"<line_sep>options={"shared":[<true> <false>] "fPIC":[<true> <false>] "use_sse":[<true> <false>]}<line_sep>default_options={"shared":<false> "fPIC":<true> "use_sse":<true>}<line_sep>generators="cmake" "cmake_find_package"<line_sep>exports_sources=["CMakeLists.txt" "patches/*"]<line_sep>topics=("colors" "visual" "effects" "animation")<line_sep>_cmake=<none><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_build_subfolder self<block_start><return>"build_subfolder"<block_end><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>"Windows"<block_start><del_stmt>self.options.fPIC<block_end><if_stmt>self.settings.arch<not><in>["x86" "x86_64"]<block_start><del_stmt>self.options.use_sse<block_end><block_end><def_stmt>configure self<block_start><if_stmt>self.options.shared<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>validate self<block_start><if_stmt>self.settings.compiler.get_safe("cppstd")<block_start>tools.check_min_cppstd(self 11)<block_end><block_end><def_stmt>requirements self# TODO: add GLUT (needed for ociodisplay tool) <block_start>self.requires("lcms/2.12")<line_sep>self.requires("yaml-cpp/0.7.0")<if_stmt>tools.Version(self.version)<l>"2.1.0"<block_start>self.requires("tinyxml/2.6.2")<block_end><if_stmt>tools.Version(self.version)<ge>"2.1.0"<block_start>self.requires("pystring/1.1.3")<block_end>self.requires("expat/2.4.1")<line_sep>self.requires("openexr/2.5.7")<block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version] destination=self._source_subfolder strip_root=<true>)<block_end><def_stmt>_configure_cmake self<block_start><if_stmt>self._cmake<block_start><return>self._cmake<block_end>self._cmake=CMake(self)<if_stmt>tools.Version(self.version)<ge>"2.1.0"<block_start>self._cmake.definitions["OCIO_BUILD_PYTHON"]=<false><block_end><else_stmt><block_start>self._cmake.definitions["OCIO_BUILD_SHARED"]=self.options.shared<line_sep>self._cmake.definitions["OCIO_BUILD_STATIC"]=<not>self.options.shared<line_sep>self._cmake.definitions["OCIO_BUILD_PYGLUE"]=<false><line_sep>self._cmake.definitions["USE_EXTERNAL_YAML"]=<true><line_sep>self._cmake.definitions["USE_EXTERNAL_TINYXML"]=<true><line_sep>self._cmake.definitions["USE_EXTERNAL_LCMS"]=<true><block_end>self._cmake.definitions["OCIO_USE_SSE"]=self.options.get_safe("use_sse" <false>)<line_sep># openexr 2.x provides Half library self._cmake.definitions["OCIO_USE_OPENEXR_HALF"]=<true><line_sep>self._cmake.definitions["OCIO_BUILD_APPS"]=<true><line_sep>self._cmake.definitions["OCIO_BUILD_DOCS"]=<false><line_sep>self._cmake.definitions["OCIO_BUILD_TESTS"]=<false><line_sep>self._cmake.definitions["OCIO_BUILD_GPU_TESTS"]=<false><line_sep>self._cmake.definitions["OCIO_USE_BOOST_PTR"]=<false><line_sep># avoid downloading dependencies self._cmake.definitions["OCIO_INSTALL_EXT_PACKAGE"]="NONE"<if_stmt>self.settings.compiler<eq>"Visual Studio"<and><not>self.options.shared# define any value because ifndef is used 
<block_start>self._cmake.definitions["OpenColorIO_SKIP_IMPORTS"]=<true><block_end>self._cmake.configure(build_folder=self._build_subfolder)<line_sep><return>self._cmake<block_end><def_stmt>_patch_sources self<block_start><for_stmt>patch self.conan_data.get("patches" {}).get(self.version [])<block_start>tools.patch(**patch)<block_end><for_stmt>module ("expat" "lcms2" "pystring" "yaml-cpp" "Imath")<block_start>tools.remove_files_by_mask(os.path.join(self._source_subfolder "share" "cmake" "modules") "Find"+module+".cmake")<block_end><block_end><def_stmt>build self<block_start>self._patch_sources()<line_sep>cm=self._configure_cmake()<line_sep>cm.build()<block_end><def_stmt>package self<block_start>cm=self._configure_cmake()<line_sep>cm.install()<if_stmt><not>self.options.shared<block_start>self.copy("*" src=os.path.join(self.package_folder "lib" "static") dst="lib")<line_sep>tools.rmdir(os.path.join(self.package_folder "lib" "static"))<block_end>tools.rmdir(os.path.join(self.package_folder "cmake"))<line_sep>tools.rmdir(os.path.join(self.package_folder "lib" "pkgconfig"))<line_sep>tools.rmdir(os.path.join(self.package_folder "lib" "cmake"))<line_sep>tools.rmdir(os.path.join(self.package_folder "share"))<line_sep># nop for 2.x tools.remove_files_by_mask(self.package_folder "OpenColorIOConfig*.cmake")<line_sep>tools.remove_files_by_mask(os.path.join(self.package_folder "bin") "*.pdb")<line_sep>self.copy("LICENSE" src=self._source_subfolder dst="licenses")<block_end><def_stmt>package_info self<block_start>self.cpp_info.names["cmake_find_package"]="OpenColorIO"<line_sep>self.cpp_info.names["cmake_find_package_multi"]="OpenColorIO"<line_sep>self.cpp_info.names["pkg_config"]="OpenColorIO"<line_sep>self.cpp_info.libs=tools.collect_libs(self)<if_stmt>tools.Version(self.version)<l>"2.1.0"<block_start><if_stmt><not>self.options.shared<block_start>self.cpp_info.defines.append("OpenColorIO_STATIC")<block_end><block_end><if_stmt>self.settings.os<eq>"Macos"<block_start>self.cpp_info.frameworks.extend(["Foundation" "IOKit" "ColorSync" "CoreGraphics"])<block_end><if_stmt>self.settings.compiler<eq>"Visual Studio"<and><not>self.options.shared<block_start>self.cpp_info.defines.append("OpenColorIO_SKIP_IMPORTS")<block_end>bin_path=os.path.join(self.package_folder "bin")<line_sep>self.output.info("Appending PATH env var with: {}".format(bin_path))<line_sep>self.env_info.PATH.append(bin_path)<block_end><block_end>
<def_stmt>_init <block_start><import_stmt>atexit<import_stmt>os<import_stmt>sys<try_stmt><block_start><import_stmt>readline<block_end><except_stmt>Exception<block_start>readline=<none><block_end><import_stmt>types<import_stmt>time<import_stmt>uuid<import_stmt>json<import_stmt>pprint<import_stmt>hashlib<import_stmt>subprocess<import_stmt>datetime<try_stmt><block_start><import_stmt>__builtin__<block_end><except_stmt>ImportError<block_start><import_stmt>builtins<as>__builtin__<block_end>PY2=sys.version_info[0]<eq>2<line_sep>__import__('rlcompleter')<line_sep>histdir=os.path.expanduser('~/.pyhist')<try_stmt><block_start>os.makedirs(histdir)<block_end><except_stmt>OSError<block_start><pass><block_end><if_stmt>PY2<block_start>text_type=unicode<block_end><else_stmt><block_start>text_type=str<block_end><def_stmt>_b x<block_start><if_stmt><not>isinstance(x bytes)<block_start>x=x.encode('utf-8')<block_end><return>x<block_end>histfile=os.path.join(histdir hashlib.sha1(os.path.normpath(_b(os.path.abspath(sys.prefix)))).hexdigest())<if_stmt>readline<is><not><none><block_start><try_stmt><block_start>readline.read_history_file(histfile)<block_end><except_stmt>IOError<block_start><pass><block_end><if_stmt>'libedit'<in>readline.__doc__<block_start>readline.parse_and_bind("bind '\t' rl_complete")<block_end><else_stmt><block_start>readline.parse_and_bind("tab: complete")<block_end>atexit.register(readline.write_history_file histfile)<block_end><def_stmt>_magic_uuid val=<none><block_start><if_stmt>val<is><none><block_start><return>uuid.uuid4()<block_end><elif_stmt>isinstance(val uuid.UUID)<block_start><return>val<block_end><elif_stmt>len(val)<eq>16<block_start><return>uuid.UUID(bytes=val)<block_end><return>uuid.UUID(val)<block_end><def_stmt>_dump_json x as_string=<false> indent=2 cp=<false><block_start>s='\n'.join(x.rstrip()<for>x json.dumps(x indent=indent).rstrip().splitlines())<if_stmt>cp<block_start>_copy(s)<block_end><if_stmt>as_string<block_start><return>s<block_end>print(s)<block_end><def_stmt>_cat path<block_start><with_stmt>open(path 'rb')<as>f<block_start><return>f.read()<block_end><block_end><def_stmt>_tcat path<block_start><return>_cat(path).decode('utf-8')<block_end><def_stmt>_paste <block_start><return>subprocess.Popen(['pbpaste'] stdout=subprocess.PIPE).communicate()[0]<block_end><def_stmt>_tpaste <block_start><return>_paste().decode('utf-8')<block_end><def_stmt>_jpaste <block_start><return>json.loads(_paste())<block_end><def_stmt>_copy val<block_start><if_stmt>isinstance(val text_type)<block_start>val=val.encode('utf-8')<block_end><return>subprocess.Popen(['pbcopy'] stdin=subprocess.PIPE).communicate(val)<block_end><def_stmt>_jcopy val indent=<none><block_start>_copy(_dump_json(val indent=indent 
as_string=<true>))<block_end>helpers=types.ModuleType('helpers')<line_sep>helpers.histfile=histfile<line_sep>helpers.pp=pprint.pprint<line_sep>helpers.uuid=_magic_uuid<line_sep>helpers.UUID=uuid.UUID<line_sep>helpers.uuid3=uuid.uuid3<line_sep>helpers.uuid4=uuid.uuid4<line_sep>helpers.uuid5=uuid.uuid5<line_sep>helpers.dt=datetime.datetime<line_sep>helpers.datetime=datetime.datetime<line_sep>helpers.td=datetime.timedelta<line_sep>helpers.timedelta=datetime.timedelta<line_sep>helpers.time=time.time<line_sep>helpers.j=_dump_json<line_sep>helpers.cat=_cat<line_sep>helpers.tcat=_tcat<line_sep>helpers.cp=_copy<line_sep>helpers.jcp=_jcopy<line_sep>helpers.copy=_copy<line_sep>helpers.jcopy=_jcopy<line_sep>helpers.paste=_paste<line_sep>helpers.tpaste=_tpaste<line_sep>helpers.jpaste=_jpaste<line_sep>__builtin__.h=helpers<line_sep>__builtin__.true=<true><line_sep>__builtin__.false=<false><line_sep>__builtin__.null=<none><block_end>_init()<del_stmt>_init<line_sep>
<import_stmt>turtle<line_sep>tina=turtle.Turtle()<line_sep>tina.pencolor("#ffcc33")<line_sep>tina.fillcolor("#ffcc33")<line_sep>tina.pensize(5)<line_sep>tina.begin_fill()<line_sep>tina.circle(80 360)<line_sep>tina.end_fill()<line_sep>tina.penup()<line_sep>tina.goto(-40 100)<line_sep>tina.pendown()<line_sep>tina.pencolor("#000000")<line_sep>tina.setheading(30)<line_sep>tina.circle((-30) 60)<line_sep>tina.penup()<line_sep>tina.goto(20 100)<line_sep>tina.pendown()<line_sep>tina.setheading(30)<line_sep>tina.circle((-30) 60)<line_sep>tina.penup()<line_sep>tina.goto(-20 60)<line_sep>tina.pendown()<line_sep>tina.setheading(-30)<line_sep>tina.circle(50 60)<line_sep>tina.penup()<line_sep>tina.goto(-30 -30)<line_sep>tina.pendown()<line_sep>tina.pencolor("#ffcc33")<line_sep>tina.setheading(60)<for_stmt>i range(0 12 1)<block_start>tina.circle((-35) 120)<line_sep>tina.left(150)<block_end>tina.hideturtle()<line_sep>
<import_from_stmt>checkov.arm.base_registry Registry<line_sep>arm_resource_registry=Registry()<line_sep>arm_parameter_registry=Registry()<line_sep>
<def_stmt>solve board i=0 j=0<block_start>i,j=nextCell(board i j)<if_stmt>i<eq>-1<block_start><return><true><block_end><for_stmt>e range(1 10)<block_start><if_stmt>isValid(board i j e)<block_start>board[i][j]=e<if_stmt>solve(board i j)<block_start><return><true><block_end>board[i][j]=0<block_end><block_end><return><false><block_end><def_stmt>print_board board<block_start><for_stmt>i range(len(board))<block_start><if_stmt>i%3<eq>0<and>i<ne>0<block_start>print("------------------------")<block_end><for_stmt>j range(len(board[0]))<block_start><if_stmt>j%3<eq>0<and>j<ne>0<block_start>print("|" end="")<block_end><if_stmt>j<eq>8<block_start>print(board[i][j])<block_end><else_stmt><block_start>print(str(board[i][j])+" " end="")<block_end><block_end><block_end><block_end><def_stmt>nextCell board i j<block_start><for_stmt>x range(i 9)<block_start><for_stmt>y range(j 9)<block_start><if_stmt>board[x][y]<eq>0<block_start><return>x y<block_end><block_end><block_end><for_stmt>x range(0 9)<block_start><for_stmt>y range(0 9)<block_start><if_stmt>board[x][y]<eq>0<block_start><return>x y<block_end><block_end><block_end><return>-1 -1<block_end><def_stmt>isValid board x y n<block_start><for_stmt>i range(9)<block_start><if_stmt>board[x][i]<eq>n<or>board[i][y]<eq>n<block_start><return><false><block_end><block_end>new_x=x<floordiv>3<times>3<line_sep>new_y=y<floordiv>3<times>3<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start><if_stmt>board[new_x+i][new_y+j]<eq>n<block_start><return><false><block_end><block_end><block_end><return><true><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("Enter the numbers row by row, and put 0 for empty space:")<line_sep>board=[[int(input())<for>x range(9)]<for>y range(9)]<line_sep>solve(board)<line_sep>print_board(board)<line_sep>""" Let's say we have this board: Empty space is replaced with 0. [[5, 3, 0, 0, 7, 0, 0, 0, 0], [6, 0, 0, 1, 9, 5, 0, 0, 0], [0, 9, 8, 0, 0, 0, 0, 6, 0], [8, 0, 0, 0, 6, 0, 0, 0, 3], [4, 0, 0, 8, 0, 3, 0, 0, 1], [7, 0, 0, 0, 2, 0, 0, 0, 6], [0, 6, 0, 0, 0, 0, 2, 8, 0], [0, 0, 0, 4, 1, 9, 0, 0, 5], [0, 0, 0, 0, 8, 0, 0, 7, 9]] # When the program asks for input we give like this : 5, 3, 0, 0, 7, 0, 0, 0, 0,6, 0, 0, 1, 9, 5, 0, 0, 0,0, 9, 8, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 6, 0, 0, 0, 3,4, 0, 0, 8, 0, 3, 0, 0, 1, 7, 0, 0, 0, 2, 0, 0, 0, 6, 0, 6, 0, 0, 0, 0, 2, 8, 0,0, 0, 0, 4, 1, 9, 0, 0, 5, 0, 0, 0, 0, 8, 0, 0, 7, 9 #output will look like this: 5 3 4 |6 7 8 |9 1 2 6 7 2 |1 9 5 |3 4 8 1 9 8 |3 4 2 |5 6 7 ------------------------ 8 5 9 |7 6 1 |4 2 3 4 2 6 |8 5 3 |7 9 1 7 1 3 |9 2 4 |8 5 6 ------------------------ 9 6 1 |5 3 7 |2 8 4 2 8 7 |4 1 9 |6 3 5 3 4 5 |2 8 6 |1 7 9 """<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making GameAISDK available. This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """<import_stmt>logging<import_from_stmt>collections OrderedDict<import_from_stmt>....common.define DQN_ACTION_TYPES CONTACTS AI_ACTION_TYPES<import_from_stmt>....config_manager.ai.ai_manager AIManager AIAlgorithmType<import_from_stmt>.action_data ActionData<import_from_stmt>...utils get_value<line_sep>logger=logging.getLogger("sdktool")<class_stmt>DqnActionData(ActionData)<block_start>@staticmethod<def_stmt>get_game_action_inner <block_start><return>AIManager().get_game_action(AIAlgorithmType.DQN)<block_end>@staticmethod<def_stmt>get_ai_action_inner <block_start><return>AIManager().get_ai_action(AIAlgorithmType.DQN)<block_end><def_stmt>game_action_extend_param self<block_start>param=OrderedDict()<line_sep>out_params=OrderedDict()<line_sep>out_params['path']=''<line_sep>out_params['region']=OrderedDict()<line_sep>out_params['region']['x']=0<line_sep>out_params['region']['y']=0<line_sep>out_params['region']['w']=0<line_sep>out_params['region']['h']=0<line_sep>param['actionRegion']=out_params<line_sep>param['durationMS']=0<line_sep><return>param<block_end><def_stmt>get_type_param self<block_start>out_params=OrderedDict()<line_sep>out_params['path']=''<line_sep>out_params['region']=OrderedDict()<line_sep>out_params['region']['x']=0<line_sep>out_params['region']['y']=0<line_sep>out_params['region']['w']=0<line_sep>out_params['region']['h']=0<line_sep><return>out_params<block_end><def_stmt>get_game_action_type_param self<block_start><return>DQN_ACTION_TYPES<block_end><def_stmt>init_swipe_params self params=<none><block_start><if_stmt>params<is><none><block_start>params=OrderedDict()<block_end>swipe_param=OrderedDict()<line_sep>swipe_param['startX']=get_value(params 'startX' 0)<line_sep>swipe_param['startY']=get_value(params 'startY' 0)<line_sep>swipe_param['endX']=get_value(params 'endX' 0)<line_sep>swipe_param['endY']=get_value(params 'endY' 0)<line_sep><return>swipe_param<block_end><def_stmt>new_game_action self action_name game_action<block_start>action_value=OrderedDict()<line_sep>action_value['id']=game_action.alloc_id()<line_sep>action_value['name']=action_name<line_sep>action_value['contact']=CONTACTS[0]<line_sep>action_value['sceneTask']=-1<line_sep>action_value['type']=AI_ACTION_TYPES[0]<line_sep><return>action_value<block_end><block_end>
<import_stmt>math<import_stmt>fastdtw<import_stmt>numpy<line_sep>_logdb_const=10.0/numpy.log(10.0)<times>numpy.sqrt(2.0)<line_sep># should work on torch and numpy arrays <def_stmt>_sqrt x<block_start>isnumpy=isinstance(x numpy.ndarray)<line_sep>isscalar=numpy.isscalar(x)<line_sep><return>numpy.sqrt(x)<if>isnumpy<else>math.sqrt(x)<if>isscalar<else>x.sqrt()<block_end><def_stmt>_exp x<block_start>isnumpy=isinstance(x numpy.ndarray)<line_sep>isscalar=numpy.isscalar(x)<line_sep><return>numpy.exp(x)<if>isnumpy<else>math.exp(x)<if>isscalar<else>x.exp()<block_end><def_stmt>_sum x<block_start><if_stmt>isinstance(x list)<or>isinstance(x numpy.ndarray)<block_start><return>numpy.sum(x)<block_end><return>float(x.sum())<block_end><def_stmt>melcd X Y lengths=<none><block_start>"""Mel-cepstrum distortion (MCD). The function computes MCD for time-aligned mel-cepstrum sequences. Args: X (ndarray): Input mel-cepstrum, shape can be either of (``D``,), (``T x D``) or (``B x T x D``). Both Numpy and torch arrays are supported. Y (ndarray): Target mel-cepstrum, shape can be either of (``D``,), (``T x D``) or (``B x T x D``). Both Numpy and torch arrays are supported. lengths (list): Lengths of padded inputs. This should only be specified if you give mini-batch inputs. Returns: float: Mean mel-cepstrum distortion in dB. .. note:: The function doesn't check if inputs are actually mel-cepstrum. """<line_sep># summing against feature axis, and then take mean against time axis # Eq. (1a) # https://www.cs.cmu.edu/~awb/papers/sltu2008/kominek_black.sltu_2008.pdf <if_stmt>lengths<is><none><block_start>z=X-Y<line_sep>r=_sqrt((z<times>z).sum(-1))<if_stmt><not>numpy.isscalar(r)<block_start>r=r.mean()<block_end><return>_logdb_const<times>r<block_end># Case for 1-dim features. <if_stmt>len(X.shape)<eq>2# Add feature axis <block_start>X,Y=X[: : <none>] Y[: : <none>]<block_end>s=0.0<line_sep>T=_sum(lengths)<for_stmt>x,y,length zip(X Y lengths)<block_start>x,y=x[:length] y[:length]<line_sep>z=x-y<line_sep>s<augadd>_sqrt((z<times>z).sum(-1)).sum()<block_end><return>_logdb_const<times>s/T<block_end><class_stmt>DTWAligner(object)<block_start>""" from https://github.com/r9y9/nnmnkwii/blob/4cade86b5c35b4e35615a2a8162ddc638018af0e/nnmnkwii/preprocessing/alignment.py#L14 """<def_stmt>__init__ self x y dist=<lambda>x y:numpy.linalg.norm(x-y) radius=1<arrow><none><block_start><assert_stmt>x.ndim<eq>2<and>y.ndim<eq>2<line_sep>_,path=fastdtw.fastdtw(x y radius=radius dist=dist)<line_sep>path=numpy.array(path)<line_sep>self.normed_path_x=path[: 0]/len(x)<line_sep>self.normed_path_y=path[: 1]/len(y)<block_end><def_stmt>align_x self x<block_start>path=self._interp_path(self.normed_path_x len(x))<line_sep><return>x[path]<block_end><def_stmt>align_y self y<block_start>path=self._interp_path(self.normed_path_y len(y))<line_sep><return>y[path]<block_end><def_stmt>align self x y<block_start><return>self.align_x(x) self.align_y(y)<block_end>@staticmethod<def_stmt>align_and_transform x y *args **kwargs<block_start>aligner=DTWAligner(*args x=x y=y **kwargs)<line_sep><return>aligner.align(x y)<block_end>@staticmethod<def_stmt>_interp_path normed_path:numpy.ndarray target_length:int<block_start>path=numpy.floor(normed_path<times>target_length).astype(numpy.int)<line_sep><return>path<block_end><block_end><class_stmt>MelCepstrumAligner(DTWAligner)<block_start><def_stmt>__init__ self x y *args 
**kwargs<arrow><none><block_start>x=self._calc_aligner_feature(x)<line_sep>y=self._calc_aligner_feature(y)<line_sep>kwargs.update(dist=melcd)<line_sep>super().__init__(x y *args **kwargs)<block_end>@classmethod<def_stmt>_calc_delta cls x# forward difference between consecutive frames; the delta of the last frame is zero <block_start>d=numpy.zeros_like(x x.dtype)<line_sep>d[:-1]=x[1:]-x[:-1]<line_sep>d[-1]=0<line_sep><return>d<block_end>@classmethod<def_stmt>_calc_aligner_feature cls x<block_start>d=cls._calc_delta(x)<line_sep>feature=numpy.concatenate((x d) axis=1)[: 1:]<line_sep><return>feature<block_end><block_end>
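# A minimal usage sketch, assuming this module is importable and numpy is available: mean mel-cepstrum distortion in dB between two already time-aligned sequences of shape (T, D). <if_stmt>__name__<eq>'__main__'<block_start>rng=numpy.random.RandomState(0)<line_sep>x=rng.randn(100 25)<line_sep>y=x+0.1<times>rng.randn(100 25)<line_sep>print('MCD [dB]: %f'%melcd(x y))<block_end>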
<import_from_stmt>setuptools setup find_packages<if_stmt>__name__<eq>"__main__"<block_start>setup(name="snapx" author="<EMAIL>" version="0.0.1" packages=find_packages() description="""SnapX: An experimental SNAP API with NetworkX-like interface""")<block_end>