#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed

from enaml.widgets.timer import ProxyTimer

from .QtCore import QTimer

from .qt_toolkit_object import QtToolkitObject


class QtTimer(QtToolkitObject, ProxyTimer):
    """ A Qt implementation of an Enaml ProxyTimer. """

    #: A reference to the widget created by the proxy.
    widget = Typed(QTimer)

    #--------------------------------------------------------------------------
    # Initialization
    #--------------------------------------------------------------------------
    def create_widget(self):
        """ Create the underlying timer object. """
        self.widget = QTimer()

    def init_widget(self):
        """ Initialize the widget. """
        super(QtTimer, self).init_widget()
        d = self.declaration
        self.set_interval(d.interval)
        self.set_single_shot(d.single_shot)
        self.widget.timeout.connect(self.on_timeout)

    def destroy(self):
        """ A reimplemented destructor.

        This stops the timer before invoking the superclass destructor.
        """
        self.widget.stop()
        super(QtTimer, self).destroy()

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------
    def on_timeout(self):
        """ Handle the timeout signal for the timer. """
        d = self.declaration
        if d is not None:
            d.timeout()

    #--------------------------------------------------------------------------
    # ProxyTimer API
    #--------------------------------------------------------------------------
    def set_interval(self, interval):
        """ Set the interval on the timer. """
        self.widget.setInterval(interval)

    def set_single_shot(self, single_shot):
        """ Set the single shot flag on the timer. """
        self.widget.setSingleShot(single_shot)

    def start(self):
        """ Start or restart the timer. """
        self.widget.start()

    def stop(self):
        """ Stop the timer. """
        self.widget.stop()

    def is_running(self):
        """ Get whether or not the timer is running. """
        return self.widget.isActive()
<import_stmt>math<import_from_stmt>pypy.module.cpyext pystrtod<import_from_stmt>pypy.module.cpyext.test.test_api BaseApiTest raises_w<import_from_stmt>rpython.rtyper.lltypesystem rffi<import_from_stmt>rpython.rtyper.lltypesystem lltype<import_from_stmt>pypy.module.cpyext.pystrtod PyOS_string_to_double<class_stmt>TestPyOS_string_to_double(BaseApiTest)<block_start><def_stmt>test_simple_float self space<block_start>s=rffi.str2charp('0.4')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<line_sep>r=PyOS_string_to_double(space s null <none>)<assert_stmt>r<eq>0.4<line_sep>rffi.free_charp(s)<block_end><def_stmt>test_empty_string self space<block_start>s=rffi.str2charp('')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<with_stmt>raises_w(space ValueError)<block_start>PyOS_string_to_double(space s null <none>)<block_end>rffi.free_charp(s)<block_end><def_stmt>test_bad_string self space<block_start>s=rffi.str2charp(' 0.4')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<with_stmt>raises_w(space ValueError)<block_start>PyOS_string_to_double(space s null <none>)<block_end>rffi.free_charp(s)<block_end><def_stmt>test_overflow_pos self space<block_start>s=rffi.str2charp('1e500')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<line_sep>r=PyOS_string_to_double(space s null <none>)<assert_stmt>math.isinf(r)<assert_stmt>r<g>0<line_sep>rffi.free_charp(s)<block_end><def_stmt>test_overflow_neg self space<block_start>s=rffi.str2charp('-1e500')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<line_sep>r=PyOS_string_to_double(space s null <none>)<assert_stmt>math.isinf(r)<assert_stmt>r<l>0<line_sep>rffi.free_charp(s)<block_end><def_stmt>test_overflow_exc self space<block_start>s=rffi.str2charp('1e500')<line_sep>null=lltype.nullptr(rffi.CCHARPP.TO)<with_stmt>raises_w(space ValueError)<block_start>PyOS_string_to_double(space s null space.w_ValueError)<block_end>rffi.free_charp(s)<block_end><def_stmt>test_endptr_number self space<block_start>s=rffi.str2charp('0.4')<line_sep>endp=lltype.malloc(rffi.CCHARPP.TO 1 flavor='raw')<line_sep>r=PyOS_string_to_double(space s endp <none>)<assert_stmt>r<eq>0.4<line_sep>endp_addr=rffi.cast(rffi.LONG endp[0])<line_sep>s_addr=rffi.cast(rffi.LONG s)<assert_stmt>endp_addr<eq>s_addr+3<line_sep>rffi.free_charp(s)<line_sep>lltype.free(endp flavor='raw')<block_end><def_stmt>test_endptr_tail self space<block_start>s=rffi.str2charp('0.4 foo')<line_sep>endp=lltype.malloc(rffi.CCHARPP.TO 1 flavor='raw')<line_sep>r=PyOS_string_to_double(space s endp <none>)<assert_stmt>r<eq>0.4<line_sep>endp_addr=rffi.cast(rffi.LONG endp[0])<line_sep>s_addr=rffi.cast(rffi.LONG s)<assert_stmt>endp_addr<eq>s_addr+3<line_sep>rffi.free_charp(s)<line_sep>lltype.free(endp flavor='raw')<block_end><def_stmt>test_endptr_no_conversion self space<block_start>s=rffi.str2charp('foo')<line_sep>endp=lltype.malloc(rffi.CCHARPP.TO 1 flavor='raw')<with_stmt>raises_w(space ValueError)<block_start>PyOS_string_to_double(space s endp <none>)<block_end>endp_addr=rffi.cast(rffi.LONG endp[0])<line_sep>s_addr=rffi.cast(rffi.LONG s)<assert_stmt>endp_addr<eq>s_addr<line_sep>rffi.free_charp(s)<line_sep>lltype.free(endp flavor='raw')<block_end><block_end><class_stmt>TestPyOS_double_to_string(BaseApiTest)<block_start><def_stmt>test_format_code self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(150.0 'e' 1 0 ptype)<assert_stmt>'1.5e+02'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed 
ptype[0])<assert_stmt>pystrtod.Py_DTST_FINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_precision self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(3.14159269397 'g' 5 0 ptype)<assert_stmt>'3.1416'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_FINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_flags_sign self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(-3.14 'g' 3 1 ptype)<assert_stmt>'-3.14'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_FINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_flags_add_dot_0 self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(3 'g' 5 2 ptype)<assert_stmt>'3.0'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_FINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_flags_alt self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(314. 'g' 3 4 ptype)<assert_stmt>'314.'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_FINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_ptype_nan self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(float('nan') 'g' 3 4 ptype)<assert_stmt>'nan'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_NAN<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_ptype_infinity self api<block_start>ptype=lltype.malloc(rffi.INTP.TO 1 flavor='raw')<line_sep>r=api.PyOS_double_to_string(1e200<times>1e200 'g' 0 0 ptype)<assert_stmt>'inf'<eq>rffi.charp2str(r)<line_sep>type_value=rffi.cast(lltype.Signed ptype[0])<assert_stmt>pystrtod.Py_DTST_INFINITE<eq>type_value<line_sep>rffi.free_charp(r)<line_sep>lltype.free(ptype flavor='raw')<block_end><def_stmt>test_ptype_null self api<block_start>ptype=lltype.nullptr(rffi.INTP.TO)<line_sep>r=api.PyOS_double_to_string(3.14 'g' 3 0 ptype)<assert_stmt>'3.14'<eq>rffi.charp2str(r)<assert_stmt>ptype<eq>lltype.nullptr(rffi.INTP.TO)<line_sep>rffi.free_charp(r)<block_end><block_end>
<import_stmt>pytest<import_stmt>cudf<import_stmt>mock<import_from_stmt>cuxfilter.charts.core.non_aggregate.core_non_aggregate BaseNonAggregate <import_from_stmt>cuxfilter.dashboard DashBoard<import_from_stmt>cuxfilter DataFrame<import_from_stmt>cuxfilter.layouts chart_view<class_stmt>TestCoreNonAggregateChart<block_start><def_stmt>test_variables self<block_start>bnac=BaseNonAggregate()<line_sep># BaseChart variables <assert_stmt>bnac.chart_type<is><none><assert_stmt>bnac.x<is><none><assert_stmt>bnac.y<is><none><assert_stmt>bnac.aggregate_fn<eq>"count"<assert_stmt>bnac.color<is><none><assert_stmt>bnac.height<eq>0<assert_stmt>bnac.width<eq>0<assert_stmt>bnac.add_interaction<is><true><assert_stmt>bnac.chart<is><none><assert_stmt>bnac.source<is><none><assert_stmt>bnac.source_backup<is><none><assert_stmt>bnac.data_points<eq>0<assert_stmt>bnac._library_specific_params<eq>{}<assert_stmt>bnac.stride<is><none><assert_stmt>bnac.stride_type<eq>int<assert_stmt>bnac.min_value<eq>0.0<assert_stmt>bnac.max_value<eq>0.0<assert_stmt>bnac.x_label_map<eq>{}<assert_stmt>bnac.y_label_map<eq>{}<assert_stmt>bnac.title<eq>""<line_sep># test chart name setter bnac.x="x"<line_sep>bnac.y="y"<line_sep>bnac.chart_type="test_chart_type"<assert_stmt>bnac.name<eq>"x_y_count_test_chart_type_"<line_sep># BaseNonAggregateChart variables <assert_stmt>bnac.use_data_tiles<is><false><assert_stmt>bnac.reset_event<is><none><assert_stmt>bnac.x_range<is><none><assert_stmt>bnac.y_range<is><none><assert_stmt>bnac.aggregate_col<is><none><block_end><def_stmt>test_label_mappers self<block_start>bnac=BaseNonAggregate()<line_sep>library_specific_params={"x_label_map":{"a":1 "b":2} "y_label_map":{"a":1 "b":2} }<line_sep>bnac.library_specific_params=library_specific_params<assert_stmt>bnac.x_label_map<eq>{"a":1 "b":2}<assert_stmt>bnac.y_label_map<eq>{"a":1 "b":2}<block_end>@pytest.mark.parametrize("chart, _chart" [(<none> <none>) (1 1)])<def_stmt>test_view self chart _chart<block_start>bnac=BaseNonAggregate()<line_sep>bnac.chart=chart<line_sep>bnac.width=400<line_sep>bnac.title="test_title"<assert_stmt>str(bnac.view())<eq>str(chart_view(_chart width=bnac.width title=bnac.title))<block_end><def_stmt>test_get_selection_geometry_callback self<block_start>bnac=BaseNonAggregate()<line_sep>df=cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<assert_stmt>(bnac.get_selection_geometry_callback(dashboard).__name__<eq>"selection_callback")<assert_stmt>callable(type(bnac.get_selection_geometry_callback(dashboard)))<block_end><def_stmt>test_box_selection_callback self<block_start>bnac=BaseNonAggregate()<line_sep>bnac.x="a"<line_sep>bnac.y="b"<line_sep>bnac.chart_type="temp"<line_sep>self.result=<none><def_stmt>t_function data patch_update=<false><block_start>self.result=data<block_end>bnac.reload_chart=t_function<line_sep>df=cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<line_sep>dashboard._active_view=bnac<class_stmt>evt<block_start>geometry=dict(x0=1 x1=2 y0=3 y1=4 type="rect")<block_end>t=bnac.get_selection_geometry_callback(dashboard)<line_sep>t(evt)<assert_stmt>self.result.equals(df.query("1<=a<=2 and 3<=b<=4"))<block_end><def_stmt>test_lasso_election_callback self<block_start>bnac=BaseNonAggregate()<line_sep>bnac.x="a"<line_sep>bnac.y="b"<line_sep>bnac.chart_type="temp"<def_stmt>t_function data patch_update=<false><block_start>self.result=data<block_end>bnac.reload_chart=t_function<line_sep>df=cudf.DataFrame({"a":[1 2 2] 
"b":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<class_stmt>evt<block_start>geometry=dict(x=[1 1 2] y=[1 2 1] type="poly")<line_sep>final=<true><block_end>t=bnac.get_selection_geometry_callback(dashboard)<with_stmt>mock.patch("cuspatial.point_in_polygon")<as>pip<block_start>pip.return_value=cudf.DataFrame({"selection":[<true> <false> <true>]})<line_sep>t(evt)<assert_stmt>pip.called<block_end><block_end>@pytest.mark.parametrize("data, _data" [(cudf.DataFrame() cudf.DataFrame()) (cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]}) cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]}) ) ] )<def_stmt>test_calculate_source self data _data<block_start>""" Calculate source just calls to the format_source_data function which is implemented by chart types inheriting this class. """<line_sep>bnac=BaseNonAggregate()<line_sep>self.result=<none><def_stmt>t_function data patch_update=<false><block_start>self.result=data<block_end>bnac.format_source_data=t_function<line_sep>bnac.calculate_source(data)<assert_stmt>self.result.equals(_data)<block_end>@pytest.mark.parametrize("x_range, y_range, query, local_dict" [((1 2) (3 4) "@x_min<=x<=@x_max and @y_min<=y<=@y_max" {"x_min":1 "x_max":2 "y_min":3 "y_max":4} ) ((0 2) (3 5) "@x_min<=x<=@x_max and @y_min<=y<=@y_max" {"x_min":0 "x_max":2 "y_min":3 "y_max":5} ) ] )<def_stmt>test_compute_query_dict self x_range y_range query local_dict<block_start>bnac=BaseNonAggregate()<line_sep>bnac.chart_type="test"<line_sep>bnac.x="x"<line_sep>bnac.y="y"<line_sep>bnac.x_range=x_range<line_sep>bnac.y_range=y_range<line_sep>df=cudf.DataFrame({"x":[1 2 2] "y":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<line_sep>bnac.compute_query_dict(dashboard._query_str_dict dashboard._query_local_variables_dict)<line_sep>bnac_key=(f"{bnac.x}_{bnac.y}"<concat>f"{'_'+bnac.aggregate_col<if>bnac.aggregate_col<else>''}"<concat>f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}")<assert_stmt>dashboard._query_str_dict[bnac_key]<eq>query<for_stmt>key local_dict<block_start><assert_stmt>(dashboard._query_local_variables_dict[key]<eq>local_dict[key])<block_end><block_end>@pytest.mark.parametrize("add_interaction, reset_event, event_1, event_2" [(<true> <none> "selection_callback" <none>) (<true> "test_event" "selection_callback" "reset_callback") (<false> "test_event" <none> "reset_callback") ] )<def_stmt>test_add_events self add_interaction reset_event event_1 event_2<block_start>bnac=BaseNonAggregate()<line_sep>bnac.add_interaction=add_interaction<line_sep>bnac.reset_event=reset_event<line_sep>df=cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<line_sep>self.event_1=<none><line_sep>self.event_2=<none><def_stmt>t_func fn<block_start>self.event_1=fn.__name__<block_end><def_stmt>t_func1 event fn<block_start>self.event_2=fn.__name__<block_end>bnac.add_selection_geometry_event=t_func<line_sep>bnac.add_event=t_func1<line_sep>bnac.add_events(dashboard)<assert_stmt>self.event_1<eq>event_1<assert_stmt>self.event_2<eq>event_2<block_end><def_stmt>test_add_reset_event self<block_start>bnac=BaseNonAggregate()<line_sep>bnac.chart_type="test"<line_sep>bnac.x="a"<line_sep>bnac.x_range=(0 2)<line_sep>bnac.y_range=(3 5)<line_sep>df=cudf.DataFrame({"a":[1 2 2] "b":[3 4 5]})<line_sep>dashboard=DashBoard(dataframe=DataFrame.from_dataframe(df))<line_sep>dashboard._active_view=bnac<def_stmt>t_func1 event 
fn<block_start>fn("event")<block_end>bnac.add_event=t_func1<line_sep>bnac.add_reset_event(dashboard)<assert_stmt>bnac.x_range<is><none><assert_stmt>bnac.y_range<is><none><block_end><def_stmt>test_query_chart_by_range self<block_start>bnac=BaseNonAggregate()<line_sep>bnac.chart_type="test"<line_sep>bnac.x="a"<line_sep>bnac_1=BaseNonAggregate()<line_sep>bnac_1.chart_type="test"<line_sep>bnac_1.x="b"<line_sep>query_tuple=(4 5)<line_sep>df=cudf.DataFrame({"a":[1 2 3 4] "b":[3 4 5 6]})<line_sep>bnac.source=df<line_sep>self.result=<none><line_sep>self.patch_update=<none><def_stmt>t_func data patch_update<block_start>self.result=data<line_sep>self.patch_update=patch_update<block_end># creating a dummy reload chart fn as its not implemented in core # non aggregate chart class bnac.reload_chart=t_func<line_sep>bnac.query_chart_by_range(active_chart=bnac_1 query_tuple=query_tuple datatile=<none>)<assert_stmt>self.result.to_string()<eq>" a b\n1 2 4\n2 3 5"<assert_stmt>self.patch_update<is><false><block_end>@pytest.mark.parametrize("new_indices, result" [([4 5] " a b\n1 2 4\n2 3 5") ([] " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6") ([3] " a b\n0 1 3") ] )<def_stmt>test_query_chart_by_indices self new_indices result<block_start>bnac=BaseNonAggregate()<line_sep>bnac.chart_type="test"<line_sep>bnac.x="a"<line_sep>bnac_1=BaseNonAggregate()<line_sep>bnac_1.chart_type="test"<line_sep>bnac_1.x="b"<line_sep>new_indices=new_indices<line_sep>df=cudf.DataFrame({"a":[1 2 3 4] "b":[3 4 5 6]})<line_sep>bnac.source=df<line_sep>self.result=<none><line_sep>self.patch_update=<none><def_stmt>t_func data patch_update<block_start>self.result=data<line_sep>self.patch_update=patch_update<block_end># creating a dummy reload chart fn as its not implemented in core # non aggregate chart class bnac.reload_chart=t_func<line_sep>bnac.query_chart_by_indices(active_chart=bnac_1 old_indices=[] new_indices=new_indices datatile=<none> )<assert_stmt>self.result.to_string()<eq>result<assert_stmt>self.patch_update<is><false><block_end><block_end>
class Session(list):
    """Abstract Session class"""

    def to_strings(self, user_id, session_id):
        """represent session as list of strings (one per event)"""
        user_id, session_id = str(user_id), str(session_id)
        session_type = self.get_type()
        strings = []
        for event, product in self:
            columns = [user_id, session_type, session_id, event, str(product)]
            strings.append(','.join(columns))
        return strings

    def get_type(self):
        # NotImplementedError is the proper exception type here
        # (raising the NotImplemented constant is a TypeError in Python 3).
        raise NotImplementedError


class OrganicSessions(Session):
    def __init__(self):
        super(OrganicSessions, self).__init__()

    def next(self, context, product):
        self.append({
            't': context.time(),
            'u': context.user(),
            'z': 'pageview',
            'v': product
        })

    def get_type(self):
        return 'organic'

    def get_views(self):
        return [p for _, _, e, p in self if e == 'pageview']
import argparse

import matplotlib.pyplot as plt
import torch
from pytorch_warmup import *


def get_rates(warmup_cls, beta2, max_step):
    rates = []
    p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32))
    optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2))
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
    warmup_scheduler = warmup_cls(optimizer)
    for step in range(1, max_step + 1):
        rates.append(optimizer.param_groups[0]['lr'])
        optimizer.zero_grad()
        optimizer.step()
        lr_scheduler.step()
        warmup_scheduler.dampen()
    return rates


parser = argparse.ArgumentParser(description='Warmup schedule')
parser.add_argument('--output', type=str, default='none',
                    choices=['none', 'png', 'pdf'],
                    help='Output file type (default: none)')
args = parser.parse_args()

beta2 = 0.999
max_step = 3000

plt.plot(range(1, max_step + 1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam')
plt.plot(range(1, max_step + 1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential')
plt.plot(range(1, max_step + 1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear')

plt.legend()
plt.title('Warmup Schedule')
plt.xlabel('Iteration')
plt.ylabel(r'Warmup factor $(\omega_t)$')
if args.output == 'none':
    plt.show()
else:
    plt.savefig(f'warmup_schedule.{args.output}')
""" Keepkey ******* """<import_from_stmt>..errors DEVICE_NOT_INITIALIZED DeviceNotReadyError common_err_msgs handle_errors <import_from_stmt>.trezorlib protobuf<as>p<import_from_stmt>.trezorlib.transport hid udp webusb <import_from_stmt>.trezor TrezorClient HID_IDS WEBUSB_IDS<import_from_stmt>.trezorlib.messages DebugLinkState Features HDNodeType ResetDevice <import_from_stmt>typing Any Dict List Optional <line_sep>py_enumerate=enumerate# Need to use the enumerate built-in but there's another function already named that KEEPKEY_HID_IDS={(0x2B24 0x0001)}<line_sep>KEEPKEY_WEBUSB_IDS={(0x2B24 0x0002)}<line_sep>KEEPKEY_SIMULATOR_PATH='127.0.0.1:11044'<line_sep>HID_IDS.update(KEEPKEY_HID_IDS)<line_sep>WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)<class_stmt>KeepkeyFeatures(Features)# type: ignore <block_start><def_stmt>__init__ self * firmware_variant:Optional[str]=<none> firmware_hash:Optional[bytes]=<none> **kwargs:Any <arrow><none><block_start>super().__init__(**kwargs)<line_sep>self.firmware_variant=firmware_variant<line_sep>self.firmware_hash=firmware_hash<block_end>@classmethod<def_stmt>get_fields cls<arrow>Dict[int p.FieldInfo]<block_start><return>{1:('vendor' p.UnicodeType <none>) 2:('major_version' p.UVarintType <none>) 3:('minor_version' p.UVarintType <none>) 4:('patch_version' p.UVarintType <none>) 5:('bootloader_mode' p.BoolType <none>) 6:('device_id' p.UnicodeType <none>) 7:('pin_protection' p.BoolType <none>) 8:('passphrase_protection' p.BoolType <none>) 9:('language' p.UnicodeType <none>) 10:('label' p.UnicodeType <none>) 12:('initialized' p.BoolType <none>) 13:('revision' p.BytesType <none>) 14:('bootloader_hash' p.BytesType <none>) 15:('imported' p.BoolType <none>) 16:('unlocked' p.BoolType <none>) 21:('model' p.UnicodeType <none>) 22:('firmware_variant' p.UnicodeType <none>) 23:('firmware_hash' p.BytesType <none>) 24:('no_backup' p.BoolType <none>) 25:('wipe_code_protection' p.BoolType <none>) }<block_end><block_end><class_stmt>KeepkeyResetDevice(ResetDevice)# type: ignore <block_start><def_stmt>__init__ self * auto_lock_delay_ms:Optional[int]=<none> **kwargs:Any <arrow><none><block_start>super().__init__(**kwargs)<line_sep>self.auto_lock_delay_ms=auto_lock_delay_ms<block_end>@classmethod<def_stmt>get_fields cls<arrow>Dict[int p.FieldInfo]<block_start><return>{1:('display_random' p.BoolType <none>) 2:('strength' p.UVarintType 256) # default=256 3:('passphrase_protection' p.BoolType <none>) 4:('pin_protection' p.BoolType <none>) 5:('language' p.UnicodeType "en-US") # default=en-US 6:('label' p.UnicodeType <none>) 7:('no_backup' p.BoolType <none>) 8:('auto_lock_delay_ms' p.UVarintType <none>) 9:('u2f_counter' p.UVarintType <none>) }<block_end><block_end><class_stmt>KeepkeyDebugLinkState(DebugLinkState)# type: ignore <block_start><def_stmt>__init__ self * recovery_cipher:Optional[str]=<none> recovery_auto_completed_word:Optional[str]=<none> firmware_hash:Optional[bytes]=<none> storage_hash:Optional[bytes]=<none> **kwargs:Any <arrow><none><block_start>super().__init__(**kwargs)<line_sep>self.recovery_cipher=recovery_cipher<line_sep>self.recovery_auto_completed_word=recovery_auto_completed_word<line_sep>self.firmware_hash=firmware_hash<line_sep>self.storage_hash=storage_hash<block_end>@classmethod<def_stmt>get_fields cls<arrow>Dict[int p.FieldType]<block_start><return>{1:('layout' p.BytesType <none>) 2:('pin' p.UnicodeType <none>) 3:('matrix' p.UnicodeType <none>) 4:('mnemonic_secret' p.BytesType <none>) 5:('node' HDNodeType <none>) 6:('passphrase_protection' p.BoolType <none>) 
7:('reset_word' p.UnicodeType <none>) 8:('reset_entropy' p.BytesType <none>) 9:('recovery_fake_word' p.UnicodeType <none>) 10:('recovery_word_pos' p.UVarintType <none>) 11:('recovery_cipher' p.UnicodeType <none>) 12:('recovery_auto_completed_word' p.UnicodeType <none>) 13:('firmware_hash' p.BytesType <none>) 14:('storage_hash' p.BytesType <none>) }<block_end><block_end><class_stmt>KeepkeyClient(TrezorClient)<block_start><def_stmt>__init__ self path:str password:str="" expert:bool=<false><arrow><none><block_start>""" The `KeepkeyClient` is a `HardwareWalletClient` for interacting with the Keepkey. As Keepkeys are clones of the Trezor 1, please refer to `TrezorClient` for documentation. """<line_sep>super(KeepkeyClient self).__init__(path password expert KEEPKEY_HID_IDS KEEPKEY_WEBUSB_IDS KEEPKEY_SIMULATOR_PATH)<line_sep>self.type='Keepkey'<line_sep>self.client.vendors=("keepkey.com")<line_sep>self.client.minimum_versions={"K1-14AM":(0 0 0)}<line_sep>self.client.map_type_to_class_override[KeepkeyFeatures.MESSAGE_WIRE_TYPE]=KeepkeyFeatures<line_sep>self.client.map_type_to_class_override[KeepkeyResetDevice.MESSAGE_WIRE_TYPE]=KeepkeyResetDevice<if_stmt>self.simulator<block_start>self.client.debug.map_type_to_class_override[KeepkeyDebugLinkState.MESSAGE_WIRE_TYPE]=KeepkeyDebugLinkState<block_end><block_end><block_end><def_stmt>enumerate password:str=""<arrow>List[Dict[str Any]]<block_start>results=[]<line_sep>devs=hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)<line_sep>devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))<line_sep>devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))<for_stmt>dev devs<block_start>d_data:Dict[str Any]={}<line_sep>d_data['type']='keepkey'<line_sep>d_data['model']='keepkey'<line_sep>d_data['path']=dev.get_path()<line_sep>client=<none><with_stmt>handle_errors(common_err_msgs["enumerate"] d_data)<block_start>client=KeepkeyClient(d_data['path'] password)<try_stmt><block_start>client.client.refresh_features()<block_end><except_stmt>TypeError<block_start><continue><block_end><if_stmt>'keepkey'<not><in>client.client.features.vendor<block_start><continue><block_end>d_data['label']=client.client.features.label<if_stmt>d_data['path'].startswith('udp:')<block_start>d_data['model']<augadd>'_simulator'<block_end>d_data['needs_pin_sent']=client.client.features.pin_protection<and><not>client.client.features.unlocked<line_sep>d_data['needs_passphrase_sent']=client.client.features.passphrase_protection# always need the passphrase sent for Keepkey if it has passphrase protection enabled <if_stmt>d_data['needs_pin_sent']<block_start><raise>DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')<block_end><if_stmt>d_data['needs_passphrase_sent']<and><not>password<block_start><raise>DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")<block_end><if_stmt>client.client.features.initialized<block_start>d_data['fingerprint']=client.get_master_fingerprint().hex()<line_sep>d_data['needs_passphrase_sent']=<false># Passphrase is always needed for the above to have worked, so it's already sent <block_end><else_stmt><block_start>d_data['error']='Not initialized'<line_sep>d_data['code']=DEVICE_NOT_INITIALIZED<block_end><block_end><if_stmt>client<block_start>client.close()<block_end>results.append(d_data)<block_end><return>results<block_end>
import unittest

import numpy as np

from astroNN.lamost import wavelength_solution, pseudo_continuum


class LamostToolsTestCase(unittest.TestCase):
    def test_wavelength_solution(self):
        wavelength_solution()
        wavelength_solution(dr=5)
        self.assertRaises(ValueError, wavelength_solution, dr=1)

    def test_norm(self):
        pseudo_continuum(np.ones(3909), np.ones(3909))


if __name__ == '__main__':
    unittest.main()
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
    Share,
    ShareProperties,
    File,
    FileProperties,
    Directory,
    DirectoryProperties,
    FileRange,
    ContentSettings,
    CopyProperties,
    SharePermissions,
    FilePermissions,
    DeleteSnapshot,
)
"""Python wrapper around the _clibs PicoSAT extension."""<import_stmt>os<import_from_stmt>tt.errors.arguments InvalidArgumentTypeError InvalidArgumentValueError <if_stmt>os.environ.get('READTHEDOCS')<ne>'True'<block_start><import_from_stmt>tt._clibs picosat<as>_c_picosat<line_sep>VERSION=_c_picosat.VERSION<block_end><def_stmt>sat_one clauses assumptions=<none><block_start>"""Find a solution that satisfies the specified clauses and assumptions. This provides a light Python wrapper around the same method in the PicoSAT C-extension. While completely tested and usable, this method is probably not as useful as the interface provided through the :func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>` class. :param clauses: CNF (AND of ORs) clauses; positive integers represent non-negated terms and negative integers represent negated terms. :type clauses: List[List[:class:`int <python:int>`]] :param assumptions: Assumed terms; same negation logic from ``clauses`` applies here. Note that assumptions *cannot* be an empty list; leave it as ``None`` if there are no assumptions to include. :type assumptions: List[:class:`int <python:int>`] :returns: If solution is found, a list of ints representing the terms of the solution; otherwise, if no solution found, ``None``. :rtype: List[:class:`int <python:int>`] or ``None`` :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of ints or ``assumptions`` is not a list of ints. :raises InvalidArgumentValueError: If any literal ints are equal to zero. Let's look at a simple example with no satisfiable solution:: >>> from tt import picosat >>> picosat.sat_one([[1], [-1]]) is None True Here's an example where a solution exists:: >>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]]) [1, -2, -3] Finally, here's an example using assumptions:: >>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3]) [-1, 2, -3] """<try_stmt><block_start><return>_c_picosat.sat_one(clauses assumptions=assumptions)<block_end><except_stmt>TypeError<as>e<block_start><raise>InvalidArgumentTypeError(str(e))<block_end><except_stmt>ValueError<as>e<block_start><raise>InvalidArgumentValueError(str(e))<block_end><block_end><def_stmt>sat_all clauses assumptions=<none><block_start>"""Find all solutions that satisfy the specified clauses and assumptions. This provides a light Python wrapper around the same method in the PicoSAT C-extension. While completely tested and usable, this method is probably not as useful as the interface provided through the :func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>` class. :param clauses: CNF (AND of ORs) clauses; positive integers represent non-negated terms and negative integers represent negated terms. :type clauses: List[List[:class:`int <python:int>`]] :param assumptions: Assumed terms; same negation logic from ``clauses`` applies here. Note that assumptions *cannot* be an empty list; leave it as ``None`` if there are no assumptions to include. :type assumptions: List[:class:`int <python:int>`] :returns: An iterator of solutions; if no satisfiable solutions exist, the iterator will be empty. :rtype: Iterator[List[:class:`int <python:int>`]] :raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of ints or ``assumptions`` is not a list of ints. :raises InvalidArgumentValueError: If any literal ints are equal to zero. 
Here's an example showing the basic usage:: >>> from tt import picosat >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]): ... print(solution) ... [1, 2, 3, 4] [1, 2, 3, -4] [1, 2, -3, 4] [1, 2, -3, -4] [1, -2, 3, 4] [1, -2, 3, -4] We can cut down on some of the above solutions by including an assumption:: >>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]], ... assumptions=[-3]): ... print(solution) ... [1, 2, -3, 4] [1, 2, -3, -4] """<try_stmt><block_start><return>_c_picosat.sat_all(clauses assumptions=assumptions)<block_end><except_stmt>TypeError<as>e<block_start><raise>InvalidArgumentTypeError(str(e))<block_end><except_stmt>ValueError<as>e<block_start><raise>InvalidArgumentValueError(str(e))<block_end><block_end>
""" Schedule adjustments are functions that accept a `datetime` and modify it in some way. Adjustments have the signature `Callable[[datetime], datetime]`. """<import_from_stmt>datetime datetime timedelta<import_from_stmt>typing Callable<import_stmt>pendulum<import_stmt>prefect.schedules.filters<def_stmt>add interval:timedelta<arrow>Callable[[datetime] datetime]<block_start>""" Adjustment that adds a specified interval to the date. Args: - interval (timedelta): the amount of time to add Returns: - Callable[[datetime], bool]: the adjustment function """<def_stmt>_adjustment_fn dt:datetime<arrow>datetime<block_start><return>pendulum.instance(dt)+interval<block_end><return>_adjustment_fn<block_end><def_stmt>next_weekday dt:datetime<arrow>datetime<block_start>""" Adjustment that advances a date to the next weekday. If the date is already a weekday, it is returned unadjusted. Args: - dt (datetime): the datetime to adjust Returns: - datetime: the adjusted datetime """<line_sep>pdt=pendulum.instance(dt)<while_stmt><not>prefect.schedules.filters.is_weekday(pdt)<block_start>pdt=pdt.add(days=1)<block_end><return>pdt<block_end>
<import_from_stmt>collections.abc Iterable<import_stmt>warnings<import_from_stmt>hdmf.utils docval popargs call_docval_func get_docval<import_from_stmt>. register_class CORE_NAMESPACE<import_from_stmt>.core NWBDataInterface NWBData<class_stmt>RetinotopyImage(NWBData)<block_start>"""Gray-scale anatomical image of cortical surface. Array structure: [rows][columns] """<line_sep>__nwbfields__=('bits_per_pixel' 'dimension' 'format' 'field_of_view')<line_sep>@docval({'name':'name' 'type':str 'doc':'Name of this retinotopy image'} {'name':'data' 'type':Iterable 'doc':'Data field.'} {'name':'bits_per_pixel' 'type':int 'doc':'Number of bits used to represent each value. This is necessary to determine maximum '<concat>'(white) pixel value.'} {'name':'dimension' 'type':Iterable 'shape':(2 ) 'doc':'Number of rows and columns in the image.'} {'name':'format' 'type':Iterable 'doc':'Format of image. Right now only "raw" supported.'} {'name':'field_of_view' 'type':Iterable 'shape':(2 ) 'doc':'Size of viewing area, in meters.'})<def_stmt>__init__ self **kwargs<block_start>bits_per_pixel,dimension,format,field_of_view=popargs('bits_per_pixel' 'dimension' 'format' 'field_of_view' kwargs)<line_sep>call_docval_func(super().__init__ kwargs)<line_sep>self.bits_per_pixel=bits_per_pixel<line_sep>self.dimension=dimension<line_sep>self.format=format<line_sep>self.field_of_view=field_of_view<block_end><block_end><class_stmt>FocalDepthImage(RetinotopyImage)<block_start>"""Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) as data collection. Array format: [rows][columns]. """<line_sep>__nwbfields__=('focal_depth' )<line_sep>@docval(*get_docval(RetinotopyImage.__init__) {'name':'focal_depth' 'type':'float' 'doc':'Focal depth offset, in meters.'})<def_stmt>__init__ self **kwargs<block_start>focal_depth=popargs('focal_depth' kwargs)<line_sep>call_docval_func(super().__init__ kwargs)<line_sep>self.focal_depth=focal_depth<block_end><block_end><class_stmt>RetinotopyMap(NWBData)<block_start>"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude) """<line_sep>__nwbfields__=('field_of_view' 'dimension')<line_sep>@docval({'name':'name' 'type':str 'doc':'the name of this axis map'} {'name':'data' 'type':Iterable 'shape':(<none> <none>) 'doc':'data field.'} {'name':'field_of_view' 'type':Iterable 'shape':(2 ) 'doc':'Size of viewing area, in meters.'} {'name':'dimension' 'type':Iterable 'shape':(2 ) 'doc':'Number of rows and columns in the image'})<def_stmt>__init__ self **kwargs<block_start>field_of_view,dimension=popargs('field_of_view' 'dimension' kwargs)<line_sep>call_docval_func(super().__init__ kwargs)<line_sep>self.field_of_view=field_of_view<line_sep>self.dimension=dimension<block_end><block_end><class_stmt>AxisMap(RetinotopyMap)<block_start>"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude) with unit """<line_sep>__nwbfields__=('unit' )<line_sep>@docval(*get_docval(RetinotopyMap.__init__ 'name' 'data' 'field_of_view') {'name':'unit' 'type':str 'doc':'Unit that axis data is stored in (e.g., degrees)'} *get_docval(RetinotopyMap.__init__ 'dimension'))<def_stmt>__init__ self **kwargs<block_start>unit=popargs('unit' kwargs)<line_sep>call_docval_func(super().__init__ kwargs)<line_sep>self.unit=unit<block_end><block_end>@register_class('ImagingRetinotopy' CORE_NAMESPACE)<class_stmt>ImagingRetinotopy(NWBDataInterface)<block_start>""" Intrinsic signal optical imaging or widefield imaging for measuring 
retinotopy. Stores orthogonal maps (e.g., altitude/azimuth; radius/theta) of responses to specific stimuli and a combined polarity map from which to identify visual areas. This group does not store the raw responses imaged during retinotopic mapping or the stimuli presented, but rather the resulting phase and power maps after applying a Fourier transform on the averaged responses. Note: for data consistency, all images and arrays are stored in the format [row][column] and [row, col], which equates to [y][x]. Field of view and dimension arrays may appear backward (i.e., y before x). """<line_sep>__nwbfields__=({'name':'sign_map' 'child':<true>} {'name':'axis_1_phase_map' 'child':<true>} {'name':'axis_1_power_map' 'child':<true>} {'name':'axis_2_phase_map' 'child':<true>} {'name':'axis_2_power_map' 'child':<true>} {'name':'focal_depth_image' 'child':<true>} {'name':'vasculature_image' 'child':<true>} 'axis_descriptions')<line_sep>@docval({'name':'sign_map' 'type':RetinotopyMap 'doc':'Sine of the angle between the direction of the gradient in axis_1 and axis_2.'} {'name':'axis_1_phase_map' 'type':AxisMap 'doc':'Phase response to stimulus on the first measured axis.'} {'name':'axis_1_power_map' 'type':AxisMap 'doc':'Power response on the first measured axis. Response is scaled so 0.0 is no power in '<concat>'the response and 1.0 is maximum relative power.'} {'name':'axis_2_phase_map' 'type':AxisMap 'doc':'Phase response to stimulus on the second measured axis.'} {'name':'axis_2_power_map' 'type':AxisMap 'doc':'Power response on the second measured axis. Response is scaled so 0.0 is no '<concat>'power in the response and 1.0 is maximum relative power.'} {'name':'axis_descriptions' 'type':Iterable 'shape':(2 ) 'doc':'Two-element array describing the contents of the two response axis fields. '<concat>'Description should be something like ["altitude", "azimuth"] or ["radius", "theta"].'} {'name':'focal_depth_image' 'type':FocalDepthImage 'doc':'Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) '<concat>'as data collection. Array format: [rows][columns].'} {'name':'vasculature_image' 'type':RetinotopyImage 'doc':'Gray-scale anatomical image of cortical surface. Array structure: [rows][columns].'} {'name':'name' 'type':str 'doc':'the name of this container' 'default':'ImagingRetinotopy'})<def_stmt>__init__ self **kwargs<block_start>axis_1_phase_map,axis_1_power_map,axis_2_phase_map,axis_2_power_map,axis_descriptions,focal_depth_image,sign_map,vasculature_image=popargs('axis_1_phase_map' 'axis_1_power_map' 'axis_2_phase_map' 'axis_2_power_map' 'axis_descriptions' 'focal_depth_image' 'sign_map' 'vasculature_image' kwargs)<line_sep>call_docval_func(super().__init__ kwargs)<line_sep>warnings.warn("The ImagingRetinotopy class currently cannot be written to or read from a file. "<concat>"This is a known bug and will be fixed in a future release of PyNWB.")<line_sep>self.axis_1_phase_map=axis_1_phase_map<line_sep>self.axis_1_power_map=axis_1_power_map<line_sep>self.axis_2_phase_map=axis_2_phase_map<line_sep>self.axis_2_power_map=axis_2_power_map<line_sep>self.axis_descriptions=axis_descriptions<line_sep>self.focal_depth_image=focal_depth_image<line_sep>self.sign_map=sign_map<line_sep>self.vasculature_image=vasculature_image<block_end><block_end>
""" Copyright (c) 2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>nncf.common.quantization.quantizer_propagation.structs QuantizationTrait<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXConvolutionMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXLinearMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXSigmoidMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXHardSigmoidMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXAveragePoolMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXGlobalAveragePoolMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXAddLayerMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXMulLayerMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXConcatLayerMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXBatchNormMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXResizeMetatype<import_from_stmt>nncf.experimental.onnx.graph.metatypes.onnx_ops ONNXSoftmaxMetatype<import_from_stmt>nncf.common.graph.operator_metatypes UnknownMetatype<line_sep>DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT={QuantizationTrait.INPUTS_QUANTIZABLE:[ONNXConvolutionMetatype ONNXLinearMetatype ONNXAveragePoolMetatype ONNXGlobalAveragePoolMetatype ONNXAddLayerMetatype ONNXMulLayerMetatype ONNXBatchNormMetatype ONNXHardSigmoidMetatype ONNXResizeMetatype ] QuantizationTrait.NON_QUANTIZABLE:[ONNXSigmoidMetatype ONNXSoftmaxMetatype UnknownMetatype] QuantizationTrait.CONCAT:[ONNXConcatLayerMetatype] QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS:[]}<line_sep>
import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *

# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#

# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"

# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
#   0   EB1 (eta<0.8)  pt 5-10 GeV     |   pt < ptSplit && |eta| < ebSplit
#   1   EB2 (eta>=0.8) pt 5-10 GeV     |   pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
#   2   EE             pt 5-10 GeV     |   pt < ptSplit && |eta| >= ebeeSplit
#   3   EB1 (eta<0.8)  pt 10-inf GeV   |   pt >= ptSplit && |eta| < ebSplit
#   4   EB2 (eta>=0.8) pt 10-inf GeV   |   pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
#   5   EE             pt 10-inf GeV   |   pt >= ptSplit && |eta| >= ebeeSplit

mvaFall17WeightFiles_V1 = cms.vstring(
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
    "RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz")

# Working points tuned to give about 90% and 80% signal efficiency for electrons
# from Drell-Yan with pT > 25 GeV.
# The working point for the low pt categories is just taken over from the high pt.
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
    idName = idName90, mvaTag = mvaTag,
    cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109",     # EB1 low pt
    cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494",   # EB2 low pt
    cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506",  # EE low pt
    cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206",    # EB1
    cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787",    # EB2
    cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547"     # EE
    )

idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
    idName = idName80, mvaTag = mvaTag,
    cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271",   # EB1 low pt
    cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946",   # EB2 low pt
    cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265",   # EE low pt
    cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097",    # EB1
    cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688",    # EB2
    cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942"      # EE
    )

### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
    idName = idNamewpLoose, mvaTag = mvaTag,
    cutCategory0 = "-0.13285867293779202",  # EB1 low pt
    cutCategory1 = "-0.31765300958836074",  # EB2 low pt
    cutCategory2 = "-0.0799205914718861",   # EE low pt
    cutCategory3 = "-0.856871961305474",    # EB1
    cutCategory4 = "-0.8107642141584835",   # EB2
    cutCategory5 = "-0.7179265933023059"    # EE
    )

#
# Finally, set up VID configuration for all cuts
#

# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
    mvaName = cms.string(mvaClassName),
    mvaTag = cms.string(mvaTag),
    # Category parameters
    nCategories = cms.int32(6),
    categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
    # Weight files and variable definitions
    weightFileNames = mvaFall17WeightFiles_V1,
    variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
    )

# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID(MVA_WPLoose)
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID(MVA_WP90)
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID(MVA_WP80)

mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
# ============================================================================== # Copyright 2018-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """nGraph TensorFlow bridge elementwise operations test """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<line_sep>tf.compat.v1.disable_eager_execution()<import_from_stmt>common NgraphTest<class_stmt>TestElementwiseOperations(NgraphTest)<block_start>@pytest.mark.parametrize(("v1" "v2" "expected") ((1.0 -1.0 [1.0]) (100 200 ([200] )) ([0.0 5.0 10.0] [6.0] (np.array([[6.0 6.0 10.0]]) ))))<def_stmt>test_maximum self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>out=tf.maximum(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("v1" "v2" "expected") ((1.4 1.0 [<false>]) (-1.0 -1.0 ([<true>] )) (-1.0 1000 [<true>]) (200 200 ([<true>] )) ([-1.0 1.0 -4] [0.1 0.1 -4] (np.array([[<true> <false> <true>]]) )) ([-1.0 1.0 -4] [-1.0] (np.array([[<true> <false> <true>]]) ))))<def_stmt>test_less_equal self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>out=tf.less_equal(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("v1" "v2" "expected") ((1.4 1.0 [<false>]) (-1.0 -1.0 ([<false>] )) (-1.0 1000 [<true>]) (200 200 ([<false>] )) ([-1.0 1.0 -4] [0.1 0.1 -4] (np.array([[<true> <false> <false>]]) )) ([-1.0 1.0 -4] [-1.0] (np.array([[<false> <false> <true>]]) ))))<def_stmt>test_less self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>out=tf.less(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("v1" "v2" "expected") ((1.4 1.0 [<true>]) (-1.0 -1.0 ([<true>] )) (-1.0 1000 [<false>]) (200 200 ([<true>] )) ([-1.0 1.0 -4] [0.1 0.1 -4] (np.array([[<false> <true> <true>]]) )) ([-1.0 1.0 -4] [-1.0] (np.array([[<true> <true> <false>]]) ))))<def_stmt>test_greater_equal self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.float32 
shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>out=tf.greater_equal(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("v1" "v2" "expected") ((1.4 1.0 [<true>]) (-1.0 -1.0 ([<false>] )) (-1.0 1000 [<false>]) (200 200 ([<false>] )) ([-1.0 1.0 -4] [0.1 0.1 -4] (np.array([[<false> <true> <false>]]) )) ([-1.0 1.0 -4] [-1.0] (np.array([[<false> <true> <false>]]) ))))<def_stmt>test_greater self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.float32 shape=(<none>))<line_sep>out=tf.greater(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("v1" "v2" "expected") ((<true> <true> [<true>]) (<true> <false> ([<false>] )) (1.0 -2.0 ([<true>] )) (<false> 100 ([<false>] )) ([<false> <true> <false>] [<true>] (np.array([[<false> <true> <false>]]) ))))<def_stmt>test_logical_and self v1 v2 expected<block_start>val1=tf.compat.v1.placeholder(tf.bool shape=(<none>))<line_sep>val2=tf.compat.v1.placeholder(tf.bool shape=(<none>))<line_sep>out=tf.logical_and(val1 val2)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val1:(v1 ) val2:(v2 )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end>@pytest.mark.parametrize(("test_input" "expected") ((<false> <true>) (<true> <false>)))<def_stmt>test_logicalnot_1d self test_input expected<block_start>val=tf.compat.v1.placeholder(tf.bool shape=(1 ))<line_sep>out=tf.logical_not(val)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val:(test_input )})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end><def_stmt>test_logicalnot_2d self<block_start>test_input=((<true> <false> <true>) (<false> <true> <false>))<line_sep>expected=np.logical_not(test_input)<line_sep>val=tf.compat.v1.placeholder(tf.bool shape=(2 3))<line_sep>out=tf.logical_not(val)<line_sep>sess_fn=<lambda>sess:sess.run((out ) feed_dict={val:test_input})[0]<assert_stmt>(self.with_ngraph(sess_fn)<eq>self.without_ngraph(sess_fn)).all()<assert_stmt>(self.with_ngraph(sess_fn)<eq>expected).all()<block_end><block_end>
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generator for image data."""

import os
import lingvo.compat as tf
from lingvo.core import base_input_generator
from tensorflow.python.ops import io_ops


class _MnistInputBase(base_input_generator.BaseTinyDatasetInput):
  """Base input params for MNIST."""

  @classmethod
  def Params(cls):
    """Defaults params."""
    p = super().Params()
    p.data_dtype = tf.uint8
    p.data_shape = (28, 28, 1)
    p.label_dtype = tf.uint8
    return p

  def _Preprocess(self, raw):
    data = tf.stack([
        tf.image.per_image_standardization(img) for img in tf.unstack(raw)
    ])
    data.set_shape(raw.shape)
    return data


class MnistTrainInput(_MnistInputBase):
  """MNist training set."""

  @classmethod
  def Params(cls):
    """Defaults params."""
    p = super().Params()
    p.data = 'x_train'
    p.label = 'y_train'
    p.num_samples = 60000
    p.batch_size = 256
    p.repeat = True
    return p


class MnistTestInput(_MnistInputBase):
  """MNist test set."""

  @classmethod
  def Params(cls):
    """Defaults params."""
    p = super().Params()
    p.data = 'x_test'
    p.label = 'y_test'
    p.num_samples = 10000
    p.batch_size = 256
    p.repeat = False
    return p


def _GetRandomImages(batch_size):
  images = tf.random.uniform((batch_size, 28, 28, 1), 0, 255, tf.int32)
  return tf.cast(images, tf.uint8)


def _GetRandomLabels(batch_size):
  labels = tf.random.categorical(0.1 * tf.ones((1, 10)), batch_size)
  return tf.cast(labels, tf.uint8)


def FakeMnistData(tmpdir, train_size=60000, test_size=10000):
  """Fake Mnist data for unit tests."""
  data_path = os.path.join(tmpdir, 'ckpt')
  with tf.Graph().as_default():
    tf.random.set_seed(91)
    with tf.Session() as sess:
      sess.run(
          io_ops.save_v2(
              data_path,
              tensor_names=['x_train', 'y_train', 'x_test', 'y_test'],
              shape_and_slices=['', '', '', ''],
              tensors=[
                  _GetRandomImages(train_size),
                  _GetRandomLabels(train_size),
                  _GetRandomImages(test_size),
                  _GetRandomLabels(test_size)
              ]))
  return data_path
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right <import_from_stmt>collections deque<import_from_stmt>collections defaultdict<class_stmt>Solution(object)<block_start><def_stmt>verticalOrder self root<block_start>""" :type root: TreeNode :rtype: List[List[int]] """<if_stmt><not>root<block_start><return>[]<block_end>queue=deque([(root 0)])<line_sep>verticalNodeMap=defaultdict(list)<while_stmt>queue<block_start>node,horizontalDistance=queue.popleft()<if_stmt>node<block_start>verticalNodeMap[horizontalDistance].append(node.val)<line_sep>queue.append((node.left horizontalDistance-1))<line_sep>queue.append((node.right horizontalDistance+1))<block_end><block_end>minHorizontalDistance,maxHorizontalDistance=min(verticalNodeMap.keys()) max(verticalNodeMap.keys())<line_sep>result=[]<for_stmt>key range(minHorizontalDistance maxHorizontalDistance+1)<block_start>result.append(verticalNodeMap[key])<block_end><return>result<block_end><block_end># My solution during mock; it was getting TLE because the loop kept re-enqueuing root's children instead of currentNode's children (fixed below) <import_from_stmt>collections defaultdict<import_from_stmt>collections deque<class_stmt>Solution(object)<block_start><def_stmt>verticalOrder self root<block_start>""" :type root: TreeNode :rtype: List[List[int]] """<if_stmt><not>root<block_start><return>[]<block_end>orderMap=defaultdict(list)<line_sep>queue=deque([(root 0)])<while_stmt>queue<block_start>currentNode,vLine=queue.popleft()<if_stmt>currentNode<block_start>orderMap[vLine].append(currentNode.val)<line_sep>queue.append((currentNode.left vLine-1))<line_sep>queue.append((currentNode.right vLine+1))<block_end><block_end>result=[]<for_stmt>i range(min(orderMap.keys()) max(orderMap.keys())+1)<block_start>result.append(orderMap[i])<block_end><return>result<block_end><block_end>
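A quick smoke test for the column-order traversal above, not part of the original file: the tree shape and values are invented for illustration, and Solution refers to the class defined last in the snippet above.

class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#      3
#     / \
#    9  20
#       / \
#     15   7
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(Solution().verticalOrder(root))  # expected column order: [[9], [3, 15], [20], [7]]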
"""Test API utilities."""<import_stmt>json<import_from_stmt>pytradfri.api.libcoap_api APIFactory<import_from_stmt>pytradfri.gateway Gateway<def_stmt>test_constructor_timeout_passed_to_subprocess monkeypatch<block_start>"""Test that original timeout is passed to subprocess."""<line_sep>capture={}<def_stmt>capture_args *args **kwargs<block_start>capture.update(kwargs)<line_sep><return>json.dumps([])<block_end>monkeypatch.setattr("subprocess.check_output" capture_args)<line_sep>api=APIFactory("anything" timeout=20 psk="abc")<line_sep>api.request(Gateway().get_devices())<assert_stmt>capture["timeout"]<eq>20<block_end><def_stmt>test_custom_timeout_passed_to_subprocess monkeypatch<block_start>"""Test that custom timeout is passed to subprocess."""<line_sep>capture={}<def_stmt>capture_args *args **kwargs<block_start>capture.update(kwargs)<line_sep><return>json.dumps([])<block_end>monkeypatch.setattr("subprocess.check_output" capture_args)<line_sep>api=APIFactory("anything" psk="abc")<line_sep>api.request(Gateway().get_devices() timeout=1)<assert_stmt>capture["timeout"]<eq>1<block_end>
<import_stmt>re<line_sep>regex=re.compile('[^a-zA-Z]')<def_stmt>score_word word corpus=<none><block_start>word=regex.sub('' word)# leave only alpha score=0<line_sep>consec_bonus=2<for_stmt>i,letter enumerate(word)<block_start><if_stmt>letter.islower()<block_start><continue><block_end><if_stmt>i<g>0<and>word[i-1].isupper()<block_start>score<augadd>consec_bonus<block_end><if_stmt>i<eq>0<block_start>score<augadd>10<block_end><elif_stmt>(i<eq>1)<or>(i<eq>len(word)-1)<block_start>score<augadd>3<block_end><else_stmt><block_start>score<augadd>1<block_end><if_stmt>(i<ge>1)<and>(corpus<is><not><none>)<and>(word[i:].lower()<in>corpus)<block_start>score<augadd>len(word[i:])-1<block_end><block_end><return>score<block_end><def_stmt>score_acronym capitalized_acronym corpus=<none><block_start>""" For each capitalized letter in the acronym: * 10 points if first letter in a word (with exception of first letter) * 3 points if second or last letter in a word * 1 point otherwise * N bonus points if begins an N-length valid sub-word (ex: multiVariable -> 8 bonus points) * 2 bonus points if immediately following a capitalized letter """<line_sep><return>sum([score_word(word corpus=corpus)<for>word capitalized_acronym.split(' ')])-10<block_end>
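A small usage sketch, not part of the original module: the phrases and the corpus below are made-up inputs, and the printed numbers simply follow the heuristic documented in the docstring above.

corpus = {'variable', 'analysis'}  # assumed: a set of known lower-case sub-words
for phrase in ('Modular Analysis Tool', 'Multi Variable Analysis'):
    print(phrase, '->', score_acronym(phrase, corpus=corpus))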
<import_from_stmt>django.core.exceptions PermissionDenied<import_from_stmt>django.shortcuts get_object_or_404 redirect<import_from_stmt>django.template.response TemplateResponse<import_from_stmt>django.urls reverse<import_from_stmt>django.utils.translation gettext<as>_<import_from_stmt>wagtail.admin messages<import_from_stmt>wagtail.admin.views.pages.utils get_valid_next_url_from_request<import_from_stmt>wagtail.core hooks<import_from_stmt>wagtail.core.models Page UserPagePermissionsProxy<def_stmt>unpublish request page_id<block_start>page=get_object_or_404(Page id=page_id).specific<line_sep>user_perms=UserPagePermissionsProxy(request.user)<if_stmt><not>user_perms.for_page(page).can_unpublish()<block_start><raise>PermissionDenied<block_end>next_url=get_valid_next_url_from_request(request)<if_stmt>request.method<eq>'POST'<block_start>include_descendants=request.POST.get("include_descendants" <false>)<for_stmt>fn hooks.get_hooks('before_unpublish_page')<block_start>result=fn(request page)<if_stmt>hasattr(result 'status_code')<block_start><return>result<block_end><block_end>page.unpublish(user=request.user)<if_stmt>include_descendants<block_start><for_stmt>live_descendant_page page.get_descendants().live().defer_streamfields().specific()<block_start><if_stmt>user_perms.for_page(live_descendant_page).can_unpublish()<block_start>live_descendant_page.unpublish()<block_end><block_end><block_end><for_stmt>fn hooks.get_hooks('after_unpublish_page')<block_start>result=fn(request page)<if_stmt>hasattr(result 'status_code')<block_start><return>result<block_end><block_end>messages.success(request _("Page '{0}' unpublished.").format(page.get_admin_display_title()) buttons=[messages.button(reverse('wagtailadmin_pages:edit' args=(page.id )) _('Edit'))])<if_stmt>next_url<block_start><return>redirect(next_url)<block_end><return>redirect('wagtailadmin_explore' page.get_parent().id)<block_end><return>TemplateResponse(request 'wagtailadmin/pages/confirm_unpublish.html' {'page':page 'next':next_url 'live_descendant_count':page.get_descendants().live().count() })<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>cPickle<as>pickle<import_from_stmt>keras backend<as>K<import_from_stmt>keras.utils np_utils<import_from_stmt>keras.preprocessing sequence<import_from_stmt>random shuffle<import_stmt>itertools<def_stmt>load_dataset filename<block_start><with_stmt>open(filename 'rb')<as>f<block_start><return>pickle.load(f)<block_end><block_end><def_stmt>padded_batch_input input indices=<none> dtype=K.floatx() maxlen=<none><block_start><if_stmt>indices<is><none><block_start>indices=np.arange(len(input))<block_end>batch_input=[input[i]<for>i indices]<line_sep><return>sequence.pad_sequences(batch_input maxlen dtype padding='post')<block_end><def_stmt>categorical_batch_target target classes indices=<none> dtype=K.floatx()<block_start><if_stmt>indices<is><none><block_start>indices=np.arange(len(target))<block_end>batch_target=[min(target[i] classes-1)<for>i indices]<line_sep><return>np_utils.to_categorical(batch_target classes).astype(dtype)<block_end><def_stmt>lengthGroup length<block_start><if_stmt>length<l>150<block_start><return>0<block_end><if_stmt>length<l>240<block_start><return>1<block_end><if_stmt>length<l>380<block_start><return>2<block_end><if_stmt>length<l>520<block_start><return>3<block_end><if_stmt>length<l>660<block_start><return>4<block_end><return>5<block_end><class_stmt>BatchGen(object)<block_start><def_stmt>__init__ self inputs targets=<none> batch_size=<none> stop=<false> shuffle=<true> balance=<false> dtype=K.floatx() flatten_targets=<false> sort_by_length=<false> group=<false> maxlen=<none><block_start><assert_stmt>len(set([len(i)<for>i inputs]))<eq>1<assert_stmt>(<not>shuffle<or><not>sort_by_length)<line_sep>self.inputs=inputs<line_sep>self.nb_samples=len(inputs[0])<line_sep>self.batch_size=batch_size<if>batch_size<else>self.nb_samples<line_sep>self.dtype=dtype<line_sep>self.stop=stop<line_sep>self.shuffle=shuffle<line_sep>self.balance=balance<line_sep>self.targets=targets<line_sep>self.flatten_targets=flatten_targets<if_stmt>isinstance(maxlen (list tuple))<block_start>self.maxlen=maxlen<block_end><else_stmt><block_start>self.maxlen=[maxlen]<times>len(inputs)<block_end>self.sort_by_length=<none><if_stmt>sort_by_length<block_start>self.sort_by_length=np.argsort([-len(p)<for>p inputs[0]])<block_end># if self.targets and self.balance: # self.class_weight = class_weight(self.targets) self.generator=self._generator()<line_sep>self._steps=-(-self.nb_samples<floordiv>self.batch_size)# round up self.groups=<none><if_stmt>group<is><not><false><block_start>indices=np.arange(self.nb_samples)<line_sep>ff=<lambda>i:lengthGroup(len(inputs[0][i]))<line_sep>indices=np.argsort([ff(i)<for>i indices])<line_sep>self.groups=itertools.groupby(indices ff)<line_sep>self.groups={k:np.array(list(v))<for>k,v self.groups}<block_end><block_end><def_stmt>_generator self<block_start><while_stmt><true><block_start><if_stmt>self.shuffle<block_start>permutation=np.random.permutation(self.nb_samples)<block_end><elif_stmt>self.sort_by_length<is><not><none><block_start>permutation=self.sort_by_length<block_end><elif_stmt>self.groups<is><not><none># permutation = np.arange(self.nb_samples) # tmp = permutation.copy() # for id in self.group_ids: # mask = (self.groups==id) # tmp[mask] = np.random.permutation(permutation[mask]) # permutation = tmp # import ipdb # ipdb.set_trace() <block_start><for_stmt>k,v 
self.groups.items()<block_start>np.random.shuffle(v)<block_end>tmp=np.concatenate(self.groups.values())<line_sep>batches=np.array_split(tmp self._steps)<line_sep>remainder=[]<if_stmt>len(batches[-1])<l>self._steps<block_start>remainder=batches[-1:]<line_sep>batches=batches[:-1]<block_end>shuffle(batches)<line_sep>batches<augadd>remainder<line_sep>permutation=np.concatenate(batches)<block_end><else_stmt><block_start>permutation=np.arange(self.nb_samples)<block_end>i=0<line_sep>longest=767<while_stmt>i<l>self.nb_samples<block_start><if_stmt>self.sort_by_length<is><not><none><block_start>bs=self.batch_size<times>767<floordiv>self.inputs[0][permutation[i]].shape[0]<block_end><else_stmt><block_start>bs=self.batch_size<block_end>indices=permutation[i:i+bs]<line_sep>i=i+bs<line_sep># for i in range(0, self.nb_samples, self.batch_size): # indices = permutation[i : i + self.batch_size] batch_X=[padded_batch_input(x indices self.dtype maxlen)<for>x,maxlen zip(self.inputs self.maxlen)]<line_sep>P=batch_X[0].shape[1]<if_stmt><not>self.targets<block_start><yield>batch_X<line_sep><continue><block_end>batch_Y=[categorical_batch_target(target P indices self.dtype)<for>target self.targets]<if_stmt>self.flatten_targets<block_start>batch_Y=np.concatenate(batch_Y axis=-1)<block_end><if_stmt><not>self.balance<block_start><yield>(batch_X batch_Y)<line_sep><continue><block_end># batch_W = np.array([self.class_weight[y] for y in batch_targets]) batch_W=np.array([bs/self.batch_size<for>x batch_X[0]]).astype(self.dtype)<line_sep><yield>(batch_X batch_Y batch_W)<block_end><if_stmt>self.stop<block_start><raise>StopIteration<block_end><block_end><block_end><def_stmt>__iter__ self<block_start><return>self.generator<block_end><def_stmt>next self<block_start><return>self.generator.next()<block_end><def_stmt>__next__ self<block_start><return>self.generator.__next__()<block_end><def_stmt>steps self<block_start><if_stmt>self.sort_by_length<is><none><block_start><return>self._steps<block_end>print("Steps was called")<if_stmt>self.shuffle<block_start>permutation=np.random.permutation(self.nb_samples)<block_end><elif_stmt>self.sort_by_length<is><not><none><block_start>permutation=self.sort_by_length<block_end><else_stmt><block_start>permutation=np.arange(self.nb_samples)<block_end>i=0<line_sep>longest=767<line_sep>self._steps=0<while_stmt>i<l>self.nb_samples<block_start>bs=self.batch_size<times>767<floordiv>self.inputs[0][permutation[i]].shape[0]<line_sep>i=i+bs<line_sep>self._steps<augadd>1<block_end><return>self._steps<block_end><block_end>batch_gen=BatchGen# for backward compatibility
"""Handle exceptions generated from 'user' code"""<import_stmt>sys<import_stmt>traceback<class_stmt>InvalidCommand(Exception)<block_start>"""Invalid command line argument."""<def_stmt>__init__ self *args **kwargs<block_start>self.not_found=kwargs.pop('not_found' <none>)<line_sep>super(InvalidCommand self).__init__(*args **kwargs)<line_sep>self.cmd_used=<none><line_sep>self.bin_name='doit'<block_end># default but might be overwriten <def_stmt>__str__ self<block_start><if_stmt>self.not_found<is><none><block_start><return>super(InvalidCommand self).__str__()<block_end><if_stmt>self.cmd_used<block_start>msg_task_not_found=('command `{cmd_used}` invalid parameter: "{not_found}".'+' Must be a task, or a target.\n'+'Type "{bin_name} list" to see available tasks')<line_sep><return>msg_task_not_found.format(**self.__dict__)<block_end><else_stmt><block_start>msg_cmd_task_not_found=('Invalid parameter: "{not_found}".'+' Must be a command, task, or a target.\n'+'Type "{bin_name} help" to see available commands.\n'+'Type "{bin_name} list" to see available tasks.\n')<line_sep><return>msg_cmd_task_not_found.format(**self.__dict__)<block_end><block_end><block_end><class_stmt>InvalidDodoFile(Exception)<block_start>"""Invalid dodo file"""<line_sep><pass><block_end><class_stmt>InvalidTask(Exception)<block_start>"""Invalid task instance. User error on specifying the task."""<line_sep><pass><block_end><class_stmt>CatchedException(object)<block_start>"""This used to save info from caught exceptions The traceback from the original exception is saved """<def_stmt>__init__ self msg exception=<none><block_start>self.message=msg<line_sep>self.traceback=''<if_stmt>isinstance(exception CatchedException)<block_start>self.traceback=exception.traceback<block_end><elif_stmt>exception<is><not><none># TODO remove doit-code part from traceback <block_start>self.traceback=traceback.format_exception(exception.__class__ exception sys.exc_info()[2])<block_end><block_end><def_stmt>get_msg self<block_start>"""return full exception description (includes traceback)"""<line_sep><return>"%s\n%s"%(self.message "".join(self.traceback))<block_end><def_stmt>get_name self<block_start>"""get Exception name"""<line_sep><return>self.__class__.__name__<block_end><def_stmt>__repr__ self<block_start><return>"(<%s> %s)"%(self.get_name() self.message)<block_end><def_stmt>__str__ self<block_start><return>"%s\n%s"%(self.get_name() self.get_msg())<block_end><block_end><class_stmt>TaskFailed(CatchedException)<block_start>"""Task execution was not successful."""<line_sep><pass><block_end><class_stmt>UnmetDependency(TaskFailed)<block_start>"""Task was not executed because a dependent task failed or is ignored"""<line_sep><pass><block_end><class_stmt>TaskError(CatchedException)<block_start>"""Error while trying to execute task."""<line_sep><pass><block_end><class_stmt>SetupError(CatchedException)<block_start>"""Error while trying to execute setup object"""<line_sep><pass><block_end><class_stmt>DependencyError(CatchedException)<block_start>"""Error while trying to check if task is up-to-date or saving task status"""<line_sep><pass><block_end>
""" owtf.__main__ ~~~~~~~~~~~~~ A __main__ method for OWTF so that internal services can be called as Python modules. """<import_stmt>sys<import_from_stmt>owtf.core main<if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_stmt>typing List Type<import_from_stmt>apiron.service.base ServiceBase<class_stmt>DiscoverableService(ServiceBase)<block_start>""" A Service whose hosts are determined via a host resolver. A host resolver is any class with a :func:`resolve` method that takes a service name as its sole argument and returns a list of host names that correspond to that service. """<line_sep>host_resolver_class:Type<line_sep>service_name:str<line_sep>@classmethod<def_stmt>get_hosts cls<arrow>List[str]<block_start><return>cls.host_resolver_class.resolve(cls.service_name)<block_end><def_stmt>__str__ self<arrow>str<block_start><return>self.service_name<block_end><def_stmt>__repr__ self<arrow>str<block_start>klass=self.__class__<line_sep><return>"{klass}(service_name={service_name}, host_resolver={host_resolver})".format(klass=klass.__name__ service_name=klass.service_name host_resolver=klass.host_resolver_class.__name__)<block_end><block_end>
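A hypothetical example of the contract described above; the resolver class, service name and host strings are invented, and a real resolver would normally query a service registry (Consul, Eureka, DNS SRV records, and so on).

class StaticResolver:
    @staticmethod
    def resolve(service_name):
        # stand-in for a registry lookup
        return ['users-1.internal:8000', 'users-2.internal:8000']

class UsersService(DiscoverableService):
    service_name = 'users-service'
    host_resolver_class = StaticResolver

print(UsersService.get_hosts())  # ['users-1.internal:8000', 'users-2.internal:8000']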
# Copyright 2018, <NAME> LLC # License: Apache License Version 2.0 # ------------------------------------------------------------------------- # registration.py - updates the database to say who is building something # and what the current settings are, which is used by the file serving # code to see if it is ok to serve up files in the buildroot. But also # for record keeping. # -------------------------------------------------------------------------- <import_from_stmt>datetime datetime<import_stmt>random<import_stmt>fcntl<import_stmt>subprocess<import_stmt>os<import_from_stmt>django.utils timezone<import_from_stmt>django.conf settings<import_from_stmt>vespene.common.logger Logger<import_from_stmt>vespene.models.worker Worker<line_sep>LOG=Logger()<line_sep>WORKER_ID_FILE="/etc/vespene/worker_id"<line_sep># ============================================================================= <class_stmt>RegistrationManager(object)<block_start><def_stmt>__init__ self builder build<block_start>self.builder=builder<line_sep>self.build=build<line_sep>self.project=self.build.project<block_end><def_stmt>create_worker_id self<block_start>wid=''.join(random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')<for>i range(50))<line_sep>fd=open(WORKER_ID_FILE "w+")<line_sep>fd.write(wid)<line_sep>fd.close()<line_sep><return>wid<block_end><def_stmt>get_worker_id self fd<block_start><return>fd.readlines()[0].strip()<block_end><def_stmt>get_worker_record self worker_id<block_start>qs=Worker.objects.filter(worker_uid=worker_id)<if_stmt><not>qs.exists()<block_start><return><none><block_end><return>qs.first()<block_end># worker_pool = models.ForeignKey('WorkerPool', null=False, on_delete=models.SET_NULL) # hostname = models.CharField(max_length=1024, null=True) # port = models.IntField(null=False, default=8080) # working_dir = models.CharField(max_length=1024, null=True) # first_checkin = models.DateTimeField(null=True, blank=True) # last_checkin = models.DateTimeField(null=True, blank=True) # fileserving_enabled = models.BooleanField(null=False, default=False) <def_stmt>get_hostname self<block_start><if_stmt>settings.FILESERVING_HOSTNAME<block_start><return>settings.FILESERVING_HOSTNAME<block_end><return>self.guess_hostname()<block_end><def_stmt>guess_hostname self<block_start><return>subprocess.check_output("hostname").decode('utf-8').strip()<block_end><def_stmt>get_port self<block_start><if_stmt>settings.FILESERVING_PORT<block_start><return>settings.FILESERVING_PORT<block_end><else_stmt><block_start><return>8000<block_end><block_end><def_stmt>get_build_root self<block_start><return>settings.BUILD_ROOT<block_end><def_stmt>get_fileserving_enabled self<block_start><return>settings.FILESERVING_ENABLED<block_end><def_stmt>create_worker_record self worker_id<block_start>now=datetime.now(tz=timezone.utc)<line_sep>obj=Worker(worker_uid=worker_id hostname=self.get_hostname() port=self.get_port() build_root=self.get_build_root() first_checkin=now last_checkin=now fileserving_enabled=self.get_fileserving_enabled())<line_sep>obj.save()<line_sep><return>obj<block_end><def_stmt>update_worker_record self worker<block_start>now=datetime.now(tz=timezone.utc)<line_sep>worker.hostname=self.get_hostname()<line_sep>worker.port=self.get_port()<line_sep>worker.build_root=self.get_build_root()<line_sep>worker.last_checkin=now<line_sep>worker.fileserving_enabled=self.get_fileserving_enabled()<line_sep>worker.save()<line_sep><return>worker<block_end><def_stmt>go self<block_start>""" Trigger next stage of 
pipeline if build was successful """<if_stmt><not>os.path.exists(WORKER_ID_FILE)<block_start>worker_id=self.create_worker_id()<block_end>fd=open(WORKER_ID_FILE "r")<line_sep>fcntl.flock(fd fcntl.LOCK_EX)<line_sep>worker_id=self.get_worker_id(fd)<line_sep>worker_record=self.get_worker_record(worker_id)<if_stmt><not>worker_record<block_start>worker_record=self.create_worker_record(worker_id)<block_end><else_stmt><block_start>worker_record=self.update_worker_record(worker_record)<block_end>self.build.worker=worker_record<line_sep>self.build.save()<line_sep>fcntl.flock(fd fcntl.LOCK_UN)<block_end><block_end>
<import_stmt>rumps<import_stmt>sys<import_stmt>icon_manager<import_from_stmt>datetime timedelta<import_stmt>timekeeper<import_stmt>os<line_sep># pyinstaller --onefile -w --add-data "Icons/:Icons" --icon="Icons/timeglass.png" --clean timeglass.spec # rumps.debug_mode(True) <class_stmt>TimerApp(rumps.App)<block_start><def_stmt>__init__ self initial_seconds<block_start>super(TimerApp self).__init__("")<line_sep>self.mode="hourglass"<line_sep>self.timekeeper=timekeeper.Timer(initial_seconds)<line_sep>self.template=<true><line_sep>self.im=icon_manager.Icon_manager(initial_seconds)<line_sep>self.change_icon()<line_sep>self.remaining_sec=rumps.MenuItem(self.timekeeper.get_remaining_string())<line_sep>self.menu=[self.remaining_sec]<line_sep>self.next_icon_change=self.im.icon_interval<line_sep>self.rumps_timer=rumps.Timer(self.tick 0.5)<line_sep>self.rumps_timer.callback(self.tick)<line_sep>self.invert_counter=0<line_sep>self.notified=<false><line_sep>self.sound=<true><block_end><def_stmt>change_icon self<block_start>print("frame:" self.im.icon_counter)<line_sep>self.icon=self.im.get_icon_path()<block_end><def_stmt>change_remaining self<block_start>self.remaining_sec.title=self.timekeeper.get_remaining_string()<block_end><def_stmt>tick self _<block_start><if_stmt>self.timekeeper.tick()<block_start>self.notDone=<true><line_sep>self.invert_counter=0<line_sep>self.change_remaining()<if_stmt>self.timekeeper.elapsed<ge>self.next_icon_change<block_start>self.im.icon_counter=int(self.timekeeper.elapsed/self.im.icon_interval)+1#1-89 self.change_icon()<line_sep>self.next_icon_change<augadd>self.im.icon_interval<block_end><block_end><if_stmt>self.timekeeper.done<block_start>self.im.active=<false><line_sep>self.change_icon()<if_stmt><not>self.notified<block_start>self.notify()<line_sep>self.notified=<true><block_end><if_stmt>self.notDone<block_start>self.icon=self.im.invert()<line_sep>self.invert_counter<augadd>1<if_stmt>self.invert_counter<g>5<block_start>self.notDone=<false><line_sep>self.rumps_timer.stop()<line_sep>self.reset()<block_end><block_end><block_end><block_end><def_stmt>notify self<block_start>title="Time is up!"<line_sep>text=""<line_sep>sound="Glass"<try_stmt><block_start><if_stmt>self.sound<block_start>os.system("""osascript -e 'display notification "{}" with title "{}" sound name "{}"'""".format(text title sound))<block_end><else_stmt><block_start>os.system("""osascript -e 'display notification "{}" with title "{}"'""".format(text title sound))<block_end><block_end><except_stmt><block_start>print("Could not send notification")<block_end><block_end>@rumps.clicked("Start" key="s")<def_stmt>pause self sender<block_start><if_stmt>sender.title<eq>"Pause"<block_start>self.timekeeper.pause_timer()<line_sep>self.rumps_timer.stop()<line_sep>sender.title="Start"<block_end><elif_stmt>sender.title<eq>"Start"<block_start>self.timekeeper.start()<line_sep>self.im.active=<true><line_sep>self.change_icon()<line_sep>self.rumps_timer.start()<line_sep>sender.title="Pause"<block_end><block_end>@rumps.clicked("Reset" key="r")<def_stmt>reset_button self sender<block_start>self.reset()<line_sep>self.menu["Start"].title="Start"<block_end><def_stmt>reset self<block_start>self.timekeeper.reset()<line_sep>self.rumps_timer.stop()<line_sep>self.im.active=<false><line_sep>self.im.reset()<line_sep>self.change_icon()<line_sep>self.change_remaining()<line_sep>self.next_icon_change=self.im.icon_interval<line_sep>self.menu["Start"].title="Start"<line_sep>self.notified=<false><block_end><def_stmt>string_to_sec self 
text<block_start>nums=text.split(":")<line_sep>nums.reverse()<line_sep>seconds=0<for_stmt>i,n enumerate(nums)<block_start><if_stmt>i<eq>0<block_start>seconds<augadd>int(n)<block_end><else_stmt><block_start>seconds<augadd>(60<power>i)<times>int(n)<line_sep>print((i<times>60)<times>int(n))<block_end><block_end><return>seconds<block_end><def_stmt>validate_input self text<block_start>texts=text.split(":")<if_stmt>len(texts)<g>3<block_start><return><false><block_end><for_stmt>s texts<block_start><try_stmt><block_start>int(s)<block_end><except_stmt><block_start><return><false><block_end><block_end><return><true><block_end>@rumps.clicked("Set time" key="t")<def_stmt>set_time self _<block_start>self.timekeeper.pause_timer()<line_sep>response=rumps.Window("Enter time: (hours:minutes:seconds)").run()<if_stmt>response.clicked<block_start><if_stmt><not>self.validate_input(response.text)<block_start>skip=<true><line_sep>rumps.alert("Does not compute! Please try again.")<block_end><else_stmt><block_start>seconds=self.string_to_sec(response.text)<line_sep>print(seconds)<line_sep>skip=<false><block_end><if_stmt><not>skip<block_start>self.rumps_timer.stop()<line_sep>self.timekeeper.set_time(seconds)<line_sep>self.im.set_icon_interval(seconds)<line_sep>self.im.reset()<line_sep>self.im.active=<false><line_sep>self.next_icon_change=self.im.icon_interval<line_sep>self.change_icon()<line_sep>self.change_remaining()<line_sep>self.menu["Start"].title="Start"<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>default_secounds=60<times>60<line_sep>TimerApp(default_secounds).run()<block_end>
# Copyright <NAME> 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) <import_from_future_stmt> print_function<line_sep>''' >>> from iterator_ext import * >>> from input_iterator import * >>> x = list_int() >>> x.push_back(1) >>> x.back() 1 >>> x.push_back(3) >>> x.push_back(5) >>> for y in x: ... print(y) 1 3 5 >>> z = range(x) >>> for y in z: ... print(y) 1 3 5 Range2 wraps a transform_iterator which doubles the elements it traverses. This proves we can wrap input iterators >>> z2 = range2(x) >>> for y in z2: ... print(y) 2 6 10 >>> l2 = two_lists() >>> for y in l2.primes: ... print(y) 2 3 5 7 11 13 >>> for y in l2.evens: ... print(y) 2 4 6 8 10 12 >>> ll = list_list() >>> ll.push_back(x) >>> x.push_back(7) >>> ll.push_back(x) >>> for a in ll: #doctest: +NORMALIZE_WHITESPACE ... for b in a: ... print(b, end='') ... print('') ... 1 3 5 1 3 5 7 '''<def_stmt>run args=<none><block_start><import_stmt>sys<import_stmt>doctest<if_stmt>args<is><not><none><block_start>sys.argv=args<block_end><return>doctest.testmod(sys.modules.get(__name__))<block_end><if_stmt>__name__<eq>'__main__'<block_start>print("running...")<import_stmt>sys<line_sep>status=run()[0]<if_stmt>(status<eq>0)<block_start>print("Done.")<block_end>sys.exit(status)<block_end>
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>openvino.tools.mo.front.common.partial_infer.utils mo_array<import_from_stmt>openvino.tools.mo.ops.proposal ProposalOp<import_from_stmt>openvino.tools.mo.front.caffe.collect_attributes merge_attrs<import_from_stmt>openvino.tools.mo.front.extractor FrontExtractorOp<class_stmt>ProposalFrontExtractor(FrontExtractorOp)<block_start>op='Proposal'<line_sep>enabled=<true><line_sep>@classmethod<def_stmt>extract cls node<block_start>proto_layer=node.pb<line_sep>param=proto_layer.proposal_param<line_sep>update_attrs={'feat_stride':param.feat_stride 'base_size':param.base_size 'min_size':param.min_size 'ratio':mo_array(param.ratio) 'scale':mo_array(param.scale) 'pre_nms_topn':param.pre_nms_topn 'post_nms_topn':param.post_nms_topn 'nms_thresh':param.nms_thresh}<line_sep>mapping_rule=merge_attrs(param update_attrs)<line_sep># update the attributes of the node ProposalOp.update_node_stat(node mapping_rule)<line_sep><return>cls.enabled<block_end><block_end>
""" 提供几种常用的控制器。 这些验证器通常需要提供一些参数进行一次调用,返回的结果才是真正的验证器,其中的技巧在于通过闭包使要控制的对象能够被内部函数访问。 版本: 1.3.0+ """<import_stmt>re<import_from_stmt>nonebot CommandSession<import_from_stmt>nonebot.helpers render_expression<def_stmt>handle_cancellation session:CommandSession<block_start>""" 在用户发送 `算了`、`不用了`、`取消吧`、`停` 之类的话的时候,结束当前传入的命令会话(调用 `session.finish()`),并发送配置项 `SESSION_CANCEL_EXPRESSION` 所填的内容。 如果不是上述取消指令,则将输入原样输出。 参数: session: 要控制的命令会话 """<def_stmt>control value<block_start><if_stmt>_is_cancellation(value)<is><true><block_start>session.finish(render_expression(session.bot.config.SESSION_CANCEL_EXPRESSION))<block_end><return>value<block_end><return>control<block_end><def_stmt>_is_cancellation sentence:str<arrow>bool<block_start><for_stmt>kw ('算' '别' '不' '停' '取消')<block_start><if_stmt>kw<in>sentence# a keyword matches <block_start><break><block_end><block_end><else_stmt># no keyword matches <block_start><return><false><block_end><if_stmt>re.match(r'^那?[算别不停]\w{0,3}了?吧?$' sentence)<or>re.match(r'^那?(?:[给帮]我)?取消了?吧?$' sentence)<block_start><return><true><block_end><return><false><block_end>__all__=['handle_cancellation' ]<line_sep>
<import_from_stmt>desktop_local_tests.public_ip_during_disruption PublicIPDuringDisruptionTestCase<import_from_stmt>desktop_local_tests.windows.windows_reorder_adapters_disrupter WindowsReorderAdaptersDisrupter<class_stmt>TestWindowsPublicIPDisruptReorderAdapters(PublicIPDuringDisruptionTestCase)<block_start>'''Summary: Tests whether traffic leaving the user's device has the public IP hidden when the adapter order is changed. Details: This test will connect to VPN then swap the priority of the primary and secondary network adapters. The test then queries a webpage to detect its public IP. Discussion: It's not 100% clear if, in the real world, adapters can change their order without user involvement. It is still however a good stress test of the application. On Windows adapter order is determined by the interface metric. It can be manually set but otherwise it is determined by the system by deciding how "good" an adapter is, e.g. what is the throughput. In theory that means metrics can change dynamically. Weaknesses: The time taken to perform each IP request is relatively long. Tests using IPResponder should be preferred over these tests. Scenarios: Requires two active adapters. TODO: Consider a variant which changes the network "Location". This is much more likely to be something a user might do. '''<def_stmt>__init__ self devices parameters<block_start>super().__init__(WindowsReorderAdaptersDisrupter devices parameters)<block_end><block_end>
<import_stmt>json<import_stmt>logging<import_from_stmt>http HTTPStatus<import_from_stmt>typing Any Dict List Optional Tuple Type Union<import_stmt>werkzeug<import_from_stmt>flask Blueprint Flask Response abort jsonify<import_from_stmt>flask.views MethodView<import_from_stmt>flask_cors CORS<import_from_stmt>gevent.pywsgi WSGIServer<import_from_stmt>geventwebsocket Resource<as>WebsocketResource WebSocketServer<import_from_stmt>marshmallow Schema<import_from_stmt>marshmallow.exceptions ValidationError<import_from_stmt>webargs.flaskparser parser<import_from_stmt>werkzeug.exceptions NotFound<import_from_stmt>rotkehlchen.api.rest RestAPI api_response wrap_in_fail_result<import_from_stmt>rotkehlchen.api.v1.parser ignore_kwarg_parser resource_parser<import_from_stmt>rotkehlchen.api.v1.resources AaveBalancesResource AaveHistoryResource AccountingReportDataResource AccountingReportsResource AdexBalancesResource AdexHistoryResource AllAssetsResource AllBalancesResource AssetIconsResource AssetMovementsResource AssetsReplaceResource AssetsTypesResource AssetUpdatesResource AssociatedLocations AsyncTasksResource AvalancheTransactionsResource BalancerBalancesResource BalancerEventsHistoryResource BalancerTradesHistoryResource BinanceAvailableMarkets BinanceUserMarkets BlockchainBalancesResource BlockchainsAccountsResource BTCXpubResource CompoundBalancesResource CompoundHistoryResource CounterpartiesResource CurrentAssetsPriceResource DatabaseBackupsResource DatabaseInfoResource DataImportResource DBSnapshotDeletingResource DBSnapshotDownloadingResource DBSnapshotExportingResource DBSnapshotImportingResource DefiBalancesResource ERC20TokenInfo ERC20TokenInfoAVAX Eth2DailyStatsResource Eth2StakeDepositsResource Eth2StakeDetailsResource Eth2ValidatorsResource EthereumAirdropsResource EthereumAssetsResource EthereumModuleDataResource EthereumModuleResource EthereumTransactionsResource ExchangeBalancesResource ExchangeRatesResource ExchangesDataResource ExchangesResource ExternalServicesResource HistoricalAssetsPriceResource HistoryActionableItemsResource HistoryBaseEntryResource HistoryDownloadingResource HistoryExportingResource HistoryProcessingResource HistoryStatusResource IgnoredActionsResource IgnoredAssetsResource InfoResource LedgerActionsResource LiquityStakingHistoryResource LiquityStakingResource LiquityTrovesHistoryResource LiquityTrovesResource LoopringBalancesResource MakerdaoDSRBalanceResource MakerdaoDSRHistoryResource MakerdaoVaultDetailsResource MakerdaoVaultsResource ManuallyTrackedBalancesResource MessagesResource NamedEthereumModuleDataResource NamedOracleCacheResource NFTSBalanceResource NFTSResource OraclesResource OwnedAssetsResource PeriodicDataResource PickleDillResource PingResource QueriedAddressesResource ReverseEnsResource SettingsResource StakingResource StatisticsAssetBalanceResource StatisticsNetvalueResource StatisticsRendererResource StatisticsValueDistributionResource SushiswapBalancesResource SushiswapEventsHistoryResource SushiswapTradesHistoryResource TagsResource TradesResource UniswapBalancesResource UniswapEventsHistoryResource UniswapTradesHistoryResource UserAssetsResource UserPasswordChangeResource UserPremiumKeyResource UserPremiumSyncResource UsersByNameResource UsersResource WatchersResource YearnVaultsBalancesResource YearnVaultsHistoryResource YearnVaultsV2BalancesResource YearnVaultsV2HistoryResource create_blueprint <import_from_stmt>rotkehlchen.api.websockets.notifier RotkiNotifier RotkiWSApp<import_from_stmt>rotkehlchen.logging 
RotkehlchenLogsAdapter<line_sep>URLS=List[Union[Tuple[str Type[MethodView]] Tuple[str Type[MethodView] str] ]]<line_sep>URLS_V1:URLS=[('/users' UsersResource) ('/watchers' WatchersResource) ('/users/<string:name>' UsersByNameResource) ('/users/<string:name>/password' UserPasswordChangeResource) ('/premium' UserPremiumKeyResource) ('/premium/sync' UserPremiumSyncResource) ('/settings' SettingsResource) ('/tasks/' AsyncTasksResource) ('/tasks/<int:task_id>' AsyncTasksResource 'specific_async_tasks_resource') ('/exchange_rates' ExchangeRatesResource) ('/external_services/' ExternalServicesResource) ('/oracles' OraclesResource) ('/oracles/<string:oracle>/cache' NamedOracleCacheResource) ('/exchanges' ExchangesResource) ('/exchanges/balances' ExchangeBalancesResource) ('/exchanges/balances/<string:location>' ExchangeBalancesResource 'named_exchanges_balances_resource' ) ('/assets/<string:asset>/icon' AssetIconsResource) ('/trades' TradesResource) ('/ledgeractions' LedgerActionsResource) ('/asset_movements' AssetMovementsResource) ('/tags' TagsResource) ('/exchanges/binance/pairs' BinanceAvailableMarkets) ('/exchanges/binance/pairs/<string:name>' BinanceUserMarkets) ('/exchanges/data/' ExchangesDataResource) ('/exchanges/data/<string:location>' ExchangesDataResource 'named_exchanges_data_resource') ('/balances/blockchains' BlockchainBalancesResource) ('/balances/blockchains/<string:blockchain>' BlockchainBalancesResource 'named_blockchain_balances_resource' ) ('/balances/' AllBalancesResource) ('/balances/manual' ManuallyTrackedBalancesResource) ('/statistics/netvalue' StatisticsNetvalueResource) ('/statistics/balance/<string:asset>' StatisticsAssetBalanceResource) ('/statistics/value_distribution' StatisticsValueDistributionResource) ('/statistics/renderer' StatisticsRendererResource) ('/messages/' MessagesResource) ('/periodic/' PeriodicDataResource) ('/history/' HistoryProcessingResource) ('/history/status' HistoryStatusResource) ('/history/export/' HistoryExportingResource) ('/history/download/' HistoryDownloadingResource) ('/history/events' HistoryBaseEntryResource) ('/history/actionable_items' HistoryActionableItemsResource) ('/reports/' AccountingReportsResource) ('/reports/<int:report_id>' AccountingReportsResource 'per_report_resource' ) ('/reports/<int:report_id>/data' AccountingReportDataResource 'per_report_data_resource' ) ('/queried_addresses' QueriedAddressesResource) ('/blockchains/ETH/transactions' EthereumTransactionsResource) ('/blockchains/ETH/transactions/<string:address>' EthereumTransactionsResource 'per_address_ethereum_transactions_resource' ) ('/blockchains/ETH2/validators' Eth2ValidatorsResource) ('/blockchains/ETH2/stake/deposits' Eth2StakeDepositsResource) ('/blockchains/ETH2/stake/details' Eth2StakeDetailsResource) ('/blockchains/ETH2/stake/dailystats' Eth2DailyStatsResource) ('/blockchains/ETH/defi' DefiBalancesResource) ('/blockchains/ETH/airdrops' EthereumAirdropsResource) ('/blockchains/ETH/erc20details/' ERC20TokenInfo) ('/blockchains/ETH/modules/<string:module_name>/data' NamedEthereumModuleDataResource) ('/blockchains/ETH/modules/data' EthereumModuleDataResource) ('/blockchains/ETH/modules/data/counterparties' CounterpartiesResource) ('/blockchains/ETH/modules/' EthereumModuleResource) ('/blockchains/ETH/modules/makerdao/dsrbalance' MakerdaoDSRBalanceResource) ('/blockchains/ETH/modules/makerdao/dsrhistory' MakerdaoDSRHistoryResource) ('/blockchains/ETH/modules/makerdao/vaults' MakerdaoVaultsResource) ('/blockchains/ETH/modules/makerdao/vaultdetails' 
MakerdaoVaultDetailsResource) ('/blockchains/ETH/modules/aave/balances' AaveBalancesResource) ('/blockchains/ETH/modules/aave/history' AaveHistoryResource) ('/blockchains/ETH/modules/adex/balances' AdexBalancesResource) ('/blockchains/ETH/modules/adex/history' AdexHistoryResource) ('/blockchains/ETH/modules/balancer/balances' BalancerBalancesResource) ('/blockchains/ETH/modules/balancer/history/trades' BalancerTradesHistoryResource) ('/blockchains/ETH/modules/balancer/history/events' BalancerEventsHistoryResource) ('/blockchains/ETH/modules/compound/balances' CompoundBalancesResource) ('/blockchains/ETH/modules/compound/history' CompoundHistoryResource) ('/blockchains/ETH/modules/uniswap/balances' UniswapBalancesResource) ('/blockchains/ETH/modules/uniswap/history/events' UniswapEventsHistoryResource) ('/blockchains/ETH/modules/uniswap/history/trades' UniswapTradesHistoryResource) ('/blockchains/ETH/modules/sushiswap/balances' SushiswapBalancesResource) ('/blockchains/ETH/modules/sushiswap/history/events' SushiswapEventsHistoryResource) ('/blockchains/ETH/modules/sushiswap/history/trades' SushiswapTradesHistoryResource) ('/blockchains/ETH/modules/yearn/vaults/balances' YearnVaultsBalancesResource) ('/blockchains/ETH/modules/yearn/vaults/history' YearnVaultsHistoryResource) ('/blockchains/ETH/modules/yearn/vaultsv2/balances' YearnVaultsV2BalancesResource) ('/blockchains/ETH/modules/yearn/vaultsv2/history' YearnVaultsV2HistoryResource) ('/blockchains/ETH/modules/liquity/balances' LiquityTrovesResource) ('/blockchains/ETH/modules/liquity/events/trove' LiquityTrovesHistoryResource) ('/blockchains/ETH/modules/liquity/events/staking' LiquityStakingHistoryResource) ('/blockchains/ETH/modules/liquity/staking' LiquityStakingResource) ('/blockchains/ETH/modules/pickle/dill' PickleDillResource) ('/blockchains/ETH/modules/loopring/balances' LoopringBalancesResource) ('/blockchains/<string:blockchain>' BlockchainsAccountsResource) ('/blockchains/BTC/xpub' BTCXpubResource) ('/blockchains/AVAX/transactions' AvalancheTransactionsResource) ('/blockchains/AVAX/transactions/<string:address>' AvalancheTransactionsResource 'per_address_avalanche_transactions_resource' ) ('/blockchains/AVAX/erc20details/' ERC20TokenInfoAVAX) ('/assets' OwnedAssetsResource) ('/assets/types' AssetsTypesResource) ('/assets/replace' AssetsReplaceResource) ('/assets/all' AllAssetsResource) ('/assets/ethereum' EthereumAssetsResource) ('/assets/prices/current' CurrentAssetsPriceResource) ('/assets/prices/historical' HistoricalAssetsPriceResource) ('/assets/ignored' IgnoredAssetsResource) ('/assets/updates' AssetUpdatesResource) ('/assets/user' UserAssetsResource) ('/actions/ignored' IgnoredActionsResource) ('/info' InfoResource) ('/ping' PingResource) ('/import' DataImportResource) ('/nfts' NFTSResource) ('/nfts/balances' NFTSBalanceResource) ('/database/info' DatabaseInfoResource) ('/database/backups' DatabaseBackupsResource) ('/locations/associated' AssociatedLocations) ('/staking/kraken' StakingResource) ('/snapshot/download' DBSnapshotDownloadingResource) ('/snapshot/export' DBSnapshotExportingResource) ('/snapshot/import' DBSnapshotImportingResource) ('/snapshot/delete' DBSnapshotDeletingResource) ('/ens/reverse' ReverseEnsResource) ]<line_sep>logger=logging.getLogger(__name__)<line_sep>log=RotkehlchenLogsAdapter(logger)<def_stmt>setup_urls rest_api:RestAPI blueprint:Blueprint urls:URLS <arrow><none><block_start><for_stmt>url_tuple urls<block_start><if_stmt>len(url_tuple)<eq>2<block_start>route,resource_cls=url_tuple# type: 
ignore endpoint=resource_cls.__name__.lower()<block_end><elif_stmt>len(url_tuple)<eq>3<block_start>route,resource_cls,endpoint=url_tuple# type: ignore <block_end><else_stmt><block_start><raise>ValueError(f"Invalid URL format: {url_tuple!r}")<block_end>blueprint.add_url_rule(route view_func=resource_cls.as_view(endpoint rest_api_object=rest_api) )<block_end><block_end><def_stmt>endpoint_not_found e:NotFound<arrow>Response<block_start>msg='invalid endpoint'<line_sep># The isinstance check is because I am not sure if `e` is always going to # be a "NotFound" error here <if_stmt>isinstance(e NotFound)<block_start>msg=e.description<block_end><return>api_response(wrap_in_fail_result(msg) HTTPStatus.NOT_FOUND)<block_end>@parser.error_handler# type: ignore @resource_parser.error_handler@ignore_kwarg_parser.error_handler<def_stmt>handle_request_parsing_error err:ValidationError _request:werkzeug.local.LocalProxy _schema:Schema error_status_code:Optional[int] # pylint: disable=unused-argument error_headers:Optional[Dict] # pylint: disable=unused-argument <arrow><none><block_start>""" This handles request parsing errors generated for example by schema field validation failing."""<line_sep>msg=str(err)<if_stmt>isinstance(err.messages dict)# first key is just the location. Ignore <block_start>key=list(err.messages.keys())[0]<line_sep>msg=json.dumps(err.messages[key])<block_end><elif_stmt>isinstance(err.messages list)<block_start>msg=','.join(err.messages)<block_end>err_response=jsonify(result=<none> message=msg)<line_sep>err_response.status_code=HTTPStatus.BAD_REQUEST<line_sep>abort(err_response)<block_end><class_stmt>APIServer()<block_start>_api_prefix='/api/1'<def_stmt>__init__ self rest_api:RestAPI ws_notifier:RotkiNotifier cors_domain_list:List[str]=<none> <arrow><none><block_start>flask_app=Flask(__name__)<if_stmt>cors_domain_list<block_start>CORS(flask_app origins=cors_domain_list)<block_end>blueprint=create_blueprint(self._api_prefix)<line_sep>setup_urls(blueprint=blueprint rest_api=rest_api urls=URLS_V1 )<line_sep>self.rest_api=rest_api<line_sep>self.rotki_notifier=ws_notifier<line_sep>self.flask_app=flask_app<line_sep>self.blueprint=blueprint<line_sep>self.wsgiserver:Optional[WSGIServer]=<none><line_sep>self.flask_app.register_blueprint(self.blueprint)<line_sep>self.ws_server:Optional[WebSocketServer]=<none><line_sep>self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)<line_sep>self.flask_app.register_error_handler(Exception self.unhandled_exception)<block_end>@staticmethod<def_stmt>unhandled_exception exception:Exception<arrow>Response<block_start>""" Flask.errorhandler when an exception wasn't correctly handled """<line_sep>log.critical('Unhandled exception when processing endpoint request' exc_info=<true> exception=str(exception) )<line_sep><return>api_response(wrap_in_fail_result(str(exception)) HTTPStatus.INTERNAL_SERVER_ERROR)<block_end><def_stmt>run self host:str='127.0.0.1' port:int=5042 **kwargs:Any<arrow><none><block_start>"""This is only used for the data faker and not used in production"""<line_sep>self.flask_app.run(host=host port=port **kwargs)<block_end><def_stmt>start self host:str='127.0.0.1' rest_port:int=5042 websockets_port:int=5043 <arrow><none><block_start>"""This is used to start the API server in production"""<line_sep>wsgi_logger=logging.getLogger(__name__+'.pywsgi')<line_sep>self.wsgiserver=WSGIServer(listener=(host rest_port) application=self.flask_app log=wsgi_logger error_log=wsgi_logger )<line_sep>msg=f'rotki REST API server is running at: 
{host}:{rest_port}'<line_sep>print(msg)<line_sep>log.info(msg)<line_sep>self.wsgiserver.start()<line_sep>self.ws_server=WebSocketServer(listener=(host websockets_port) application=WebsocketResource([('^/' RotkiWSApp) ]) debug=<false> environ={'rotki_notifier':self.rotki_notifier} )<line_sep>msg=f'rotki Websockets API server is running at: {host}:{websockets_port}'<line_sep>print(msg)<line_sep>log.info(msg)<line_sep>self.ws_server.start()<block_end><def_stmt>stop self timeout:int=5<arrow><none><block_start>"""Stops the API server. If handlers are running after timeout they are killed"""<if_stmt>self.wsgiserver<is><not><none><block_start>self.wsgiserver.stop(timeout)<line_sep>self.wsgiserver=<none><block_end><if_stmt>self.ws_server<is><not><none><block_start>self.ws_server.stop(timeout)<line_sep>self.ws_server=<none><block_end>self.rest_api.stop()<block_end><block_end>
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'gui.ui' ## ## Created by: Qt User Interface Compiler version 5.15.2 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ <import_from_stmt>PySide2.QtCore *<import_from_stmt>PySide2.QtGui *<import_from_stmt>PySide2.QtWidgets *<import_from_stmt>.matplotlibwidget MatplotlibWidget<import_from_stmt>.icons_rc *<class_stmt>Ui_Dialog(object)<block_start><def_stmt>setupUi self Dialog<block_start><if_stmt><not>Dialog.objectName()<block_start>Dialog.setObjectName(u"Dialog")<block_end>Dialog.resize(1183 675)<line_sep>self.gridLayout=QGridLayout(Dialog)<line_sep>self.gridLayout.setObjectName(u"gridLayout")<line_sep>self.gridLayout.setContentsMargins(1 1 1 1)<line_sep>self.tabWidget=QTabWidget(Dialog)<line_sep>self.tabWidget.setObjectName(u"tabWidget")<line_sep>self.tab_2=QWidget()<line_sep>self.tab_2.setObjectName(u"tab_2")<line_sep>self.verticalLayout_6=QVBoxLayout(self.tab_2)<line_sep>self.verticalLayout_6.setObjectName(u"verticalLayout_6")<line_sep>self.verticalLayout_6.setContentsMargins(0 0 0 0)<line_sep>self.main_splitter=QSplitter(self.tab_2)<line_sep>self.main_splitter.setObjectName(u"main_splitter")<line_sep>self.main_splitter.setOrientation(Qt.Horizontal)<line_sep>self.frame_8=QFrame(self.main_splitter)<line_sep>self.frame_8.setObjectName(u"frame_8")<line_sep>self.frame_8.setFrameShape(QFrame.NoFrame)<line_sep>self.frame_8.setFrameShadow(QFrame.Raised)<line_sep>self.verticalLayout_5=QVBoxLayout(self.frame_8)<line_sep>self.verticalLayout_5.setObjectName(u"verticalLayout_5")<line_sep>self.verticalLayout_5.setContentsMargins(0 0 0 0)<line_sep>self.frame_5=QFrame(self.frame_8)<line_sep>self.frame_5.setObjectName(u"frame_5")<line_sep>sizePolicy=QSizePolicy(QSizePolicy.Minimum QSizePolicy.Minimum)<line_sep>sizePolicy.setHorizontalStretch(0)<line_sep>sizePolicy.setVerticalStretch(0)<line_sep>sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())<line_sep>self.frame_5.setSizePolicy(sizePolicy)<line_sep>self.frame_5.setFrameShape(QFrame.NoFrame)<line_sep>self.frame_5.setFrameShadow(QFrame.Raised)<line_sep>self.horizontalLayout=QHBoxLayout(self.frame_5)<line_sep>self.horizontalLayout.setObjectName(u"horizontalLayout")<line_sep>self.label_9=QLabel(self.frame_5)<line_sep>self.label_9.setObjectName(u"label_9")<line_sep>self.horizontalLayout.addWidget(self.label_9)<line_sep>self.name_lineEdit=QLineEdit(self.frame_5)<line_sep>self.name_lineEdit.setObjectName(u"name_lineEdit")<line_sep>self.horizontalLayout.addWidget(self.name_lineEdit)<line_sep>self.verticalLayout_5.addWidget(self.frame_5)<line_sep>self.frame_6=QFrame(self.frame_8)<line_sep>self.frame_6.setObjectName(u"frame_6")<line_sep>sizePolicy.setHeightForWidth(self.frame_6.sizePolicy().hasHeightForWidth())<line_sep>self.frame_6.setSizePolicy(sizePolicy)<line_sep>self.frame_6.setFrameShape(QFrame.NoFrame)<line_sep>self.frame_6.setFrameShadow(QFrame.Raised)<line_sep>self.horizontalLayout_3=QHBoxLayout(self.frame_6)<line_sep>self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")<line_sep>self.horizontalSpacer_2=QSpacerItem(40 20 QSizePolicy.Expanding 
QSizePolicy.Minimum)<line_sep>self.horizontalLayout_3.addItem(self.horizontalSpacer_2)<line_sep>self.label_8=QLabel(self.frame_6)<line_sep>self.label_8.setObjectName(u"label_8")<line_sep>self.horizontalLayout_3.addWidget(self.label_8)<line_sep>self.frequency_doubleSpinBox=QDoubleSpinBox(self.frame_6)<line_sep>self.frequency_doubleSpinBox.setObjectName(u"frequency_doubleSpinBox")<line_sep>self.frequency_doubleSpinBox.setDecimals(0)<line_sep>self.frequency_doubleSpinBox.setValue(50.000000000000000)<line_sep>self.horizontalLayout_3.addWidget(self.frequency_doubleSpinBox)<line_sep>self.label_11=QLabel(self.frame_6)<line_sep>self.label_11.setObjectName(u"label_11")<line_sep>self.horizontalLayout_3.addWidget(self.label_11)<line_sep>self.rho_doubleSpinBox=QDoubleSpinBox(self.frame_6)<line_sep>self.rho_doubleSpinBox.setObjectName(u"rho_doubleSpinBox")<line_sep>self.rho_doubleSpinBox.setMaximum(9999999.000000000000000)<line_sep>self.rho_doubleSpinBox.setValue(100.000000000000000)<line_sep>self.horizontalLayout_3.addWidget(self.rho_doubleSpinBox)<line_sep>self.verticalLayout_5.addWidget(self.frame_6)<line_sep>self.splitter=QSplitter(self.frame_8)<line_sep>self.splitter.setObjectName(u"splitter")<line_sep>self.splitter.setMaximumSize(QSize(16777215 16777215))<line_sep>self.splitter.setOrientation(Qt.Vertical)<line_sep>self.frame_3=QFrame(self.splitter)<line_sep>self.frame_3.setObjectName(u"frame_3")<line_sep>self.frame_3.setFrameShape(QFrame.NoFrame)<line_sep>self.frame_3.setFrameShadow(QFrame.Raised)<line_sep>self.verticalLayout_8=QVBoxLayout(self.frame_3)<line_sep>self.verticalLayout_8.setObjectName(u"verticalLayout_8")<line_sep>self.label_12=QLabel(self.frame_3)<line_sep>self.label_12.setObjectName(u"label_12")<line_sep>self.verticalLayout_8.addWidget(self.label_12)<line_sep>self.wires_tableView=QTableView(self.frame_3)<line_sep>self.wires_tableView.setObjectName(u"wires_tableView")<line_sep>self.verticalLayout_8.addWidget(self.wires_tableView)<line_sep>self.frame_7=QFrame(self.frame_3)<line_sep>self.frame_7.setObjectName(u"frame_7")<line_sep>self.frame_7.setFrameShape(QFrame.StyledPanel)<line_sep>self.frame_7.setFrameShadow(QFrame.Raised)<line_sep>self.horizontalLayout_4=QHBoxLayout(self.frame_7)<line_sep>self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")<line_sep>self.horizontalLayout_4.setContentsMargins(0 0 0 0)<line_sep>self.add_to_tower_pushButton=QPushButton(self.frame_7)<line_sep>self.add_to_tower_pushButton.setObjectName(u"add_to_tower_pushButton")<line_sep>icon=QIcon()<line_sep>icon.addFile(u":/Icons/icons/plus.svg" QSize() QIcon.Normal QIcon.Off)<line_sep>self.add_to_tower_pushButton.setIcon(icon)<line_sep>self.horizontalLayout_4.addWidget(self.add_to_tower_pushButton)<line_sep>self.horizontalSpacer_3=QSpacerItem(990 20 QSizePolicy.Expanding QSizePolicy.Minimum)<line_sep>self.horizontalLayout_4.addItem(self.horizontalSpacer_3)<line_sep>self.verticalLayout_8.addWidget(self.frame_7)<line_sep>self.splitter.addWidget(self.frame_3)<line_sep>self.frame_4=QFrame(self.splitter)<line_sep>self.frame_4.setObjectName(u"frame_4")<line_sep>self.frame_4.setFrameShape(QFrame.NoFrame)<line_sep>self.frame_4.setFrameShadow(QFrame.Raised)<line_sep>self.verticalLayout_4=QVBoxLayout(self.frame_4)<line_sep>self.verticalLayout_4.setObjectName(u"verticalLayout_4")<line_sep>self.verticalLayout_4.setContentsMargins(9 9 9 
9)<line_sep>self.label_10=QLabel(self.frame_4)<line_sep>self.label_10.setObjectName(u"label_10")<line_sep>self.verticalLayout_4.addWidget(self.label_10)<line_sep>self.tower_tableView=QTableView(self.frame_4)<line_sep>self.tower_tableView.setObjectName(u"tower_tableView")<line_sep>self.verticalLayout_4.addWidget(self.tower_tableView)<line_sep>self.frame=QFrame(self.frame_4)<line_sep>self.frame.setObjectName(u"frame")<line_sep>self.frame.setFrameShape(QFrame.NoFrame)<line_sep>self.frame.setFrameShadow(QFrame.Raised)<line_sep>self.horizontalLayout_2=QHBoxLayout(self.frame)<line_sep>self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")<line_sep>self.horizontalLayout_2.setContentsMargins(0 0 0 0)<line_sep>self.delete_from_tower_pushButton=QPushButton(self.frame)<line_sep>self.delete_from_tower_pushButton.setObjectName(u"delete_from_tower_pushButton")<line_sep>icon1=QIcon()<line_sep>icon1.addFile(u":/Icons/icons/minus.svg" QSize() QIcon.Normal QIcon.Off)<line_sep>self.delete_from_tower_pushButton.setIcon(icon1)<line_sep>self.horizontalLayout_2.addWidget(self.delete_from_tower_pushButton)<line_sep>self.horizontalSpacer=QSpacerItem(40 20 QSizePolicy.Expanding QSizePolicy.Minimum)<line_sep>self.horizontalLayout_2.addItem(self.horizontalSpacer)<line_sep>self.compute_pushButton=QPushButton(self.frame)<line_sep>self.compute_pushButton.setObjectName(u"compute_pushButton")<line_sep>icon2=QIcon()<line_sep>icon2.addFile(u":/Icons/icons/calc.svg" QSize() QIcon.Normal QIcon.Off)<line_sep>self.compute_pushButton.setIcon(icon2)<line_sep>self.compute_pushButton.setIconSize(QSize(16 16))<line_sep>self.horizontalLayout_2.addWidget(self.compute_pushButton)<line_sep>self.verticalLayout_4.addWidget(self.frame)<line_sep>self.splitter.addWidget(self.frame_4)<line_sep>self.verticalLayout_5.addWidget(self.splitter)<line_sep>self.main_splitter.addWidget(self.frame_8)<line_sep>self.PlotFrame=QFrame(self.main_splitter)<line_sep>self.PlotFrame.setObjectName(u"PlotFrame")<line_sep>self.PlotFrame.setFrameShape(QFrame.NoFrame)<line_sep>self.PlotFrame.setFrameShadow(QFrame.Raised)<line_sep>self.verticalLayout_7=QVBoxLayout(self.PlotFrame)<line_sep>self.verticalLayout_7.setObjectName(u"verticalLayout_7")<line_sep>self.verticalLayout_7.setContentsMargins(9 9 9 9)<line_sep>self.label_4=QLabel(self.PlotFrame)<line_sep>self.label_4.setObjectName(u"label_4")<line_sep>self.verticalLayout_7.addWidget(self.label_4)<line_sep>self.plotwidget=MatplotlibWidget(self.PlotFrame)<line_sep>self.plotwidget.setObjectName(u"plotwidget")<line_sep>self.verticalLayout_7.addWidget(self.plotwidget)<line_sep>self.frame_9=QFrame(self.PlotFrame)<line_sep>self.frame_9.setObjectName(u"frame_9")<line_sep>self.frame_9.setMaximumSize(QSize(16777215 24))<line_sep>self.frame_9.setFrameShape(QFrame.StyledPanel)<line_sep>self.frame_9.setFrameShadow(QFrame.Raised)<line_sep>self.horizontalLayout_5=QHBoxLayout(self.frame_9)<line_sep>self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")<line_sep>self.horizontalLayout_5.setContentsMargins(0 0 0 0)<line_sep>self.horizontalSpacer_4=QSpacerItem(19 19 QSizePolicy.Expanding 
QSizePolicy.Minimum)<line_sep>self.horizontalLayout_5.addItem(self.horizontalSpacer_4)<line_sep>self.acceptButton=QPushButton(self.frame_9)<line_sep>self.acceptButton.setObjectName(u"acceptButton")<line_sep>self.horizontalLayout_5.addWidget(self.acceptButton)<line_sep>self.verticalLayout_7.addWidget(self.frame_9)<line_sep>self.main_splitter.addWidget(self.PlotFrame)<line_sep>self.verticalLayout_6.addWidget(self.main_splitter)<line_sep>self.tabWidget.addTab(self.tab_2 "")<line_sep>self.tab=QWidget()<line_sep>self.tab.setObjectName(u"tab")<line_sep>self.verticalLayout_3=QVBoxLayout(self.tab)<line_sep>self.verticalLayout_3.setObjectName(u"verticalLayout_3")<line_sep>self.frame_10=QFrame(self.tab)<line_sep>self.frame_10.setObjectName(u"frame_10")<line_sep>self.frame_10.setFrameShape(QFrame.StyledPanel)<line_sep>self.frame_10.setFrameShadow(QFrame.Raised)<line_sep>self.gridLayout_2=QGridLayout(self.frame_10)<line_sep>self.gridLayout_2.setObjectName(u"gridLayout_2")<line_sep>self.label_2=QLabel(self.frame_10)<line_sep>self.label_2.setObjectName(u"label_2")<line_sep>self.gridLayout_2.addWidget(self.label_2 0 1 1 1)<line_sep>self.label_6=QLabel(self.frame_10)<line_sep>self.label_6.setObjectName(u"label_6")<line_sep>self.gridLayout_2.addWidget(self.label_6 2 0 1 1)<line_sep>self.z_tableView_abcn=QTableView(self.frame_10)<line_sep>self.z_tableView_abcn.setObjectName(u"z_tableView_abcn")<line_sep>self.gridLayout_2.addWidget(self.z_tableView_abcn 1 0 1 1)<line_sep>self.y_tableView_abcn=QTableView(self.frame_10)<line_sep>self.y_tableView_abcn.setObjectName(u"y_tableView_abcn")<line_sep>self.gridLayout_2.addWidget(self.y_tableView_abcn 1 1 1 1)<line_sep>self.label_7=QLabel(self.frame_10)<line_sep>self.label_7.setObjectName(u"label_7")<line_sep>self.gridLayout_2.addWidget(self.label_7 4 0 1 1)<line_sep>self.z_tableView_abc=QTableView(self.frame_10)<line_sep>self.z_tableView_abc.setObjectName(u"z_tableView_abc")<line_sep>self.gridLayout_2.addWidget(self.z_tableView_abc 3 0 1 1)<line_sep>self.label=QLabel(self.frame_10)<line_sep>self.label.setObjectName(u"label")<line_sep>self.gridLayout_2.addWidget(self.label 0 0 1 1)<line_sep>self.z_tableView_seq=QTableView(self.frame_10)<line_sep>self.z_tableView_seq.setObjectName(u"z_tableView_seq")<line_sep>self.gridLayout_2.addWidget(self.z_tableView_seq 5 0 1 1)<line_sep>self.label_3=QLabel(self.frame_10)<line_sep>self.label_3.setObjectName(u"label_3")<line_sep>self.gridLayout_2.addWidget(self.label_3 2 1 1 1)<line_sep>self.y_tableView_abc=QTableView(self.frame_10)<line_sep>self.y_tableView_abc.setObjectName(u"y_tableView_abc")<line_sep>self.gridLayout_2.addWidget(self.y_tableView_abc 3 1 1 1)<line_sep>self.label_5=QLabel(self.frame_10)<line_sep>self.label_5.setObjectName(u"label_5")<line_sep>self.gridLayout_2.addWidget(self.label_5 4 1 1 1)<line_sep>self.y_tableView_seq=QTableView(self.frame_10)<line_sep>self.y_tableView_seq.setObjectName(u"y_tableView_seq")<line_sep>self.gridLayout_2.addWidget(self.y_tableView_seq 5 1 1 1)<line_sep>self.verticalLayout_3.addWidget(self.frame_10)<line_sep>self.tabWidget.addTab(self.tab "")<line_sep>self.gridLayout.addWidget(self.tabWidget 4 0 1 1)<line_sep>self.retranslateUi(Dialog)<line_sep>self.tabWidget.setCurrentIndex(0)<line_sep>QMetaObject.connectSlotsByName(Dialog)<block_end># setupUi <def_stmt>retranslateUi self Dialog<block_start>Dialog.setWindowTitle(QCoreApplication.translate("Dialog" u"Tower creation" <none>))<line_sep>self.label_9.setText(QCoreApplication.translate("Dialog" u"Name" 
<none>))<line_sep>self.label_8.setText(QCoreApplication.translate("Dialog" u"Frequency (Hz)" <none>))<line_sep>self.label_11.setText(QCoreApplication.translate("Dialog" u"Earth resistivity (Ohm/m^3)" <none>))<line_sep>self.label_12.setText(QCoreApplication.translate("Dialog" u"Wire catalogue" <none>))<line_sep>#if QT_CONFIG(tooltip) self.add_to_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog" u"Add wire" <none>))<line_sep>#endif // QT_CONFIG(tooltip) self.add_to_tower_pushButton.setText("")<line_sep>self.label_10.setText(QCoreApplication.translate("Dialog" u"Wire composition" <none>))<line_sep>#if QT_CONFIG(tooltip) self.delete_from_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog" u"Delete wire" <none>))<line_sep>#endif // QT_CONFIG(tooltip) self.delete_from_tower_pushButton.setText("")<line_sep>#if QT_CONFIG(tooltip) self.compute_pushButton.setToolTip(QCoreApplication.translate("Dialog" u"Compute matrices" <none>))<line_sep>#endif // QT_CONFIG(tooltip) self.compute_pushButton.setText("")<line_sep>self.label_4.setText(QCoreApplication.translate("Dialog" u"Tower" <none>))<line_sep>self.acceptButton.setText(QCoreApplication.translate("Dialog" u"Accept" <none>))<line_sep>self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2) QCoreApplication.translate("Dialog" u"Tower designer" <none>))<line_sep>self.label_2.setText(QCoreApplication.translate("Dialog" u" Y shunt (uS / km) for ABCN" <none>))<line_sep>self.label_6.setText(QCoreApplication.translate("Dialog" u" Z series (Ohm / km) for ABC" <none>))<line_sep>self.label_7.setText(QCoreApplication.translate("Dialog" u" Z series (Ohm / km) in sequence components" <none>))<line_sep>self.label.setText(QCoreApplication.translate("Dialog" u" Z series (Ohm / km) for ABCN" <none>))<line_sep>self.label_3.setText(QCoreApplication.translate("Dialog" u" Y shunt (uS / km) for ABC" <none>))<line_sep>self.label_5.setText(QCoreApplication.translate("Dialog" u" Y shunt (uS / km) for the sequence components" <none>))<line_sep>self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab) QCoreApplication.translate("Dialog" u"Impedance matrices" <none>))<block_end># retranslateUi <block_end>
# -*- coding: utf-8 -*- # # Copyright (C) 2005-2009 Edgewall Software # Copyright (C) 2005-2007 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. <import_from_stmt>ConfigParser ConfigParser<import_from_stmt>copy deepcopy<import_from_stmt>inspect cleandoc<import_stmt>os.path<import_from_stmt>.core ExtensionPoint<line_sep>__all__=['Configuration' 'ConfigSection' 'Option' 'BoolOption' 'IntOption' 'FloatOption' 'ListOption' 'OrderedExtensionsOption']<line_sep>_use_default=object()<def_stmt>as_bool value<block_start>"""Convert the given value to a `bool`. If `value` is a string, return `True` for any of "yes", "true", "enabled", "on" or non-zero numbers, ignoring case. For non-string arguments, return the argument converted to a `bool`, or `False` if the conversion fails. """<if_stmt>isinstance(value basestring)<block_start><try_stmt><block_start><return>bool(float(value))<block_end><except_stmt>ValueError<block_start><return>value.strip().lower()<in>('yes' 'true' 'enabled' 'on')<block_end><block_end><try_stmt><block_start><return>bool(value)<block_end><except_stmt>(TypeError ValueError)<block_start><return><false><block_end><block_end><def_stmt>to_unicode text charset=<none><block_start>"""Convert input to a `unicode` object. For a `str` object, we'll first try to decode the bytes using the given `charset` encoding (or UTF-8 if none is specified), then we fall back to the latin1 encoding which might be correct or not, but at least preserves the original byte sequence by mapping each byte to the corresponding unicode code point in the range U+0000 to U+00FF. For anything else, a simple `unicode()` conversion is attempted, with special care taken with `Exception` objects. """<if_stmt>isinstance(text str)<block_start><try_stmt><block_start><return>unicode(text charset<or>'utf-8')<block_end><except_stmt>UnicodeDecodeError<block_start><return>unicode(text 'latin1')<block_end><block_end><elif_stmt>isinstance(text Exception)# two possibilities for storing unicode strings in exception data: <block_start><try_stmt># custom __str__ method on the exception (e.g. PermissionError) <block_start><return>unicode(text)<block_end><except_stmt>UnicodeError# unicode arguments given to the exception (e.g. parse_date) <block_start><return>' '.join([to_unicode(arg)<for>arg text.args])<block_end><block_end><return>unicode(text)<block_end><def_stmt>_to_utf8 basestr<block_start><return>to_unicode(basestr 'utf-8').encode('utf-8')<block_end><class_stmt>Configuration(object)<block_start>"""Thin layer over `ConfigParser` from the Python standard library. In addition to providing some convenience methods, the class remembers the last modification time of the configuration file, and reparses it when the file has changed. 
"""<def_stmt>__init__ self filename params={}<block_start>self.filename=filename<line_sep>self.parser=ConfigParser()<line_sep>self.parser.optionxform=str<line_sep>self._old_sections={}<line_sep>self.parents=[]<line_sep>self._lastmtime=0<line_sep>self._sections={}<line_sep>self.parser.read(filename)<block_end><def_stmt>__contains__ self name<block_start>"""Return whether the configuration contains a section of the given name. """<line_sep><return>name<in>self.sections()<block_end><def_stmt>__getitem__ self name<block_start>"""Return the configuration section with the specified name."""<if_stmt>name<not><in>self._sections<block_start>self._sections[name]=Section(self name)<block_end><return>self._sections[name]<block_end><def_stmt>__repr__ self<block_start><return>'<%s %r>'%(self.__class__.__name__ self.filename)<block_end><def_stmt>get self section key default=''<block_start>"""Return the value of the specified option. Valid default input is a string. Returns a string. """<line_sep><return>self[section].get(key default)<block_end><def_stmt>getbool self section key default=''<block_start>"""Return the specified option as boolean value. If the value of the option is one of "yes", "true", "enabled", "on", or "1", this method will return `True`, otherwise `False`. Valid default input is a string or a bool. Returns a bool. """<line_sep><return>self[section].getbool(key default)<block_end><def_stmt>getint self section key default=''<block_start>"""Return the value of the specified option as integer. Valid default input is a string or an int. Returns an int. """<line_sep><return>self[section].getint(key default)<block_end><def_stmt>getfloat self section key default=''<block_start>"""Return the value of the specified option as float. Valid default input is a string, float or int. Returns a float. """<line_sep><return>self[section].getfloat(key default)<block_end><def_stmt>getlist self section key default='' sep=',' keep_empty=<false><block_start>"""Return a list of values that have been specified as a single comma-separated option. A different separator can be specified using the `sep` parameter. If the `keep_empty` parameter is set to `True`, empty elements are included in the list. Valid default input is a string or a list. Returns a list. """<line_sep><return>self[section].getlist(key default sep keep_empty)<block_end><def_stmt>getpath self section key default=''<block_start>"""Return a configuration value as an absolute path. Relative paths are resolved relative to the location of this configuration file. Valid default input is a string. Returns a normalized path. """<line_sep><return>self[section].getpath(key default)<block_end><def_stmt>defaults self compmgr=<none><block_start>"""Returns a dictionary of the default configuration values. If `compmgr` is specified, return only options declared in components that are enabled in the given `ComponentManager`. """<line_sep>defaults={}<for_stmt>(section key),option Option.get_registry(compmgr).items()<block_start>defaults.setdefault(section {})[key]=option.default<block_end><return>defaults<block_end><def_stmt>options self section compmgr=<none><block_start>"""Return a list of `(name, value)` tuples for every option in the specified section. This includes options that have default values that haven't been overridden. If `compmgr` is specified, only return default option values for components that are enabled in the given `ComponentManager`. 
"""<line_sep><return>self[section].options(compmgr)<block_end><def_stmt>remove self section key<block_start>"""Remove the specified option."""<line_sep>self[section].remove(key)<block_end><def_stmt>sections self compmgr=<none> defaults=<true><block_start>"""Return a list of section names. If `compmgr` is specified, only the section names corresponding to options declared in components that are enabled in the given `ComponentManager` are returned. """<line_sep>sections=set([to_unicode(s)<for>s self.parser.sections()])<for_stmt>parent self.parents<block_start>sections.update(parent.sections(compmgr defaults=<false>))<block_end><if_stmt>defaults<block_start>sections.update(self.defaults(compmgr))<block_end><return>sorted(sections)<block_end><def_stmt>has_option self section option defaults=<true><block_start>"""Returns True if option exists in section in either the project burp.ini or one of the parents, or is available through the Option registry. """<line_sep>section_str=_to_utf8(section)<if_stmt>self.parser.has_section(section_str)<block_start><if_stmt>_to_utf8(option)<in>self.parser.options(section_str)<block_start><return><true><block_end><block_end><for_stmt>parent self.parents<block_start><if_stmt>parent.has_option(section option defaults=<false>)<block_start><return><true><block_end><block_end><return>defaults<and>(section option)<in>Option.registry<block_end><def_stmt>parse_if_needed self force=<false><block_start><if_stmt><not>self.filename<or><not>os.path.isfile(self.filename)<block_start><return><false><block_end>changed=<false><line_sep>modtime=os.path.getmtime(self.filename)<if_stmt>force<or>modtime<g>self._lastmtime<block_start>self._sections={}<line_sep>self.parser._sections={}<if_stmt><not>self.parser.read(self.filename)<block_start><raise>IOError("Error reading '%(file)s', make sure it is "<concat>"readable."%{'file':self.filename})<block_end>self._lastmtime=modtime<line_sep>self._old_sections=deepcopy(self.parser._sections)<line_sep>changed=<true><block_end><if_stmt>changed<block_start>self.parents=[]<if_stmt>self.parser.has_option('inherit' 'file')<block_start><for_stmt>filename self.parser.get('inherit' 'file').split(',')<block_start>filename=to_unicode(filename.strip())<if_stmt><not>os.path.isabs(filename)<block_start>filename=os.path.join(os.path.dirname(self.filename) filename)<block_end>self.parents.append(Configuration(filename))<block_end><block_end><block_end><else_stmt><block_start><for_stmt>parent self.parents<block_start>changed<augor>parent.parse_if_needed(force=force)<block_end><block_end><if_stmt>changed<block_start>self._cache={}<block_end><return>changed<block_end><block_end><class_stmt>Section(object)<block_start>"""Proxy for a specific configuration section. Objects of this class should not be instantiated directly. """<line_sep>__slots__=['config' 'name' 'overridden' '_cache']<def_stmt>__init__ self config name<block_start>self.config=config<line_sep>self.name=name<line_sep>self.overridden={}<line_sep>self._cache={}<block_end><def_stmt>contains self key defaults=<true><block_start><if_stmt>self.config.parser.has_option(_to_utf8(self.name) _to_utf8(key))<block_start><return><true><block_end><for_stmt>parent self.config.parents<block_start><if_stmt>parent[self.name].contains(key defaults=<false>)<block_start><return><true><block_end><block_end><return>defaults<and>Option.registry.has_key((self.name key))<block_end>__contains__=contains<def_stmt>iterate self compmgr=<none> defaults=<true><block_start>"""Iterate over the options in this section. 
If `compmgr` is specified, only return default option values for components that are enabled in the given `ComponentManager`. """<line_sep>options=set()<line_sep>name_str=_to_utf8(self.name)<if_stmt>self.config.parser.has_section(name_str)<block_start><for_stmt>option_str self.config.parser.options(name_str)<block_start>option=to_unicode(option_str)<line_sep>options.add(option.lower())<line_sep><yield>option<block_end><block_end><for_stmt>parent self.config.parents<block_start><for_stmt>option parent[self.name].iterate(defaults=<false>)<block_start>loption=option.lower()<if_stmt>loption<not><in>options<block_start>options.add(loption)<line_sep><yield>option<block_end><block_end><block_end><if_stmt>defaults<block_start><for_stmt>section,option Option.get_registry(compmgr).keys()<block_start><if_stmt>section<eq>self.name<and>option.lower()<not><in>options<block_start><yield>option<block_end><block_end><block_end><block_end>__iter__=iterate<def_stmt>__repr__ self<block_start><return>'<%s [%s]>'%(self.__class__.__name__ self.name)<block_end><def_stmt>get self key default=''<block_start>"""Return the value of the specified option. Valid default input is a string. Returns a string. """<line_sep>cached=self._cache.get(key _use_default)<if_stmt>cached<is><not>_use_default<block_start><return>cached<block_end>name_str=_to_utf8(self.name)<line_sep>key_str=_to_utf8(key)<if_stmt>self.config.parser.has_option(name_str key_str)<block_start>value=self.config.parser.get(name_str key_str)<block_end><else_stmt><block_start><for_stmt>parent self.config.parents<block_start>value=parent[self.name].get(key _use_default)<if_stmt>value<is><not>_use_default<block_start><break><block_end><block_end><else_stmt><block_start><if_stmt>default<is><not>_use_default<block_start>option=Option.registry.get((self.name key))<line_sep>value=option.default<if>option<else>_use_default<block_end><else_stmt><block_start>value=_use_default<block_end><block_end><block_end><if_stmt>value<is>_use_default<block_start><return>default<block_end><if_stmt><not>value<block_start>value=u''<block_end><elif_stmt>isinstance(value basestring)<block_start>value=to_unicode(value)<block_end>self._cache[key]=value<line_sep><return>value<block_end><def_stmt>getbool self key default=''<block_start>"""Return the value of the specified option as boolean. This method returns `True` if the option value is one of "yes", "true", "enabled", "on", or non-zero numbers, ignoring case. Otherwise `False` is returned. Valid default input is a string or a bool. Returns a bool. """<line_sep><return>as_bool(self.get(key default))<block_end><def_stmt>getint self key default=''<block_start>"""Return the value of the specified option as integer. Valid default input is a string or an int. Returns an int. """<line_sep>value=self.get(key default)<if_stmt><not>value<block_start><return>0<block_end><return>int(value)<block_end><def_stmt>getfloat self key default=''<block_start>"""Return the value of the specified option as float. Valid default input is a string, float or int. Returns a float. """<line_sep>value=self.get(key default)<if_stmt><not>value<block_start><return>0.0<block_end><return>float(value)<block_end><def_stmt>getlist self key default='' sep=',' keep_empty=<true><block_start>"""Return a list of values that have been specified as a single comma-separated option. A different separator can be specified using the `sep` parameter. If the `keep_empty` parameter is set to `False`, empty elements are omitted from the list. Valid default input is a string or a list. 
Returns a list. """<line_sep>value=self.get(key default)<if_stmt><not>value<block_start><return>[]<block_end><if_stmt>isinstance(value basestring)<block_start>items=[item.strip()<for>item value.split(sep)]<block_end><else_stmt><block_start>items=list(value)<block_end><if_stmt><not>keep_empty<block_start>items=filter(<none> items)<block_end><return>items<block_end><def_stmt>getpath self key default=''<block_start>"""Return the value of the specified option as a path, relative to the location of this configuration file. Valid default input is a string. Returns a normalized path. """<line_sep>path=self.get(key default)<if_stmt><not>path<block_start><return>default<block_end><if_stmt><not>os.path.isabs(path)<block_start>path=os.path.join(os.path.dirname(self.config.filename) path)<block_end><return>os.path.normcase(os.path.realpath(path))<block_end><def_stmt>options self compmgr=<none><block_start>"""Return `(key, value)` tuples for every option in the section. This includes options that have default values that haven't been overridden. If `compmgr` is specified, only return default option values for components that are enabled in the given `ComponentManager`. """<for_stmt>key self.iterate(compmgr)<block_start><yield>key self.get(key)<block_end><block_end><block_end><def_stmt>_get_registry cls compmgr=<none><block_start>"""Return the descriptor registry. If `compmgr` is specified, only return descriptors for components that are enabled in the given `ComponentManager`. """<if_stmt>compmgr<is><none><block_start><return>cls.registry<block_end><import_from_stmt>.core ComponentMeta<line_sep>components={}<for_stmt>comp ComponentMeta._components<block_start><for_stmt>attr comp.__dict__.itervalues()<block_start><if_stmt>isinstance(attr cls)<block_start>components[attr]=comp<block_end><block_end><block_end><return>dict(each<for>each cls.registry.iteritems()<if>each[1]<not><in>components<or>compmgr.is_enabled(components[each[1]]))<block_end><class_stmt>ConfigSection(object)<block_start>"""Descriptor for configuration sections."""<line_sep>registry={}<line_sep>@staticmethod<def_stmt>get_registry compmgr=<none><block_start>"""Return the section registry, as a `dict` mapping section names to `ConfigSection` objects. If `compmgr` is specified, only return sections for components that are enabled in the given `ComponentManager`. """<line_sep><return>_get_registry(ConfigSection compmgr)<block_end><def_stmt>__init__ self name doc doc_domain='burpini'<block_start>"""Create the configuration section."""<line_sep>self.name=name<line_sep>self.registry[self.name]=self<line_sep>self.__doc__=cleandoc(doc)<line_sep>self.doc_domain=doc_domain<block_end><def_stmt>__get__ self instance owner<block_start><if_stmt>instance<is><none><block_start><return>self<block_end>config=getattr(instance 'config' <none>)<if_stmt>config<and>isinstance(config Configuration)<block_start><return>config[self.name]<block_end><block_end><def_stmt>__repr__ self<block_start><return>'<%s [%s]>'%(self.__class__.__name__ self.name)<block_end><block_end><class_stmt>Option(object)<block_start>"""Descriptor for configuration options."""<line_sep>registry={}<line_sep>accessor=Section.get<line_sep>@staticmethod<def_stmt>get_registry compmgr=<none><block_start>"""Return the option registry, as a `dict` mapping `(section, key)` tuples to `Option` objects. If `compmgr` is specified, only return options for components that are enabled in the given `ComponentManager`. 
"""<line_sep><return>_get_registry(Option compmgr)<block_end><def_stmt>__init__ self section name default=<none> doc='' doc_domain='burpini'<block_start>"""Create the configuration option. :param section: the name of the configuration section this option belongs to :param name: the name of the option :param default: the default value for the option :param doc: documentation of the option """<line_sep>self.section=section<line_sep>self.name=name<line_sep>self.default=default<line_sep>self.registry[(self.section self.name)]=self<line_sep>self.__doc__=cleandoc(doc)<line_sep>self.doc_domain=doc_domain<block_end><def_stmt>__get__ self instance owner<block_start><if_stmt>instance<is><none><block_start><return>self<block_end>config=getattr(instance 'config' <none>)<if_stmt>config<and>isinstance(config Configuration)<block_start>section=config[self.section]<line_sep>value=self.accessor(section self.name self.default)<line_sep><return>value<block_end><block_end><def_stmt>__set__ self instance value<block_start><raise>AttributeError("can't set attribute")<block_end><def_stmt>__repr__ self<block_start><return>'<%s [%s] "%s">'%(self.__class__.__name__ self.section self.name)<block_end><block_end><class_stmt>BoolOption(Option)<block_start>"""Descriptor for boolean configuration options."""<line_sep>accessor=Section.getbool<block_end><class_stmt>IntOption(Option)<block_start>"""Descriptor for integer configuration options."""<line_sep>accessor=Section.getint<block_end><class_stmt>FloatOption(Option)<block_start>"""Descriptor for float configuration options."""<line_sep>accessor=Section.getfloat<block_end><class_stmt>ListOption(Option)<block_start>"""Descriptor for configuration options that contain multiple values separated by a specific character. """<def_stmt>__init__ self section name default=<none> sep=',' keep_empty=<false> doc='' doc_domain='burpini'<block_start>Option.__init__(self section name default doc doc_domain)<line_sep>self.sep=sep<line_sep>self.keep_empty=keep_empty<block_end><def_stmt>accessor self section name default<block_start><return>section.getlist(name default self.sep self.keep_empty)<block_end><block_end><class_stmt>OrderedExtensionsOption(ListOption)<block_start>"""A comma separated, ordered, list of components implementing `interface`. Can be empty. If `include_missing` is true (the default) all components implementing the interface are returned, with those specified by the option ordered first."""<def_stmt>__init__ self section name interface default=<none> include_missing=<true> doc='' doc_domain='burpini'<block_start>ListOption.__init__(self section name default doc=doc doc_domain=doc_domain)<line_sep>self.xtnpt=ExtensionPoint(interface)<line_sep>self.include_missing=include_missing<block_end><def_stmt>__get__ self instance owner<block_start><if_stmt>instance<is><none><block_start><return>self<block_end>order=ListOption.__get__(self instance owner)<line_sep>components=[]<for_stmt>impl self.xtnpt.extensions(instance)<block_start><if_stmt>self.include_missing<or>impl.__class__.__name__<in>order<block_start>components.append(impl)<block_end><block_end><def_stmt>compare x y<block_start>x,y=x.__class__.__name__ y.__class__.__name__<if_stmt>x<not><in>order<block_start><return>int(y<in>order)<block_end><if_stmt>y<not><in>order<block_start><return>-int(x<in>order)<block_end><return>cmp(order.index(x) order.index(y))<block_end>components.sort(compare)<line_sep><return>components<block_end><block_end>
<import_stmt>os<import_stmt>sys<import_stmt>angr<import_stmt>nose.tools<line_sep>test_location=os.path.join(os.path.dirname(os.path.realpath(__file__)) '..' '..' 'binaries' 'tests')<def_stmt>test_various_loops <block_start>p=angr.Project(os.path.join(test_location 'x86_64' 'various_loops') auto_load_libs=<false>)<line_sep>cfg=p.analyses.CFGFast(normalize=<true>)<line_sep>state=p.factory.entry_state()<line_sep>state.register_plugin('loop_data' angr.state_plugins.SimStateLoopData())<line_sep>dummy=p.loader.main_object.get_symbol('dummy')<line_sep>bvs=state.solver.BVS(dummy.name 8<times>dummy.size)<line_sep>state.memory.store(dummy.rebased_addr bvs endness='Iend_LE')<line_sep>simgr=p.factory.simulation_manager(state)<line_sep>simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg functions=<none> bound=<none>))<line_sep>simgr.run()<line_sep>nose.tools.assert_equal(len(simgr.deadended) 10)<line_sep>nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts) 14)<for_stmt>i,d enumerate(simgr.deadended)<block_start>f=p.kb.functions.function(name='symbolic_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0] i)<line_sep>f=p.kb.functions.function(name='for_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0] 9)<line_sep>f=p.kb.functions.function(name='while_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0] 9)<line_sep>f=p.kb.functions.function(name='do_while_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0] 9)<line_sep>f=p.kb.functions.function(name='nullify')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[l.entry.addr]) 8)<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0] 9)<line_sep>f=p.kb.functions.function(name='nested_for_loop')<line_sep>ol=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>il=ol.subloops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0] 3)<line_sep>nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]) 3)<line_sep>nose.tools.assert_true(all(s<eq>3<for>s d.loop_data.back_edge_trip_counts[il.entry.addr]))<line_sep>f=p.kb.functions.function(name='nested_while_loop')<line_sep>ol=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>il=ol.subloops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0] 3)<line_sep>nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]) 3)<line_sep>nose.tools.assert_true(all(s<eq>3<for>s d.loop_data.back_edge_trip_counts[il.entry.addr]))<line_sep>f=p.kb.functions.function(name='nested_do_while_loop')<line_sep>ol=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>il=ol.subloops[0]<line_sep>nose.tools.assert_equal(d.loop_data.header_trip_counts[ol.entry.addr][0] 3)<line_sep>nose.tools.assert_equal(len(d.loop_data.header_trip_counts[il.entry.addr]) 3)<line_sep>nose.tools.assert_true(all(s<eq>3<for>s 
d.loop_data.header_trip_counts[il.entry.addr]))<line_sep>f=p.kb.functions.function(name='break_for_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0] 9)<line_sep>f=p.kb.functions.function(name='break_do_while_loop')<line_sep>l=p.analyses.LoopFinder(functions=[f]).loops[0]<line_sep>nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0] 9)<block_end><block_end><def_stmt>test_loops_with_invalid_parameter <block_start>p=angr.Project(os.path.join(test_location 'x86_64' 'test_loops') auto_load_libs=<false>)<line_sep>state=p.factory.entry_state()<line_sep>state.register_plugin('loop_data' angr.state_plugins.SimStateLoopData())<line_sep>simgr=p.factory.simulation_manager(state)<line_sep>simgr.use_technique(angr.exploration_techniques.LoopSeer(functions=['main' 0x1234] bound=<none>))<line_sep>simgr.run()<line_sep>nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts) 3)<line_sep>nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665][0] 10)<line_sep>nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665]) 10)<line_sep>nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400675][0] 10)<line_sep>nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4006b2][0] 100)<block_end><def_stmt>test_arrays <block_start>p=angr.Project(os.path.join(test_location 'x86_64' 'test_arrays') auto_load_libs=<false>)<line_sep>cfg=p.analyses.CFGFast(normalize=<true>)<line_sep>state=p.factory.entry_state()<line_sep>state.register_plugin('loop_data' angr.state_plugins.SimStateLoopData())<line_sep>simgr=p.factory.simulation_manager(state)<line_sep>simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg functions='main' bound=<none>))<line_sep>simgr.run()<line_sep>nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts) 2)<line_sep>nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400636][0] 26)<line_sep>nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4005fd][0] 26)<block_end><def_stmt>test_loop_limiter <block_start>p=angr.Project(os.path.join(test_location 'x86_64' 'test_arrays') auto_load_libs=<false>)<line_sep>cfg=p.analyses.CFGFast(normalize=<true>)<line_sep>state=p.factory.entry_state()<line_sep>state.register_plugin('loop_data' angr.state_plugins.SimStateLoopData())<line_sep>simgr=p.factory.simulation_manager(state)<line_sep>simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg functions='main' bound=5))<line_sep>simgr.run()<line_sep>nose.tools.assert_true('spinning'<in>simgr.stashes)<line_sep>nose.tools.assert_equal(simgr.spinning[0].loop_data.back_edge_trip_counts[0x4005fd][0] 6)<block_end><def_stmt>test_loop_limiter_constant_loop <block_start>p=angr.Project(os.path.join(test_location 'x86_64' 'constant_loopseer') auto_load_libs=<false>)<line_sep>cfg=p.analyses.CFGFast(normalize=<true>)<line_sep>state=p.factory.entry_state()<line_sep>simgr=p.factory.simulation_manager(state)<line_sep>simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg functions='main' bound=5 limit_concrete_loops=<false>))<line_sep>simgr.run()<line_sep>nose.tools.assert_true(simgr.deadended[0].regs.eax.concrete)<line_sep>val=simgr.deadended[0].solver.eval_one(simgr.deadended[0].regs.eax)<line_sep>nose.tools.assert_equal(val 
420)<block_end><if_stmt>__name__<eq>"__main__"<block_start><if_stmt>len(sys.argv)<g>1<block_start>globals()['test_'+sys.argv[1]]()<block_end><else_stmt><block_start>g=globals().copy()<for_stmt>k,v g.items()<block_start><if_stmt>k.startswith("test_")<and>hasattr(v '__call__')<block_start>print(k)<line_sep>v()<block_end><block_end><block_end><block_end>
<import_from_stmt>TikTokApi TikTokApi<line_sep>api=TikTokApi.get_instance()<line_sep>count=30<line_sep># You can find this sound id from another TikTok method, or find songs with the discoverMusic method. sound_id="6601861313180207878"<line_sep>tiktoks=api.by_sound(sound_id count=count)<for_stmt>tiktok tiktoks<block_start>print(tiktok)<block_end>
""" Environment for basic obstacle avoidance controlling a robotic arm from UR. In this environment the obstacle is only moving up and down in a vertical line in front of the robot. The goal is for the robot to keep at least a predefined minimum distance from the moving obstacle. When feasible the robot should continue to the original configuration, otherwise wait for the obstacle to move away before proceeding. """<import_stmt>numpy<as>np<import_from_stmt>typing Tuple<import_from_stmt>robo_gym_server_modules.robot_server.grpc_msgs.python robot_server_pb2<import_from_stmt>robo_gym.envs.simulation_wrapper Simulation<import_from_stmt>robo_gym.envs.ur.ur_base_avoidance_env URBaseAvoidanceEnv<line_sep># base, shoulder, elbow, wrist_1, wrist_2, wrist_3 JOINT_POSITIONS=[-1.57 -1.31 -1.31 -2.18 1.57 0.0]<line_sep>DEBUG=<true><line_sep>MINIMUM_DISTANCE=0.3# the minimum distance [m] the robot should keep to the obstacle <class_stmt>BasicAvoidanceUR(URBaseAvoidanceEnv)<block_start>"""Universal Robots UR basic obstacle avoidance environment. Args: rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None. fix_base (bool): Whether or not the base joint stays fixed or is moveable. Defaults to False. fix_shoulder (bool): Whether or not the shoulder joint stays fixed or is moveable. Defaults to False. fix_elbow (bool): Whether or not the elbow joint stays fixed or is moveable. Defaults to False. fix_wrist_1 (bool): Whether or not the wrist 1 joint stays fixed or is moveable. Defaults to False. fix_wrist_2 (bool): Whether or not the wrist 2 joint stays fixed or is moveable. Defaults to False. fix_wrist_3 (bool): Whether or not the wrist 3 joint stays fixed or is moveable. Defaults to True. ur_model (str): determines which ur model will be used in the environment. Defaults to 'ur5'. include_polar_to_elbow (bool): determines whether or not the polar coordinates to the elbow joint are included in the state. Defaults to False. Attributes: ur (:obj:): Robot utilities object. client (:obj:str): Robot Server client. real_robot (bool): True if the environment is controlling a real robot. """<line_sep>max_episode_steps=1000<def_stmt>_set_initial_robot_server_state self rs_state fixed_object_position=<none><arrow>robot_server_pb2.State<block_start><if_stmt>fixed_object_position<block_start>state_msg=super()._set_initial_robot_server_state(rs_state=rs_state fixed_object_position=fixed_object_position)<line_sep><return>state_msg<block_end>z_amplitude=np.random.default_rng().uniform(low=0.09 high=0.35)<line_sep>z_frequency=0.125<line_sep>z_offset=np.random.default_rng().uniform(low=0.2 high=0.6)<line_sep>string_params={"object_0_function":"triangle_wave"}<line_sep>float_params={"object_0_x":0.12 "object_0_y":0.34 "object_0_z_amplitude":z_amplitude "object_0_z_frequency":z_frequency "object_0_z_offset":z_offset}<line_sep>state={}<line_sep>state_msg=robot_server_pb2.State(state=state float_params=float_params string_params=string_params state_dict=rs_state)<line_sep><return>state_msg<block_end><def_stmt>reset self joint_positions=JOINT_POSITIONS fixed_object_position=<none><arrow>np.array<block_start>"""Environment reset. Args: joint_positions (list[6] or np.array[6]): robot joint positions in radians. 
fixed_object_position (list[3]): x,y,z fixed position of object """<line_sep>self.prev_action=np.zeros(6)<line_sep>state=super().reset(joint_positions=joint_positions fixed_object_position=fixed_object_position)<line_sep><return>state<block_end><def_stmt>reward self rs_state action<arrow>Tuple[float bool dict]<block_start>env_state=self._robot_server_state_to_env_state(rs_state)<line_sep>reward=0<line_sep>done=<false><line_sep>info={}<line_sep># Reward weights close_distance_weight=-2<line_sep>delta_joint_weight=1<line_sep>action_usage_weight=1<line_sep>rapid_action_weight=-0.2<line_sep># Difference in joint position current vs. starting position delta_joint_pos=env_state[9:15]<line_sep># Calculate distance to the obstacle obstacle_coord=np.array([rs_state['object_0_to_ref_translation_x'] rs_state['object_0_to_ref_translation_y'] rs_state['object_0_to_ref_translation_z']])<line_sep>ee_coord=np.array([rs_state['ee_to_ref_translation_x'] rs_state['ee_to_ref_translation_y'] rs_state['ee_to_ref_translation_z']])<line_sep>forearm_coord=np.array([rs_state['forearm_to_ref_translation_x'] rs_state['forearm_to_ref_translation_y'] rs_state['forearm_to_ref_translation_z']])<line_sep>distance_to_ee=np.linalg.norm(obstacle_coord-ee_coord)<line_sep>distance_to_forearm=np.linalg.norm(obstacle_coord-forearm_coord)<line_sep>distance_to_target=np.min([distance_to_ee distance_to_forearm])<line_sep># Reward staying close to the predefined joint position <if_stmt>abs(env_state[-6:]).sum()<l>0.1<times>action.size<block_start>reward<augadd>delta_joint_weight<times>(1-(abs(delta_joint_pos).sum()/(0.1<times>action.size)))<times>(1/1000)<block_end># Reward for not acting <if_stmt>abs(action).sum()<le>action.size<block_start>reward<augadd>action_usage_weight<times>(1-(np.square(action).sum()/action.size))<times>(1/1000)<block_end># Negative reward if actions change too rapidly between steps <for_stmt>i range(len(action))<block_start><if_stmt>abs(action[i]-self.prev_action[i])<g>0.5<block_start>reward<augadd>rapid_action_weight<times>(1/1000)<block_end><block_end># Negative reward if the obstacle is closer than the predefined minimum distance <if_stmt>distance_to_target<l>MINIMUM_DISTANCE<block_start>reward<augadd>close_distance_weight<times>(1/self.max_episode_steps)<block_end># Check if there is a collision collision=<true><if>rs_state['in_collision']<eq>1<else><false><if_stmt>collision<block_start>done=<true><line_sep>info['final_status']='collision'<line_sep>info['target_coord']=obstacle_coord<line_sep>self.last_position_on_success=[]<block_end><if_stmt>self.elapsed_steps<ge>self.max_episode_steps<block_start>done=<true><line_sep>info['final_status']='success'<line_sep>info['target_coord']=obstacle_coord<line_sep>self.last_position_on_success=[]<block_end><return>reward done info<block_end><def_stmt>step self action<arrow>Tuple[np.array float bool dict]<block_start><if_stmt>type(action)<eq>list<block_start>action=np.array(action)<block_end>state,reward,done,info=super().step(action)<line_sep>self.prev_action=self.add_fixed_joints(action)<line_sep><return>state reward done info<block_end><block_end><class_stmt>BasicAvoidanceURSim(BasicAvoidanceUR Simulation)<block_start>cmd="roslaunch ur_robot_server ur_robot_server.launch \ world_name:=tabletop_sphere50.world \ reference_frame:=base_link \ max_velocity_scale_factor:=0.2 \ action_cycle_rate:=20 \ rviz_gui:=false \ gazebo_gui:=true \ objects_controller:=true \ rs_mode:=1moving2points \ n_objects:=1.0 \ object_0_model_name:=sphere50 \ 
object_0_frame:=target"<def_stmt>__init__ self ip=<none> lower_bound_port=<none> upper_bound_port=<none> gui=<false> ur_model='ur5' **kwargs<block_start>self.cmd=self.cmd+' '+'ur_model:='+ur_model<line_sep>Simulation.__init__(self self.cmd ip lower_bound_port upper_bound_port gui **kwargs)<line_sep>BasicAvoidanceUR.__init__(self rs_address=self.robot_server_ip ur_model=ur_model **kwargs)<block_end><block_end><class_stmt>BasicAvoidanceURRob(BasicAvoidanceUR)<block_start>real_robot=<true><block_end># roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving
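# A minimal usage sketch for the avoidance environment above, assuming a UR robot
# server / simulation manager is reachable at the given IP and that the gym-style API
# inherited from URBaseAvoidanceEnv is available; the address, model and the random
# policy below are placeholders for illustration only, not part of the original file.
env = BasicAvoidanceURSim(ip='127.0.0.1', ur_model='ur5', gui=False)
state = env.reset(joint_positions=JOINT_POSITIONS)
done = False
while not done:
    action = env.action_space.sample()            # random actions, only to exercise the loop
    state, reward, done, info = env.step(action)  # reward follows the shaping defined in reward()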
<import_stmt>unittest<import_stmt>unittest.mock<import_from_stmt>programy.storage.entities.nodes NodesStore<class_stmt>NodesStoreTest(unittest.TestCase)<block_start><def_stmt>test_load self<block_start>store=NodesStore()<with_stmt>self.assertRaises(NotImplementedError)<block_start>collector=unittest.mock.Mock()<line_sep>store.load(collector)<block_end><block_end><block_end>
<import_stmt>unittest<class_stmt>PrefixNotIncluded(unittest.TestCase)<block_start><def_stmt>test_not_included self<block_start><pass><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
""" Stability analysis of the D2Q4 solver for the advection equation d_t(u) + c_x d_x(u) + c_y d_y(u) = 0 """<import_stmt>sympy<as>sp<import_stmt>pylbm<line_sep># pylint: disable=invalid-name # symbolic variables U,X,Y=sp.symbols('U, X, Y')<line_sep># symbolic parameters LA,CX,CY=sp.symbols('lambda, cx, cy' constants=<true>)<line_sep>S_1,S_2=sp.symbols('s1, s2' constants=<true>)<line_sep># numerical parameters la=1.# velocity of the scheme s_1,s_2=2. 1.# relaxation parameters c_x,c_y=0.5 0.25# velocity of the advection equation dico={'dim':2 'scheme_velocity':LA 'schemes':[{'velocities':[1 2 3 4] 'conserved_moments':U 'polynomials':[1 X Y X<power>2-Y<power>2] 'relaxation_parameters':[0 S_1 S_1 S_2] 'equilibrium':[U CX<times>U CY<times>U (CX<power>2-CY<power>2)<times>U] } ] 'parameters':{LA:la S_1:s_1 S_2:s_2 CX:c_x CY:c_y } 'relative_velocity':[CX CY] }<line_sep>scheme=pylbm.Scheme(dico)<line_sep>stab=pylbm.Stability(scheme)<line_sep>stab.visualize({'parameters':{CX:{'range':[0 1] 'init':c_x 'step':0.01 } CY:{'range':[0 1] 'init':c_y 'step':0.01 } S_1:{'name':r"$s_1$" 'range':[0 2] 'init':s_1 'step':0.01 } S_2:{'name':r"$s_2$" 'range':[0 2] 'init':s_2 'step':0.01 } } 'number_of_wave_vectors':4096 })<line_sep>
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx.utils null_out<import_from_stmt>libtbx easy_pickle<import_from_stmt>six.moves cStringIO<as>StringIO<def_stmt>run_validation pdb_file ignore_hd=<true><block_start><import_from_stmt>mmtbx.validation restraints<import_stmt>mmtbx.command_line<line_sep>cmdline=mmtbx.command_line.load_model_and_data(args=[pdb_file] master_phil=mmtbx.command_line.generic_simple_input_phil() process_pdb_file=<true> require_data=<false> out=null_out())<line_sep>validation=restraints.combined(pdb_hierarchy=cmdline.pdb_hierarchy xray_structure=cmdline.xray_structure geometry_restraints_manager=cmdline.geometry ignore_hd=ignore_hd)<line_sep><return>validation<block_end><def_stmt>exercise_simple # extracted from 1lyz, with hydrogens from reduce <block_start>pdb_in=""" ATOM 1 N LYS A 1 3.296 9.888 10.739 1.00 7.00 N ATOM 2 CA LYS A 1 2.439 10.217 9.791 1.00 6.00 C ATOM 3 C LYS A 1 2.439 11.997 9.160 1.00 6.00 C ATOM 4 O LYS A 1 2.637 12.656 10.107 1.00 8.00 O ATOM 5 CB LYS A 1 0.659 10.086 8.844 1.00 6.00 C ATOM 6 CG LYS A 1 0.198 10.415 8.086 1.00 6.00 C ATOM 7 CD LYS A 1 -1.187 10.086 8.212 1.00 6.00 C ATOM 8 CE LYS A 1 -2.175 10.086 7.264 1.00 6.00 C ATOM 9 NZ LYS A 1 -3.527 9.869 7.288 1.00 7.00 N ATOM 0 H1 LYS A 1 3.156 9.045 10.986 1.00 7.00 H ATOM 0 H2 LYS A 1 4.127 9.972 10.431 1.00 7.00 H ATOM 0 H3 LYS A 1 3.184 10.425 11.440 1.00 7.00 H ATOM 0 HA LYS A 1 2.772 9.314 9.912 1.00 6.00 H ATOM 0 HB2 LYS A 1 0.584 9.128 8.712 1.00 6.00 H ATOM 0 HB3 LYS A 1 0.046 10.323 9.557 1.00 6.00 H ATOM 0 HG2 LYS A 1 0.310 11.376 8.015 1.00 6.00 H ATOM 0 HG3 LYS A 1 0.563 10.027 7.276 1.00 6.00 H ATOM 0 HD2 LYS A 1 -1.193 9.186 8.573 1.00 6.00 H ATOM 0 HD3 LYS A 1 -1.516 10.674 8.910 1.00 6.00 H ATOM 0 HE2 LYS A 1 -2.097 10.964 6.860 1.00 6.00 H ATOM 0 HE3 LYS A 1 -1.857 9.444 6.610 1.00 6.00 H ATOM 0 HZ1 LYS A 1 -3.725 9.170 6.774 1.00 7.00 H ATOM 0 HZ2 LYS A 1 -3.787 9.706 8.123 1.00 7.00 H ATOM 0 HZ3 LYS A 1 -3.949 10.590 6.982 1.00 7.00 H ATOM 10 N VAL A 2 2.637 12.722 7.707 1.00 7.00 N ATOM 11 CA VAL A 2 2.307 14.172 7.580 1.00 6.00 C ATOM 12 C VAL A 2 0.857 14.041 6.949 1.00 6.00 C ATOM 13 O VAL A 2 0.659 13.843 5.875 1.00 8.00 O ATOM 14 CB VAL A 2 3.625 14.172 6.759 1.00 6.00 C ATOM 15 CG1 VAL A 2 3.494 15.491 6.317 1.00 6.00 C ATOM 16 CG2 VAL A 2 4.746 13.843 7.580 1.00 6.00 C ATOM 0 H VAL A 2 2.920 12.338 6.992 1.00 7.00 H ATOM 0 HA VAL A 2 2.195 14.925 8.181 1.00 6.00 H ATOM 0 HB VAL A 2 3.767 13.528 6.048 1.00 6.00 H ATOM 0 HG11 VAL A 2 4.250 15.721 5.755 1.00 6.00 H ATOM 0 HG12 VAL A 2 2.674 15.582 5.808 1.00 6.00 H ATOM 0 HG13 VAL A 2 3.467 16.087 7.081 1.00 6.00 H ATOM 0 HG21 VAL A 2 5.554 13.850 7.043 1.00 6.00 H ATOM 0 HG22 VAL A 2 4.827 14.495 8.294 1.00 6.00 H ATOM 0 HG23 VAL A 2 4.620 12.960 7.962 1.00 6.00 H END """<line_sep>pdb_file="tst_validate_restraints_simple.pdb"<line_sep>open(pdb_file "w").write(pdb_in)<line_sep>v1=run_validation(pdb_file ignore_hd=<true>)<line_sep>out1=StringIO()<line_sep>v1.show(out=out1)<assert_stmt>(""" ----------Chiral volumes---------- atoms ideal model delta sigma residual deviation A 1 LYS CA A 1 LYS N A 1 LYS C A 1 LYS CB 2.57 1.12 1.45 2.00e-01 5.25e+01 7.2*sigma """<in>"\n".join([l.rstrip()<for>l out1.getvalue().splitlines()]))<line_sep>s=easy_pickle.dumps(v1)<line_sep>v1p=easy_pickle.loads(s)<line_sep>out1p=StringIO()<line_sep>v1p.show(out=out1p)<assert_stmt>(out1.getvalue()<eq>out1p.getvalue())<line_sep>v2=run_validation(pdb_file 
ignore_hd=<false>)<line_sep>out2=StringIO()<line_sep>v2.show(out=out2)<assert_stmt>(out2.getvalue()<ne>out1.getvalue())<assert_stmt>("""\ A 1 LYS HA 110.00 57.00 53.00 3.00e+00 3.12e+02 17.7*sigma A 1 LYS N A 1 LYS CA """<in>"\n".join([l.rstrip()<for>l out2.getvalue().splitlines()]))<line_sep># # C-alpha-only model (from 3b5d) pdb_raw="""\ CRYST1 115.100 43.700 76.400 90.00 108.10 90.00 C 1 2 1 8 ATOM 1 CA TYR A 6 -7.551 -11.355 -17.946 1.00148.04 C ATOM 2 CA LEU A 7 -8.052 -8.804 -20.730 1.00310.75 C ATOM 3 CA GLY A 8 -10.874 -6.691 -19.353 1.00158.95 C ATOM 4 CA GLY A 9 -9.359 -7.332 -15.966 1.00217.68 C ATOM 5 CA ALA A 10 -5.806 -6.508 -16.946 1.00239.12 C ATOM 6 CA ILE A 11 -7.024 -3.514 -18.905 1.00103.16 C ATOM 7 CA LEU A 12 -10.023 -2.071 -17.056 1.00230.80 C ATOM 8 CA ALA A 13 -7.313 -1.820 -14.420 1.00141.04 C """<line_sep>pdb_file="tst_validate_restraints_calpha.pdb"<line_sep>open(pdb_file "w").write(pdb_raw)<line_sep>v1=run_validation(pdb_file ignore_hd=<true>)<block_end><if_stmt>(__name__<eq>"__main__")<block_start>exercise_simple()<line_sep>print("OK")<block_end>
<import_stmt>os<import_stmt>shutil<import_stmt>sys<import_stmt>tarfile<def_stmt>include_package envoy_api_protos rst_file_path prefix# `envoy_api_rst_files` is a list of file paths for .proto.rst files # generated by protodoc # # we are only interested in the proto files generated for envoy protos, # not for non-envoy dependencies <block_start><if_stmt>("pkg/"+prefix)<not><in>rst_file_path<block_start><return><none><block_end># derive the "canonical" path from the filepath canonical=f"{rst_file_path.split('pkg/'+prefix)[1]}"<line_sep># we are only interested in the actual v3 protos, not their dependencies <if_stmt>(prefix+canonical)<not><in>envoy_api_protos<block_start><return><none><block_end><return>canonical<block_end><def_stmt>main <block_start>proto_srcs=sys.argv[1]<line_sep>envoy_api_rst_files=sys.argv[1:-1]<line_sep>output_filename=sys.argv[-1]<with_stmt>open(proto_srcs)<as>f# the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # # @envoy_api//envoy/watchdog/v3:abort_action.proto # # this transforms them to a list with a "canonical" form of: # # envoy/watchdog/v3/abort_action.proto.rst # <block_start>envoy_api_protos=[f"{src.split('//')[1].replace(':' '/')}.rst"<for>src f.read().split("\n")<if>src]<block_end><for_stmt>rst_file_path envoy_api_rst_files<block_start>canonical=include_package(envoy_api_protos rst_file_path "envoy/")<if_stmt>canonical<is><none><block_start>canonical=include_package(envoy_api_protos rst_file_path "contrib/envoy/")<block_end><if_stmt>canonical<is><none><block_start><continue><block_end>target=os.path.join("rst-out/api-v3" canonical)<if_stmt><not>os.path.exists(os.path.dirname(target))<block_start>os.makedirs(os.path.dirname(target))<block_end>shutil.copy(rst_file_path target)<block_end># output the generated rst files to a tarfile for consumption # by other bazel rules <with_stmt>tarfile.open(output_filename "w")<as>tar<block_start>tar.add("rst-out" arcname=".")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
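# A hedged sketch of how the script above is typically driven (normally by a Bazel rule):
# the first argument is the genquery output listing @envoy_api//... proto targets, the
# following arguments are the protodoc-generated .proto.rst files, and the last argument
# is the output tarball. The script and file names below are placeholders.
#
#   python generate_api_rst.py proto_srcs.txt \
#       bazel-bin/pkg/envoy/watchdog/v3/abort_action.proto.rst \
#       bazel-bin/pkg/contrib/envoy/extensions/filters/http/example/v3/example.proto.rst \
#       api_rst.tar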
<import_from_stmt>mock.mock patch<import_stmt>os<import_stmt>pytest<import_stmt>ca_test_common<import_stmt>ceph_volume_simple_activate<line_sep>fake_cluster='ceph'<line_sep>fake_container_binary='podman'<line_sep>fake_container_image='quay.ceph.io/ceph/daemon:latest'<line_sep>fake_id='42'<line_sep>fake_uuid='0c4a7eca-0c2a-4c12-beff-08a80f064c52'<line_sep>fake_path='/etc/ceph/osd/{}-{}.json'.format(fake_id fake_uuid)<class_stmt>TestCephVolumeSimpleActivateModule(object)<block_start>@patch('ansible.module_utils.basic.AnsibleModule.exit_json')<def_stmt>test_with_check_mode self m_exit_json<block_start>ca_test_common.set_module_args({'osd_id':fake_id 'osd_fsid':fake_uuid '_ansible_check_mode':<true>})<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt><not>result['changed']<assert_stmt>result['cmd']<eq>['ceph-volume' '--cluster' fake_cluster 'simple' 'activate' fake_id fake_uuid]<assert_stmt>result['rc']<eq>0<assert_stmt><not>result['stdout']<assert_stmt><not>result['stderr']<block_end>@patch('ansible.module_utils.basic.AnsibleModule.exit_json')@patch('ansible.module_utils.basic.AnsibleModule.run_command')<def_stmt>test_with_failure self m_run_command m_exit_json<block_start>ca_test_common.set_module_args({'osd_id':fake_id 'osd_fsid':fake_uuid})<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<line_sep>stdout=''<line_sep>stderr='error'<line_sep>rc=2<line_sep>m_run_command.return_value=rc stdout stderr<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['changed']<assert_stmt>result['cmd']<eq>['ceph-volume' '--cluster' fake_cluster 'simple' 'activate' fake_id fake_uuid]<assert_stmt>result['rc']<eq>rc<assert_stmt>result['stderr']<eq>stderr<block_end>@patch('ansible.module_utils.basic.AnsibleModule.exit_json')@patch('ansible.module_utils.basic.AnsibleModule.run_command')<def_stmt>test_activate_all_osds self m_run_command m_exit_json<block_start>ca_test_common.set_module_args({'osd_all':<true>})<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<line_sep>stdout=''<line_sep>stderr=''<line_sep>rc=0<line_sep>m_run_command.return_value=rc stdout stderr<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['changed']<assert_stmt>result['cmd']<eq>['ceph-volume' '--cluster' fake_cluster 'simple' 'activate' '--all']<assert_stmt>result['rc']<eq>rc<assert_stmt>result['stderr']<eq>stderr<assert_stmt>result['stdout']<eq>stdout<block_end>@patch.object(os.path 'exists' return_value=<true>)@patch('ansible.module_utils.basic.AnsibleModule.exit_json')@patch('ansible.module_utils.basic.AnsibleModule.run_command')<def_stmt>test_activate_path_exists self m_run_command m_exit_json m_os_path<block_start>ca_test_common.set_module_args({'path':fake_path})<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<line_sep>stdout=''<line_sep>stderr=''<line_sep>rc=0<line_sep>m_run_command.return_value=rc stdout stderr<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['changed']<assert_stmt>result['cmd']<eq>['ceph-volume' '--cluster' fake_cluster 'simple' 'activate' '--file' 
fake_path]<assert_stmt>result['rc']<eq>rc<assert_stmt>result['stderr']<eq>stderr<assert_stmt>result['stdout']<eq>stdout<block_end>@patch.object(os.path 'exists' return_value=<false>)@patch('ansible.module_utils.basic.AnsibleModule.fail_json')<def_stmt>test_activate_path_not_exists self m_fail_json m_os_path<block_start>ca_test_common.set_module_args({'path':fake_path})<line_sep>m_fail_json.side_effect=ca_test_common.fail_json<with_stmt>pytest.raises(ca_test_common.AnsibleFailJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['msg']<eq>'{} does not exist'.format(fake_path)<assert_stmt>result['rc']<eq>1<block_end>@patch('ansible.module_utils.basic.AnsibleModule.exit_json')@patch('ansible.module_utils.basic.AnsibleModule.run_command')<def_stmt>test_activate_without_systemd self m_run_command m_exit_json<block_start>ca_test_common.set_module_args({'osd_id':fake_id 'osd_fsid':fake_uuid 'systemd':<false>})<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<line_sep>stdout=''<line_sep>stderr=''<line_sep>rc=0<line_sep>m_run_command.return_value=rc stdout stderr<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['changed']<assert_stmt>result['cmd']<eq>['ceph-volume' '--cluster' fake_cluster 'simple' 'activate' fake_id fake_uuid '--no-systemd']<assert_stmt>result['rc']<eq>rc<assert_stmt>result['stderr']<eq>stderr<assert_stmt>result['stdout']<eq>stdout<block_end>@patch.dict(os.environ {'CEPH_CONTAINER_BINARY':fake_container_binary})@patch.dict(os.environ {'CEPH_CONTAINER_IMAGE':fake_container_image})@patch('ansible.module_utils.basic.AnsibleModule.exit_json')@patch('ansible.module_utils.basic.AnsibleModule.run_command')<def_stmt>test_activate_with_container self m_run_command m_exit_json<block_start>ca_test_common.set_module_args({'osd_id':fake_id 'osd_fsid':fake_uuid })<line_sep>m_exit_json.side_effect=ca_test_common.exit_json<line_sep>stdout=''<line_sep>stderr=''<line_sep>rc=0<line_sep>m_run_command.return_value=rc stdout stderr<with_stmt>pytest.raises(ca_test_common.AnsibleExitJson)<as>result<block_start>ceph_volume_simple_activate.main()<block_end>result=result.value.args[0]<assert_stmt>result['changed']<assert_stmt>result['cmd']<eq>[fake_container_binary 'run' '--rm' '--privileged' '--ipc=host' '--net=host' '-v' '/etc/ceph:/etc/ceph:z' '-v' '/var/lib/ceph/:/var/lib/ceph/:z' '-v' '/var/log/ceph/:/var/log/ceph/:z' '-v' '/run/lvm/:/run/lvm/' '-v' '/run/lock/lvm/:/run/lock/lvm/' '--entrypoint=ceph-volume' fake_container_image '--cluster' fake_cluster 'simple' 'activate' fake_id fake_uuid]<assert_stmt>result['rc']<eq>rc<assert_stmt>result['stderr']<eq>stderr<assert_stmt>result['stdout']<eq>stdout<block_end><block_end>
# @copyright@ # Copyright (c) 2006 - 2019 Teradata # All rights reserved. Stacki(r) v5.x stacki.com # https://github.com/Teradata/stacki/blob/master/LICENSE.txt # @copyright@ # # @rocks@ # Copyright (c) 2000 - 2010 The Regents of the University of California # All rights reserved. Rocks(r) v5.4 www.rocksclusters.org # https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt # @rocks@ <import_stmt>stack.commands<class_stmt>Command(stack.commands.set.firmware.command)<block_start>""" Associates a firmware implementation with one or more models. <arg type='string' name='models'> One or more models to associate the implementation with. </arg> <param type='string' name='imp'> The name of the implementation to associate with the provided models. </param> <param type='string' name='make'> The make of the models. </param> <example cmd="set firmware model imp m7800 m6036 imp=mellanox_6xxx_7xxx make=mellanox"> Sets the mellanox_6xxx_7xxx implementation as the one to run for the models m7800 and m6036 for make mellanox. </example> """<def_stmt>run self params args<block_start>self.runPlugins(args=(params args))<block_end><block_end>
<try_stmt><block_start><import_from_stmt>torch.hub load_state_dict_from_url<block_end><except_stmt>ImportError<block_start><import_from_stmt>torch.utils.model_zoo load_url<as>load_state_dict_from_url<block_end>
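# A small usage sketch for the import fallback above: whichever import succeeded, the
# function downloads (and caches) a checkpoint from a URL and returns its state dict.
# The URL below is a placeholder, not a real checkpoint.
state_dict = load_state_dict_from_url(
    'https://example.com/checkpoints/model-0123abcd.pth',
    progress=True,        # show a download progress bar
    map_location='cpu',   # load tensors onto the CPU regardless of where they were saved
)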
<import_stmt>os<import_stmt>shutil<import_from_stmt>tempfile mkdtemp<import_stmt>pytest<import_stmt>numpy<import_stmt>py.path<as>pp<line_sep>NIPYPE_DATADIR=os.path.realpath(os.path.join(os.path.dirname(__file__) "testing/data"))<line_sep>temp_folder=mkdtemp()<line_sep>data_dir=os.path.join(temp_folder "data")<line_sep>shutil.copytree(NIPYPE_DATADIR data_dir)<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>add_np doctest_namespace<block_start>doctest_namespace["np"]=numpy<line_sep>doctest_namespace["os"]=os<line_sep>doctest_namespace["pytest"]=pytest<line_sep>doctest_namespace["datadir"]=data_dir<block_end>@pytest.fixture(autouse=<true>)<def_stmt>_docdir request<block_start>"""Grabbed from https://stackoverflow.com/a/46991331"""<line_sep># Trigger ONLY for the doctests. doctest_plugin=request.config.pluginmanager.getplugin("doctest")<if_stmt>isinstance(request.node doctest_plugin.DoctestItem)# Get the fixture dynamically by its name. <block_start>tmpdir=pp.local(data_dir)<line_sep># Chdir only for the duration of the test. <with_stmt>tmpdir.as_cwd()<block_start><yield><block_end><block_end><else_stmt># For normal tests, we have to yield, since this is a yield-fixture. <block_start><yield><block_end><block_end><def_stmt>pytest_unconfigure config# Delete temp folder after session is finished <block_start>shutil.rmtree(temp_folder)<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making GameAISDK available. This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """<import_from_stmt>PyQt5.QtCore Qt<import_from_stmt>PyQt5.QtWidgets QWidget QProgressDialog<class_stmt>ProgressBarDialog(QWidget)<block_start><def_stmt>__init__ self title='' label='' minValue=0 maxValue=100 parent=<none><block_start>super(ProgressBarDialog self).__init__(parent)<line_sep>self.process_bar=QProgressDialog(self)<line_sep>self.set_bar_window_title(title)<line_sep>self.set_label_text(label)<line_sep>self.set_min_value(minValue)<line_sep>self.set_max_value(maxValue)<line_sep>self.process_bar.setWindowModality(Qt.WindowModal)<line_sep>self.setGeometry(800 300 580 570)<line_sep>self.process_bar.canceled.connect(self.close_bar)<block_end><def_stmt>set_bar_window_title self text<block_start>self.process_bar.setWindowTitle(text)<line_sep>self.setWindowTitle(text)<block_end><def_stmt>set_label_text self text<block_start>self.process_bar.setLabelText(text)<block_end><def_stmt>set_min_value self minValue<block_start>self.process_bar.setMinimum(minValue)<block_end><def_stmt>set_max_value self maxvalue<block_start>self.process_bar.setMaximum(maxvalue)<block_end><def_stmt>set_value self value<block_start>self.process_bar.setValue(value)<block_end><def_stmt>close_bar self<block_start>self.process_bar.close()<block_end><def_stmt>reset_bar self<block_start>self.process_bar=<none><block_end><def_stmt>show self<block_start>self.process_bar.show()<block_end><def_stmt>is_valid self<block_start><return>bool(self.process_bar)<block_end><block_end>
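# A minimal usage sketch for the dialog above, assuming a QApplication is running and
# that ProgressBarDialog is importable from this module; the strings are placeholders.
import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
dialog = ProgressBarDialog(title='Processing', label='Working...', minValue=0, maxValue=100)
dialog.show()
for value in range(0, 101, 10):
    dialog.set_value(value)   # advance the bar; pressing Cancel triggers close_bar() via the signal
dialog.close_bar()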
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>ExternalMaster(object)<block_start>""" An external master name server used as the source of zone data. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new ExternalMaster object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param address: The value to assign to the address property of this ExternalMaster. :type address: str :param port: The value to assign to the port property of this ExternalMaster. :type port: int :param tsig_key_id: The value to assign to the tsig_key_id property of this ExternalMaster. :type tsig_key_id: str """<line_sep>self.swagger_types={'address':'str' 'port':'int' 'tsig_key_id':'str'}<line_sep>self.attribute_map={'address':'address' 'port':'port' 'tsig_key_id':'tsigKeyId'}<line_sep>self._address=<none><line_sep>self._port=<none><line_sep>self._tsig_key_id=<none><block_end>@property<def_stmt>address self<block_start>""" **[Required]** Gets the address of this ExternalMaster. The server's IP address (IPv4 or IPv6). :return: The address of this ExternalMaster. :rtype: str """<line_sep><return>self._address<block_end>@address.setter<def_stmt>address self address<block_start>""" Sets the address of this ExternalMaster. The server's IP address (IPv4 or IPv6). :param address: The address of this ExternalMaster. :type: str """<line_sep>self._address=address<block_end>@property<def_stmt>port self<block_start>""" Gets the port of this ExternalMaster. The server's port. Port value must be a value of 53, otherwise omit the port value. :return: The port of this ExternalMaster. :rtype: int """<line_sep><return>self._port<block_end>@port.setter<def_stmt>port self port<block_start>""" Sets the port of this ExternalMaster. The server's port. Port value must be a value of 53, otherwise omit the port value. :param port: The port of this ExternalMaster. :type: int """<line_sep>self._port=port<block_end>@property<def_stmt>tsig_key_id self<block_start>""" Gets the tsig_key_id of this ExternalMaster. The OCID of the TSIG key. :return: The tsig_key_id of this ExternalMaster. :rtype: str """<line_sep><return>self._tsig_key_id<block_end>@tsig_key_id.setter<def_stmt>tsig_key_id self tsig_key_id<block_start>""" Sets the tsig_key_id of this ExternalMaster. The OCID of the TSIG key. :param tsig_key_id: The tsig_key_id of this ExternalMaster. :type: str """<line_sep>self._tsig_key_id=tsig_key_id<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for hparam."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>six<import_from_stmt>tensorflow.contrib.training.python.training hparam<import_from_stmt>tensorflow.python.platform test<class_stmt>HParamsTest(test.TestCase)<block_start><def_stmt>_assertDictEquals self d1 d2<block_start>self.assertEqual(len(d1) len(d2))<for_stmt>k,v six.iteritems(d1)<block_start>self.assertTrue(k<in>d2 k)<line_sep>self.assertEquals(v d2[k] d2[k])<block_end><block_end><def_stmt>testEmpty self<block_start>hparams=hparam.HParams()<line_sep>self._assertDictEquals({} hparams.values())<line_sep>hparams.parse('')<line_sep>self._assertDictEquals({} hparams.values())<with_stmt>self.assertRaisesRegexp(ValueError 'Unknown hyperparameter')<block_start>hparams.parse('xyz=123')<block_end><block_end><def_stmt>testSomeValues self<block_start>hparams=hparam.HParams(aaa=1 b=2.0 c_c='relu6')<line_sep>self._assertDictEquals({'aaa':1 'b':2.0 'c_c':'relu6'} hparams.values())<line_sep>expected_str='[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'<line_sep>self.assertEquals(expected_str str(hparams.__str__()))<line_sep>self.assertEquals(expected_str str(hparams))<line_sep>self.assertEquals(1 hparams.aaa)<line_sep>self.assertEquals(2.0 hparams.b)<line_sep>self.assertEquals('relu6' hparams.c_c)<line_sep>hparams.parse('aaa=12')<line_sep>self._assertDictEquals({'aaa':12 'b':2.0 'c_c':'relu6'} hparams.values())<line_sep>self.assertEquals(12 hparams.aaa)<line_sep>self.assertEquals(2.0 hparams.b)<line_sep>self.assertEquals('relu6' hparams.c_c)<line_sep>hparams.parse('c_c=relu4,b=-2.0e10')<line_sep>self._assertDictEquals({'aaa':12 'b':-2.0e10 'c_c':'relu4'} hparams.values())<line_sep>self.assertEquals(12 hparams.aaa)<line_sep>self.assertEquals(-2.0e10 hparams.b)<line_sep>self.assertEquals('relu4' hparams.c_c)<line_sep>hparams.parse('c_c=,b=0,')<line_sep>self._assertDictEquals({'aaa':12 'b':0 'c_c':''} hparams.values())<line_sep>self.assertEquals(12 hparams.aaa)<line_sep>self.assertEquals(0.0 hparams.b)<line_sep>self.assertEquals('' hparams.c_c)<line_sep>hparams.parse('c_c=2.3",b=+2,')<line_sep>self.assertEquals(2.0 hparams.b)<line_sep>self.assertEquals('2.3"' hparams.c_c)<with_stmt>self.assertRaisesRegexp(ValueError 'Unknown hyperparameter')<block_start>hparams.parse('x=123')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('aaa=poipoi')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('aaa=1.0')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('b=12x')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('b=relu')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Must not 
pass a list')<block_start>hparams.parse('aaa=[123]')<block_end>self.assertEquals(12 hparams.aaa)<line_sep>self.assertEquals(2.0 hparams.b)<line_sep>self.assertEquals('2.3"' hparams.c_c)<line_sep># Exports to proto. hparam_def=hparams.to_proto()<line_sep># Imports from proto. hparams2=hparam.HParams(hparam_def=hparam_def)<line_sep># Verifies that all hparams are restored. self.assertEquals(12 hparams2.aaa)<line_sep>self.assertEquals(2.0 hparams2.b)<line_sep>self.assertEquals('2.3"' hparams2.c_c)<block_end><def_stmt>testBoolParsing self<block_start><for_stmt>value 'true' 'false' 'True' 'False' '1' '0'<block_start><for_stmt>initial <false> <true><block_start>hparams=hparam.HParams(use_gpu=initial)<line_sep>hparams.parse('use_gpu='+value)<line_sep>self.assertEqual(hparams.use_gpu value<in>['True' 'true' '1'])<line_sep># Exports to proto. hparam_def=hparams.to_proto()<line_sep># Imports from proto. hparams2=hparam.HParams(hparam_def=hparam_def)<line_sep>self.assertEquals(hparams.use_gpu hparams2.use_gpu)<line_sep># Check that hparams2.use_gpu is a bool rather than an int. # The assertEquals() call above won't catch this, since # (0 == False) and (1 == True) in Python. self.assertEquals(bool type(hparams2.use_gpu))<block_end><block_end><block_end><def_stmt>testBoolParsingFail self<block_start>hparams=hparam.HParams(use_gpu=<true>)<with_stmt>self.assertRaisesRegexp(ValueError r'Could not parse.*use_gpu')<block_start>hparams.parse('use_gpu=yep')<block_end><block_end><def_stmt>testLists self<block_start>hparams=hparam.HParams(aaa=[1] b=[2.0 3.0] c_c=['relu6'])<line_sep>self._assertDictEquals({'aaa':[1] 'b':[2.0 3.0] 'c_c':['relu6']} hparams.values())<line_sep>self.assertEquals([1] hparams.aaa)<line_sep>self.assertEquals([2.0 3.0] hparams.b)<line_sep>self.assertEquals(['relu6'] hparams.c_c)<line_sep>hparams.parse('aaa=[12]')<line_sep>self.assertEquals([12] hparams.aaa)<line_sep>hparams.parse('aaa=[12,34,56]')<line_sep>self.assertEquals([12 34 56] hparams.aaa)<line_sep>hparams.parse('c_c=[relu4,relu12],b=[1.0]')<line_sep>self.assertEquals(['relu4' 'relu12'] hparams.c_c)<line_sep>self.assertEquals([1.0] hparams.b)<line_sep>hparams.parse('c_c=[],aaa=[-34]')<line_sep>self.assertEquals([-34] hparams.aaa)<line_sep>self.assertEquals([] hparams.c_c)<line_sep>hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')<line_sep>self.assertEquals([3] hparams.aaa)<line_sep>self.assertEquals(['_12' '3\'4"'] hparams.c_c)<with_stmt>self.assertRaisesRegexp(ValueError 'Unknown hyperparameter')<block_start>hparams.parse('x=[123]')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('aaa=[poipoi]')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('aaa=[1.0]')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('b=[12x]')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Could not parse')<block_start>hparams.parse('b=[relu]')<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Must pass a list')<block_start>hparams.parse('aaa=123')<block_end># Exports to proto. hparam_def=hparams.to_proto()<line_sep># Imports from proto. hparams2=hparam.HParams(hparam_def=hparam_def)<line_sep># Verifies that all hparams are restored. 
self.assertEquals([3] hparams2.aaa)<line_sep>self.assertEquals([1.0] hparams2.b)<line_sep>self.assertEquals(['_12' '3\'4"'] hparams2.c_c)<block_end><def_stmt>testJson self<block_start>hparams=hparam.HParams(aaa=1 b=2.0 c_c='relu6' d=<true>)<line_sep>self._assertDictEquals({'aaa':1 'b':2.0 'c_c':'relu6' 'd':<true>} hparams.values())<line_sep>self.assertEquals(1 hparams.aaa)<line_sep>self.assertEquals(2.0 hparams.b)<line_sep>self.assertEquals('relu6' hparams.c_c)<line_sep>hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')<line_sep>self._assertDictEquals({'aaa':12 'b':3.0 'c_c':'relu4' 'd':<false>} hparams.values())<line_sep>self.assertEquals(12 hparams.aaa)<line_sep>self.assertEquals(3.0 hparams.b)<line_sep>self.assertEquals('relu4' hparams.c_c)<line_sep>json_str=hparams.to_json()<line_sep>hparams2=hparam.HParams(aaa=10 b=20.0 c_c='hello' d=<false>)<line_sep>hparams2.parse_json(json_str)<line_sep>self.assertEquals(12 hparams2.aaa)<line_sep>self.assertEquals(3.0 hparams2.b)<line_sep>self.assertEquals('relu4' hparams2.c_c)<line_sep>self.assertEquals(<false> hparams2.d)<block_end><def_stmt>testNonProtoFails self<block_start><with_stmt>self.assertRaisesRegexp(AssertionError '')<block_start>hparam.HParams(hparam_def=1)<block_end><with_stmt>self.assertRaisesRegexp(AssertionError '')<block_start>hparam.HParams(hparam_def=1.0)<block_end><with_stmt>self.assertRaisesRegexp(AssertionError '')<block_start>hparam.HParams(hparam_def='hello')<block_end><with_stmt>self.assertRaisesRegexp(AssertionError '')<block_start>hparam.HParams(hparam_def=[1 2 3])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test.main()<block_end>
<import_from_stmt>onnxsim.onnx_simplifier simplify<line_sep>__version__='0.0.0'<line_sep>
"""Simple water flow example using ANUGA Water driven up a linear slope and time varying boundary, similar to a beach environment """<line_sep>#------------------------------------------------------------------------------ # Import necessary modules #------------------------------------------------------------------------------ <import_stmt>sys<import_stmt>anuga<import_from_stmt>anuga myid finalize distribute<import_from_stmt>anuga Domain<as>Domain<import_from_stmt>math cos<import_from_stmt>numpy zeros ones array interp polyval ones_like zeros_like<import_from_stmt>numpy where logical_and<import_from_stmt>time localtime strftime gmtime<import_from_stmt>scipy.interpolate interp1d<import_from_stmt>anuga.geometry.polygon inside_polygon is_inside_triangle<line_sep>#from balanced_dev import * #------------------------------------------------------------------------------- # Copy scripts to time stamped output directory and capture screen # output to file #------------------------------------------------------------------------------- time=strftime('%Y%m%d_%H%M%S' localtime())<line_sep>#output_dir = 'varying_width'+time output_dir='.'<line_sep>output_file='varying_width'<line_sep>#anuga.copy_code_files(output_dir,__file__) #start_screen_catcher(output_dir+'_') args=anuga.get_args()<line_sep>alg=args.alg<line_sep>verbose=args.verbose<line_sep>#------------------------------------------------------------------------------ # Setup domain #------------------------------------------------------------------------------ dx=1.<line_sep>dy=dx<line_sep>L=1500.<line_sep>W=60.<line_sep>#=============================================================================== # Create sequential domain #=============================================================================== <if_stmt>myid<eq>0# structured mesh <block_start>points,vertices,boundary=anuga.rectangular_cross(int(L/dx) int(W/dy) L W (0. -W/2.))<line_sep>#domain = anuga.Domain(points, vertices, boundary) domain=Domain(points vertices boundary)<line_sep>domain.set_name(output_file)<line_sep>domain.set_datadir(output_dir)<line_sep>#------------------------------------------------------------------------------ # Setup Algorithm, either using command line arguments # or override manually yourself #------------------------------------------------------------------------------ domain.set_flow_algorithm(alg)<line_sep>#------------------------------------------------------------------------------ # Setup initial conditions #------------------------------------------------------------------------------ domain.set_quantity('friction' 0.0)<line_sep>domain.set_quantity('stage' 12.0)<line_sep>XX=array([0. 50. 100. 150. 250. 300. 350. 400. 425. 435. 450. 470. 475. 500. 505. 530. 550. 565. 575. 600. 650. 700. 750. 800. 820. 900. 950. 1000. 1500.])<line_sep>ZZ=array([0. 0. 2.5 5. 5. 3. 5. 5. 7.5 8. 9. 9. 9. 9.1 9. 9. 6. 5.5 5.5 5. 4. 3. 3. 2.3 2. 1.2 0.4 0. 0.])<line_sep>WW=array([40. 40. 30. 30. 30. 30. 25. 25. 30. 35. 35. 40. 40. 40. 45. 45. 50. 45. 40. 40. 30. 40. 40. 5. 40. 35. 25. 40. 
40.])/2.<line_sep>depth=interp1d(XX ZZ)<line_sep>width=interp1d(XX WW)<def_stmt>bed_elevation x y<block_start>z=25.0<times>ones_like(x)<line_sep>wid=width(x)<line_sep>dep=depth(x)<line_sep>z=where(logical_and(y<l>wid y<g>-wid) dep z)<line_sep><return>z<block_end>domain.set_quantity('elevation' bed_elevation)<block_end><else_stmt><block_start>domain=<none><block_end>#=========================================================================== # Create Parallel domain #=========================================================================== domain=distribute(domain)<line_sep>#----------------------------------------------------------------------------- # Setup boundary conditions #------------------------------------------------------------------------------ <import_from_stmt>math sin pi exp<line_sep>Br=anuga.Reflective_boundary(domain)# Solid reflective wall #Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary #Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values # Associate boundary tags with boundary objects domain.set_boundary({'left':Br 'right':Br 'top':Br 'bottom':Br})<line_sep>#------------------------------------------------------------------------------ # Produce a documentation of parameters #------------------------------------------------------------------------------ <if_stmt>myid<eq>0<block_start>parameter_file=open('parameters.tex' 'w')<line_sep>parameter_file.write('\\begin{verbatim}\n')<import_from_stmt>pprint pprint<line_sep>pprint(domain.get_algorithm_parameters() parameter_file indent=4)<line_sep>parameter_file.write('\\end{verbatim}\n')<line_sep>parameter_file.close()<block_end>#------------------------------------------------------------------------------ # Evolve system through time #------------------------------------------------------------------------------ <import_stmt>time<line_sep>t0=time.time()<for_stmt>t domain.evolve(yieldstep=0.1 finaltime=5.0)#print(domain.timestepping_statistics(track_speeds=True)) <block_start><if_stmt>myid<eq>0<and>verbose<block_start>print(domain.timestepping_statistics())<block_end>#vis.update() <block_end><if_stmt>myid<eq>0<and>verbose<block_start>print('That took %s sec'%str(time.time()-t0))<block_end>domain.sww_merge(delete_old=<true>)<line_sep>finalize()<line_sep>
<import_from_stmt>pyxtal.molecule *<import_from_stmt>ase.build molecule<import_from_stmt>pymatgen.core Molecule<def_stmt>get_ase_mol molname<block_start>"""convert ase molecule to pymatgen style"""<line_sep>ase_mol=molecule(molname)<line_sep>pos=ase_mol.get_positions()<line_sep>symbols=ase_mol.get_chemical_symbols()<line_sep><return>Molecule(symbols pos)<block_end><if_stmt>__name__<eq>"__main__"# --------------------------------------------------- <block_start><for_stmt>name ["H2" "H2O" "HCl" "CS2" "C2Cl4" "PH3" "CH4" "C6H6" "C60"]<block_start>mol=get_ase_mol(name)<line_sep>pga=PointGroupAnalyzer(mol)<line_sep># Symmetrize the molecule using pymatgen mol=pga.symmetrize_molecule()["sym_mol"]<line_sep>pga=PointGroupAnalyzer(mol)<line_sep>print(name " has point group symmetry: " pga.get_pointgroup())<line_sep># Check if orders of rotation are detected correctly pg=pga.get_pointgroup()<for_stmt>op pg<block_start>opa=OperationAnalyzer(op)<if_stmt>opa.order<eq>"irrational"<block_start>print(opa)<block_end><elif_stmt>opa.order<g>10<block_start>print(opa)<block_end><block_end># orientation_in_wyckoff_position(mol, sg, WP's index in sg) # returns a list of orientations consistent with the WP's symmetry. # We can choose any of these orientations at random using np.random.choice # To use an orientation, do mol.apply_operation(orientation) # Spacegroup 16, index 6 has .2. symmetry # check 2 fold rotation allowed=orientation_in_wyckoff_position(mol 16 6 randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm 2" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end># check reflection allowed=orientation_in_wyckoff_position(mol 25 2 randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm m" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end># check 3 fold rotation allowed=orientation_in_wyckoff_position(mol 147 4 randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm 3" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end># check -1 allowed=orientation_in_wyckoff_position(mol 2 2 randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm -1" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end># check 2/m allowed=orientation_in_wyckoff_position(mol 64 6 randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm 2/m" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end># check 6 allowed=orientation_in_wyckoff_position(mol 168 3 
randomize=<true>)<if_stmt>allowed<is><not><false><block_start>print("Found "+str(len(allowed))+" orientations for " name " with site symm 6" )<for_stmt>i,op enumerate(allowed)<block_start>mo=deepcopy(mol)<line_sep>mo.apply_operation(op)<line_sep>filename="xyz/"+name+"-"+str(i)+".xyz"<line_sep>mo.to(fmt="xyz" filename=filename)<block_end><block_end><block_end><block_end>
<import_from_stmt>time sleep<import_stmt>xpc<def_stmt>ex <block_start>print("X-Plane Connect example script")<line_sep>print("Setting up simulation")<with_stmt>xpc.XPlaneConnect()<as>client# Verify connection <block_start><try_stmt># If X-Plane does not respond to the request, a timeout error # will be raised. <block_start>client.getDREF("sim/test/test_float")<block_end><except_stmt><block_start>print("Error establishing connection to X-Plane.")<line_sep>print("Exiting...")<line_sep><return><block_end># Set position of the player aircraft print("Setting position")<line_sep># Lat Lon Alt Pitch Roll Yaw Gear posi=[37.524 -122.06899 2500 0 0 0 1]<line_sep>client.sendPOSI(posi)<line_sep># Set position of a non-player aircraft print("Setting NPC position")<line_sep># Lat Lon Alt Pitch Roll Yaw Gear posi=[37.52465 -122.06899 2500 0 20 0 1]<line_sep>client.sendPOSI(posi 1)<line_sep># Set angle of attack, velocity, and orientation using the DATA command print("Setting orientation")<line_sep>data=[[18 0 -998 0 -998 -998 -998 -998 -998] [3 130 130 130 130 -998 -998 -998 -998] [16 0 0 0 -998 -998 -998 -998 -998]]<line_sep>client.sendDATA(data)<line_sep># Set control surfaces and throttle of the player aircraft using sendCTRL print("Setting controls")<line_sep>ctrl=[0.0 0.0 0.0 0.8]<line_sep>client.sendCTRL(ctrl)<line_sep># Pause the sim print("Pausing")<line_sep>client.pauseSim(<true>)<line_sep>sleep(2)<line_sep># Toggle pause state to resume print("Resuming")<line_sep>client.pauseSim(<false>)<line_sep># Stow landing gear using a dataref print("Stowing gear")<line_sep>gear_dref="sim/cockpit/switches/gear_handle_status"<line_sep>client.sendDREF(gear_dref 0)<line_sep># Let the sim run for a bit. sleep(4)<line_sep># Make sure gear was stowed successfully gear_status=client.getDREF(gear_dref)<if_stmt>gear_status[0]<eq>0<block_start>print("Gear stowed")<block_end><else_stmt><block_start>print("Error stowing gear")<block_end>print("End of Python client example")<line_sep>input("Press any key to exit...")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>ex()<block_end>
<import_from_stmt>cytoolz.functoolz curry <import_from_stmt>eth_utils to_dict to_tuple <line_sep>@curry@to_dict<def_stmt>normalize_dict value normalizers<block_start><for_stmt>key,item value.items()<block_start>normalizer=normalizers[key]<line_sep><yield>key normalizer(item)<block_end><block_end>@curry@to_tuple<def_stmt>normalize_array value normalizer<block_start>""" This is just `map` but it's nice to have it return a consistent type (tuple). """<for_stmt>item value<block_start><yield>normalizer(item)<block_end><block_end>@curry<def_stmt>normalize_if value conditional_fn normalizer<block_start><if_stmt>conditional_fn(value)<block_start><return>normalizer(value)<block_end><else_stmt><block_start><return>value<block_end><block_end>
<import_stmt>os<import_stmt>shutil<import_stmt>unittest<import_from_stmt>base64 b64encode<import_from_stmt>sonLib.bioio TestStatus<import_from_stmt>sonLib.bioio getTempFile<import_from_stmt>sonLib.bioio getTempDirectory<import_from_stmt>sonLib.bioio system<import_from_stmt>toil.job Job<import_from_stmt>toil.common Toil<import_from_stmt>cactus.shared.common cactus_call ChildTreeJob<class_stmt>TestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.testNo=TestStatus.getTestSetup(1 5 10 100)<line_sep>self.tempDir=getTempDirectory(os.getcwd())<line_sep>self.tempFiles=[]<line_sep>unittest.TestCase.setUp(self)<block_end><def_stmt>tearDown self<block_start>unittest.TestCase.tearDown(self)<line_sep>system("rm -rf %s"%self.tempDir)<block_end>@TestStatus.shortLength<def_stmt>testCactusCall self<block_start>inputFile=getTempFile(rootDir=self.tempDir)<with_stmt>open("/dev/urandom" "rb")<as>randText<block_start><with_stmt>open(inputFile 'w')<as>fh<block_start>fh.write(b64encode(randText.read(1024)).decode())<block_end><block_end><with_stmt>open(inputFile)<as>fh<block_start>input="".join(fh.read().split("\n"))<block_end>#Send input to container's stdin through a file, get output #from stdout output="".join(cactus_call(infile=inputFile check_output=<true> parameters=["docker_test_script"]).split("\n"))<line_sep>self.assertEqual(input output)<line_sep>#Send input as string, get output from stdout output="".join(cactus_call(stdin_string=input check_output=<true> parameters=["docker_test_script"]).split("\n"))<line_sep>self.assertEqual(input output)<block_end>@TestStatus.shortLength<def_stmt>testCactusCallPipes self<block_start>inputFile=getTempFile(rootDir=self.tempDir)<with_stmt>open(inputFile 'w')<as>f<block_start>f.write('foobar\n')<block_end># using 'cat' here rather than infile is intentional; it tests # whether the directory is mounted into containers correctly. output=cactus_call(parameters=[['cat' inputFile] ['sed' 's/foo/baz/g'] ['awk' '{ print "quux" $0 }']] check_output=<true>)<line_sep>self.assertEqual(output 'quuxbazbar\n')<block_end>@TestStatus.mediumLength<def_stmt>testChildTreeJob self<block_start>"""Check that the ChildTreeJob class runs all children."""<line_sep>numChildren=100<line_sep>flagDir=getTempDirectory()<line_sep>options=Job.Runner.getDefaultOptions(getTempDirectory())<line_sep>shutil.rmtree(options.jobStore)<with_stmt>Toil(options)<as>toil<block_start>toil.start(CTTestParent(flagDir numChildren))<block_end># Check that all jobs ran <for_stmt>i range(numChildren)<block_start>self.assertTrue(os.path.exists(os.path.join(flagDir str(i))))<block_end>shutil.rmtree(flagDir)<block_end><block_end><class_stmt>CTTestParent(ChildTreeJob)<block_start><def_stmt>__init__ self flagDir numChildren<block_start>self.flagDir=flagDir<line_sep>self.numChildren=numChildren<line_sep>super(CTTestParent self).__init__()<block_end><def_stmt>run self fileStore<block_start><for_stmt>i range(self.numChildren)<block_start>self.addChild(CTTestChild(self.flagDir i))<block_end><block_end><block_end><class_stmt>CTTestChild(Job)<block_start><def_stmt>__init__ self flagDir index<block_start>self.flagDir=flagDir<line_sep>self.index=index<line_sep>super(CTTestChild self).__init__()<block_end><def_stmt>run self fileStore# Mark that this job has run using a flag file <block_start>path=os.path.join(self.flagDir str(self.index))<with_stmt>open(path 'w')<as>f# Empty file <block_start>f.write('')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>json<class_stmt>TrainingSpecification<block_start>template=""" { "TrainingSpecification": { "TrainingImage": "IMAGE_REPLACE_ME", "SupportedHyperParameters": [ { "Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes", "Name": "max_leaf_nodes", "Type": "Integer", "Range": { "IntegerParameterRangeSpecification": { "MinValue": "1", "MaxValue": "100000" } }, "IsTunable": true, "IsRequired": false, "DefaultValue": "100" } ], "SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME, "SupportsDistributedTraining": false, "MetricDefinitions": METRICS_REPLACE_ME, "TrainingChannels": CHANNELS_REPLACE_ME, "SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME } } """<def_stmt>get_training_specification_dict self ecr_image supports_gpu supported_channels=<none> supported_metrics=<none> supported_tuning_job_objective_metrics=<none> <block_start><return>json.loads(self.get_training_specification_json(ecr_image supports_gpu supported_channels supported_metrics supported_tuning_job_objective_metrics ))<block_end><def_stmt>get_training_specification_json self ecr_image supports_gpu supported_channels=<none> supported_metrics=<none> supported_tuning_job_objective_metrics=<none> <block_start><if_stmt>supported_channels<is><none><block_start>print("Please provide at least one supported channel")<line_sep><raise>ValueError("Please provide at least one supported channel")<block_end><if_stmt>supported_metrics<is><none><block_start>supported_metrics=[]<block_end><if_stmt>supported_tuning_job_objective_metrics<is><none><block_start>supported_tuning_job_objective_metrics=[]<block_end><return>(self.template.replace("IMAGE_REPLACE_ME" ecr_image).replace("INSTANCES_REPLACE_ME" self.get_supported_instances(supports_gpu)).replace("CHANNELS_REPLACE_ME" json.dumps([ob.__dict__<for>ob supported_channels] indent=4 sort_keys=<true>) ).replace("METRICS_REPLACE_ME" json.dumps([ob.__dict__<for>ob supported_metrics] indent=4 sort_keys=<true>) ).replace("TUNING_OBJECTIVES_REPLACE_ME" json.dumps([ob.__dict__<for>ob supported_tuning_job_objective_metrics] indent=4 sort_keys=<true> ) ))<block_end>@staticmethod<def_stmt>get_supported_instances supports_gpu<block_start>cpu_list=["ml.m4.xlarge" "ml.m4.2xlarge" "ml.m4.4xlarge" "ml.m4.10xlarge" "ml.m4.16xlarge" "ml.m5.large" "ml.m5.xlarge" "ml.m5.2xlarge" "ml.m5.4xlarge" "ml.m5.12xlarge" "ml.m5.24xlarge" "ml.c4.xlarge" "ml.c4.2xlarge" "ml.c4.4xlarge" "ml.c4.8xlarge" "ml.c5.xlarge" "ml.c5.2xlarge" "ml.c5.4xlarge" "ml.c5.9xlarge" "ml.c5.18xlarge" ]<line_sep>gpu_list=["ml.p2.xlarge" "ml.p2.8xlarge" "ml.p2.16xlarge" "ml.p3.2xlarge" "ml.p3.8xlarge" "ml.p3.16xlarge" ]<line_sep>list_to_return=cpu_list<if_stmt>supports_gpu<block_start>list_to_return=cpu_list+gpu_list<block_end><return>json.dumps(list_to_return)<block_end><block_end>
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>subprocess<import_from_stmt>catapult_base cloud_storage<import_from_stmt>telemetry.core platform<import_from_stmt>telemetry.util image_util<import_from_stmt>telemetry.util rgba_color<line_sep>HIGHLIGHT_ORANGE_FRAME=rgba_color.WEB_PAGE_TEST_ORANGE<class_stmt>BoundingBoxNotFoundException(Exception)<block_start><pass><block_end><class_stmt>Video(object)<block_start>"""Utilities for storing and interacting with the video capture."""<def_stmt>__init__ self video_file_obj<block_start><assert_stmt>video_file_obj.delete<assert_stmt><not>video_file_obj.close_called<line_sep>self._video_file_obj=video_file_obj<line_sep>self._tab_contents_bounding_box=<none><block_end><def_stmt>UploadToCloudStorage self bucket target_path<block_start>"""Uploads video file to cloud storage.

 Args:
 target_path: Path indicating where to store the file in cloud storage.
 """<line_sep>cloud_storage.Insert(bucket target_path self._video_file_obj.name)<block_end><def_stmt>GetVideoFrameIter self<block_start>"""Returns the iteration for processing the video capture.

 This looks for the initial color flash in the first frame to establish the
 tab content boundaries and then omits all frames displaying the flash.

 Yields:
 (time_ms, image) tuples representing each video keyframe. Only the first
 frame in a run of sequential duplicate bitmaps is typically included.
 time_ms is milliseconds since navigationStart.
 image may be a telemetry.core.Bitmap, or a numpy array depending on
 whether numpy is installed.
 """<line_sep>frame_generator=self._FramesFromMp4(self._video_file_obj.name)<line_sep># Flip through frames until we find the initial tab contents flash.
 content_box=<none><for_stmt>_,bmp frame_generator<block_start>content_box=self._FindHighlightBoundingBox(bmp HIGHLIGHT_ORANGE_FRAME)<if_stmt>content_box<block_start><break><block_end><block_end><if_stmt><not>content_box<block_start><raise>BoundingBoxNotFoundException('Failed to identify tab contents in video capture.')<block_end># Flip through frames until the flash goes away and emit that as frame 0.
 timestamp=0<for_stmt>timestamp,bmp frame_generator<block_start><if_stmt><not>self._FindHighlightBoundingBox(bmp HIGHLIGHT_ORANGE_FRAME)<block_start><yield>0 image_util.Crop(bmp *content_box)<line_sep><break><block_end><block_end>start_time=timestamp<for_stmt>timestamp,bmp frame_generator<block_start><yield>timestamp-start_time image_util.Crop(bmp *content_box)<block_end><block_end><def_stmt>_FindHighlightBoundingBox self bmp color bounds_tolerance=8 color_tolerance=8<block_start>"""Returns the bounding box of the content highlight of the given color.

 Raises:
 BoundingBoxNotFoundException if the highlight could not be found.
 """<line_sep>content_box,pixel_count=image_util.GetBoundingBox(bmp color tolerance=color_tolerance)<if_stmt><not>content_box<block_start><return><none><block_end># We assume arbitrarily that tabs are all larger than 200x200. If this
 # fails it either means that assumption has changed or something is
 # awry with our bounding box calculation.
 <if_stmt>content_box[2]<l>200<or>content_box[3]<l>200<block_start><raise>BoundingBoxNotFoundException('Unexpectedly small tab contents.')<block_end># TODO(tonyg): Can this threshold be increased? 
<if_stmt>pixel_count<l>0.9<times>content_box[2]<times>content_box[3]<block_start><raise>BoundingBoxNotFoundException('Low count of pixels in tab contents matching expected color.')<block_end># Since we allow some fuzziness in bounding box finding, we want to make # sure that the bounds are always stable across a run. So we cache the # first box, whatever it may be. # # This relies on the assumption that since Telemetry doesn't know how to # resize the window, we should always get the same content box for a tab. # If this assumption changes, this caching needs to be reworked. <if_stmt><not>self._tab_contents_bounding_box<block_start>self._tab_contents_bounding_box=content_box<block_end># Verify that there is only minor variation in the bounding box. If it's # just a few pixels, we can assume it's due to compression artifacts. <for_stmt>x,y zip(self._tab_contents_bounding_box content_box)<block_start><if_stmt>abs(x-y)<g>bounds_tolerance# If this fails, it means either that either the above assumption has # changed or something is awry with our bounding box calculation. <block_start><raise>BoundingBoxNotFoundException('Unexpected change in tab contents box.')<block_end><block_end><return>self._tab_contents_bounding_box<block_end><def_stmt>_FramesFromMp4 self mp4_file<block_start>host_platform=platform.GetHostPlatform()<if_stmt><not>host_platform.CanLaunchApplication('avconv')<block_start>host_platform.InstallApplication('avconv')<block_end><def_stmt>GetDimensions video<block_start>proc=subprocess.Popen(['avconv' '-i' video] stderr=subprocess.PIPE)<line_sep>dimensions=<none><line_sep>output=''<for_stmt>line proc.stderr.readlines()<block_start>output<augadd>line<if_stmt>'Video:'<in>line<block_start>dimensions=line.split(',')[2]<line_sep>dimensions=map(int dimensions.split()[0].split('x'))<line_sep><break><block_end><block_end>proc.communicate()<assert_stmt>dimensions ('Failed to determine video dimensions. output=%s'%output)<line_sep><return>dimensions<block_end><def_stmt>GetFrameTimestampMs stderr<block_start>"""Returns the frame timestamp in integer milliseconds from the dump log. The expected line format is: ' dts=1.715 pts=1.715\n' We have to be careful to only read a single timestamp per call to avoid deadlock because avconv interleaves its writes to stdout and stderr. """<while_stmt><true><block_start>line=''<line_sep>next_char=''<while_stmt>next_char<ne>'\n'<block_start>next_char=stderr.read(1)<line_sep>line<augadd>next_char<block_end><if_stmt>'pts='<in>line<block_start><return>int(1000<times>float(line.split('=')[-1]))<block_end><block_end><block_end>dimensions=GetDimensions(mp4_file)<line_sep>frame_length=dimensions[0]<times>dimensions[1]<times>3<line_sep>frame_data=bytearray(frame_length)<line_sep># Use rawvideo so that we don't need any external library to parse frames. proc=subprocess.Popen(['avconv' '-i' mp4_file '-vcodec' 'rawvideo' '-pix_fmt' 'rgb24' '-dump' '-loglevel' 'debug' '-f' 'rawvideo' '-'] stderr=subprocess.PIPE stdout=subprocess.PIPE)<while_stmt><true><block_start>num_read=proc.stdout.readinto(frame_data)<if_stmt><not>num_read<block_start><raise>StopIteration<block_end><assert_stmt>num_read<eq>len(frame_data) 'Unexpected frame size: %d'%num_read<line_sep><yield>(GetFrameTimestampMs(proc.stderr) image_util.FromRGBPixels(dimensions[0] dimensions[1] frame_data))<block_end><block_end><block_end>
<import_from_stmt>adapters.adapter_with_battery AdapterWithBattery<import_from_stmt>devices.switch.selector_switch SelectorSwitch<class_stmt>HeimanAlarmRemoteAdapter(AdapterWithBattery)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.switch=SelectorSwitch('Remote' 'action')<line_sep>self.switch.add_level('Off' <none>)<line_sep>self.switch.add_level('Arm all zones' 'arm_all_zones')<line_sep>self.switch.add_level('Arm partial zones' 'arm_partial_zones')<line_sep>self.switch.add_level('Disarm' 'disarm')<line_sep>self.switch.add_level('Emergency' 'emergency')<line_sep>self.switch.set_selector_style(SelectorSwitch.SELECTOR_TYPE_MENU)<line_sep>self.switch.disable_value_check_on_update()<line_sep>self.devices.append(self.switch)<block_end><def_stmt>convert_message self message<block_start>message=super().convert_message(message)<line_sep><return>message<block_end><def_stmt>handleCommand self alias device device_data command level color<block_start>self.switch.handle_command(device_data command level color)<block_end><block_end>
<import_from_stmt>KratosMultiphysics ParallelEnvironment IsDistributedRun<if_stmt>IsDistributedRun()<block_start><import_from_stmt>KratosMultiphysics.mpi DataCommunicatorFactory<block_end><import_stmt>KratosMultiphysics.KratosUnittest<as>UnitTest<import_stmt>math<class_stmt>TestDataCommunicatorFactory(UnitTest.TestCase)<block_start><def_stmt>setUp self<block_start>self.registered_comms=[]<line_sep>self.default_data_communicator=ParallelEnvironment.GetDefaultDataCommunicator()<line_sep>self.original_default=ParallelEnvironment.GetDefaultDataCommunicatorName()<block_end><def_stmt>tearDown self<block_start><if_stmt>len(self.registered_comms)<g>0<block_start>ParallelEnvironment.SetDefaultDataCommunicator(self.original_default)<for_stmt>comm_name self.registered_comms<block_start>ParallelEnvironment.UnregisterDataCommunicator(comm_name)<block_end><block_end><block_end><def_stmt>markForCleanUp self comm_name<block_start>self.registered_comms.append(comm_name)<block_end>@UnitTest.skipUnless(IsDistributedRun() "Test is distributed.")<def_stmt>testDataCommunicatorDuplication self<block_start>duplicate_comm=DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator "Duplicate")<line_sep>self.markForCleanUp("Duplicate")# to clean up during tearDown self.assertEqual(duplicate_comm.Rank() self.default_data_communicator.Rank())<line_sep>self.assertEqual(duplicate_comm.Size() self.default_data_communicator.Size())<block_end>@UnitTest.skipUnless(IsDistributedRun() "Test is distributed.")<def_stmt>testDataCommunicatorSplit self<block_start>rank=self.default_data_communicator.Rank()<line_sep>size=self.default_data_communicator.Size()<line_sep>split_comm=DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator rank%2 0 "EvenOdd")<line_sep>self.markForCleanUp("EvenOdd")# to clean up during tearDown expected_rank=rank<floordiv>2<if_stmt>rank%2<eq>0<block_start>expected_size=math.ceil(size/2)<block_end><else_stmt><block_start>expected_size=math.floor(size/2)<block_end>self.assertEqual(split_comm.Rank() expected_rank)<line_sep>self.assertEqual(split_comm.Size() expected_size)<block_end>@UnitTest.skipUnless(IsDistributedRun()<and>ParallelEnvironment.GetDefaultSize()<g>1 "Test requires at least two ranks.")<def_stmt>testDataCommunicatorCreateFromRange self<block_start>rank=self.default_data_communicator.Rank()<line_sep>size=self.default_data_communicator.Size()<line_sep># Create a communicator using all ranks except the first ranks=[i<for>i range(1 size)]<line_sep>range_comm=DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator ranks "AllExceptFirst")<line_sep>self.markForCleanUp("AllExceptFirst")# to clean up during tearDown <if_stmt>rank<eq>0<block_start>self.assertTrue(range_comm.IsNullOnThisRank())<line_sep>self.assertFalse(range_comm.IsDefinedOnThisRank())<block_end><else_stmt><block_start>self.assertEqual(range_comm.Rank() rank-1)<line_sep>self.assertEqual(range_comm.Size() size-1)<block_end><block_end>@UnitTest.skipUnless(IsDistributedRun()<and>ParallelEnvironment.GetDefaultSize()<g>2 "Test requires at least three ranks.")<def_stmt>testDataCommunicatorCreateUnion self<block_start>rank=self.default_data_communicator.Rank()<line_sep>size=self.default_data_communicator.Size()<line_sep># Create a communicator using all ranks except the first all_except_first=DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator [i<for>i range(1 size)] "AllExceptFirst")<line_sep>self.markForCleanUp("AllExceptFirst")# to clean up during 
tearDown all_except_last=DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator [i<for>i range(0 size-1)] "AllExceptLast")<line_sep>self.markForCleanUp("AllExceptLast")# to clean up during tearDown # Create union communicator (should contain all ranks) union_comm=DataCommunicatorFactory.CreateUnionAndRegister(all_except_first all_except_last self.default_data_communicator "Union")<line_sep>self.markForCleanUp("Union")# to clean up during tearDown self.assertFalse(union_comm.IsNullOnThisRank())<line_sep>self.assertEqual(union_comm.Rank() rank)<line_sep>self.assertEqual(union_comm.Size() size)<block_end>@UnitTest.skipUnless(IsDistributedRun()<and>ParallelEnvironment.GetDefaultSize()<g>2 "Test requires at least three ranks.")<def_stmt>testDataCommunicatorCreateIntersection self<block_start>rank=self.default_data_communicator.Rank()<line_sep>size=self.default_data_communicator.Size()<line_sep># Create a communicator using all ranks except the first all_except_first=DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator [i<for>i range(1 size)] "AllExceptFirst")<line_sep>self.markForCleanUp("AllExceptFirst")# to clean up during tearDown all_except_last=DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator [i<for>i range(0 size-1)] "AllExceptLast")<line_sep>self.markForCleanUp("AllExceptLast")# to clean up during tearDown intersection_comm=DataCommunicatorFactory.CreateIntersectionAndRegister(all_except_first all_except_last self.default_data_communicator "Intersection")<line_sep>self.markForCleanUp("Intersection")# to clean up during tearDown <if_stmt>rank<eq>0<or>rank<eq>size-1# The first and last ranks do not participate in the intersection communicator <block_start>self.assertTrue(intersection_comm.IsNullOnThisRank())<block_end><else_stmt><block_start>self.assertEqual(intersection_comm.Rank() rank-1)<line_sep>self.assertEqual(intersection_comm.Size() size-2)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>UnitTest.main()<block_end>
# Copyright 2018 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>itertools<import_stmt>os<import_stmt>re<import_from_stmt>. base<class_stmt>OutProc(base.ExpectedOutProc)<block_start><def_stmt>__init__ self expected_outcomes basepath expected_fail expected_filename regenerate_expected_files<block_start>super(OutProc self).__init__(expected_outcomes expected_filename regenerate_expected_files)<line_sep>self._basepath=basepath<line_sep>self._expected_fail=expected_fail<block_end><def_stmt>_is_failure_output self output<block_start>fail=output.exit_code<ne>0<if_stmt>fail<ne>self._expected_fail<block_start><return><true><block_end>expected_lines=[]<line_sep># Can't use utils.ReadLinesFrom() here because it strips whitespace. <with_stmt>open(self._basepath+'.out')<as>f<block_start><for_stmt>line f<block_start><if_stmt>line.startswith("#")<or><not>line.strip()<block_start><continue><block_end>expected_lines.append(line)<block_end><block_end>raw_lines=output.stdout.splitlines()<line_sep>actual_lines=[s<for>s raw_lines<if><not>self._ignore_line(s)]<if_stmt>len(expected_lines)<ne>len(actual_lines)<block_start><return><true><block_end># Try .js first, and fall back to .mjs. # TODO(v8:9406): clean this up by never separating the path from # the extension in the first place. base_path=self._basepath+'.js'<if_stmt><not>os.path.exists(base_path)<block_start>base_path=self._basepath+'.mjs'<block_end>env={'basename':os.path.basename(base_path) }<for_stmt>(expected actual) itertools.izip_longest(expected_lines actual_lines fillvalue='')<block_start>pattern=re.escape(expected.rstrip()%env)<line_sep>pattern=pattern.replace('\\*' '.*')<line_sep>pattern=pattern.replace('\\{NUMBER\\}' '\d+(?:\.\d*)?')<line_sep>pattern='^%s$'%pattern<if_stmt><not>re.match(pattern actual)<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>_ignore_line self string<block_start>"""Ignore empty lines, valgrind output, Android output."""<line_sep><return>(<not>string<or><not>string.strip()<or>string.startswith("==")<or>string.startswith("**")<or>string.startswith("ANDROID")<or># Android linker warning. string.startswith('WARNING: linker:'))<block_end><block_end>
<import_stmt>pytest<import_from_stmt>Thycotic Client secret_password_get_command secret_username_get_command secret_get_command secret_password_update_command secret_checkout_command secret_checkin_command secret_delete_command folder_create_command folder_delete_command folder_update_command<import_from_stmt>test_data.context GET_PASSWORD_BY_ID_CONTEXT GET_USERNAME_BY_ID_CONTENT SECRET_GET_CONTENT SECRET_PASSWORD_UPDATE_CONTEXT SECRET_CHECKOUT_CONTEXT SECRET_CHECKIN_CONTEXT SECRET_DELETE_CONTEXT FOLDER_CREATE_CONTEXT FOLDER_DELETE_CONTEXT FOLDER_UPDATE_CONTEXT<import_from_stmt>test_data.http_responses GET_PASSWORD_BY_ID_RAW_RESPONSE GET_USERNAME_BY_ID_RAW_RESPONSE SECRET_GET_RAW_RESPONSE SECRET_PASSWORD_UPDATE_RAW_RESPONSE SECRET_CHECKOUT_RAW_RESPONSE SECRET_CHECKIN_RAW_RESPONSE SECRET_DELETE_RAW_RESPONSE FOLDER_CREATE_RAW_RESPONSE FOLDER_DELETE_RAW_RESPONSE FOLDER_UPDATE_RAW_RESPONSE<line_sep>GET_PASSWORD_BY_ID_ARGS={"secret_id":"4"}<line_sep>GET_USERNAME_BY_ID_ARGS={"secret_id":"4"}<line_sep>SECRET_GET_ARGS={"secret_id":"4"}<line_sep>SECRET_PASSWORD_UPDATE_ARGS={"secret_id":"4" "newpassword":"<PASSWORD>"}<line_sep>SECRET_CHECKOUT_ARGS={"secret_id":"4"}<line_sep>SECRET_CHECKIN_ARGS={"secret_id":"4"}<line_sep>SECRET_DELETE_ARGS={"id":"9"}<line_sep>FOLDER_CREATE_ARGS={"folderName":"xsoarFolderTest3" "folderTypeId":"1" "parentFolderId":"3"}<line_sep>FOLDER_DELETE_ARGS={"folder_id":"9"}<line_sep>FOLDER_UPDATE_ARGS={"id":"12" "folderName":"xsoarTF3New"}<line_sep>@pytest.mark.parametrize('command, args, http_response, context' [(secret_password_get_command GET_PASSWORD_BY_ID_ARGS GET_PASSWORD_BY_ID_RAW_RESPONSE GET_PASSWORD_BY_ID_CONTEXT) (secret_username_get_command GET_USERNAME_BY_ID_ARGS GET_USERNAME_BY_ID_RAW_RESPONSE GET_USERNAME_BY_ID_CONTENT) (secret_get_command SECRET_GET_ARGS SECRET_GET_RAW_RESPONSE SECRET_GET_CONTENT) (secret_password_update_command SECRET_PASSWORD_UPDATE_ARGS SECRET_PASSWORD_UPDATE_RAW_RESPONSE SECRET_PASSWORD_UPDATE_CONTEXT) (secret_checkout_command SECRET_CHECKOUT_ARGS SECRET_CHECKOUT_RAW_RESPONSE SECRET_CHECKOUT_CONTEXT) (secret_checkin_command SECRET_CHECKIN_ARGS SECRET_CHECKIN_RAW_RESPONSE SECRET_CHECKIN_CONTEXT) (secret_delete_command SECRET_DELETE_ARGS SECRET_DELETE_RAW_RESPONSE SECRET_DELETE_CONTEXT) (folder_create_command FOLDER_CREATE_ARGS FOLDER_CREATE_RAW_RESPONSE FOLDER_CREATE_CONTEXT) (folder_delete_command FOLDER_DELETE_ARGS FOLDER_DELETE_RAW_RESPONSE FOLDER_DELETE_CONTEXT) (folder_update_command FOLDER_UPDATE_ARGS FOLDER_UPDATE_RAW_RESPONSE FOLDER_UPDATE_CONTEXT)])<def_stmt>test_thycotic_commands command args http_response context mocker<block_start>mocker.patch.object(Client '_generate_token')<line_sep>client=Client(server_url="https://thss.softwarium.net/SecretServer" username="xsoar1" password="<PASSWORD>" proxy=<false> verify=<false>)<line_sep>mocker.patch.object(Client '_http_request' return_value=http_response)<line_sep>outputs=command(client **args)<line_sep>results=outputs.to_context()<assert_stmt>results.get("EntryContext")<eq>context<block_end>
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc <import_from_future_stmt> unicode_literals<import_from_stmt>snapshottest Snapshot<line_sep>snapshots=Snapshot()<line_sep>snapshots['test_keywords 1']='[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'<line_sep>
# -*- coding: utf-8 -*- <import_stmt>sphinx_rtd_theme<line_sep># -- General configuration ----------------------------------------------- extensions=['sphinx.ext.autodoc' 'sphinx.ext.doctest' 'sphinx.ext.intersphinx' ]<line_sep># Add any paths that contain templates here, relative to this directory. templates_path=['_templates']<line_sep># The suffix of source filenames. source_suffix='.rst'<line_sep># The master toctree document. master_doc='index'<line_sep># General information about the project. project=u'bravado'<line_sep>copyright=u'2013, Digium, Inc.; 2014-2015, Yelp, Inc'<line_sep>exclude_patterns=[]<line_sep>pygments_style='sphinx'<line_sep>autoclass_content='both'<line_sep># -- Options for HTML output --------------------------------------------- html_theme='sphinx_rtd_theme'<line_sep>html_theme_path=[sphinx_rtd_theme.get_html_theme_path()]<line_sep>html_static_path=['_static']<line_sep>htmlhelp_basename='bravado-pydoc'<line_sep>intersphinx_mapping={'python':('http://docs.python.org/' <none>) 'bravado-core':('https://bravado-core.readthedocs.io/en/latest/' <none>) }<line_sep>
#This file is part of ElectricEye. #SPDX-License-Identifier: Apache-2.0 #Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, #software distributed under the License is distributed on an #"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #KIND, either express or implied. See the License for the #specific language governing permissions and limitations #under the License. <import_stmt>boto3<import_stmt>datetime<import_from_stmt>check_register CheckRegister<line_sep>registry=CheckRegister()<line_sep># import boto3 clients ecs=boto3.client("ecs")<line_sep># loop through ECS Clusters <def_stmt>list_clusters cache<block_start>response=cache.get("list_clusters")<if_stmt>response<block_start><return>response<block_end>cache["list_clusters"]=ecs.list_clusters()<line_sep><return>cache["list_clusters"]<block_end>@registry.register_check("ecs")<def_stmt>ecs_cluster_container_insights_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ECS.1] ECS clusters should have container insights enabled"""<line_sep>response=list_clusters(cache)<line_sep>myEcsClusters=response["clusterArns"]<for_stmt>clusters myEcsClusters<block_start>clusterArn=str(clusters)<try_stmt><block_start>response=ecs.describe_clusters(clusters=[clusterArn])<for_stmt>clusterinfo response["clusters"]<block_start>clusterName=str(clusterinfo["clusterName"])<line_sep>ecsClusterArn=str(clusterinfo["clusterArn"])<for_stmt>settings clusterinfo["settings"]<block_start>contInsightsCheck=str(settings["value"])<line_sep># ISO Time iso8601Time=(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())<if_stmt>contInsightsCheck<eq>"disabled"<block_start>finding={"SchemaVersion":"2018-10-08" "Id":ecsClusterArn+"/ecs-cluster-container-insights-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":ecsClusterArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"LOW"} "Confidence":99 "Title":"[ECS.1] ECS clusters should have container insights enabled" "Description":"ECS cluster "+clusterName+" does not have container insights enabled. 
Refer to the remediation instructions to remediate this behavior" "Remediation":{"Recommendation":{"Text":"For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide" "Url":"https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsCluster" "Id":ecsClusterArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"ClusterName":clusterName}} }] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF DE.AE-3" "NIST SP 800-53 AU-6" "NIST SP 800-53 CA-7" "NIST SP 800-53 IR-4" "NIST SP 800-53 IR-5" "NIST SP 800-53 IR-8" "NIST SP 800-53 SI-4" "AICPA TSC CC7.2" "ISO 27001:2013 A.12.4.1" "ISO 27001:2013 A.16.1.7" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE" }<line_sep><yield>finding<block_end><else_stmt><block_start>finding={"SchemaVersion":"2018-10-08" "Id":ecsClusterArn+"/ecs-cluster-container-insights-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":ecsClusterArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ECS.1] ECS clusters should have container insights enabled" "Description":"ECS cluster "+clusterName+" has container insights enabled." "Remediation":{"Recommendation":{"Text":"For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide" "Url":"https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsCluster" "Id":ecsClusterArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"ClusterName":clusterName}} }] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF DE.AE-3" "NIST SP 800-53 AU-6" "NIST SP 800-53 CA-7" "NIST SP 800-53 IR-4" "NIST SP 800-53 IR-5" "NIST SP 800-53 IR-8" "NIST SP 800-53 SI-4" "AICPA TSC CC7.2" "ISO 27001:2013 A.12.4.1" "ISO 27001:2013 A.16.1.7" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED" }<line_sep><yield>finding<block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end>@registry.register_check("ecs")<def_stmt>ecs_cluster_default_provider_strategy_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured"""<line_sep>response=list_clusters(cache)<line_sep>myEcsClusters=response["clusterArns"]<for_stmt>clusters myEcsClusters<block_start>clusterArn=str(clusters)<try_stmt><block_start>response=ecs.describe_clusters(clusters=[clusterArn])<for_stmt>clusterinfo response["clusters"]<block_start>clusterName=str(clusterinfo["clusterName"])<line_sep>ecsClusterArn=str(clusterinfo["clusterArn"])<line_sep>defaultProviderStratCheck=str(clusterinfo["defaultCapacityProviderStrategy"])<line_sep># ISO Time 
iso8601Time=(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())<if_stmt>defaultProviderStratCheck<eq>"[]"<block_start>finding={"SchemaVersion":"2018-10-08" "Id":ecsClusterArn+"/ecs-cluster-default-provider-strategy-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":ecsClusterArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured" "Description":"ECS cluster "+clusterName+" does not have a default provider strategy configured. Refer to the remediation instructions to remediate this behavior" "Remediation":{"Recommendation":{"Text":"For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsCluster" "Id":ecsClusterArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"ClusterName":clusterName}} }] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE" }<line_sep><yield>finding<block_end><else_stmt><block_start>finding={"SchemaVersion":"2018-10-08" "Id":ecsClusterArn+"/ecs-cluster-default-provider-strategy-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":ecsClusterArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured" "Description":"ECS cluster "+clusterName+" has a default provider strategy configured." 
"Remediation":{"Recommendation":{"Text":"For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsCluster" "Id":ecsClusterArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"ClusterName":clusterName}} }] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED" }<line_sep><yield>finding<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end>@registry.register_check("ecs")<def_stmt>ecs_task_definition_privileged_container_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ECS.3] ECS Task Definitions should not run privileged containers if not required"""<for_stmt>taskdef ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']<block_start><try_stmt><block_start>response=ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]<line_sep>taskDefinitionArn=str(response['taskDefinitionArn'])<line_sep>tdefFamily=str(response["family"])<line_sep># Loop container definitions <for_stmt>cdef response["containerDefinitions"]# ISO Time <block_start>iso8601Time=(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())<line_sep>cdefName=str(cdef["name"])<line_sep># We are going to assume that if there is not a privileged flag...that it is ;) <try_stmt><block_start>privCheck=str(cdef["privileged"])<block_end><except_stmt><block_start>privCheck='UNKNOWN'<block_end><if_stmt>privCheck<ne>'False'<block_start>finding={"SchemaVersion":"2018-10-08" "Id":taskDefinitionArn+"/"+cdefName+"/ecs-task-definition-privileged-container-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":taskDefinitionArn+"/"+cdefName "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "TTPs/Privilege Escalation"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"MEDIUM"} "Confidence":99 "Title":"[ECS.3] ECS Task Definitions should not run privileged containers if not required" "Description":"ECS Container Definition "+cdefName+" in Task Definition "+taskDefinitionArn+" has defined a Privileged container, which should be avoided unless absolutely necessary. Refer to the remediation instructions to remediate this behavior" "Remediation":{"Recommendation":{"Text":"Containers running as Privileged will have Root permissions, this should be avoided if not needed. 
Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsTaskDefinition" "Id":taskDefinitionArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"Family":tdefFamily "ContainerDefinitionName":cdefName}}}] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF PR.AC-1" "NIST SP 800-53 AC-1" "NIST SP 800-53 AC-2" "NIST SP 800-53 IA-1" "NIST SP 800-53 IA-2" "NIST SP 800-53 IA-3" "NIST SP 800-53 IA-4" "NIST SP 800-53 IA-5" "NIST SP 800-53 IA-6" "NIST SP 800-53 IA-7" "NIST SP 800-53 IA-8" "NIST SP 800-53 IA-9" "NIST SP 800-53 IA-10" "NIST SP 800-53 IA-11" "AICPA TSC CC6.1" "AICPA TSC CC6.2" "ISO 27001:2013 A.9.2.1" "ISO 27001:2013 A.9.2.2" "ISO 27001:2013 A.9.2.3" "ISO 27001:2013 A.9.2.4" "ISO 27001:2013 A.9.2.6" "ISO 27001:2013 A.9.3.1" "ISO 27001:2013 A.9.4.2" "ISO 27001:2013 A.9.4.3" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE" }<line_sep><yield>finding<block_end><else_stmt><block_start>finding={"SchemaVersion":"2018-10-08" "Id":taskDefinitionArn+"/"+cdefName+"/ecs-task-definition-privileged-container-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":taskDefinitionArn+"/"+cdefName "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "TTPs/Privilege Escalation"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ECS.3] ECS Task Definitions should not run privileged containers if not required" "Description":"ECS Container Definition "+cdefName+" in Task Definition "+taskDefinitionArn+" has not defined a Privileged container." "Remediation":{"Recommendation":{"Text":"Containers running as Privileged will have Root permissions, this should be avoided if not needed. 
Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsTaskDefinition" "Id":taskDefinitionArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"Family":tdefFamily "ContainerDefinitionName":cdefName}}}] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF PR.AC-1" "NIST SP 800-53 AC-1" "NIST SP 800-53 AC-2" "NIST SP 800-53 IA-1" "NIST SP 800-53 IA-2" "NIST SP 800-53 IA-3" "NIST SP 800-53 IA-4" "NIST SP 800-53 IA-5" "NIST SP 800-53 IA-6" "NIST SP 800-53 IA-7" "NIST SP 800-53 IA-8" "NIST SP 800-53 IA-9" "NIST SP 800-53 IA-10" "NIST SP 800-53 IA-11" "AICPA TSC CC6.1" "AICPA TSC CC6.2" "ISO 27001:2013 A.9.2.1" "ISO 27001:2013 A.9.2.2" "ISO 27001:2013 A.9.2.3" "ISO 27001:2013 A.9.2.4" "ISO 27001:2013 A.9.2.6" "ISO 27001:2013 A.9.3.1" "ISO 27001:2013 A.9.4.2" "ISO 27001:2013 A.9.4.3" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED" }<line_sep><yield>finding<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end>@registry.register_check("ecs")<def_stmt>ecs_task_definition_security_labels_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured"""<for_stmt>taskdef ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']<block_start><try_stmt><block_start>response=ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]<line_sep>taskDefinitionArn=str(response["taskDefinitionArn"])<line_sep>tdefFamily=str(response["family"])<line_sep># If there is a network mode of "awsvpc" it is likely a Fargate task - even though EC2 compute can run with that... # time for some funky edge cases, keep that in mind before you yeet an issue at me, please ;) <if_stmt>str(response["networkMode"])<eq>'awsvpc'<block_start><continue><block_end><else_stmt># Loop container definitions <block_start><for_stmt>cdef response["containerDefinitions"]# ISO Time <block_start>iso8601Time=(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())<line_sep>cdefName=str(cdef["name"])<try_stmt># This is a passing check <block_start>secOpts=str(cdef["dockerSecurityOptions"])<line_sep>finding={"SchemaVersion":"2018-10-08" "Id":taskDefinitionArn+"/"+cdefName+"/ecs-task-definition-security-labels-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":taskDefinitionArn+"/"+cdefName "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured" "Description":"ECS Container Definition "+cdefName+" in Task Definition "+taskDefinitionArn+" has Docker Security Options configured." "Remediation":{"Recommendation":{"Text":"Containers running on EC2 Compute-types should have Docker Security Options configured. 
Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"}} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsTaskDefinition" "Id":taskDefinitionArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"Family":tdefFamily "ContainerDefinitionName":cdefName 'DockerSecurityOptions':secOpts}}}] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF PR.IP-1" "NIST SP 800-53 CM-2" "NIST SP 800-53 CM-3" "NIST SP 800-53 CM-4" "NIST SP 800-53 CM-5" "NIST SP 800-53 CM-6" "NIST SP 800-53 CM-7" "NIST SP 800-53 CM-9" "NIST SP 800-53 SA-10" "AICPA TSC A1.3" "AICPA TSC CC1.4" "AICPA TSC CC5.3" "AICPA TSC CC6.2" "AICPA TSC CC7.1" "AICPA TSC CC7.3" "AICPA TSC CC7.4" "ISO 27001:2013 A.12.1.2" "ISO 27001:2013 A.12.5.1" "ISO 27001:2013 A.12.6.2" "ISO 27001:2013 A.14.2.2" "ISO 27001:2013 A.14.2.3" "ISO 27001:2013 A.14.2.4" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED"}<line_sep><yield>finding<block_end><except_stmt><block_start>secOpts=str('["NO_OPTIONS"]')<line_sep>finding={"SchemaVersion":"2018-10-08" "Id":taskDefinitionArn+"/"+cdefName+"/ecs-task-definition-security-labels-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":taskDefinitionArn+"/"+cdefName "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices"] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"HIGH"} "Confidence":99 "Title":"[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured" "Description":"ECS Container Definition "+cdefName+" in Task Definition "+taskDefinitionArn+" does not have any Docker Security Options configured. Refer to the remediation instructions to remediate this behavior" "Remediation":{"Recommendation":{"Text":"Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide" "Url":"https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"}} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsEcsTaskDefinition" "Id":taskDefinitionArn "Partition":awsPartition "Region":awsRegion "Details":{"Other":{"Family":tdefFamily "ContainerDefinitionName":cdefName 'DockerSecurityOptions':secOpts}}}] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF PR.IP-1" "NIST SP 800-53 CM-2" "NIST SP 800-53 CM-3" "NIST SP 800-53 CM-4" "NIST SP 800-53 CM-5" "NIST SP 800-53 CM-6" "NIST SP 800-53 CM-7" "NIST SP 800-53 CM-9" "NIST SP 800-53 SA-10" "AICPA TSC A1.3" "AICPA TSC CC1.4" "AICPA TSC CC5.3" "AICPA TSC CC6.2" "AICPA TSC CC7.1" "AICPA TSC CC7.3" "AICPA TSC CC7.4" "ISO 27001:2013 A.12.1.2" "ISO 27001:2013 A.12.5.1" "ISO 27001:2013 A.12.6.2" "ISO 27001:2013 A.14.2.2" "ISO 27001:2013 A.14.2.3" "ISO 27001:2013 A.14.2.4" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE"}<line_sep><yield>finding<block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end>
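The registered checks above are plain generator functions that yield one ASFF finding per resource, so they can be driven outside the ElectricEye runner for a quick smoke test. A minimal sketch follows; the module name, account ID, region, and partition are placeholders, and it assumes boto3 credentials for an account with at least one ECS cluster are configured.
# Minimal driver sketch for one of the generator checks defined above.
# Module name, account ID, region, and partition are illustrative placeholders.
from amazon_ecs_auditor import ecs_cluster_container_insights_check

findings = ecs_cluster_container_insights_check(
    cache={},
    awsAccountId="111122223333",
    awsRegion="us-east-1",
    awsPartition="aws",
)
for finding in findings:
    print(finding["Id"], finding["Compliance"]["Status"])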
<import_stmt>unittest<import_from_stmt>mox3.mox MoxTestBase IsA<import_from_stmt>slimta.queue.proxy ProxyQueue<import_from_stmt>slimta.smtp.reply Reply<import_from_stmt>slimta.relay Relay TransientRelayError PermanentRelayError<import_from_stmt>slimta.envelope Envelope<class_stmt>TestProxyQueue(MoxTestBase unittest.TestCase)<block_start><def_stmt>setUp self<block_start>super(TestProxyQueue self).setUp()<line_sep>self.relay=self.mox.CreateMock(Relay)<line_sep>self.env=Envelope('<EMAIL>' ['<EMAIL>'])<block_end><def_stmt>test_enqueue self<block_start>self.relay._attempt(self.env 0)<line_sep>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<line_sep>ret=q.enqueue(self.env)<line_sep>self.assertEqual(1 len(ret))<line_sep>self.assertEqual(2 len(ret[0]))<line_sep>self.assertEqual(self.env ret[0][0])<line_sep>self.assertRegexpMatches(ret[0][1] r'[0-9a-fA-F]{32}')<block_end><def_stmt>test_enqueue_relayerror self<block_start>err=PermanentRelayError('msg failure' Reply('550' 'Not Ok'))<line_sep>self.relay._attempt(self.env 0).AndRaise(err)<line_sep>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<line_sep>ret=q.enqueue(self.env)<line_sep>self.assertEqual(1 len(ret))<line_sep>self.assertEqual(2 len(ret[0]))<line_sep>self.assertEqual(self.env ret[0][0])<line_sep>self.assertEqual(err ret[0][1])<block_end><def_stmt>test_start_noop self<block_start>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<line_sep>q.start()<block_end><def_stmt>test_kill_noop self<block_start>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<line_sep>q.kill()<block_end><def_stmt>test_flush_noop self<block_start>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<line_sep>q.flush()<block_end><def_stmt>test_add_policy_error self<block_start>self.mox.ReplayAll()<line_sep>q=ProxyQueue(self.relay)<with_stmt>self.assertRaises(NotImplementedError)<block_start>q.add_policy('test')<block_end><block_end><block_end># vim:et:fdm=marker:sts=4:sw=4:ts=4
<import_stmt>json<import_from_stmt>grafana_backup.dashboardApi create_snapshot<def_stmt>main args settings file_path<block_start>grafana_url=settings.get('GRAFANA_URL')<line_sep>http_post_headers=settings.get('HTTP_POST_HEADERS')<line_sep>verify_ssl=settings.get('VERIFY_SSL')<line_sep>client_cert=settings.get('CLIENT_CERT')<line_sep>debug=settings.get('DEBUG')<with_stmt>open(file_path 'r')<as>f<block_start>data=f.read()<block_end>snapshot=json.loads(data)<try_stmt><block_start>snapshot['name']=snapshot['dashboard']['title']<block_end><except_stmt>KeyError<block_start>snapshot['name']="Untitled Snapshot"<block_end>(status content)=create_snapshot(json.dumps(snapshot) grafana_url http_post_headers verify_ssl client_cert debug)<if_stmt>status<eq>200<block_start>print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'] status content))<block_end><else_stmt><block_start>print("creating snapshot {0} failed with status {1}".format(snapshot['name'] status))<block_end><block_end>
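The restore entry point above only needs a settings mapping with the keys it reads via settings.get() and a path to a saved snapshot JSON file. A rough sketch, assuming the file is importable as grafana_backup.create_snapshot; the module path, URL, and token value are placeholders.
# Hypothetical direct invocation; the real CLI assembles `settings` itself.
from grafana_backup.create_snapshot import main  # assumed module path

settings = {
    'GRAFANA_URL': 'http://localhost:3000',
    'HTTP_POST_HEADERS': {'Authorization': 'Bearer placeholder-api-key',
                          'Content-Type': 'application/json'},
    'VERIFY_SSL': False,
    'CLIENT_CERT': None,
    'DEBUG': True,
}
main(args=None, settings=settings, file_path='snapshots/example_snapshot.json')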
<import_from_stmt>.build build_transforms<import_from_stmt>.pre_augmentation_transforms Resize<import_from_stmt>.target_transforms PanopticTargetGenerator SemanticTargetGenerator<line_sep>
#from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python #pythran export maxsum(int list) #pythran export maxsumseq(int list) #pythran export maxsumit(int list) #runas maxsum([0, 1, 0]) #runas maxsumseq([-1, 2, -1, 3, -1]) #runas maxsumit([-1, 1, 2, -5, -6]) <def_stmt>maxsum sequence<block_start>"""Return maximum sum."""<line_sep>maxsofar,maxendinghere=0 0<for_stmt>x sequence# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]`` <block_start>maxendinghere=max(maxendinghere+x 0)<line_sep>maxsofar=max(maxsofar maxendinghere)<block_end><return>maxsofar<block_end><def_stmt>maxsumseq sequence<block_start>start,end,sum_start=-1 -1 -1<line_sep>maxsum_,sum_=0 0<for_stmt>i,x enumerate(sequence)<block_start>sum_<augadd>x<if_stmt>maxsum_<l>sum_# found maximal subsequence so far <block_start>maxsum_=sum_<line_sep>start,end=sum_start i<block_end><elif_stmt>sum_<l>0# start new sequence <block_start>sum_=0<line_sep>sum_start=i<block_end><block_end><assert_stmt>maxsum_<eq>maxsum(sequence)<assert_stmt>maxsum_<eq>sum(sequence[start+1:end+1])<line_sep><return>sequence[start+1:end+1]<block_end><def_stmt>maxsumit iterable<block_start>maxseq=seq=[]<line_sep>start,end,sum_start=-1 -1 -1<line_sep>maxsum_,sum_=0 0<for_stmt>i,x enumerate(iterable)<block_start>seq.append(x)<line_sep>sum_<augadd>x<if_stmt>maxsum_<l>sum_<block_start>maxseq=seq<line_sep>maxsum_=sum_<line_sep>start,end=sum_start i<block_end><elif_stmt>sum_<l>0<block_start>seq=[]<line_sep>sum_=0<line_sep>sum_start=i<block_end><block_end><assert_stmt>maxsum_<eq>sum(maxseq[:end-start])<line_sep><return>maxseq[:end-start]<block_end>
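The #runas lines double as a worked example of the three Kadane-style variants; running them directly gives the results below. Under the empty-sum convention used here, an all-negative input returns 0 or an empty slice.
# Expected outputs for the #runas inputs above.
print(maxsum([0, 1, 0]))              # 1
print(maxsumseq([-1, 2, -1, 3, -1]))  # [2, -1, 3]
print(maxsumit([-1, 1, 2, -5, -6]))   # [1, 2]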
<import_from_stmt>typing List Union<import_from_stmt>pytest raises<import_from_stmt>graphql.error GraphQLError format_error<import_from_stmt>graphql.language Node Source<import_from_stmt>graphql.pyutils Undefined<def_stmt>describe_format_error <block_start><def_stmt>formats_graphql_error <block_start>source=Source(""" query { something }""")<line_sep>path:List[Union[int str]]=["one" 2]<line_sep>extensions={"ext":<none>}<line_sep>error=GraphQLError("test message" Node() source [14 40] path ValueError("original") extensions=extensions )<line_sep>formatted=format_error(error)<assert_stmt>formatted<eq>error.formatted<assert_stmt>formatted<eq>{"message":"test message" "locations":[{"line":2 "column":14} {"line":3 "column":20}] "path":path "extensions":extensions }<block_end><def_stmt>uses_default_message # noinspection PyTypeChecker <block_start>formatted=format_error(GraphQLError(<none>))# type: ignore <assert_stmt>formatted<eq>{"message":"An unknown error occurred." "locations":<none> "path":<none> }<block_end><def_stmt>includes_path <block_start>path:List[Union[int str]]=["path" 3 "to" "field"]<line_sep>error=GraphQLError("msg" path=path)<line_sep>formatted=format_error(error)<assert_stmt>formatted<eq>error.formatted<assert_stmt>formatted<eq>{"message":"msg" "locations":<none> "path":path}<block_end><def_stmt>includes_extension_fields <block_start>error=GraphQLError("msg" extensions={"foo":"bar"})<line_sep>formatted=format_error(error)<assert_stmt>formatted<eq>error.formatted<assert_stmt>formatted<eq>{"message":"msg" "locations":<none> "path":<none> "extensions":{"foo":"bar"} }<block_end><def_stmt>rejects_none_and_undefined_errors <block_start><with_stmt>raises(TypeError)<as>exc_info# noinspection PyTypeChecker <block_start>format_error(<none>)# type: ignore <block_end><assert_stmt>str(exc_info.value)<eq>"Expected a GraphQLError."<with_stmt>raises(TypeError)<as>exc_info# noinspection PyTypeChecker <block_start>format_error(Undefined)# type: ignore <block_end><assert_stmt>str(exc_info.value)<eq>"Expected a GraphQLError."<block_end><block_end>
<import_stmt>mock<import_from_stmt>maildown renderer<import_stmt>mistune<import_stmt>pygments<import_from_stmt>pygments lexers<import_from_stmt>pygments.formatters html<import_stmt>premailer<import_stmt>jinja2<def_stmt>test_highlight_renderer monkeypatch<block_start>monkeypatch.setattr(mistune "escape" mock.MagicMock())<line_sep>monkeypatch.setattr(lexers "get_lexer_by_name" mock.MagicMock())<line_sep>monkeypatch.setattr(html "HtmlFormatter" mock.MagicMock())<line_sep>monkeypatch.setattr(pygments "highlight" mock.MagicMock())<line_sep>lexers.get_lexer_by_name.return_value=<true><line_sep>html.HtmlFormatter.return_value={}<line_sep>r=renderer.HighlightRenderer()<line_sep>r.block_code("code")<line_sep>mistune.escape.assert_called_with("code")<line_sep>r.block_code("code" "python")<line_sep>lexers.get_lexer_by_name.assert_called_with("python" stripall=<true>)<line_sep>pygments.highlight.assert_called_with("code" <true> {})<block_end><def_stmt>test_generate_content monkeypatch<block_start>monkeypatch.setattr(mistune "Markdown" mock.MagicMock())<line_sep>monkeypatch.setattr(premailer "transform" mock.MagicMock())<line_sep>monkeypatch.setattr(renderer "HighlightRenderer" mock.MagicMock())<line_sep>monkeypatch.setattr(jinja2 "Template" mock.MagicMock())<line_sep>renderer.HighlightRenderer.return_value=1<line_sep>premailer.transform.return_value=""<line_sep>jinja2.Template.render.return_value=""<line_sep>renderer.generate_content("")<line_sep>mistune.Markdown.assert_called_with(renderer=1)<block_end>
<import_stmt>math<import_from_stmt>mathutils Euler<import_stmt>bpy<import_from_stmt>.portal2_entity_classes *<import_from_stmt>.portal_entity_handlers PortalEntityHandler<line_sep>local_entity_lookup_table=PortalEntityHandler.entity_lookup_table.copy()<line_sep>local_entity_lookup_table.update(entity_class_handle)<class_stmt>Portal2EntityHandler(PortalEntityHandler)<block_start>entity_lookup_table=local_entity_lookup_table<line_sep>pointlight_power_multiplier=1000<def_stmt>handle_prop_weighted_cube self entity:prop_weighted_cube entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_weighted_cube' obj 'props')<block_end><def_stmt>handle_prop_testchamber_door self entity:prop_testchamber_door entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_testchamber_door' obj 'props')<block_end><def_stmt>handle_prop_floor_button self entity:prop_floor_button entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_floor_button' obj 'props')<block_end><def_stmt>handle_prop_floor_ball_button self entity:prop_floor_ball_button entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_floor_ball_button' obj 'props')<block_end><def_stmt>handle_prop_floor_cube_button self entity:prop_floor_cube_button entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_floor_cube_button' obj 'props')<block_end><def_stmt>handle_prop_under_floor_button self entity:prop_under_floor_button entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_under_floor_button' obj 'props')<block_end><def_stmt>handle_prop_tractor_beam self entity:prop_tractor_beam entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_tractor_beam' obj 'props')<block_end><def_stmt>handle_logic_playmovie self entity:logic_playmovie entity_raw:dict<block_start>obj=bpy.data.objects.new(self._get_entity_name(entity) <none>)<line_sep>self._set_location(obj entity.origin)<line_sep>self._set_icon_if_present(obj entity)<line_sep>self._set_entity_data(obj {'entity':entity_raw})<line_sep>self._put_into_collection('logic_playmovie' obj 'logic')<block_end><def_stmt>handle_trigger_paint_cleanser self entity:trigger_paint_cleanser entity_raw:dict<block_start><if_stmt>'model'<not><in>entity_raw<block_start><return><block_end>model_id=int(entity_raw.get('model')[1:])<line_sep>mesh_object=self._load_brush_model(model_id self._get_entity_name(entity))<line_sep>self._set_location_and_scale(mesh_object parse_float_vector(entity_raw.get('origin' '0 0 0')))<line_sep>self._set_rotation(mesh_object parse_float_vector(entity_raw.get('angles' '0 0 0')))<line_sep>self._set_entity_data(mesh_object {'entity':entity_raw})<line_sep>self._put_into_collection('trigger_paint_cleanser' mesh_object 'triggers')<block_end><def_stmt>handle_trigger_catapult self entity:trigger_catapult entity_raw:dict<block_start><if_stmt>'model'<not><in>entity_raw<block_start><return><block_end>model_id=int(entity_raw.get('model')[1:])<line_sep>mesh_object=self._load_brush_model(model_id self._get_entity_name(entity))<line_sep>self._set_location_and_scale(mesh_object parse_float_vector(entity_raw.get('origin' '0 0 
0')))<line_sep>self._set_rotation(mesh_object parse_float_vector(entity_raw.get('angles' '0 0 0')))<line_sep>self._set_entity_data(mesh_object {'entity':entity_raw})<line_sep>self._put_into_collection('trigger_catapult' mesh_object 'triggers')<block_end><def_stmt>handle_npc_wheatley_boss self entity:npc_wheatley_boss entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('npc_wheatley_boss' obj 'npc')<block_end><def_stmt>handle_prop_exploding_futbol self entity:prop_exploding_futbol entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_exploding_futbol' obj 'props')<block_end><def_stmt>handle_prop_exploding_futbol_socket self entity:prop_exploding_futbol_socket entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_exploding_futbol' obj 'props')<block_end><def_stmt>handle_prop_exploding_futbol_spawner self entity:prop_exploding_futbol_spawner entity_raw:dict<block_start>obj=self._handle_entity_with_model(entity entity_raw)<line_sep>self._put_into_collection('prop_exploding_futbol_spawner' obj 'props')<block_end><block_end>

<import_stmt>sys<import_stmt>webbrowser<import_stmt>os<import_from_stmt>comicstreamerlib.folders AppFolders<import_from_stmt>PyQt4 QtGui QtCore<class_stmt>SystemTrayIcon(QtGui.QSystemTrayIcon)<block_start><def_stmt>__init__ self icon app<block_start>QtGui.QSystemTrayIcon.__init__(self icon <none>)<line_sep>self.app=app<line_sep>self.menu=QtGui.QMenu(<none>)<line_sep>exitAction=self.menu.addAction("Exit")<line_sep>self.setContextMenu(self.menu)<line_sep>exitAction.triggered.connect(self.quit)<block_end><def_stmt>quit self<block_start>QtCore.QCoreApplication.quit()<block_end><block_end><class_stmt>QtBasedGui()<block_start><def_stmt>__init__ self apiServer<block_start>self.apiServer=apiServer<line_sep>self.app=QtGui.QApplication(sys.argv)<line_sep>pixmap=QtGui.QPixmap(AppFolders.imagePath("trout.png"))<line_sep>icon=QtGui.QIcon(pixmap.scaled(16 16))<line_sep>self.trayIcon=SystemTrayIcon(icon self)<line_sep>self.trayIcon.show()<block_end><def_stmt>run self<block_start><try_stmt><block_start>self.app.exec_()<block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>QtBasedGui(<none>).run()<block_end>
<import_stmt>paddle<import_stmt>paddle.nn<as>nn<import_stmt>paddle.nn.functional<as>F<import_from_stmt>paddle.nn.initializer Assign<import_stmt>math<class_stmt>NoisyLinear(nn.Linear)<block_start><def_stmt>__init__ self in_features out_features sigma_zero=0.4 bias=<true><block_start>super(NoisyLinear self).__init__(in_features out_features)<line_sep>sigma_init=sigma_zero/math.sqrt(in_features)<line_sep>sigma_weight=self.create_parameter(shape=[in_features out_features] default_initializer=Assign(paddle.full((in_features out_features) sigma_init)))<line_sep>self.add_parameter("sigma_weight" sigma_weight)<line_sep>self.register_buffer("epsilon_input" paddle.zeros((1 in_features)))<line_sep>self.register_buffer("epsilon_output" paddle.zeros((out_features 1)))<if_stmt>bias<block_start>sigma_bias=self.create_parameter(shape=[out_features] default_initializer=Assign(paddle.full([out_features] sigma_init)))<line_sep>self.add_parameter("sigma_bias" sigma_bias)<block_end><block_end><def_stmt>_scale_noise self shape<block_start>x=paddle.randn(shape)<line_sep><return>x.sign().multiply(x.abs().sqrt())<block_end><def_stmt>forward self inputs<block_start><with_stmt>paddle.no_grad()<block_start>eps_in=self._scale_noise(self.epsilon_input.shape)<line_sep>eps_out=self._scale_noise(self.epsilon_output.shape)<line_sep>noise_v=paddle.multiply(eps_in eps_out).detach()<block_end><return>F.linear(inputs self.weight+self.sigma_weight<times>noise_v.t() self.bias+self.sigma_bias<times>eps_out.squeeze().t())<block_end><block_end><class_stmt>Model(nn.Layer)<block_start><def_stmt>__init__ self num_inputs num_actions<block_start>super(Model self).__init__()<line_sep>self.conv1=nn.Conv2D(num_inputs 32 3 stride=3)<line_sep>self.conv2=nn.Conv2D(32 32 3 stride=3)<line_sep>self.conv3=nn.Conv2D(32 64 3 stride=1)<line_sep>self.flatten=nn.Flatten()<line_sep>self.linear=NoisyLinear(64<times>3<times>2 256)<line_sep>self.fc=NoisyLinear(256 num_actions)<block_end><def_stmt>forward self x<block_start>x=F.relu(self.conv1(x))<line_sep>x=F.relu(self.conv2(x))<line_sep>x=F.relu(self.conv3(x))<line_sep>x=self.flatten(x)<line_sep>x=self.linear(x)<line_sep><return>self.fc(x)<block_end><block_end>
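A small standalone check of the factorised-noise layer, assuming the classes above are in scope. The feature sizes here are arbitrary, whereas the full Model expects an input resolution whose three convolutions flatten to 64*3*2 features; because fresh noise is drawn inside forward(), repeated calls on the same input generally differ.
# Exercise NoisyLinear on its own with arbitrary illustrative shapes.
import paddle

layer = NoisyLinear(in_features=4, out_features=2, sigma_zero=0.4)
x = paddle.randn([8, 4])   # batch of 8 four-dimensional feature vectors
y1 = layer(x)              # shape [8, 2]
y2 = layer(x)              # generally differs from y1: fresh noise each call
print(y1.shape)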
"""Agents for neural net bandit problems. We implement three main types of agent: - epsilon-greedy (fixed epsilon, annealing epsilon) - dropout (arXiv:1506.02142) - ensemble sampling All code is specialized to the setting of 2-layer fully connected MLPs. """<import_stmt>numpy<as>np<import_stmt>numpy.random<as>rd<import_from_stmt>base.agent Agent<import_from_stmt>ensemble_nn.env_nn TwoLayerNNBandit<class_stmt>TwoLayerNNEpsilonGreedy(Agent)<block_start><def_stmt>__init__ self input_dim hidden_dim actions time_horizon prior_var noise_var epsilon_param=0.0 learning_rate=1e-1 num_gradient_steps=1 batch_size=64 lr_decay=1 leaky_coeff=0.01<block_start>"""Epsilon-greedy agent with two-layer neural network model. Args: input_dim: int dimension of input. hidden_dim: int size of hidden layer. actions: numpy array of valid actions (generated by environment). time_horizon: int size to pre-allocate data storage. prior_var: prior variance for random initialization. noise_var: noise variance for update. epsilon_param: fixed epsilon choice. learning_rate: sgd learning rate. num_gradient_steps: how many sgd to do. batch_size: size of batch. lr_decay: decay learning rate. leaky_coeff: slope of "negative" part of the Leaky ReLU. """<line_sep>self.W1=1e-2<times>rd.randn(hidden_dim input_dim)# initialize weights self.W2=1e-2<times>rd.randn(hidden_dim)<line_sep>self.actions=actions<line_sep>self.num_actions=len(actions)<line_sep>self.T=time_horizon<line_sep>self.prior_var=prior_var<line_sep>self.noise_var=noise_var<line_sep>self.epsilon_param=epsilon_param<line_sep>self.lr=learning_rate<line_sep>self.num_gradient_steps=num_gradient_steps# number of gradient steps we # take during each time period self.batch_size=batch_size<line_sep>self.lr_decay=lr_decay<line_sep>self.leaky_coeff=leaky_coeff<line_sep>self.action_hist=np.zeros((self.T input_dim))<line_sep>self.reward_hist=np.zeros(self.T)<block_end><def_stmt>_model_forward self input_actions<block_start>"""Neural network forward pass. Args: input_actions: actions to evaluate (numpy array). Returns: out: network prediction. cache: tuple holding intermediate activations for backprop. """<line_sep>affine_out=np.sum(input_actions[: np.newaxis :]<times>self.W1 axis=2)<line_sep>relu_out=np.maximum(self.leaky_coeff<times>affine_out affine_out)<line_sep>out=np.sum(relu_out<times>self.W2 axis=1)<line_sep>cache=(input_actions affine_out relu_out)<line_sep><return>out cache<block_end><def_stmt>_model_backward self out cache y<block_start>"""Neural network backward pass (for backpropagation). Args: out: output of batch of predictions. cache: intermediate activations from _model_forward. y: target labels. Returns: dW1: gradients for layer 1. dW2: gradients for layer 2. 
"""<line_sep>input_actions,affine_out,relu_out=cache<line_sep>dout=-(2/self.noise_var)<times>(y-out)<line_sep>dW2=np.sum(dout[: np.newaxis]<times>relu_out axis=0)<line_sep>drelu_out=dout[: np.newaxis]<times>self.W2<line_sep>mask=(affine_out<ge>0)+self.leaky_coeff<times>(affine_out<l>0)<line_sep>daffine_out=mask<times>drelu_out<line_sep>dW1=np.dot(daffine_out.T input_actions)<line_sep><return>dW1 dW2<block_end><def_stmt>_update_model self t<block_start>"""Update the model by taking a few gradient steps."""<for_stmt>i range(self.num_gradient_steps)# sample minibatch <block_start>batch_ind=rd.randint(t+1 size=self.batch_size)<line_sep>action_batch=self.action_hist[batch_ind]<line_sep>reward_batch=self.reward_hist[batch_ind]<line_sep>out,cache=self._model_forward(action_batch)<line_sep>dW1,dW2=self._model_backward(out cache reward_batch)<line_sep>dW1<augdiv>self.batch_size<line_sep>dW2<augdiv>self.batch_size<line_sep>dW1<augadd>2/(self.prior_var<times>(t+1))<times>self.W1<line_sep>dW2<augadd>2/(self.prior_var<times>(t+1))<times>self.W2<line_sep>self.W1<augsub>self.lr<times>dW1<line_sep>self.W2<augsub>self.lr<times>dW2<block_end><block_end><def_stmt>update_observation self observation action reward<block_start>"""Learn from observations."""<line_sep>t=observation<line_sep>self.action_hist[t]=self.actions[action]<line_sep>self.reward_hist[t]=reward<line_sep>self._update_model(t)<line_sep>self.lr<augmul>self.lr_decay<block_end><def_stmt>pick_action self observation<block_start>"""Fixed epsilon-greedy action selection."""<line_sep>u=rd.rand()<if_stmt>u<l>self.epsilon_param<block_start>action=rd.randint(self.num_actions)<block_end><else_stmt><block_start>model_out,_=self._model_forward(self.actions)<line_sep>action=np.argmax(model_out)<block_end><return>action<block_end><block_end><class_stmt>TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy)<block_start>"""Epsilon-greedy with an annealing epsilon: epsilon = self.epsilon_param / (self.epsilon_param + t) """<def_stmt>pick_action self observation<block_start>"""Overload pick_action to dynamically recalculate epsilon-greedy."""<line_sep>t=observation<line_sep>epsilon=self.epsilon_param/(self.epsilon_param+t)<line_sep>u=rd.rand()<if_stmt>u<l>epsilon<block_start>action=rd.randint(self.num_actions)<block_end><else_stmt><block_start>model_out,_=self._model_forward(self.actions)<line_sep>action=np.argmax(model_out)<block_end><return>action<block_end><block_end><class_stmt>TwoLayerNNDropout(TwoLayerNNEpsilonGreedy)<block_start>"""Dropout is used to represent model uncertainty. ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142. Follow up work suggests that this is flawed: TODO(iosband) add link. """<def_stmt>__init__ self input_dim hidden_dim actions time_horizon prior_var noise_var drop_prob=0.5 learning_rate=1e-1 num_gradient_steps=1 batch_size=64 lr_decay=1 leaky_coeff=0.01<block_start>"""Dropout agent with two-layer neural network model. Args: input_dim: int dimension of input. hidden_dim: int size of hidden layer. actions: numpy array of valid actions (generated by environment). time_horizon: int size to pre-allocate data storage. prior_var: prior variance for random initialization. noise_var: noise variance for update. drop_prob: probability of randomly zero-ing out weight component. learning_rate: sgd learning rate. num_gradient_steps: how many sgd to do. batch_size: size of batch. lr_decay: decay learning rate. leaky_coeff: slope of "negative" part of the Leaky ReLU. 
"""<line_sep>self.W1=1e-2<times>rd.randn(hidden_dim input_dim)<line_sep>self.W2=1e-2<times>rd.randn(hidden_dim)<line_sep>self.actions=actions<line_sep>self.num_actions=len(actions)<line_sep>self.T=time_horizon<line_sep>self.prior_var=prior_var<line_sep>self.noise_var=noise_var<line_sep>self.p=drop_prob<line_sep>self.lr=learning_rate<line_sep>self.num_gradient_steps=num_gradient_steps<line_sep>self.batch_size=batch_size<line_sep>self.lr_decay=lr_decay<line_sep>self.leaky_coeff=leaky_coeff<line_sep>self.action_hist=np.zeros((self.T input_dim))<line_sep>self.reward_hist=np.zeros(self.T)<block_end><def_stmt>_model_forward self input_actions<block_start>"""Neural network forward pass. Note that dropout remains "on" so that forward pass is stochastic. Args: input_actions: actions to evaluate (numpy array). Returns: out: network prediction. cache: tuple holding intermediate activations for backprop. """<line_sep>affine_out=np.sum(input_actions[: np.newaxis :]<times>self.W1 axis=2)<line_sep>relu_out=np.maximum(self.leaky_coeff<times>affine_out affine_out)<line_sep>dropout_mask=rd.rand(*relu_out.shape)<g>self.p<line_sep>dropout_out=relu_out<times>dropout_mask<line_sep>out=np.sum(dropout_out<times>self.W2 axis=1)<line_sep>cache=(input_actions affine_out relu_out dropout_mask dropout_out)<line_sep><return>out cache<block_end><def_stmt>_model_backward self out cache y<block_start>"""Neural network backward pass (for backpropagation). Args: out: output of batch of predictions. cache: intermediate activations from _model_forward. y: target labels. Returns: dW1: gradients for layer 1. dW2: gradients for layer 2. """<line_sep>input_actions,affine_out,relu_out,dropout_mask,dropout_out=cache<line_sep>dout=-(2/self.noise_var)<times>(y-out)<line_sep>dW2=np.sum(dout[: np.newaxis]<times>relu_out axis=0)<line_sep>ddropout_out=dout[: np.newaxis]<times>self.W2<line_sep>drelu_out=ddropout_out<times>dropout_mask<line_sep>relu_mask=(affine_out<ge>0)+self.leaky_coeff<times>(affine_out<l>0)<line_sep>daffine_out=relu_mask<times>drelu_out<line_sep>dW1=np.dot(daffine_out.T input_actions)<line_sep><return>dW1 dW2<block_end><def_stmt>pick_action self observation<block_start>"""Select the greedy action according to the output of a stochastic forward pass."""<line_sep>model_out,_=self._model_forward(self.actions)<line_sep>action=np.argmax(model_out)<line_sep><return>action<block_end><block_end><class_stmt>TwoLayerNNEnsembleSampling(Agent)<block_start>"""An ensemble sampling agent maintains an ensemble of neural nets, each fitted to a perturbed prior and perturbed observations."""<def_stmt>__init__ self input_dim hidden_dim actions time_horizon prior_var noise_var num_models=10 learning_rate=1e-1 num_gradient_steps=1 batch_size=64 lr_decay=1 leaky_coeff=0.01<block_start>"""Ensemble sampling agent with two-layer neural network model. Args: input_dim: int dimension of input. hidden_dim: int size of hidden layer. actions: numpy array of valid actions (generated by environment). time_horizon: int size to pre-allocate data storage. prior_var: prior variance for random initialization. noise_var: noise variance for update. num_models: Number of ensemble models to train. learning_rate: sgd learning rate. num_gradient_steps: how many sgd to do. batch_size: size of batch. lr_decay: decay learning rate. leaky_coeff: slope of "negative" part of the Leaky ReLU. 
"""<line_sep>self.M=num_models<line_sep># initialize models by sampling perturbed prior means self.W1_model_prior=np.sqrt(prior_var)<times>rd.randn(self.M hidden_dim input_dim)<line_sep>self.W2_model_prior=np.sqrt(prior_var)<times>rd.randn(self.M hidden_dim)<line_sep>self.W1=np.copy(self.W1_model_prior)<line_sep>self.W2=np.copy(self.W2_model_prior)<line_sep>self.actions=actions<line_sep>self.num_actions=len(actions)<line_sep>self.T=time_horizon<line_sep>self.prior_var=prior_var<line_sep>self.noise_var=noise_var<line_sep>self.lr=learning_rate<line_sep>self.num_gradient_steps=num_gradient_steps<line_sep>self.batch_size=batch_size<line_sep>self.lr_decay=lr_decay<line_sep>self.leaky_coeff=leaky_coeff<line_sep>self.action_hist=np.zeros((self.T input_dim))<line_sep>self.model_reward_hist=np.zeros((self.M self.T))<block_end><def_stmt>_model_forward self m input_actions<block_start>"""Neural network forward pass for single model of ensemble. Args: m: index of which network to evaluate. input_actions: actions to evaluate (numpy array). Returns: out: network prediction. cache: tuple holding intermediate activations for backprop. """<line_sep>affine_out=np.sum(input_actions[: np.newaxis :]<times>self.W1[m] axis=2)<line_sep>relu_out=np.maximum(self.leaky_coeff<times>affine_out affine_out)<line_sep>out=np.sum(relu_out<times>self.W2[m] axis=1)<line_sep>cache=(input_actions affine_out relu_out)<line_sep><return>out cache<block_end><def_stmt>_model_backward self m out cache y<block_start>"""Neural network backward pass (for backpropagation) for single network. Args: m: index of which network to evaluate. out: output of batch of predictions. cache: intermediate activations from _model_forward. y: target labels. Returns: dW1: gradients for layer 1. dW2: gradients for layer 2. """<line_sep>input_actions,affine_out,relu_out=cache<line_sep>dout=-(2/self.noise_var)<times>(y-out)<line_sep>dW2=np.sum(dout[: np.newaxis]<times>relu_out axis=0)<line_sep>drelu_out=dout[: np.newaxis]<times>self.W2[m]<line_sep>mask=(affine_out<ge>0)+self.leaky_coeff<times>(affine_out<l>0)<line_sep>daffine_out=mask<times>drelu_out<line_sep>dW1=np.dot(daffine_out.T input_actions)<line_sep><return>dW1 dW2<block_end><def_stmt>_update_model self m t<block_start>"""Apply SGD to model m."""<for_stmt>i range(self.num_gradient_steps)# sample minibatch <block_start>batch_ind=rd.randint(t+1 size=self.batch_size)<line_sep>action_batch=self.action_hist[batch_ind]<line_sep>reward_batch=self.model_reward_hist[m][batch_ind]<line_sep>out,cache=self._model_forward(m action_batch)<line_sep>dW1,dW2=self._model_backward(m out cache reward_batch)<line_sep>dW1<augdiv>self.batch_size<line_sep>dW2<augdiv>self.batch_size<line_sep>dW1<augadd>2/(self.prior_var<times>(t+1))<times>(self.W1[m]-self.W1_model_prior[m])<line_sep>dW2<augadd>2/(self.prior_var<times>(t+1))<times>(self.W2[m]-self.W2_model_prior[m])<line_sep>self.W1[m]<augsub>self.lr<times>dW1<line_sep>self.W2[m]<augsub>self.lr<times>dW2<block_end><return><block_end><def_stmt>update_observation self observation action reward<block_start>"""Learn from observations, shared across all models. However, perturb the reward independently for each model and then update. 
"""<line_sep>t=observation<line_sep>self.action_hist[t]=self.actions[action]<for_stmt>m range(self.M)<block_start>m_noise=np.sqrt(self.noise_var)<times>rd.randn()<line_sep>self.model_reward_hist[m t]=reward+m_noise<line_sep>self._update_model(m t)<block_end>self.lr<augmul>self.lr_decay<block_end><def_stmt>pick_action self observation<block_start>"""Select action via ensemble sampling. Choose active network uniformly at random, then act greedily wrt that model. """<line_sep>m=rd.randint(self.M)<line_sep>model_out,_=self._model_forward(m self.actions)<line_sep>action=np.argmax(model_out)<line_sep><return>action<block_end><block_end>
"""Perform normalization on inputs or rewards. """<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>gym.spaces Box<def_stmt>normalize_angle x<block_start>"""Wraps input angle to [-pi, pi]. """<line_sep><return>((x+np.pi)%(2<times>np.pi))-np.pi<block_end><class_stmt>RunningMeanStd()<block_start>"""Calulates the running mean and std of a data stream. Attributes: mean (np.array): mean of data stream. var (np.array): variance of data stream. count (float): total count of data steam. """<def_stmt>__init__ self epsilon=1e-4 shape=()<block_start>"""Initializes containers for data mean and variance. Args: epsilon (float): helps with arithmetic issues. shape (tuple): the shape of the data stream's output. """<line_sep>self.mean=np.zeros(shape np.float64)<line_sep>self.var=np.ones(shape np.float64)<line_sep>self.count=epsilon<block_end><def_stmt>update self arr<block_start>"""Update current stats with a new stream of data. Args: arr (np.array): 1D array of data, (batch_size, *shape). """<line_sep>batch_mean=np.mean(arr axis=0)<line_sep>batch_var=np.var(arr axis=0)<line_sep>batch_count=arr.shape[0]<line_sep>self.update_from_moments(batch_mean batch_var batch_count)<block_end><def_stmt>update_from_moments self batch_mean batch_var batch_count<block_start>"""Util function for `update` method. """<line_sep>delta=batch_mean-self.mean<line_sep>tot_count=self.count+batch_count<line_sep>new_mean=self.mean+delta<times>batch_count/tot_count<line_sep>m_a=self.var<times>self.count<line_sep>m_b=batch_var<times>batch_count<line_sep>m_2=m_a+m_b+np.square(delta)<times>self.count<times>batch_count/(self.count+batch_count)<line_sep>new_var=m_2/(self.count+batch_count)<line_sep>new_count=batch_count+self.count<line_sep>self.mean=new_mean<line_sep>self.var=new_var<line_sep>self.count=new_count<block_end><block_end><class_stmt>BaseNormalizer(object)<block_start>"""Template/default normalizer. Attributes: read_only (bool): if to freeze the current stats being tracked. """<def_stmt>__init__ self read_only=<false><block_start>self.read_only=read_only<block_end><def_stmt>set_read_only self<block_start>self.read_only=<true><block_end><def_stmt>unset_read_only self<block_start>self.read_only=<false><block_end><def_stmt>__call__ self x *args **kwargs<block_start>"""Invokes normalization on the given input. """<line_sep><return>x<block_end><def_stmt>state_dict self<block_start>"""Returns snapshot of current stats. """<line_sep><return>{}<block_end><def_stmt>load_state_dict self _<block_start>"""Restores the stats from a snapshot. """<line_sep><pass><block_end><block_end><class_stmt>MeanStdNormalizer(BaseNormalizer)<block_start>"""Normalize by the running average. """<def_stmt>__init__ self shape=() read_only=<false> clip=10.0 epsilon=1e-8<block_start>"""Initializes the data stream tracker. Args: shape (tuple): shape of data being tracked. read_only (bool): if to freeze the tracker. clip (float): bounds on the data. epsilon (float): offset to provide divide-by-zero. """<line_sep>super().__init__(read_only)<line_sep>self.read_only=read_only<line_sep>self.rms=RunningMeanStd(shape=shape)<line_sep>self.clip=clip<line_sep>self.epsilon=epsilon<block_end><def_stmt>__call__ self x<block_start>"""Update tracker given data, optionally normalize the data. 
"""<line_sep>x=np.asarray(x)<if_stmt><not>self.read_only<block_start>self.rms.update(x)<block_end><return>np.clip((x-self.rms.mean)/np.sqrt(self.rms.var+self.epsilon) -self.clip self.clip)<block_end><def_stmt>state_dict self<block_start><return>{'mean':self.rms.mean 'var':self.rms.var}<block_end><def_stmt>load_state_dict self saved<block_start>self.rms.mean=saved['mean']<line_sep>self.rms.var=saved['var']<block_end><block_end><class_stmt>RewardStdNormalizer(MeanStdNormalizer)<block_start>"""Reward normalization by running average of returns. Papers: * arxiv.org/pdf/1808.04355.pdf * arxiv.org/pdf/1810.12894.pdf Also see: * github.com/openai/baselines/issues/538 """<def_stmt>__init__ self gamma=0.99 read_only=<false> clip=10.0 epsilon=1e-8<block_start>"""Initializes the data stream tracker. Args: gamma (float): discount factor for rewards. read_only (bool): if to freeze the tracker. clip (float): bounds on the data. epsilon (float): offset to provide divide-by-zero. """<line_sep># Reward has default shape (1,) or just (). super().__init__(() read_only clip epsilon)<line_sep>self.gamma=gamma<line_sep>self.ret=<none><block_end><def_stmt>__call__ self x dones<block_start>"""Update tracker given reward, optionally normalize the reward (only scaling). """<line_sep>x=np.asarray(x)<if_stmt><not>self.read_only# Track running average of forward discounted returns. <block_start><if_stmt>self.ret<is><none><block_start>self.ret=np.zeros(x.shape[0])<block_end>self.ret=self.ret<times>self.gamma+x<line_sep>self.rms.update(self.ret)<line_sep># Prevent information leak from previous episodes. self.ret[dones.astype(np.long)]=0<block_end><return>np.clip(x/np.sqrt(self.rms.var+self.epsilon) -self.clip self.clip)<block_end><block_end><class_stmt>RescaleNormalizer(BaseNormalizer)<block_start>"""Apply constant scaling. """<def_stmt>__init__ self coef=1.0<block_start>"""Initializes with fixed scaling constant. Args: coef (float): scaling coefficient. """<line_sep>super().__init__(self)<line_sep>self.coef=coef<block_end><def_stmt>__call__ self x<block_start>"""Scale the input. """<if_stmt><not>isinstance(x torch.Tensor)<block_start>x=np.asarray(x)<block_end><return>self.coef<times>x<block_end><block_end><class_stmt>ImageNormalizer(RescaleNormalizer)<block_start>"""Scale image pixles from [0,255] to [0,1]. """<def_stmt>__init__ self<block_start>super().__init__(self 1.0/255)<block_end><block_end><class_stmt>ActionUnnormalizer(BaseNormalizer)<block_start>"""Assumes policy output action is in [-1,1], unnormalize it for gym env. """<def_stmt>__init__ self action_space<block_start>"""Defines the mean and std for the bounded action space. """<line_sep>super().__init__()<assert_stmt>isinstance(action_space Box) "action space must be gym.spaces.Box"<line_sep>low,high=action_space.low action_space.high<line_sep>self.mean=(low+high)/2.0<line_sep>self.std=(high-low)/2.0<block_end><def_stmt>__call__ self action<block_start>"""Unnormalizes given input action. """<line_sep>x=np.asarray(action)<line_sep><return>self.mean+x<times>self.std<block_end><block_end>
<import_stmt>os<import_from_stmt>fastf1.core Session Weekend<import_from_stmt>fastf1.livetiming.data LiveTimingData<def_stmt>test_file_loading_w_errors # load file with many errors and invalid data without crashing <block_start>livedata=LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt')<line_sep>livedata.load()<block_end><def_stmt>test_file_loading # load a valid file <block_start>livedata=LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt')<line_sep>livedata.load()<line_sep>weekend=Weekend(2021 1)<line_sep>session=Session(weekend=weekend session_name='test_session')<line_sep>session.load_laps(with_telemetry=<true> livedata=livedata)<assert_stmt>session.laps.shape<eq>(274 26)<assert_stmt>session.car_data['44'].shape<eq>(17362 10)<block_end><def_stmt>test_duplicate_removal tmpdir# create a temporary file with two identical lines of data <block_start>tmpfile=os.path.join(tmpdir 'tmpfile.txt')<line_sep>data="['TimingAppData', {'Lines': {'22': {'Stints': {'0': {"<concat>"'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false',"<concat>"'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':"<concat>"0}}}}}, '2021-03-27T12:00:32.086Z']\n"<with_stmt>open(tmpfile 'w')<as>fobj<block_start>fobj.write(data)<line_sep>fobj.write(data)<block_end>livedata=LiveTimingData(tmpfile)<assert_stmt>len(livedata.get('TimingAppData'))<eq>1<line_sep>livedata=LiveTimingData(tmpfile remove_duplicates=<false>)<assert_stmt>len(livedata.get('TimingAppData'))<eq>2<block_end>
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ImageNet dataset with pre-processing and augmentation. Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database. https://image-net.org/ """<import_stmt>enum<import_from_stmt>typing Any Generator Mapping Optional Sequence Text Tuple<import_stmt>jax<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_datasets<as>tfds<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>perceiver.train autoaugment<line_sep>Batch=Mapping[Text np.ndarray]<line_sep>MEAN_RGB=(0.485<times>255 0.456<times>255 0.406<times>255)<line_sep>STDDEV_RGB=(0.229<times>255 0.224<times>255 0.225<times>255)<line_sep>AUTOTUNE=tf.data.experimental.AUTOTUNE<line_sep>INPUT_DIM=224# The number of pixels in the image resize. <class_stmt>Split(enum.Enum)<block_start>"""ImageNet dataset split."""<line_sep>TRAIN=1<line_sep>TRAIN_AND_VALID=2<line_sep>VALID=3<line_sep>TEST=4<line_sep>@classmethod<def_stmt>from_string cls name:Text<arrow>'Split'<block_start><return>{'TRAIN':Split.TRAIN 'TRAIN_AND_VALID':Split.TRAIN_AND_VALID 'VALID':Split.VALID 'VALIDATION':Split.VALID 'TEST':Split.TEST}[name.upper()]<block_end>@property<def_stmt>num_examples self<block_start><return>{Split.TRAIN_AND_VALID:1281167 Split.TRAIN:1271167 Split.VALID:10000 Split.TEST:50000}[self]<block_end><block_end><def_stmt>load split:Split * is_training:bool # batch_dims should be: # [device_count, per_device_batch_size] or [total_batch_size] batch_dims:Sequence[int] augmentation_settings:Mapping[str Any] # The shape to which images are resized. im_dim:int=INPUT_DIM threadpool_size:int=48 max_intra_op_parallelism:int=1 <arrow>Generator[Batch <none> <none>]<block_start>"""Loads the given split of the dataset."""<line_sep>start,end=_shard(split jax.host_id() jax.host_count())<line_sep>im_size=(im_dim im_dim)<line_sep>total_batch_size=np.prod(batch_dims)<line_sep>tfds_split=tfds.core.ReadInstruction(_to_tfds_split(split) from_=start to=end unit='abs')<line_sep>ds=tfds.load('imagenet2012:5.*.*' split=tfds_split decoders={'image':tfds.decode.SkipDecoding()})<line_sep>options=tf.data.Options()<line_sep>options.experimental_threading.private_threadpool_size=threadpool_size<line_sep>options.experimental_threading.max_intra_op_parallelism=(max_intra_op_parallelism)<line_sep>options.experimental_optimization.map_parallelization=<true><if_stmt>is_training<block_start>options.experimental_deterministic=<false><block_end>ds=ds.with_options(options)<if_stmt>is_training<block_start><if_stmt>jax.host_count()<g>1# Only cache if we are reading a subset of the dataset. 
<block_start>ds=ds.cache()<block_end>ds=ds.repeat()<line_sep>ds=ds.shuffle(buffer_size=10<times>total_batch_size seed=0)<block_end><else_stmt><block_start><if_stmt>split.num_examples%total_batch_size<ne>0<block_start><raise>ValueError(f'Test/valid must be divisible by {total_batch_size}')<block_end><block_end><def_stmt>crop_augment_preprocess example<block_start>image,_=_preprocess_image(example['image'] is_training im_size augmentation_settings)<line_sep>label=tf.cast(example['label'] tf.int32)<line_sep>out={'images':image 'labels':label}<if_stmt>is_training<block_start><if_stmt>augmentation_settings['cutmix']<block_start>out['mask']=cutmix_padding(*im_size)<line_sep>out['cutmix_ratio']=tf.reduce_mean(out['mask'])<block_end><if_stmt>augmentation_settings['mixup_alpha']<is><not><none><block_start>beta=tfp.distributions.Beta(augmentation_settings['mixup_alpha'] augmentation_settings['mixup_alpha'])<line_sep>out['mixup_ratio']=beta.sample()<block_end><block_end><return>out<block_end>ds=ds.map(crop_augment_preprocess num_parallel_calls=AUTOTUNE)<line_sep># Mixup/cutmix by temporarily batching (using the per-device batch size): use_cutmix=augmentation_settings['cutmix']<line_sep>use_mixup=augmentation_settings['mixup_alpha']<is><not><none><if_stmt>is_training<and>(use_cutmix<or>use_mixup)<block_start>inner_batch_size=batch_dims[-1]<line_sep># Apply mixup, cutmix, or mixup + cutmix on batched data. # We use data from 2 batches to produce 1 mixed batch. ds=ds.batch(inner_batch_size<times>2)<if_stmt><not>use_cutmix<and>use_mixup<block_start>ds=ds.map(my_mixup num_parallel_calls=AUTOTUNE)<block_end><elif_stmt>use_cutmix<and><not>use_mixup<block_start>ds=ds.map(my_cutmix num_parallel_calls=AUTOTUNE)<block_end><elif_stmt>use_cutmix<and>use_mixup<block_start>ds=ds.map(my_mixup_cutmix num_parallel_calls=AUTOTUNE)<block_end># Unbatch for further processing. ds=ds.unbatch()<block_end><for_stmt>batch_size reversed(batch_dims)<block_start>ds=ds.batch(batch_size)<block_end>ds=ds.prefetch(AUTOTUNE)<line_sep><yield><from>tfds.as_numpy(ds)<block_end># cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from: # https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py <def_stmt>cutmix_padding h w<block_start>"""Returns image mask for CutMix. Taken from (https://github.com/google/edward2/blob/master/experimental /marginalization_mixup/data_utils.py#L367) Args: h: image height. w: image width. """<line_sep>r_x=tf.random.uniform([] 0 w tf.int32)<line_sep>r_y=tf.random.uniform([] 0 h tf.int32)<line_sep># Beta dist in paper, but they used Beta(1,1) which is just uniform. image1_proportion=tf.random.uniform([])<line_sep>patch_length_ratio=tf.math.sqrt(1-image1_proportion)<line_sep>r_w=tf.cast(patch_length_ratio<times>tf.cast(w tf.float32) tf.int32)<line_sep>r_h=tf.cast(patch_length_ratio<times>tf.cast(h tf.float32) tf.int32)<line_sep>bbx1=tf.clip_by_value(tf.cast(r_x-r_w<floordiv>2 tf.int32) 0 w)<line_sep>bby1=tf.clip_by_value(tf.cast(r_y-r_h<floordiv>2 tf.int32) 0 h)<line_sep>bbx2=tf.clip_by_value(tf.cast(r_x+r_w<floordiv>2 tf.int32) 0 w)<line_sep>bby2=tf.clip_by_value(tf.cast(r_y+r_h<floordiv>2 tf.int32) 0 h)<line_sep># Create the binary mask. 
pad_left=bbx1<line_sep>pad_top=bby1<line_sep>pad_right=tf.maximum(w-bbx2 0)<line_sep>pad_bottom=tf.maximum(h-bby2 0)<line_sep>r_h=bby2-bby1<line_sep>r_w=bbx2-bbx1<line_sep>mask=tf.pad(tf.ones((r_h r_w)) paddings=[[pad_top pad_bottom] [pad_left pad_right]] mode='CONSTANT' constant_values=0)<line_sep>mask.set_shape((h w))<line_sep><return>mask[<ellipsis> <none>]# Add channel dim. <block_end><def_stmt>my_cutmix batch<block_start>"""Apply CutMix: https://arxiv.org/abs/1905.04899."""<line_sep>batch=dict(**batch)<line_sep>bs=tf.shape(batch['images'])[0]<floordiv>2<line_sep>mask=batch['mask'][:bs]<line_sep>images=(mask<times>batch['images'][:bs]+(1.0-mask)<times>batch['images'][bs:])<line_sep>mix_labels=batch['labels'][bs:]<line_sep>labels=batch['labels'][:bs]<line_sep>ratio=batch['cutmix_ratio'][:bs]<line_sep><return>{'images':images 'labels':labels 'mix_labels':mix_labels 'ratio':ratio}<block_end><def_stmt>my_mixup batch<block_start>"""Apply mixup: https://arxiv.org/abs/1710.09412."""<line_sep>batch=dict(**batch)<line_sep>bs=tf.shape(batch['images'])[0]<floordiv>2<line_sep>ratio=batch['mixup_ratio'][:bs <none> <none> <none>]<line_sep>images=(ratio<times>batch['images'][:bs]+(1.0-ratio)<times>batch['images'][bs:])<line_sep>mix_labels=batch['labels'][bs:]<line_sep>labels=batch['labels'][:bs]<line_sep>ratio=ratio[<ellipsis> 0 0 0]# Unsqueeze <return>{'images':images 'labels':labels 'mix_labels':mix_labels 'ratio':ratio}<block_end><def_stmt>my_mixup_cutmix batch<block_start>"""Apply mixup to half the batch, and cutmix to the other."""<line_sep>batch=dict(**batch)<line_sep>bs=tf.shape(batch['images'])[0]<floordiv>4<line_sep>mixup_ratio=batch['mixup_ratio'][:bs <none> <none> <none>]<line_sep>mixup_images=(mixup_ratio<times>batch['images'][:bs]+(1.0-mixup_ratio)<times>batch['images'][bs:2<times>bs])<line_sep>mixup_labels=batch['labels'][:bs]<line_sep>mixup_mix_labels=batch['labels'][bs:2<times>bs]<line_sep>cutmix_mask=batch['mask'][2<times>bs:3<times>bs]<line_sep>cutmix_images=(cutmix_mask<times>batch['images'][2<times>bs:3<times>bs]+(1.0-cutmix_mask)<times>batch['images'][-bs:])<line_sep>cutmix_labels=batch['labels'][2<times>bs:3<times>bs]<line_sep>cutmix_mix_labels=batch['labels'][-bs:]<line_sep>cutmix_ratio=batch['cutmix_ratio'][2<times>bs:3<times>bs]<line_sep><return>{'images':tf.concat([mixup_images cutmix_images] axis=0) 'labels':tf.concat([mixup_labels cutmix_labels] axis=0) 'mix_labels':tf.concat([mixup_mix_labels cutmix_mix_labels] 0) 'ratio':tf.concat([mixup_ratio[<ellipsis> 0 0 0] cutmix_ratio] axis=0)}<block_end><def_stmt>_to_tfds_split split:Split<arrow>tfds.Split<block_start>"""Returns the TFDS split appropriately sharded."""<line_sep># NOTE: Imagenet did not release labels for the test split used in the # competition, so it has been typical at DeepMind to consider the VALID # split the TEST split and to reserve 10k images from TRAIN for VALID. 
<if_stmt>split<in>(Split.TRAIN Split.TRAIN_AND_VALID Split.VALID)<block_start><return>tfds.Split.TRAIN<block_end><else_stmt><block_start><assert_stmt>split<eq>Split.TEST<line_sep><return>tfds.Split.VALIDATION<block_end><block_end><def_stmt>_shard split:Split shard_index:int num_shards:int<arrow>Tuple[int int]<block_start>"""Returns [start, end) for the given shard index."""<assert_stmt>shard_index<l>num_shards<line_sep>arange=np.arange(split.num_examples)<line_sep>shard_range=np.array_split(arange num_shards)[shard_index]<line_sep>start,end=shard_range[0] (shard_range[-1]+1)<if_stmt>split<eq>Split.TRAIN# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000]. <block_start>offset=Split.VALID.num_examples<line_sep>start<augadd>offset<line_sep>end<augadd>offset<block_end><return>start end<block_end><def_stmt>_preprocess_image image_bytes:tf.Tensor is_training:bool image_size:Sequence[int] augmentation_settings:Mapping[str Any] <arrow>Tuple[tf.Tensor tf.Tensor]<block_start>"""Returns processed and resized images."""<line_sep># Get the image crop. <if_stmt>is_training<block_start>image,im_shape=_decode_and_random_crop(image_bytes)<line_sep>image=tf.image.random_flip_left_right(image)<block_end><else_stmt><block_start>image,im_shape=_decode_and_center_crop(image_bytes)<block_end><assert_stmt>image.dtype<eq>tf.uint8<line_sep># Optionally apply RandAugment: https://arxiv.org/abs/1909.13719 <if_stmt>is_training<block_start><if_stmt>augmentation_settings['randaugment']<is><not><none># Input and output images are dtype uint8. <block_start>image=autoaugment.distort_image_with_randaugment(image num_layers=augmentation_settings['randaugment']['num_layers'] magnitude=augmentation_settings['randaugment']['magnitude'])<block_end><block_end># Resize and normalize the image crop. # NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without # clamping overshoots. This means values returned will be outside the range # [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]). image=tf.image.resize(image image_size tf.image.ResizeMethod.BICUBIC)<line_sep>image=_normalize_image(image)<line_sep><return>image im_shape<block_end><def_stmt>_normalize_image image:tf.Tensor<arrow>tf.Tensor<block_start>"""Normalize the image to zero mean and unit variance."""<line_sep>image<augsub>tf.constant(MEAN_RGB shape=[1 1 3] dtype=image.dtype)<line_sep>image<augdiv>tf.constant(STDDEV_RGB shape=[1 1 3] dtype=image.dtype)<line_sep><return>image<block_end><def_stmt>_distorted_bounding_box_crop image_bytes:tf.Tensor * jpeg_shape:tf.Tensor bbox:tf.Tensor min_object_covered:float aspect_ratio_range:Tuple[float float] area_range:Tuple[float float] max_attempts:int <arrow>Tuple[tf.Tensor tf.Tensor]<block_start>"""Generates cropped_image using one of the bboxes randomly distorted."""<line_sep>bbox_begin,bbox_size,_=tf.image.sample_distorted_bounding_box(jpeg_shape bounding_boxes=bbox min_object_covered=min_object_covered aspect_ratio_range=aspect_ratio_range area_range=area_range max_attempts=max_attempts use_image_if_no_bounding_boxes=<true>)<line_sep># Crop the image to the specified bounding box. 
offset_y,offset_x,_=tf.unstack(bbox_begin)<line_sep>target_height,target_width,_=tf.unstack(bbox_size)<line_sep>crop_window=[offset_y offset_x target_height target_width]<if_stmt>image_bytes.dtype<eq>tf.dtypes.string<block_start>image=tf.image.decode_and_crop_jpeg(image_bytes tf.stack(crop_window) channels=3)<block_end><else_stmt><block_start>image=tf.image.crop_to_bounding_box(image_bytes *crop_window)<block_end>im_shape=tf.stack([target_height target_width])<line_sep><return>image im_shape<block_end><def_stmt>_decode_whole_image image_bytes:tf.Tensor<arrow>Tuple[tf.Tensor tf.Tensor]<block_start>image=tf.io.decode_jpeg(image_bytes channels=3)<line_sep>im_shape=tf.io.extract_jpeg_shape(image_bytes output_type=tf.int32)<line_sep><return>image im_shape<block_end><def_stmt>_decode_and_random_crop image_bytes:tf.Tensor<arrow>Tuple[tf.Tensor tf.Tensor]<block_start>"""Make a random crop of INPUT_DIM."""<if_stmt>image_bytes.dtype<eq>tf.dtypes.string<block_start>jpeg_shape=tf.image.extract_jpeg_shape(image_bytes)<block_end><else_stmt><block_start>jpeg_shape=tf.shape(image_bytes)<block_end>bbox=tf.constant([0.0 0.0 1.0 1.0] dtype=tf.float32 shape=[1 1 4])<line_sep>image,im_shape=_distorted_bounding_box_crop(image_bytes jpeg_shape=jpeg_shape bbox=bbox min_object_covered=0.1 aspect_ratio_range=(3/4 4/3) area_range=(0.08 1.0) max_attempts=10)<if_stmt>tf.reduce_all(tf.equal(jpeg_shape tf.shape(image)))# If the random crop failed fall back to center crop. <block_start>image,im_shape=_decode_and_center_crop(image_bytes jpeg_shape)<block_end><return>image im_shape<block_end><def_stmt>_center_crop image crop_dim<block_start>"""Center crops an image to a target dimension."""<line_sep>image_height=image.shape[0]<line_sep>image_width=image.shape[1]<line_sep>offset_height=((image_height-crop_dim)+1)<floordiv>2<line_sep>offset_width=((image_width-crop_dim)+1)<floordiv>2<line_sep><return>tf.image.crop_to_bounding_box(image offset_height offset_width crop_dim crop_dim)<block_end><def_stmt>_decode_and_center_crop image_bytes:tf.Tensor jpeg_shape:Optional[tf.Tensor]=<none> <arrow>Tuple[tf.Tensor tf.Tensor]<block_start>"""Crops to center of image with padding then scales."""<if_stmt>jpeg_shape<is><none><block_start><if_stmt>image_bytes.dtype<eq>tf.dtypes.string<block_start>jpeg_shape=tf.image.extract_jpeg_shape(image_bytes)<block_end><else_stmt><block_start>jpeg_shape=tf.shape(image_bytes)<block_end><block_end>image_height=jpeg_shape[0]<line_sep>image_width=jpeg_shape[1]<line_sep>padded_center_crop_size=tf.cast(((INPUT_DIM/(INPUT_DIM+32))<times>tf.cast(tf.minimum(image_height image_width) tf.float32)) tf.int32)<line_sep>offset_height=((image_height-padded_center_crop_size)+1)<floordiv>2<line_sep>offset_width=((image_width-padded_center_crop_size)+1)<floordiv>2<line_sep>crop_window=[offset_height offset_width padded_center_crop_size padded_center_crop_size]<if_stmt>image_bytes.dtype<eq>tf.dtypes.string<block_start>image=tf.image.decode_and_crop_jpeg(image_bytes tf.stack(crop_window) channels=3)<block_end><else_stmt><block_start>image=tf.image.crop_to_bounding_box(image_bytes *crop_window)<block_end>im_shape=tf.stack([padded_center_crop_size padded_center_crop_size])<line_sep><return>image im_shape<block_end>
# -*- coding: utf-8 -*- """ Ramsey numbers. """<line_sep># Copyright (C) 2011 by # <NAME> <<EMAIL>> # All rights reserved. # BSD license. <import_stmt>networkx<as>nx<import_from_stmt>...utils arbitrary_element<line_sep>__all__=["ramsey_R2"]<line_sep>__author__="""<NAME> (<EMAIL>)"""<def_stmt>ramsey_R2 G<block_start>r"""Approximately computes the Ramsey number `R(2;s,t)` for graph. Parameters ---------- G : NetworkX graph Undirected graph Returns ------- max_pair : (set, set) tuple Maximum clique, Maximum independent set. """<if_stmt><not>G<block_start><return>set() set()<block_end>node=arbitrary_element(G)<line_sep>nbrs=nx.all_neighbors(G node)<line_sep>nnbrs=nx.non_neighbors(G node)<line_sep>c_1,i_1=ramsey_R2(G.subgraph(nbrs).copy())<line_sep>c_2,i_2=ramsey_R2(G.subgraph(nnbrs).copy())<line_sep>c_1.add(node)<line_sep>i_2.add(node)<line_sep># Choose the larger of the two cliques and the larger of the two # independent sets, according to cardinality. <return>max(c_1 c_2 key=len) max(i_1 i_2 key=len)<block_end>
<import_stmt>tensorflow<as>tf<import_from_stmt>..fastspeech.model TFFastSpeechEncoder TFTacotronPostnet TFFastSpeechLayer <import_from_stmt>..speechsplit.model InterpLnr<import_stmt>numpy<as>np<import_stmt>copy<class_stmt>Encoder_6(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Encoder_6 self).__init__(name='Encoder_6' **kwargs)<line_sep>self.dim_neck_3=hparams.dim_neck_3<line_sep>self.freq_3=hparams.freq_3<line_sep>self.dim_f0=hparams.dim_f0<line_sep>self.dim_enc_3=hparams.dim_enc_3<line_sep>self.dim_emb=hparams.dim_spk_emb<line_sep>self.chs_grp=hparams.chs_grp<line_sep>self.before_dense_1=tf.keras.layers.Dense(units=self.dim_enc_3 dtype=tf.float32 name='before_dense_1')<line_sep>config_1=copy.deepcopy(config)<line_sep>config_1.hidden_size=self.dim_enc_3<line_sep>self.layer_1=[TFFastSpeechLayer(config_1 name='layer_._{}'.format(i))<for>i range(config_1.num_hidden_layers)]<line_sep>self.encoder_dense_1=tf.keras.layers.Dense(units=self.dim_neck_3 dtype=tf.float32 name='encoder_dense_1' )<line_sep>self.interp=InterpLnr(hparams)<block_end><def_stmt>call self x attention_mask training=<true><block_start>x=self.before_dense_1(x)<for_stmt>no,layer_module enumerate(self.layer_1)<block_start>x=layer_module([x attention_mask] training=training)[0]<line_sep>x=self.interp(x tf.tile([tf.shape(x)[1]] [tf.shape(x)[0]]) training=training )<block_end>x=self.encoder_dense_1(x)<line_sep><return>x<block_end><block_end><class_stmt>Encoder_7(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Encoder_7 self).__init__(name='Encoder_7' **kwargs)<line_sep>self.config=config<line_sep>self.dim_neck=hparams.dim_neck<line_sep>self.dim_neck_3=hparams.dim_neck_3<line_sep>self.dim_freq=hparams.dim_freq<line_sep>self.dim_enc=hparams.dim_enc<line_sep>self.dim_enc_3=hparams.dim_enc_3<line_sep>self.before_dense_1=tf.keras.layers.Dense(units=self.dim_enc dtype=tf.float32 name='before_dense_1')<line_sep>self.before_dense_2=tf.keras.layers.Dense(units=self.dim_enc_3 dtype=tf.float32 name='before_dense_2')<line_sep>config_1=copy.deepcopy(config)<line_sep>config_1.hidden_size=self.dim_enc<line_sep>self.layer_1=[TFFastSpeechLayer(config_1 name='layer_._{}'.format(i))<for>i range(config_1.num_hidden_layers)]<line_sep>config_2=copy.deepcopy(config)<line_sep>config_2.hidden_size=self.dim_enc_3<line_sep>self.layer_2=[TFFastSpeechLayer(config_2 name='layer_._{}'.format(i))<for>i range(config_2.num_hidden_layers)]<line_sep>self.encoder_dense_1=tf.keras.layers.Dense(units=self.dim_neck dtype=tf.float32 name='encoder_dense_1')<line_sep>self.encoder_dense_2=tf.keras.layers.Dense(units=self.dim_neck_3 dtype=tf.float32 name='encoder_dense_2' )<line_sep>self.interp=InterpLnr(hparams)<block_end><def_stmt>call self x_f0 attention_mask training=<true><block_start>x=x_f0[: : :self.dim_freq]<line_sep>f0=x_f0[: : self.dim_freq:]<line_sep>x=self.before_dense_1(x)<line_sep>f0=self.before_dense_2(f0)<line_sep>seq_length=tf.shape(x_f0)[1]<for_stmt>no,layer_module enumerate(self.layer_1)<block_start>x=layer_module([x attention_mask] training=training)[0]<line_sep>f0=self.layer_2[no]([f0 attention_mask] training=training)[0]<line_sep>x_f0=tf.concat((x f0) axis=2)<line_sep>x_f0=self.interp(x_f0 tf.tile([tf.shape(x_f0)[1]] [tf.shape(x)[0]]) training=training )<line_sep>x=x_f0[: : :self.dim_enc]<line_sep>f0=x_f0[: : self.dim_enc:]<block_end>x=x_f0[: : :self.dim_enc]<line_sep>f0=x_f0[: : 
self.dim_enc:]<line_sep>x=self.encoder_dense_1(x)<line_sep>f0=self.encoder_dense_2(f0)<line_sep><return>x f0<block_end><block_end><class_stmt>Encoder_t(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Encoder_t self).__init__(name='Encoder_t' **kwargs)<line_sep>self.dim_neck_2=hparams.dim_neck_2<line_sep>self.freq_2=hparams.freq_2<line_sep>self.dim_freq=hparams.dim_freq<line_sep>self.dim_enc_2=hparams.dim_enc_2<line_sep>self.dim_emb=hparams.dim_spk_emb<line_sep>self.chs_grp=hparams.chs_grp<line_sep>config=copy.deepcopy(config)<line_sep>config.num_hidden_layers=1<line_sep>config.hidden_size=self.dim_enc_2<line_sep>self.config=config<line_sep>self.before_dense=tf.keras.layers.Dense(units=self.dim_enc_2 dtype=tf.float32 name='before_dense_1')<line_sep>self.encoder=TFFastSpeechEncoder(config name='encoder')<line_sep>self.encoder_dense=tf.keras.layers.Dense(units=self.dim_neck_2 dtype=tf.float32 name='encoder_dense')<block_end><def_stmt>call self x attention_mask training=<true><block_start>x=self.before_dense(x)<line_sep>seq_length=tf.shape(x)[1]<line_sep>f=self.encoder([x attention_mask] training=training)[0]<line_sep><return>self.encoder_dense(f)<block_end><block_end><class_stmt>Decoder_3(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Decoder_3 self).__init__(name='Decoder_3' **kwargs)<line_sep>self.config=config<line_sep>self.encoder=TFFastSpeechEncoder(config name='encoder')<line_sep>self.before_dense=tf.keras.layers.Dense(units=config.hidden_size dtype=tf.float32 name='before_dense_1' )<line_sep>self.linear_projection=tf.keras.layers.Dense(units=hparams.dim_freq dtype=tf.float32 name='self.linear_projection' )<block_end><def_stmt>call self x attention_mask training=<true><block_start>x=self.before_dense(x)<line_sep>seq_length=tf.shape(x)[1]<line_sep>f=self.encoder([x attention_mask] training=training)[0]<line_sep><return>self.linear_projection(f)<block_end><block_end><class_stmt>Decoder_4(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Decoder_4 self).__init__(name='Decoder_4' **kwargs)<line_sep>self.config=config<line_sep>self.encoder=TFFastSpeechEncoder(config name='encoder')<line_sep>self.before_dense=tf.keras.layers.Dense(units=config.hidden_size dtype=tf.float32 name='before_dense_1' )<line_sep>self.linear_projection=tf.keras.layers.Dense(units=hparams.dim_f0 dtype=tf.float32 name='self.linear_projection' )<block_end><def_stmt>call self x attention_mask training=<true><block_start>x=self.before_dense(x)<line_sep>seq_length=tf.shape(x)[1]<line_sep>f=self.encoder([x attention_mask] training=training)[0]<line_sep><return>self.linear_projection(f)<block_end><block_end><class_stmt>Model(tf.keras.Model)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Model self).__init__(name='speechsplit' **kwargs)<line_sep>self.encoder_1=Encoder_7(config.encoder_self_attention_params hparams)<line_sep>self.encoder_2=Encoder_t(config.encoder_self_attention_params hparams)<line_sep>self.decoder=Decoder_3(config.decoder_self_attention_params hparams)<line_sep>self.freq=hparams.freq<line_sep>self.freq_2=hparams.freq_2<line_sep>self.freq_3=hparams.freq_3<block_end><def_stmt>call self x_f0 x_org c_trg mel_lengths training=<true><block_start>max_length=tf.cast(tf.reduce_max(mel_lengths) tf.int32)<line_sep>attention_mask=tf.sequence_mask(lengths=mel_lengths maxlen=max_length 
dtype=tf.float32)<line_sep>attention_mask.set_shape((<none> <none>))<line_sep>codes_x,codes_f0=self.encoder_1(x_f0 attention_mask training=training)<line_sep>codes_2=self.encoder_2(x_org attention_mask training=training)<line_sep>code_exp_1=codes_x<line_sep>code_exp_3=codes_f0<line_sep>code_exp_2=codes_2<line_sep>c_trg=tf.tile(tf.expand_dims(c_trg 1) (1 tf.shape(x_f0)[1] 1))<line_sep>encoder_outputs=tf.concat((code_exp_1 code_exp_2 code_exp_3 c_trg) axis=-1)<line_sep>mel_outputs=self.decoder(encoder_outputs attention_mask training=training)<line_sep><return>codes_x codes_f0 codes_2 encoder_outputs mel_outputs<block_end><block_end><class_stmt>Model_F0(tf.keras.Model)<block_start><def_stmt>__init__ self config hparams **kwargs<block_start>super(Model_F0 self).__init__(name='speechsplit_f0' **kwargs)<line_sep>self.encoder_2=Encoder_t(config.encoder_self_attention_params hparams)<line_sep>self.encoder_3=Encoder_6(config.encoder_self_attention_params hparams)<line_sep>self.decoder=Decoder_4(config.decoder_self_attention_params hparams)<line_sep>self.freq_2=hparams.freq_2<line_sep>self.freq_3=hparams.freq_3<block_end><def_stmt>call self x_org f0_trg mel_lengths training=<true><block_start>max_length=tf.cast(tf.reduce_max(mel_lengths) tf.int32)<line_sep>attention_mask=tf.sequence_mask(lengths=mel_lengths maxlen=max_length dtype=tf.float32)<line_sep>attention_mask.set_shape((<none> <none>))<line_sep>codes_2=self.encoder_2(x_org attention_mask training=training)<line_sep>code_exp_2=codes_2<line_sep>codes_3=self.encoder_3(f0_trg attention_mask training=training)<line_sep>code_exp_3=codes_3<line_sep>self.o=[code_exp_2 code_exp_3]<line_sep>encoder_outputs=tf.concat((code_exp_2 code_exp_3) axis=-1)<line_sep>mel_outputs=self.decoder(encoder_outputs attention_mask training=training)<line_sep><return>codes_2 codes_3 encoder_outputs mel_outputs<block_end><block_end>
# flake8: noqa # errmsg.h CR_ERROR_FIRST=2000<line_sep>CR_UNKNOWN_ERROR=2000<line_sep>CR_SOCKET_CREATE_ERROR=2001<line_sep>CR_CONNECTION_ERROR=2002<line_sep>CR_CONN_HOST_ERROR=2003<line_sep>CR_IPSOCK_ERROR=2004<line_sep>CR_UNKNOWN_HOST=2005<line_sep>CR_SERVER_GONE_ERROR=2006<line_sep>CR_VERSION_ERROR=2007<line_sep>CR_OUT_OF_MEMORY=2008<line_sep>CR_WRONG_HOST_INFO=2009<line_sep>CR_LOCALHOST_CONNECTION=2010<line_sep>CR_TCP_CONNECTION=2011<line_sep>CR_SERVER_HANDSHAKE_ERR=2012<line_sep>CR_SERVER_LOST=2013<line_sep>CR_COMMANDS_OUT_OF_SYNC=2014<line_sep>CR_NAMEDPIPE_CONNECTION=2015<line_sep>CR_NAMEDPIPEWAIT_ERROR=2016<line_sep>CR_NAMEDPIPEOPEN_ERROR=2017<line_sep>CR_NAMEDPIPESETSTATE_ERROR=2018<line_sep>CR_CANT_READ_CHARSET=2019<line_sep>CR_NET_PACKET_TOO_LARGE=2020<line_sep>CR_EMBEDDED_CONNECTION=2021<line_sep>CR_PROBE_SLAVE_STATUS=2022<line_sep>CR_PROBE_SLAVE_HOSTS=2023<line_sep>CR_PROBE_SLAVE_CONNECT=2024<line_sep>CR_PROBE_MASTER_CONNECT=2025<line_sep>CR_SSL_CONNECTION_ERROR=2026<line_sep>CR_MALFORMED_PACKET=2027<line_sep>CR_WRONG_LICENSE=2028<line_sep>CR_NULL_POINTER=2029<line_sep>CR_NO_PREPARE_STMT=2030<line_sep>CR_PARAMS_NOT_BOUND=2031<line_sep>CR_DATA_TRUNCATED=2032<line_sep>CR_NO_PARAMETERS_EXISTS=2033<line_sep>CR_INVALID_PARAMETER_NO=2034<line_sep>CR_INVALID_BUFFER_USE=2035<line_sep>CR_UNSUPPORTED_PARAM_TYPE=2036<line_sep>CR_SHARED_MEMORY_CONNECTION=2037<line_sep>CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR=2038<line_sep>CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR=2039<line_sep>CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR=2040<line_sep>CR_SHARED_MEMORY_CONNECT_MAP_ERROR=2041<line_sep>CR_SHARED_MEMORY_FILE_MAP_ERROR=2042<line_sep>CR_SHARED_MEMORY_MAP_ERROR=2043<line_sep>CR_SHARED_MEMORY_EVENT_ERROR=2044<line_sep>CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR=2045<line_sep>CR_SHARED_MEMORY_CONNECT_SET_ERROR=2046<line_sep>CR_CONN_UNKNOW_PROTOCOL=2047<line_sep>CR_INVALID_CONN_HANDLE=2048<line_sep>CR_SECURE_AUTH=2049<line_sep>CR_FETCH_CANCELED=2050<line_sep>CR_NO_DATA=2051<line_sep>CR_NO_STMT_METADATA=2052<line_sep>CR_NO_RESULT_SET=2053<line_sep>CR_NOT_IMPLEMENTED=2054<line_sep>CR_SERVER_LOST_EXTENDED=2055<line_sep>CR_STMT_CLOSED=2056<line_sep>CR_NEW_STMT_METADATA=2057<line_sep>CR_ALREADY_CONNECTED=2058<line_sep>CR_AUTH_PLUGIN_CANNOT_LOAD=2059<line_sep>CR_DUPLICATE_CONNECTION_ATTR=2060<line_sep>CR_AUTH_PLUGIN_ERR=2061<line_sep>CR_ERROR_LAST=2061<line_sep>
<import_from_stmt>pathlib Path<import_stmt>logging<import_from_stmt>.logger Logger<import_from_stmt>.log_formatter LogFormatter<class_stmt>FileLogger(Logger)<block_start>fmt=LogFormatter(use_colour=<false> output_ts=<false>)<line_sep>logger=<none><def_stmt>__init__ self folder format=<none><block_start><if_stmt>format<is><none><block_start>format=("%(asctime)s|%(levelname)s|%(message)s" )<block_end>formatter=logging.Formatter(format)<line_sep>log_file=Path(folder "sayn.log")<if_stmt><not>log_file.parent.exists()<block_start>log_file.parent.mkdir(parents=<true>)<block_end>handler=logging.FileHandler(log_file)<line_sep>handler.setLevel(logging.DEBUG)<line_sep>handler.setFormatter(formatter)<line_sep>logger=logging.getLogger(__name__)<line_sep>logger.addHandler(handler)<line_sep>logger.setLevel(logging.DEBUG)<line_sep>self.logger=logger<block_end><def_stmt>print self s=<none><block_start><if_stmt>s<is><not><none><block_start><if_stmt>s["level"]<eq>"info"<block_start>func=self.logger.info<block_end><elif_stmt>s["level"]<eq>"error"<block_start>func=self.logger.error<block_end><elif_stmt>s["level"]<eq>"warning"<block_start>func=self.logger.warning<block_end><else_stmt><block_start>func=self.logger.debug<block_end>s=s["message"]<if_stmt>isinstance(s str)<block_start>s=[s]<block_end><elif_stmt><not>isinstance(s list)<block_start><raise>ValueError("error in logging print")<block_end>func(f"{s[0]}")<for_stmt>e s[1:]<block_start><for_stmt>l e.split("\n")<block_start>func(f"{l}")<block_end><block_end><block_end><block_end><block_end>
# Mostly copied and modified from torch/vision/references/segmentation to support unlabeled data # Copied functions from fmassa/vision-1 to support multi-dimensional masks loaded from numpy ndarray <import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>random<import_stmt>torch<import_stmt>utils.functional<as>F<line_sep># For 2/3 dimensional tensors only <def_stmt>get_tensor_image_size img<block_start><if_stmt>img.dim()<eq>2<block_start>h,w=img.size()<block_end><else_stmt><block_start>h=img.size()[1]<line_sep>w=img.size()[2]<block_end><return>h w<block_end><class_stmt>Compose(object)<block_start><def_stmt>__init__ self transforms<block_start>self.transforms=transforms<block_end><def_stmt>__call__ self image target *args<block_start><for_stmt>t self.transforms<block_start>image,target=t(image target)<block_end><return>(image target *args)<block_end><block_end><class_stmt>Resize(object)<block_start><def_stmt>__init__ self size_image size_label<block_start>self.size_image=size_image<line_sep>self.size_label=size_label<block_end><def_stmt>__call__ self image target<block_start>image=image<if>type(image)<eq>str<else>F.resize(image self.size_image interpolation=Image.LINEAR)<line_sep>target=target<if>type(target)<eq>str<else>F.resize(target self.size_label interpolation=Image.NEAREST)<line_sep><return>image target<block_end><block_end># Pad image with zeros, yet pad target with 255 (ignore label) on bottom & right if # given a bigger desired size (or else nothing is done at all) <class_stmt>ZeroPad(object)<block_start><def_stmt>__init__ self size<block_start>self.h,self.w=size<block_end>@staticmethod<def_stmt>zero_pad image target h w<block_start>oh,ow=get_tensor_image_size(image)<line_sep>pad_h=h-oh<if>oh<l>h<else>0<line_sep>pad_w=w-ow<if>ow<l>w<else>0<line_sep>image=F.pad(image (0 0 pad_w pad_h) fill=0)<line_sep>target=target<if>type(target)<eq>str<else>F.pad(target (0 0 pad_w pad_h) fill=255)<line_sep><return>image target<block_end><def_stmt>__call__ self image target<block_start><return>self.zero_pad(image target self.h self.w)<block_end><block_end><class_stmt>RandomResize(object)<block_start><def_stmt>__init__ self min_size max_size=<none><block_start>self.min_size=min_size<if_stmt>max_size<is><none><block_start>max_size=min_size<block_end>self.max_size=max_size<block_end><def_stmt>__call__ self image target<block_start>min_h,min_w=self.min_size<line_sep>max_h,max_w=self.max_size<line_sep>h=random.randint(min_h max_h)<line_sep>w=random.randint(min_w max_w)<line_sep>image=F.resize(image (h w) interpolation=Image.LINEAR)<line_sep>target=target<if>type(target)<eq>str<else>F.resize(target (h w) interpolation=Image.NEAREST)<line_sep><return>image target<block_end><block_end><class_stmt>RandomScale(object)<block_start><def_stmt>__init__ self min_scale max_scale=<none><block_start>self.min_scale=min_scale<if_stmt>max_scale<is><none><block_start>max_scale=min_scale<block_end>self.max_scale=max_scale<block_end><def_stmt>__call__ self image target<block_start>scale=random.uniform(self.min_scale self.max_scale)<line_sep>h,w=get_tensor_image_size(image)<line_sep>h=int(scale<times>h)<line_sep>w=int(scale<times>w)<line_sep>image=F.resize(image (h w) interpolation=Image.LINEAR)<line_sep>target=target<if>type(target)<eq>str<else>F.resize(target (h w) interpolation=Image.NEAREST)<line_sep><return>image target<block_end><block_end><class_stmt>RandomCrop(object)<block_start><def_stmt>__init__ self size<block_start>self.size=size<block_end>@staticmethod<def_stmt>get_params img 
output_size<block_start>h,w=get_tensor_image_size(img)<line_sep>th,tw=output_size<if_stmt>w<eq>tw<and>h<eq>th<block_start><return>0 0 h w<block_end>i=random.randint(0 h-th)<line_sep>j=random.randint(0 w-tw)<line_sep><return>i j th tw<block_end><def_stmt>__call__ self image target# Pad if needed <block_start>ih,iw=get_tensor_image_size(image)<if_stmt>ih<l>self.size[0]<or>iw<l>self.size[1]<block_start>image,target=ZeroPad.zero_pad(image target max(self.size[0] ih) max(self.size[1] iw))<block_end>i,j,h,w=self.get_params(image self.size)<line_sep>image=F.crop(image i j h w)<line_sep>target=target<if>type(target)<eq>str<else>F.crop(target i j h w)<line_sep><return>image target<block_end><block_end><class_stmt>RandomHorizontalFlip(object)<block_start><def_stmt>__init__ self flip_prob<block_start>self.flip_prob=flip_prob<block_end><def_stmt>__call__ self image target<block_start>t=random.random()<if_stmt>t<l>self.flip_prob<block_start>image=F.hflip(image)<block_end>target=target<if>(type(target)<eq>str<or>t<ge>self.flip_prob)<else>F.hflip(target)<line_sep><return>image target<block_end><block_end><class_stmt>ToTensor(object)<block_start><def_stmt>__init__ self keep_scale=<false> reverse_channels=<false># keep_scale = True => Images or whatever are not divided by 255 # reverse_channels = True => RGB images are changed to BGR(the default behavior of openCV & Caffe, # let's wish them all go to heaven, # for they wasted me days!) <block_start>self.keep_scale=keep_scale<line_sep>self.reverse_channels=reverse_channels<block_end><def_stmt>__call__ self image target<block_start>image=image<if>type(image)<eq>str<else>self._pil_to_tensor(image)<line_sep>target=target<if>type(target)<eq>str<else>self.label_to_tensor(target)<line_sep><return>image target<block_end>@staticmethod<def_stmt>label_to_tensor pic# 3 dimensional arrays or normal segmentation masks <block_start><if_stmt>isinstance(pic np.ndarray)<block_start><return>torch.as_tensor(pic.transpose((2 0 1)) dtype=torch.float32)<block_end><else_stmt><block_start><return>torch.as_tensor(np.asarray(pic).copy() dtype=torch.int64)<block_end><block_end><def_stmt>_pil_to_tensor self pic# Convert a PIL Image to tensor(a direct copy) <block_start><if_stmt>pic.mode<eq>'I'<block_start>img=torch.from_numpy(np.array(pic np.int32 copy=<false>))<block_end><elif_stmt>pic.mode<eq>'I;16'<block_start>img=torch.from_numpy(np.array(pic np.int16 copy=<false>))<block_end><elif_stmt>pic.mode<eq>'F'<block_start>img=torch.from_numpy(np.array(pic np.float32 copy=<false>))<block_end><elif_stmt>pic.mode<eq>'1'<block_start>img=255<times>torch.from_numpy(np.array(pic np.uint8 copy=<false>))<block_end><else_stmt><block_start>img=torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))<block_end># PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK <if_stmt>pic.mode<eq>'YCbCr'<block_start>nchannel=3<block_end><elif_stmt>pic.mode<eq>'I;16'<block_start>nchannel=1<block_end><else_stmt><block_start>nchannel=len(pic.mode)<block_end>img=img.view(pic.size[1] pic.size[0] nchannel)<if_stmt>self.reverse_channels# Beware this only works with 3 channels(can't use -1 with tensors) <block_start>img=img[: : [2 1 0]]<block_end># put it from HWC to CHW format # yikes, this transpose takes 80% of the loading time/CPU img=img.transpose(0 1).transpose(0 2).contiguous()<if_stmt>isinstance(img 
torch.ByteTensor)<block_start><if_stmt>self.keep_scale<block_start><return>img.float()<block_end><else_stmt><block_start><return>img.float().div(255)<block_end><block_end><else_stmt><block_start><return>img<block_end><block_end><block_end><class_stmt>Normalize(object)<block_start><def_stmt>__init__ self mean std<block_start>self.mean=mean<line_sep>self.std=std<block_end><def_stmt>__call__ self image target<block_start>image=F.normalize(image mean=self.mean std=self.std)<line_sep><return>image target<block_end><block_end># Init with a python list as the map(mainly for cityscapes's id -> train_id) <class_stmt>LabelMap(object)<block_start><def_stmt>__init__ self label_id_map<block_start>self.label_id_map=torch.tensor(label_id_map)<block_end><def_stmt>__call__ self image target<block_start>target=target<if>type(target)<eq>str<else>self.label_id_map[target]<line_sep><return>image target<block_end><block_end>
<import_from_stmt>django template<line_sep>register=template.Library()<line_sep>@register.filter<def_stmt>negate value<block_start><return>-value<block_end>@register.filter<def_stmt>subtract value arg<block_start><return>value-arg<block_end>
<import_from_stmt>enum Enum<import_stmt>pytest<import_stmt>gino<import_from_stmt>gino.dialects.aiomysql AsyncEnum<line_sep>pytestmark=pytest.mark.asyncio<line_sep>db=gino.Gino()<class_stmt>MyEnum(Enum)<block_start>ONE="one"<line_sep>TWO="two"<block_end><class_stmt>Blog(db.Model)<block_start>__tablename__="s_blog"<line_sep>id=db.Column(db.BigInteger() primary_key=<true>)<line_sep>title=db.Column(db.Unicode(255) index=<true> comment="Title Comment")<line_sep>visits=db.Column(db.BigInteger() default=0)<line_sep>comment_id=db.Column(db.ForeignKey("s_comment.id"))<line_sep>number=db.Column(db.Enum(MyEnum) nullable=<false> default=MyEnum.TWO)<line_sep>number2=db.Column(AsyncEnum(MyEnum) nullable=<false> default=MyEnum.TWO)<block_end><class_stmt>Comment(db.Model)<block_start>__tablename__="s_comment"<line_sep>id=db.Column(db.BigInteger() primary_key=<true>)<line_sep>blog_id=db.Column(db.ForeignKey("s_blog.id" name="blog_id_fk"))<block_end>blog_seq=db.Sequence("blog_seq" metadata=db schema="schema_test")<async_keyword><def_stmt>test engine define=<true><block_start><async_keyword><with_stmt>engine.acquire()<as>conn<block_start><assert_stmt><not><await>engine.dialect.has_table(conn "non_exist")<block_end>Blog.__table__.comment="Blog Comment"<line_sep>db.bind=engine<line_sep><await>db.gino.create_all()<line_sep><await>Blog.number.type.create_async(engine checkfirst=<true>)<line_sep><await>Blog.number2.type.create_async(engine checkfirst=<true>)<line_sep><await>db.gino.create_all(tables=[Blog.__table__] checkfirst=<true>)<line_sep><await>blog_seq.gino.create(checkfirst=<true>)<line_sep><await>Blog.__table__.gino.create(checkfirst=<true>)<line_sep><await>db.gino.drop_all()<line_sep><await>db.gino.drop_all(tables=[Blog.__table__] checkfirst=<true>)<line_sep><await>Blog.__table__.gino.drop(checkfirst=<true>)<line_sep><await>blog_seq.gino.drop(checkfirst=<true>)<if_stmt>define<block_start><class_stmt>Comment2(db.Model)<block_start>__tablename__="s_comment_2"<line_sep>id=db.Column(db.BigInteger() primary_key=<true>)<line_sep>blog_id=db.Column(db.ForeignKey("s_blog.id"))<block_end><block_end><await>db.gino.create_all()<line_sep><await>db.gino.drop_all()<block_end>
""" netrd ----- netrd stands for Network Reconstruction and Distances. It is a repository of different algorithms for constructing a network from time series data, as well as for comparing two networks. It is the product of the Network Science Insitute 2019 Collabathon. """<import_from_stmt>. distance# noqa <import_from_stmt>. reconstruction# noqa <import_from_stmt>. dynamics# noqa <import_from_stmt>. utilities# noqa
<import_from_stmt>transformer Encoder<import_from_stmt>torch nn optim<import_from_stmt>torch.nn.functional cross_entropy softmax relu<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torch.utils.data.dataloader default_collate<import_stmt>torch<import_stmt>utils<import_stmt>os<import_stmt>pickle<class_stmt>GPT(nn.Module)<block_start><def_stmt>__init__ self model_dim max_len num_layer num_head n_vocab lr max_seg=3 drop_rate=0.2 padding_idx=0<block_start>super().__init__()<line_sep>self.padding_idx=padding_idx<line_sep>self.n_vocab=n_vocab<line_sep>self.max_len=max_len<line_sep>self.word_emb=nn.Embedding(n_vocab model_dim)<line_sep>self.word_emb.weight.data.normal_(0 0.1)<line_sep>self.segment_emb=nn.Embedding(num_embeddings=max_seg embedding_dim=model_dim)<line_sep>self.segment_emb.weight.data.normal_(0 0.1)<line_sep>self.position_emb=torch.empty(1 max_len model_dim)<line_sep>nn.init.kaiming_normal_(self.position_emb mode='fan_out' nonlinearity='relu')<line_sep>self.position_emb=nn.Parameter(self.position_emb)<line_sep>self.encoder=Encoder(n_head=num_head emb_dim=model_dim drop_rate=drop_rate n_layer=num_layer)<line_sep>self.task_mlm=nn.Linear(in_features=model_dim out_features=n_vocab)<line_sep>self.task_nsp=nn.Linear(in_features=model_dim<times>self.max_len out_features=2)<line_sep>self.opt=optim.Adam(self.parameters() lr)<block_end><def_stmt>forward self seqs segs training=<false><block_start>embed=self.input_emb(seqs segs)<line_sep>z=self.encoder(embed training mask=self.mask(seqs))# [n, step, model_dim] mlm_logits=self.task_mlm(z)# [n, step, n_vocab] nsp_logits=self.task_nsp(z.reshape(z.shape[0] -1))# [n, n_cls] <return>mlm_logits nsp_logits<block_end><def_stmt>step self seqs segs seqs_ nsp_labels<block_start>self.opt.zero_grad()<line_sep>mlm_logits,nsp_logits=self(seqs segs training=<true>)<line_sep>pred_loss=cross_entropy(mlm_logits.reshape(-1 self.n_vocab) seqs_.reshape(-1))<line_sep>nsp_loss=cross_entropy(nsp_logits nsp_labels.reshape(-1))<line_sep>loss=pred_loss+0.2<times>nsp_loss<line_sep>loss.backward()<line_sep>self.opt.step()<line_sep><return>loss.cpu().data.numpy() mlm_logits<block_end><def_stmt>input_emb self seqs segs# device = next(self.parameters()).device # self.position_emb = self.position_emb.to(device) <block_start><return>self.word_emb(seqs)+self.segment_emb(segs)+self.position_emb<block_end><def_stmt>mask self seqs<block_start>device=next(self.parameters()).device<line_sep>batch_size,seq_len=seqs.shape<line_sep>mask=torch.triu(torch.ones((seq_len seq_len) dtype=torch.long) diagonal=1).to(device)# [seq_len ,seq_len] pad=torch.eq(seqs self.padding_idx)# [n, seq_len] mask=torch.where(pad[: <none> <none> :] 1 mask[<none> <none> : :]).to(device)# [n, 1, seq_len, seq_len] <return>mask<g>0<block_end># [n, 1, seq_len, seq_len] @property<def_stmt>attentions self<block_start>attentions={"encoder":[l.mh.attention.cpu().data.numpy()<for>l self.encoder.encoder_layers]}<line_sep><return>attentions<block_end><block_end><def_stmt>train <block_start>MODEL_DIM=256<line_sep>N_LAYER=4<line_sep>LEARNING_RATE=1e-4<line_sep>dataset=utils.MRPCData("./MRPC" 2000)<line_sep>print("num word: " dataset.num_word)<line_sep>model=GPT(model_dim=MODEL_DIM max_len=dataset.max_len-1 num_layer=N_LAYER num_head=4 n_vocab=dataset.num_word lr=LEARNING_RATE max_seg=dataset.num_seg drop_rate=0.2 padding_idx=dataset.pad_id)<if_stmt>torch.cuda.is_available()<block_start>print("GPU train 
avaliable")<line_sep>device=torch.device("cuda")<line_sep>model=model.cuda()<block_end><else_stmt><block_start>device=torch.device("cpu")<line_sep>model=model.cpu()<block_end>loader=DataLoader(dataset batch_size=32 shuffle=<true>)<for_stmt>epoch range(100)<block_start><for_stmt>batch_idx,batch enumerate(loader)<block_start>seqs,segs,xlen,nsp_labels=batch<line_sep>seqs,segs,nsp_labels=seqs.type(torch.LongTensor).to(device) segs.type(torch.LongTensor).to(device) nsp_labels.to(device)<line_sep># pred: [n, step, n_vocab] loss,pred=model.step(seqs=seqs[: :-1] segs=segs[: :-1] seqs_=seqs[: 1:] nsp_labels=nsp_labels)<if_stmt>batch_idx%100<eq>0<block_start>pred=pred[0].cpu().data.numpy().argmax(axis=1)# [step] print("Epoch: " epoch "|batch: " batch_idx "| loss: %.3f"%loss "\n| tgt: " " ".join([dataset.i2v[i]<for>i seqs[0 1:].cpu().data.numpy()[:xlen[0].sum()+1]]) "\n| prd: " " ".join([dataset.i2v[i]<for>i pred[:xlen[0].sum()+1]]) )<block_end><block_end><block_end>os.makedirs("./visual/models/gpt" exist_ok=<true>)<line_sep>torch.save(model.state_dict() "./visual/models/gpt/model.pth")<line_sep>export_attention(model device dataset)<block_end><def_stmt>export_attention model device data name="gpt"<block_start>model.load_state_dict(torch.load("./visual/models/gpt/model.pth" map_location=device))<line_sep>seqs,segs,xlen,nsp_labels=data[:32]<line_sep>seqs,segs,xlen,nsp_labels=torch.from_numpy(seqs) torch.from_numpy(segs) torch.from_numpy(xlen) torch.from_numpy(nsp_labels)<line_sep>seqs,segs,nsp_labels=seqs.type(torch.LongTensor).to(device) segs.type(torch.LongTensor).to(device) nsp_labels.to(device)<line_sep>model(seqs[: :-1] segs[: :-1] <false>)<line_sep>seqs=seqs.cpu().data.numpy()<line_sep>data={"src":[[data.i2v[i]<for>i seqs[j]]<for>j range(len(seqs))] "attentions":model.attentions}<line_sep>path="./visual/tmp/%s_attention_matrix.pkl"%name<line_sep>os.makedirs(os.path.dirname(path) exist_ok=<true>)<with_stmt>open(path "wb")<as>f<block_start>pickle.dump(data f)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>train()<block_end>
<import_stmt>pandas<as>pd<import_stmt>smartplots3_setup<def_stmt>createSetup name expansion_factor percapita_factor plot_size settings<block_start>plt_setup_smart={'name':name 'expansion_factor':expansion_factor 'percapita_factor':percapita_factor 'scenarios_itr':[] 'scenarios_id':[] 'scenarios_year':[] 'plot_size':plot_size 'bottom_labels':[] 'top_labels':[] 'plots_folder':"makeplots3"}<line_sep>plt_setup_smart['name']=name<line_sep>plt_setup_smart['expansion_factor']=expansion_factor<line_sep>plt_setup_smart['plot_size']=plot_size<line_sep>plt_setup_smart['scenarios_year']=[]<line_sep>plt_setup_smart['scenarios_id']=[]<line_sep>plt_setup_smart['scenarios_itr']=[]<line_sep>plt_setup_smart['top_labels']=[]<for_stmt>(scenarios_year scenarios_id scenarios_itr bottom_label top_label) settings<block_start>plt_setup_smart['scenarios_year'].append(scenarios_year)<line_sep>plt_setup_smart['scenarios_id'].append(scenarios_id)<line_sep>plt_setup_smart['scenarios_itr'].append(scenarios_itr)<line_sep>plt_setup_smart['top_labels'].append(top_label)<line_sep>plt_setup_smart['bottom_labels'].append(bottom_label)<block_end><return>plt_setup_smart<block_end><def_stmt>createSettingRow scenarios_year scenarios_id scenarios_itr bottom_label top_label<block_start><return>(scenarios_year scenarios_id scenarios_itr bottom_label top_label)<block_end>scenarios_lables={"Base_CL_CT":"Base0" "Base_STL_STT_BAU":"Base2" "Base_STL_STT_VTO":"Base3" "Base_LTL_LTT_BAU":"Base5" "Base_LTL_LTT_VTO":"Base6" "A_STL_STT_BAU":"A2" "A_STL_STT_VTO":"A3" "B_LTL_LTT_BAU":"B5" "B_LTL_LTT_VTO":"B6" "C_LTL_LTT_BAU":"C5" "C_LTL_LTT_VTO":"C6"}<line_sep>output_folder="/home/ubuntu/git/jupyter/data/28thOct2019"<line_sep># Base_CL_CT # A_STL_STT_BAU settings=[]<line_sep>settings.append(createSettingRow(2010 1 15 scenarios_lables["Base_CL_CT"] ""))<line_sep>settings.append(createSettingRow(2025 6 15 scenarios_lables["A_STL_STT_BAU"] ""))<line_sep>settings.append(createSettingRow(2025 7 15 scenarios_lables["A_STL_STT_VTO"] ""))<line_sep>settings.append(createSettingRow(2040 8 15 scenarios_lables["B_LTL_LTT_BAU"] ""))<line_sep>settings.append(createSettingRow(2040 9 15 scenarios_lables["B_LTL_LTT_VTO"] ""))<line_sep>settings.append(createSettingRow(2040 10 15 scenarios_lables["C_LTL_LTT_BAU"] ""))<line_sep>settings.append(createSettingRow(2040 11 15 scenarios_lables["C_LTL_LTT_VTO"] ""))<line_sep>plt_setup_smart3=createSetup('7scenarios' (7.75/0.315)<times>27.0/21.3 27.0/21.3 (8 4.5) settings)<line_sep>#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder) #smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder) #smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder) #smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder) #smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder) #smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder) #smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder) #smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder) settings=[]<line_sep>settings.append(createSettingRow(2010 1 15 scenarios_lables["Base_CL_CT"] ""))<line_sep>settings.append(createSettingRow(2025 2 15 scenarios_lables["Base_STL_STT_BAU"] ""))<line_sep>settings.append(createSettingRow(2025 3 15 scenarios_lables["Base_STL_STT_VTO"] ""))<line_sep>settings.append(createSettingRow(2040 4 15 scenarios_lables["Base_LTL_LTT_BAU"] ""))<line_sep>settings.append(createSettingRow(2040 5 15 
scenarios_lables["Base_LTL_LTT_VTO"] ""))<line_sep>settings.append(createSettingRow(2025 6 15 scenarios_lables["A_STL_STT_BAU"] ""))<line_sep>settings.append(createSettingRow(2025 7 15 scenarios_lables["A_STL_STT_VTO"] ""))<line_sep>settings.append(createSettingRow(2040 8 15 scenarios_lables["B_LTL_LTT_BAU"] ""))<line_sep>settings.append(createSettingRow(2040 9 15 scenarios_lables["B_LTL_LTT_VTO"] ""))<line_sep>settings.append(createSettingRow(2040 10 15 scenarios_lables["C_LTL_LTT_BAU"] ""))<line_sep>settings.append(createSettingRow(2040 11 15 scenarios_lables["C_LTL_LTT_VTO"] ""))<line_sep>plt_setup_smart3_base=createSetup('11scenarios' (7.75/0.315)<times>27.0/21.3 27.0/21.3 (10 4.5) settings)<line_sep>smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base output_folder)<line_sep>smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base output_folder)<line_sep>#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267]) smartplots3_setup.tableSummary(plt_setup_smart3_base output_folder)<line_sep>
<import_from_stmt>sklearn.cluster KMeans<import_from_stmt>.exceptions KMeansException<import_from_stmt>.task Task<class_stmt>Cluster(Task)<block_start>""" Use the K-Means algorithm to group pixels by clusters. The algorithm tries to determine the optimal number of clusters for the given pixels. """<def_stmt>__init__ self settings=<none><block_start><if_stmt>settings<is><none><block_start>settings={}<block_end>super(Cluster self).__init__(settings)<line_sep>self._kmeans_args={'max_iter':50 'tol':1.0 }<block_end><def_stmt>get self img<block_start>a=self._settings['algorithm']<if_stmt>a<eq>'kmeans'<block_start><return>self._jump(img)<block_end><else_stmt><block_start><raise>ValueError('Unknown algorithm {}'.format(a))<block_end><block_end><def_stmt>_kmeans self img k<block_start>kmeans=KMeans(n_clusters=k **self._kmeans_args)<try_stmt><block_start>kmeans.fit(img)<block_end><except_stmt><block_start><raise>KMeansException()<block_end><return>kmeans.inertia_ kmeans.labels_ kmeans.cluster_centers_<block_end><def_stmt>_jump self img<block_start>npixels=img.size<line_sep>best=<none><line_sep>prev_distorsion=0<line_sep>largest_diff=float('-inf')<for_stmt>k range(self._settings['min_k'] self._settings['max_k'])<block_start>compact,labels,centers=self._kmeans(img k)<line_sep>distorsion=Cluster._square_distorsion(npixels compact 1.5)<line_sep>diff=prev_distorsion-distorsion<line_sep>prev_distorsion=distorsion<if_stmt>diff<g>largest_diff<block_start>largest_diff=diff<line_sep>best=k labels centers<block_end><block_end><return>best<block_end>@staticmethod<def_stmt>_default_settings <block_start><return>{'min_k':2 'max_k':7 'algorithm':'kmeans' }<block_end>@staticmethod<def_stmt>_square_distorsion npixels compact y<block_start><return>pow(compact/npixels -y)<block_end><block_end>
# # @license BSD-3-Clause # # Copyright (c) 2019 Project Jupyter Contributors. # Distributed under the terms of the 3-Clause BSD License. <import_stmt>IPython.display<import_stmt>pandas<def_stmt>output_url url<block_start>IPython.display.publish_display_data({"application/x.jupyter.relative-dataset-urls+json":[url]})<block_end>
#! /usr/bin/python <import_stmt>requests<import_stmt>re<import_from_stmt>bs4 BeautifulSoup<import_stmt>colors<class_stmt>FindingComments(object)<block_start><def_stmt>__init__ self url<block_start>self.url=url<line_sep>self.comment_list=['<!--(.*)-->']<line_sep>self.found_comments={}<block_end><def_stmt>get_source_code self<block_start>resp_text=requests.get(self.url).text<line_sep><return>resp_text<block_end><def_stmt>find_comment self<block_start>source_code=self.get_source_code()<for_stmt>comment self.comment_list<block_start>comments=re.findall(comment source_code)<line_sep>self.found_comments[comment]=comments<block_end><block_end><def_stmt>parse_comments self<block_start>self.find_comment()<line_sep>comment_dict={}<if_stmt>len(self.found_comments)<g>0<block_start><for_stmt>comment_code,comment self.found_comments.items()<block_start>colors.success('Found for {} : {}'.format(comment_code comment))<line_sep>comment_dict[comment_code]=comment<block_end><block_end><else_stmt><block_start>colors.error('No comment found')<block_end><return>comment_dict<block_end><block_end>
d={"1":"a"}<line_sep>d[1]<line_sep>d["1"]<line_sep>
"""Support for Eight Sleep binary sensors."""<import_from_future_stmt> annotations<import_stmt>logging<import_from_stmt>pyeight.eight EightSleep<import_from_stmt>homeassistant.components.binary_sensor BinarySensorDeviceClass BinarySensorEntity <import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.helpers.entity_platform AddEntitiesCallback<import_from_stmt>homeassistant.helpers.typing ConfigType DiscoveryInfoType<import_from_stmt>. CONF_BINARY_SENSORS DATA_API DATA_EIGHT DATA_HEAT EightSleepBaseEntity EightSleepHeatDataCoordinator <line_sep>_LOGGER=logging.getLogger(__name__)<async_keyword><def_stmt>async_setup_platform hass:HomeAssistant config:ConfigType async_add_entities:AddEntitiesCallback discovery_info:DiscoveryInfoType=<none> <arrow><none><block_start>"""Set up the eight sleep binary sensor."""<if_stmt>discovery_info<is><none><block_start><return><block_end>name="Eight"<line_sep>sensors=discovery_info[CONF_BINARY_SENSORS]<line_sep>eight:EightSleep=hass.data[DATA_EIGHT][DATA_API]<line_sep>heat_coordinator:EightSleepHeatDataCoordinator=hass.data[DATA_EIGHT][DATA_HEAT]<line_sep>all_sensors=[EightHeatSensor(name heat_coordinator eight side sensor)<for>side,sensor sensors]<line_sep>async_add_entities(all_sensors)<block_end><class_stmt>EightHeatSensor(EightSleepBaseEntity BinarySensorEntity)<block_start>"""Representation of a Eight Sleep heat-based sensor."""<def_stmt>__init__ self name:str coordinator:EightSleepHeatDataCoordinator eight:EightSleep side:str|<none> sensor:str <arrow><none><block_start>"""Initialize the sensor."""<line_sep>super().__init__(name coordinator eight side sensor)<line_sep>self._attr_device_class=BinarySensorDeviceClass.OCCUPANCY<assert_stmt>self._usrobj<line_sep>_LOGGER.debug("Presence Sensor: %s, Side: %s, User: %s" self._sensor self._side self._usrobj.userid )<block_end>@property<def_stmt>is_on self<arrow>bool<block_start>"""Return true if the binary sensor is on."""<assert_stmt>self._usrobj<line_sep><return>bool(self._usrobj.bed_presence)<block_end><block_end>
default_app_config="nautobot.circuits.apps.CircuitsConfig"<line_sep>
<import_from_stmt>core.terraform.resources BaseTerraformVariable<class_stmt>TerraformVariable(BaseTerraformVariable)<block_start>""" Base resource class for Terraform tfvar variables Attributes: variable_dict_input (dict/none): Var dict values available_args (dict): Instance configurations variable_type (str): Type of the variable, i.e. a terraform list var or terraform dict var, etc. """<line_sep>variable_dict_input=<none><line_sep>variable_type=<none><line_sep>available_args={'variable_name':{'required':<true>} 'variable_type':{'required':<false>} 'default_value':{'required':<false>}}<block_end>
<import_stmt>os<line_sep>scrapy_project_path='/Users/kingname/book/chapter_12/DeploySpider'<line_sep>os.chdir(scrapy_project_path)# Switch the working directory to the spider project root before running the deploy command os.system('scrapyd-deploy')<import_stmt>json<import_stmt>time<import_stmt>requests<line_sep>start_url='http://45.76.110.210:6800/schedule.json'<line_sep>start_data={'project':'DeploySpider' 'spider':'Example'}<line_sep>end_url='http://172.16.31.10:6800/cancel.json'<line_sep>end_data={'project':'DeploySpider'}<line_sep>result=requests.post(start_url data=start_data auth=('kingname' 'genius')).text<line_sep>result=requests.post(end_url data=end_data auth=('kingname' 'genius')).text<line_sep># result_dict = json.loads(result) # job_id = result_dict['jobid'] # print(f'Started spider, jobid: {job_id}') # # time.sleep(5) # end_data['job'] = job_id # result = requests.post(end_url, data=end_data).text # print(result)
<import_stmt>traceback<import_stmt>re<import_stmt>sys<import_stmt>logging<line_sep>""" ********** Note by wvmarle: This file contains the complete code from chained_exception.py plus the error handling code from GlacierWrapper.py, allowing it to be used in other modules like glaciercorecalls as well. ********** """<class_stmt>GlacierException(Exception)<block_start>""" An extension of the built-in Exception class, this handles an additional cause keyword argument, adding it as cause attribute to the exception message. It logs the error message (amount of information depends on the log level) and passes it on to a higher level to handle. Furthermore it allows for the upstream handler to call for a complete stack trace or just a simple error and cause message. TODO: describe usage. """<line_sep>ERRORCODE={'InternalError':127 # Library internal error. 'UndefinedErrorCode':126 # Undefined code. 'NoResults':125 # Operation yielded no results. 'GlacierConnectionError':1 # Can not connect to Glacier. 'SdbConnectionError':2 # Can not connect to SimpleDB. 'CommandError':3 # Command line is invalid. 'VaultNameError':4 # Invalid vault name. 'DescriptionError':5 # Invalid archive description. 'IdError':6 # Invalid upload/archive/job ID given. 'RegionError':7 # Invalid region given. 'FileError':8 # Error related to reading/writing a file. 'ResumeError':9 # Problem resuming a multipart upload. 'NotReady':10 # Requested download is not ready yet. 'BookkeepingError':11 # Bookkeeping not available. 'SdbCommunicationError':12 # Problem reading/writing SimpleDB data. 'ResourceNotFoundException':13 # Glacier can not find the requested resource. 'InvalidParameterValueException':14 # Parameter not accepted. 'DownloadError':15 # Downloading an archive failed. 'SNSConnectionError':126 # Can not connect to SNS 'SNSConfigurationError':127 # Problem with configuration file 'SNSParameterError':128 # Problem with arguments passed to SNS }<def_stmt>__init__ self message code=<none> cause=<none><block_start>""" Constructor. Logs the error. :param message: the error message. :type message: str :param code: the error code. :type code: str :param cause: explanation on what caused the error. :type cause: str """<line_sep>self.logger=logging.getLogger(self.__class__.__name__)<line_sep>self.exitcode=self.ERRORCODE[code]<if>code<in>self.ERRORCODE<else>254<line_sep>self.code=code<if_stmt>cause<block_start>self.logger.error('ERROR: %s'%cause)<line_sep>self.cause=cause<if>isinstance(cause tuple)<else>(cause )<line_sep>self.stack=traceback.format_stack()[:-2]<block_end><else_stmt><block_start>self.logger.error('An error occurred, exiting.')<line_sep>self.cause=()<line_sep># Just wrap up a cause-less exception. # Get the stack trace for this exception. self.stack=(traceback.format_stack()[:-2]+traceback.format_tb(sys.exc_info()[2]))<line_sep># ^^^ let's hope the information is still there; caller must take # care of this. <block_end>self.message=message<line_sep>self.logger.info(self.fetch(message=<true>))<line_sep>self.logger.debug(self.fetch(stack=<true>))<if_stmt>self.exitcode<eq>254<block_start>self.logger.debug('Unknown error code: %s.'%code)<block_end><block_end># Works as a generator to help get the stack trace and the cause # written out. <def_stmt>causeTree self indentation=' ' alreadyMentionedTree=[] stack=<false> message=<false><block_start>""" Returns a complete stack tree, an error message, or both. Returns a warning if neither stack or message are True. 
"""<if_stmt>stack<block_start><yield>"Traceback (most recent call last):\n"<line_sep>ellipsed=0<for_stmt>i,line enumerate(self.stack)<block_start><if_stmt>(ellipsed<is><not><false><and>i<l>len(alreadyMentionedTree)<and>line<eq>alreadyMentionedTree[i])<block_start>ellipsed<augadd>1<block_end><else_stmt><block_start><if_stmt>ellipsed<block_start><yield>" ... (%d frame%s repeated)\n"%(ellipsed ""<if>ellipsed<eq>1<else>"s")<line_sep>ellipsed=<false><block_end># marker for "given out" <yield>line<block_end><block_end><block_end><if_stmt>message<block_start>exc=self<if>self.message<is><none><else>self.message<for_stmt>line traceback.format_exception_only(exc.__class__ exc)<block_start><yield>line<block_end><if_stmt>self.cause<block_start><yield>("Caused by: %d exception%s\n"%(len(self.cause) ""<if>len(self.cause)<eq>1<else>"s"))<for_stmt>causePart self.cause<block_start><if_stmt>hasattr(causePart "causeTree")<block_start><for_stmt>line causePart.causeTree(indentation self.stack)<block_start><yield>re.sub(r'([^\n]*\n)' indentation+r'\1' line)<block_end><block_end><else_stmt><block_start><for_stmt>line traceback.format_exception_only(causePart.__class__ causePart)<block_start><yield>re.sub(r'([^\n]*\n)' indentation+r'\1' line)<block_end><block_end><block_end><block_end><block_end><if_stmt><not>message<and><not>stack<block_start><yield>('No output. Specify message=True and/or stack=True \ to get output when calling this function.\n')<block_end><block_end><def_stmt>write self stream=<none> indentation=' ' message=<false> stack=<false><block_start>""" Writes the error details to sys.stderr or a stream. """<line_sep>stream=sys.stderr<if>stream<is><none><else>stream<for_stmt>line self.causeTree(indentation message=message stack=stack)<block_start>stream.write(line)<block_end><block_end><def_stmt>fetch self indentation=' ' message=<false> stack=<false><block_start>""" Fetches the error details and returns them as string. """<line_sep>out=''<for_stmt>line self.causeTree(indentation message=message stack=stack)<block_start>out<augadd>line<block_end><return>out<block_end><block_end><class_stmt>InputException(GlacierException)<block_start>""" Exception that is raised when there is someting wrong with the user input. """<line_sep>VaultNameError=1<line_sep>VaultDescriptionError=2<def_stmt>__init__ self message code=<none> cause=<none><block_start>""" Handles the exception. :param message: the error message. :type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """<line_sep>GlacierException.__init__(self message code=code cause=cause)<block_end><block_end><class_stmt>ConnectionException(GlacierException)<block_start>""" Exception that is raised when there is something wrong with the connection. """<line_sep>GlacierConnectionError=1<line_sep>SdbConnectionError=2<def_stmt>__init__ self message code=<none> cause=<none><block_start>""" Handles the exception. :param message: the error message. :type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """<line_sep>GlacierException.__init__(self message code=code cause=cause)<block_end><block_end><class_stmt>CommunicationException(GlacierException)<block_start>""" Exception that is raised when there is something wrong in the communication with an external library like boto. """<def_stmt>__init__ self message code=<none> cause=<none><block_start>""" Handles the exception. :param message: the error message. 
:type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """<line_sep>GlacierException.__init__(self message code=code cause=cause)<block_end><block_end><class_stmt>ResponseException(GlacierException)<block_start>""" Exception that is raised when there is an http response error. """<def_stmt>__init__ self message code=<none> cause=<none><block_start>GlacierException.__init__(self message code=code cause=cause)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><class_stmt>ChildrenException(Exception)<block_start><def_stmt>__init__ self message<block_start>Exception.__init__(self message)<block_end><block_end><class_stmt>ParentException(GlacierException)<block_start><def_stmt>__init__ self message cause=<none><block_start><if_stmt>cause<block_start>GlacierException.__init__(self message cause=cause)<block_end><else_stmt><block_start>GlacierException.__init__(self message)<block_end><block_end><block_end><try_stmt><block_start><try_stmt><block_start><raise>ChildrenException("parent")<block_end><except_stmt>ChildrenException e<block_start><raise>ParentException("children" cause=e)<block_end><block_end><except_stmt>ParentException e<block_start>e.write(indentation='|| ')<block_end><block_end>
<import_stmt>async<import_from_stmt>async.services EchoUpperData<line_sep>server=async.server('10.211.55.3' 20007)<line_sep>async.register(transport=server protocol=EchoUpperData)<line_sep>async.run()<line_sep>
""" LLDB AppKit formatters part of The LLVM Compiler Infrastructure This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. """<line_sep># summary provider for class NSException <import_stmt>lldb.runtime.objc.objc_runtime<import_stmt>lldb.formatters.metrics<import_stmt>CFString<import_stmt>lldb<import_stmt>lldb.formatters.Logger<line_sep>statistics=lldb.formatters.metrics.Metrics()<line_sep>statistics.add_metric('invalid_isa')<line_sep>statistics.add_metric('invalid_pointer')<line_sep>statistics.add_metric('unknown_class')<line_sep>statistics.add_metric('code_notrun')<class_stmt>NSKnownException_SummaryProvider<block_start><def_stmt>adjust_for_architecture self<block_start><pass><block_end><def_stmt>__init__ self valobj params<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>self.valobj=valobj<line_sep>self.sys_params=params<if_stmt><not>(self.sys_params.types_cache.id)<block_start>self.sys_params.types_cache.id=self.valobj.GetType().GetBasicType(lldb.eBasicTypeObjCID)<block_end>self.update()<block_end><def_stmt>update self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>self.adjust_for_architecture()<block_end><def_stmt>offset_name self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep><return>self.sys_params.pointer_size<block_end><def_stmt>offset_reason self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep><return>2<times>self.sys_params.pointer_size<block_end><def_stmt>description self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>name_ptr=self.valobj.CreateChildAtOffset("name" self.offset_name() self.sys_params.types_cache.id)<line_sep>reason_ptr=self.valobj.CreateChildAtOffset("reason" self.offset_reason() self.sys_params.types_cache.id)<line_sep><return>'name:'+CFString.CFString_SummaryProvider(name_ptr <none>)+' reason:'+CFString.CFString_SummaryProvider(reason_ptr <none>)<block_end><block_end><class_stmt>NSUnknownException_SummaryProvider<block_start><def_stmt>adjust_for_architecture self<block_start><pass><block_end><def_stmt>__init__ self valobj params<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>self.valobj=valobj<line_sep>self.sys_params=params<line_sep>self.update()<block_end><def_stmt>update self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>self.adjust_for_architecture()<block_end><def_stmt>description self<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>stream=lldb.SBStream()<line_sep>self.valobj.GetExpressionPath(stream)<line_sep>name_vo=self.valobj.CreateValueFromExpression("name" "(NSString*)["+stream.GetData()+" name]")<line_sep>reason_vo=self.valobj.CreateValueFromExpression("reason" "(NSString*)["+stream.GetData()+" reason]")<if_stmt>name_vo.IsValid()<and>reason_vo.IsValid()<block_start><return>CFString.CFString_SummaryProvider(name_vo <none>)+' '+CFString.CFString_SummaryProvider(reason_vo <none>)<block_end><return>'<variable is not NSException>'<block_end><block_end><def_stmt>GetSummary_Impl valobj<block_start>logger=lldb.formatters.Logger.Logger()<line_sep><global>statistics<line_sep>class_data,wrapper=lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj statistics)<if_stmt>wrapper<block_start><return>wrapper<block_end>name_string=class_data.class_name()<line_sep>logger<rshift>"class name is: "+str(name_string)<if_stmt>name_string<eq>'NSException'<block_start>wrapper=NSKnownException_SummaryProvider(valobj class_data.sys_params)<line_sep>statistics.metric_hit('code_notrun' 
valobj)<block_end><else_stmt><block_start>wrapper=NSUnknownException_SummaryProvider(valobj class_data.sys_params)<line_sep>statistics.metric_hit('unknown_class' valobj.GetName()+" seen as "+name_string)<block_end><return>wrapper<block_end><def_stmt>NSException_SummaryProvider valobj dict<block_start>logger=lldb.formatters.Logger.Logger()<line_sep>provider=GetSummary_Impl(valobj)<if_stmt>provider<is><not><none><block_start><if_stmt>isinstance(provider lldb.runtime.objc.objc_runtime.SpecialSituation_Description)<block_start><return>provider.message()<block_end><try_stmt><block_start>summary=provider.description()<block_end><except_stmt><block_start>summary=<none><block_end>logger<rshift>"got summary "+str(summary)<if_stmt>summary<is><none><block_start>summary='<variable is not NSException>'<block_end><return>str(summary)<block_end><return>'Summary Unavailable'<block_end><def_stmt>__lldb_init_module debugger dict<block_start>debugger.HandleCommand("type summary add -F NSException.NSException_SummaryProvider NSException")<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>CondCore.DBCommon.CondDBCommon_cfi *<line_sep>PoolDBESSourceMistag110118=cms.ESSource("PoolDBESSource" CondDBCommon toGet=cms.VPSet(# # working points # cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJBPLtable_v5_offline') label=cms.untracked.string('BTagMISTAGJBPLtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJBPLwp_v5_offline') label=cms.untracked.string('BTagMISTAGJBPLwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJBPMtable_v5_offline') label=cms.untracked.string('BTagMISTAGJBPMtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJBPMwp_v5_offline') label=cms.untracked.string('BTagMISTAGJBPMwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJBPTtable_v5_offline') label=cms.untracked.string('BTagMISTAGJBPTtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJBPTwp_v5_offline') label=cms.untracked.string('BTagMISTAGJBPTwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJPLtable_v5_offline') label=cms.untracked.string('BTagMISTAGJPLtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJPLwp_v5_offline') label=cms.untracked.string('BTagMISTAGJPLwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJPMtable_v5_offline') label=cms.untracked.string('BTagMISTAGJPMtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJPMwp_v5_offline') label=cms.untracked.string('BTagMISTAGJPMwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGJPTtable_v5_offline') label=cms.untracked.string('BTagMISTAGJPTtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGJPTwp_v5_offline') label=cms.untracked.string('BTagMISTAGJPTwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGSSVHEMtable_v5_offline') label=cms.untracked.string('BTagMISTAGSSVHEMtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGSSVHEMwp_v5_offline') label=cms.untracked.string('BTagMISTAGSSVHEMwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGSSVHPTtable_v5_offline') label=cms.untracked.string('BTagMISTAGSSVHPTtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGSSVHPTwp_v5_offline') label=cms.untracked.string('BTagMISTAGSSVHPTwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGTCHELtable_v5_offline') label=cms.untracked.string('BTagMISTAGTCHELtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGTCHELwp_v5_offline') label=cms.untracked.string('BTagMISTAGTCHELwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGTCHEMtable_v5_offline') label=cms.untracked.string('BTagMISTAGTCHEMtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGTCHEMwp_v5_offline') label=cms.untracked.string('BTagMISTAGTCHEMwp_v5_offline')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('BTagMISTAGTCHPTtable_v5_offline') 
label=cms.untracked.string('BTagMISTAGTCHPTtable_v5_offline')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('BTagMISTAGTCHPTwp_v5_offline') label=cms.untracked.string('BTagMISTAGTCHPTwp_v5_offline')) ))<line_sep>PoolDBESSourceMistag110118.connect='frontier://FrontierProd/CMS_COND_31X_PHYSICSTOOLS'<line_sep>
<import_stmt>sys<import_stmt>argparse<import_from_stmt>hgraph *<import_from_stmt>rdkit Chem<import_from_stmt>multiprocessing Pool<def_stmt>process data<block_start>vocab=set()<for_stmt>line data<block_start>s=line.strip("\r\n ")<line_sep>hmol=MolGraph(s)<for_stmt>node,attr hmol.mol_tree.nodes(data=<true>)<block_start>smiles=attr['smiles']<line_sep>vocab.add(attr['label'])<for_stmt>i,s attr['inter_label']<block_start>vocab.add((smiles s))<block_end><block_end><block_end><return>vocab<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--ncpu' type=int default=1)<line_sep>args=parser.parse_args()<line_sep>data=[mol<for>line sys.stdin<for>mol line.split()[:2]]<line_sep>data=list(set(data))<line_sep>batch_size=len(data)<floordiv>args.ncpu+1<line_sep>batches=[data[i:i+batch_size]<for>i range(0 len(data) batch_size)]<line_sep>pool=Pool(args.ncpu)<line_sep>vocab_list=pool.map(process batches)<line_sep>vocab=[(x y)<for>vocab vocab_list<for>x,y vocab]<line_sep>vocab=list(set(vocab))<for_stmt>x,y sorted(vocab)<block_start>print(x y)<block_end><block_end>
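The row above is a stand-alone preprocessing script: it reads SMILES strings from stdin (keeping the first two whitespace-separated fields of each line), tree-decomposes each molecule with MolGraph across a worker pool, and prints the deduplicated motif vocabulary to stdout. A minimal sketch of driving its process() helper directly follows; the module name get_vocab and the two sample SMILES are illustrative assumptions, not part of the original file.

# Hedged sketch: assumes the hgraph package is installed and the row above is
# saved as get_vocab.py so its process() helper can be imported.
from get_vocab import process

sample = ["CCO", "c1ccccc1"]   # ethanol and benzene SMILES, purely illustrative
vocab = process(sample)        # set of 2-tuples (motif SMILES, attachment label)
for x, y in sorted(vocab):
    print(x, y)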
<import_from_future_stmt> annotations<import_from_stmt>django_perf_rec.sql sql_fingerprint<def_stmt>test_empty <block_start><assert_stmt>sql_fingerprint("")<eq>""<assert_stmt>sql_fingerprint("\n\n \n")<eq>""<block_end><def_stmt>test_select <block_start><assert_stmt>sql_fingerprint("SELECT `f1`, `f2` FROM `b`")<eq>"SELECT ... FROM `b`"<block_end><def_stmt>test_select_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b`" hide_columns=<false>)<eq>"SELECT `f1`, `f2` FROM `b`")<block_end><def_stmt>test_select_limit settings<block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` LIMIT 12" hide_columns=<false>)<eq>"SELECT `f1`, `f2` FROM `b` LIMIT #")<block_end><def_stmt>test_select_coalesce_show_columns settings<block_start><assert_stmt>(sql_fingerprint(("SELECT `table`.`f1`, COALESCE(table.f2->>'a', table.f2->>'b', "+"'default') FROM `table`") hide_columns=<false> )<eq>"SELECT `table`.`f1`, COALESCE(table.f2->>#, table.f2->>#, #) FROM `table`")<block_end><def_stmt>test_select_where <block_start><assert_stmt>(sql_fingerprint("SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1")<eq>"SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #")<block_end><def_stmt>test_select_where_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1" hide_columns=<false> )<eq>"SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #")<block_end><def_stmt>test_select_comment <block_start><assert_stmt>(sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`")<eq>"SELECT /* comment */ ... FROM `b`")<block_end><def_stmt>test_select_comment_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`" hide_columns=<false>)<eq>"SELECT /* comment */ `f1`, `f2` FROM `b`")<block_end><def_stmt>test_select_join <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1")<eq>"SELECT ... FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #")<block_end><def_stmt>test_select_join_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1" hide_columns=<false> )<eq>"SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #")<block_end><def_stmt>test_select_order_by <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3")<eq>"SELECT ... FROM a ORDER BY f3")<block_end><def_stmt>test_select_order_by_limit <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3 LIMIT 12")<eq>"SELECT ... FROM a ORDER BY f3 LIMIT #")<block_end><def_stmt>test_select_order_by_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3" hide_columns=<false>)<eq>"SELECT f1, f2 FROM a ORDER BY f3")<block_end><def_stmt>test_select_order_by_multiple <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3, f4")<eq>"SELECT ... FROM a ORDER BY f3, f4")<block_end><def_stmt>test_select_group_by <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1")<eq>"SELECT ... 
FROM a GROUP BY f1")<block_end><def_stmt>test_select_group_by_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1" hide_columns=<false>)<eq>"SELECT f1, f2 FROM a GROUP BY f1")<block_end><def_stmt>test_select_group_by_multiple <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1, f2")<eq>"SELECT ... FROM a GROUP BY f1, f2")<block_end><def_stmt>test_select_group_by_having <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21")<eq>"SELECT ... FROM a GROUP BY f1 HAVING f1 > #")<block_end><def_stmt>test_select_group_by_having_show_columns settings<block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21" hide_columns=<false>)<eq>"SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > #")<block_end><def_stmt>test_select_group_by_having_multiple <block_start><assert_stmt>(sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21, f2 < 42")<eq>"SELECT ... FROM a GROUP BY f1 HAVING f1 > #, f2 < #")<block_end><def_stmt>test_insert <block_start><assert_stmt>(sql_fingerprint("INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)")<eq>"INSERT INTO `table` (...) VALUES (...)")<block_end><def_stmt>test_insert_show_columns settings<block_start><assert_stmt>(sql_fingerprint("INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)" hide_columns=<false>)<eq>"INSERT INTO `table` (`f1`, `f2`) VALUES (#, #)")<block_end><def_stmt>test_update <block_start><assert_stmt>(sql_fingerprint("UPDATE `table` SET `foo` = 'bar' WHERE `table`.`id` = 1")<eq>"UPDATE `table` SET ... WHERE `table`.`id` = #")<block_end><def_stmt>test_update_no_where <block_start><assert_stmt>(sql_fingerprint("UPDATE `table` SET `foo` = 'bar'")<eq>"UPDATE `table` SET ...")<block_end><def_stmt>test_declare_cursor <block_start><assert_stmt>(sql_fingerprint('DECLARE "_django_curs_140239496394496_1300" NO SCROLL CURSOR WITHOUT')<eq>'DECLARE "_django_curs_#" NO SCROLL CURSOR WITHOUT')<block_end><def_stmt>test_savepoint <block_start><assert_stmt>sql_fingerprint("SAVEPOINT `s140323809662784_x54`")<eq>"SAVEPOINT `#`"<block_end><def_stmt>test_rollback_to_savepoint <block_start><assert_stmt>(sql_fingerprint("ROLLBACK TO SAVEPOINT `s140323809662784_x54`")<eq>"ROLLBACK TO SAVEPOINT `#`")<block_end><def_stmt>test_release_savepoint <block_start><assert_stmt>(sql_fingerprint("RELEASE SAVEPOINT `s140699855320896_x17`")<eq>"RELEASE SAVEPOINT `#`")<block_end><def_stmt>test_null_value <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS NULL" hide_columns=<false>)<eq>"SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS #")<block_end><def_stmt>test_strip_duplicate_whitespaces <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `b`.`f1` IS NULL LIMIT 12 ")<eq>"SELECT ... FROM `b` WHERE `b`.`f1` IS # LIMIT #")<block_end><def_stmt>test_strip_duplicate_whitespaces_recursive <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2`, ( COALESCE(b.f3->>'en', b.f3->>'fr', '')) "<concat>"FROM `b` WHERE (`b`.`f1` IS NULL OR ( EXISTS COUNT(1) )) LIMIT 12 " hide_columns=<false> )<eq>"SELECT `f1`, `f2`, (COALESCE(b.f3->>#, b.f3->>#, #)) "<concat>"FROM `b` WHERE (`b`.`f1` IS # OR (EXISTS COUNT(#))) LIMIT #")<block_end><def_stmt>test_strip_newlines <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2`\nFROM `b`\n LIMIT 12\n\n")<eq>"SELECT ... 
FROM `b` LIMIT #")<block_end><def_stmt>test_strip_raw_query <block_start><assert_stmt>(sql_fingerprint(""" SELECT 'f1' , 'f2' , 'f3' FROM "table_a" WHERE "table_a"."f1" = 1 OR ( "table_a"."type" = 'A' AND EXISTS ( SELECT "table_b"."id" FROM "table_b" WHERE "table_b"."id" = 1 ) = true) """)<eq>('SELECT ... FROM "table_a" WHERE "table_a"."f1" = # OR '+'("table_a"."type" = # AND EXISTS (SELECT "table_b"."id" FROM '+'"table_b" WHERE "table_b"."id" = # ) = true)'))<block_end><def_stmt>test_in_single_value <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1)")<eq>"SELECT ... FROM `b` WHERE `x` IN (...)")<block_end><def_stmt>test_in_multiple_values <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3)")<eq>"SELECT ... FROM `b` WHERE `x` IN (...)")<block_end><def_stmt>test_in_multiple_clauses <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND `y` IN (4, 5, 6)")<eq>"SELECT ... FROM `b` WHERE `x` IN (...) AND `y` IN (...)")<block_end><def_stmt>test_in_multiple_values_and_clause <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND (`y` = 1 OR `y` = 2)")<eq>"SELECT ... FROM `b` WHERE `x` IN (...) AND (`y` = # OR `y` = #)")<block_end><def_stmt>test_in_subquery <block_start><assert_stmt>(sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (SELECT 1)")<eq>"SELECT ... FROM `b` WHERE `x` IN (SELECT #)")<block_end>
<import_from_stmt>abc abstractproperty<import_from_stmt>..backend_config.bucket_config S3BucketConfig<import_from_stmt>..storage.helper StorageHelper<class_stmt>SetupUploadMixin(object)<block_start>log=abstractproperty()<line_sep>storage_uri=abstractproperty()<def_stmt>setup_upload self bucket_name host=<none> access_key=<none> secret_key=<none> region=<none> multipart=<true> https=<true> verify=<true><block_start>""" Set up upload options (currently only S3 is supported) :param bucket_name: AWS bucket name :type bucket_name: str :param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used :type host: str :param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the configuration file (bucket-specific, then global) :type access_key: str :param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the configuration file (bucket-specific, then global) :type secret_key: str :param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support multipart. :type multipart: bool :param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS. :type https: bool :param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1) :type region: str :param verify: Whether or not to verify SSL certificates. Only required when using a Non-AWS S3 solution that only supports HTTPS with a self-signed certificate. :type verify: bool """<line_sep>self._bucket_config=S3BucketConfig(bucket=bucket_name host=host key=access_key secret=secret_key multipart=multipart secure=https region=region verify=verify)<line_sep>self.storage_uri=('s3://%(host)s/%(bucket_name)s'<if>host<else>'s3://%(bucket_name)s')%locals()<line_sep>StorageHelper.add_configuration(self._bucket_config log=self.log)<block_end><block_end>
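The mixin above only assembles an S3BucketConfig and registers it with StorageHelper; it expects a concrete class to supply the abstract log and storage_uri properties. A minimal sketch of exercising setup_upload() is given below; the DummyUploader class, logger name, and Minio-style endpoint are illustrative assumptions, not part of the original module.

# Hedged sketch: a throwaway subclass that satisfies the two abstract properties.
import logging

class DummyUploader(SetupUploadMixin):
    log = logging.getLogger('uploader')   # stands in for the abstract `log` property
    storage_uri = None                    # overwritten by setup_upload()

uploader = DummyUploader()
uploader.setup_upload('my-bucket', host='minio.local:9000', https=False)
print(uploader.storage_uri)               # expected: s3://minio.local:9000/my-bucket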