import pandas as pd
import pytest

from iexfinance.stocks import (
    get_market_gainers,
    get_market_iex_percent,
    get_market_iex_volume,
    get_market_losers,
    get_market_most_active,
)


class TestMarketMovers(object):
    def test_market_gainers(self):
        li = get_market_gainers()
        assert isinstance(li, pd.DataFrame)
        assert len(li) == pytest.approx(10, 1)

    def test_market_losers(self):
        li = get_market_losers()
        assert isinstance(li, pd.DataFrame)
        assert len(li) == pytest.approx(10, 1)

    def test_market_most_active(self):
        li = get_market_most_active()
        assert isinstance(li, pd.DataFrame)
        assert len(li) == pytest.approx(10, 1)

    def test_market_iex_volume(self):
        li = get_market_iex_volume()
        assert isinstance(li, pd.DataFrame)
        assert len(li) == pytest.approx(10, 1)

    def test_market_iex_percent(self):
        li = get_market_iex_percent()
        assert isinstance(li, pd.DataFrame)
        assert len(li) == pytest.approx(10, 1)
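A brief aside on the tolerance used in the tests above: pytest.approx(10, 1) sets a relative tolerance of 1, so any list length within 10 +/- 10 passes. A minimal sketch (values are illustrative):

import pytest

# rel=1 -> tolerance is max(1 * 10, 1e-12) = 10, so lengths from 0 to 20 are accepted
assert 15 == pytest.approx(10, 1)
assert not 21 == pytest.approx(10, 1)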
<import_stmt>re<import_from_stmt>ztag.annotation Annotation<import_from_stmt>ztag.annotation OperatingSystem<import_from_stmt>ztag protocols<import_stmt>ztag.test<class_stmt>FtpKebi(Annotation)<block_start>protocol=protocols.FTP<line_sep>subprotocol=protocols.FTP.BANNER<line_sep>port=<none><line_sep>impl_re=re.compile("^220- Kebi FTP Server" re.IGNORECASE)<line_sep>version_re=re.compile("\(Version (\d+(?:\.\d+)*)\)" re.IGNORECASE)<def_stmt>process self obj meta<block_start>banner=obj["banner"]<if_stmt>self.impl_re.search(banner)<block_start>meta.local_metadata.product="Kebi Ftpd"<block_end>match=self.version_re.search(banner)<if_stmt>match<block_start>meta.local_metadata.version=match.group(1)<block_end><return>meta<block_end>""" Tests "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 SINN \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Easy FTP\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. 
!\r\n" "220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n" """<block_end>
import os


class Credentials:
    def __init__(self, refresh_token, credentials):
        self.client_id = credentials.lwa_app_id
        self.client_secret = credentials.lwa_client_secret
        self.refresh_token = refresh_token or credentials.refresh_token
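A hedged usage sketch for the wrapper above, assuming the Credentials class is in scope: the credentials argument only needs to expose lwa_app_id, lwa_client_secret and refresh_token attributes; the namedtuple and token values below are made up for illustration.

from collections import namedtuple

LwaConfig = namedtuple("LwaConfig", "lwa_app_id lwa_client_secret refresh_token")
config = LwaConfig("amzn1.application-oa2-client.example", "client-secret", "stored-refresh-token")

creds = Credentials(refresh_token=None, credentials=config)   # falls back to config.refresh_token
assert creds.refresh_token == "stored-refresh-token"

creds = Credentials(refresh_token="explicit-token", credentials=config)
assert creds.refresh_token == "explicit-token"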
from unittest import TestCase

from regal import BaseInfo
from regal.grouping import GroupAlgorithm
from regal.check_interface import AlgorithmABC

# Run Method: python -m unittest -v tests.py


class TestBaseInfoInitial(TestCase):
    def test_empty_info(self):
        ab = BaseInfo('', '', '')
        with self.assertRaises(AttributeError):
            ab.grouping()

    def test_empty_info_version_host_isdict(self):
        ab = BaseInfo({}, '', '')
        self.assertIsNotNone(ab.grouping())

    def test_info_errortype(self):
        ab = BaseInfo({}, '1', 'sds')
        self.assertIsNotNone(ab.grouping())


class TestGroupingResult(TestCase):
    ver = {'ver1': '1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4,5.1.1.1,6.2.2.2,7.3.3.3,8.4.4.4'}
    combine_num = 4

    def test_combine_num(self):
        ab = BaseInfo(self.ver, self.combine_num)
        instance_combine_num = ab.grouping().result[0][1]
        self.assertEqual(len(instance_combine_num[1:-1][0]), self.combine_num)

    def test_schedule_num(self):
        schedule_num = 2
        ab = BaseInfo(self.ver, self.combine_num, schedule_num)
        instance_combine_num = ab.grouping().result[0][1]
        self.assertEqual(len(instance_combine_num[0][0].split(',')), schedule_num)


class TestInstance(TestCase):
    def test_algorithm_instance(self):
        self.assertIsInstance(GroupAlgorithm(), AlgorithmABC)
# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration tuning module."""<import_from_stmt>typing Any Dict List Optional Union<import_from_stmt>neural_compressor.ux.utils.exceptions ClientErrorException<import_from_stmt>neural_compressor.ux.utils.json_serializer JsonSerializer<import_from_stmt>neural_compressor.ux.utils.utils parse_bool_value parse_to_float_list parse_to_string_list <class_stmt>Strategy(JsonSerializer)<block_start>"""Configuration Strategy class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize configuration Strategy class."""<line_sep>super().__init__()<line_sep># [Required] One of neural_compressor.strategy.STRATEGIES self.name:str=data.get("name" "basic")<line_sep>self.sigopt_api_token:Optional[str]=data.get("sigopt_api_token" <none>)<line_sep>self.accuracy_weight:Optional[float]=data.get("accuracy_weight" <none>)<line_sep>self.latency_weight:Optional[float]=data.get("latency_weight" <none>)<block_end><block_end><class_stmt>MultiObjectives(JsonSerializer)<block_start>"""Configuration MultiObjectives class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize configuration MultiObjectives class."""<line_sep>super().__init__()<line_sep>self._objective:List[str]=data.get("objective" [])<line_sep>self._weight:List[float]=data.get("weight" [])<block_end>@property<def_stmt>objective self<arrow>List[str]<block_start>"""Get objectives."""<line_sep><return>self._objective<block_end>@objective.setter<def_stmt>objective self value:Union[<none> str List[str]]<arrow><none><block_start>"""Set inputs value."""<line_sep>self._objective=parse_to_string_list(value)<block_end>@property<def_stmt>weight self<arrow>List[float]<block_start>"""Get weights."""<line_sep><return>self._weight<block_end>@weight.setter<def_stmt>weight self value:Union[<none> float List[float]]<arrow><none><block_start>"""Set weights value."""<line_sep>self._weight=parse_to_float_list(value)<block_end><block_end><class_stmt>AccCriterion(JsonSerializer)<block_start>"""Configuration AccCriterion class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize configuration AccCriterion class."""<line_sep>super().__init__()<line_sep>self.relative:Optional[float]=data.get("relative" <none> )<line_sep># [Optional] (INT8-FP32)/FP32 self.absolute:Optional[float]=data.get("absolute" <none> )<line_sep># [Optional] INT8-FP32 # Set default accuracy criterion to relative <if_stmt>self.relative<is><none><and>self.absolute<is><none><block_start>self.relative=0.1<block_end><block_end><block_end><class_stmt>ExitPolicy(JsonSerializer)<block_start>"""Configuration ExitPolicy class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize Configuration ExitPolicy class."""<line_sep>super().__init__()<line_sep>self.timeout:Optional[int]=data.get("timeout" <none>)<line_sep>self.max_trials:Optional[int]=data.get("max_trials" 
<none>)<line_sep>self.performance_only:Optional[bool]=data.get("performance_only" <none>)<block_end><block_end><class_stmt>Workspace(JsonSerializer)<block_start>"""Configuration Workspace class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize Configuration Workspace class."""<line_sep>super().__init__()<line_sep>self.path:Optional[str]=data.get("path" <none>)# [Optional] self.resume:Optional[str]=data.get("resume" <none>)<block_end><block_end># [Optional] <class_stmt>Tuning(JsonSerializer)<block_start>"""Configuration Tuning class."""<def_stmt>__init__ self data:Dict[str Any]={}<arrow><none><block_start>"""Initialize Configuration Tuning class."""<line_sep>super().__init__()<line_sep>self.strategy:Strategy=Strategy()<if_stmt>data.get("strategy")<block_start>self.strategy=Strategy(data.get("strategy" {}))<block_end>self.accuracy_criterion:AccCriterion=AccCriterion(data.get("accuracy_criterion" {}) )<line_sep>self.multi_objectives:Optional[MultiObjectives]=<none><if_stmt>data.get("multi_objectives")<block_start>self.multi_objectives=MultiObjectives(data.get("multi_objectives" {}))<block_end>self.exit_policy:Optional[ExitPolicy]=<none><if_stmt>data.get("exit_policy")<block_start>self.exit_policy=ExitPolicy(data.get("exit_policy" {}))<block_end>self.random_seed:Optional[int]=data.get("random_seed" <none>)<line_sep>self.tensorboard:Optional[bool]=data.get("tensorboard" <none>)<line_sep>self.workspace:Optional[Workspace]=<none><if_stmt>data.get("workspace" {})<block_start>self.workspace=Workspace(data.get("workspace" {}))<block_end><block_end><def_stmt>set_timeout self timeout:int<arrow><none><block_start>"""Update tuning timeout in config."""<try_stmt><block_start>timeout=int(timeout)<if_stmt>timeout<l>0<block_start><raise>ValueError<block_end><block_end><except_stmt>ValueError<block_start><raise>ClientErrorException("The timeout value is not valid. "<concat>"Timeout should be non negative integer." )<block_end><if_stmt>self.exit_policy<block_start>self.exit_policy.timeout=timeout<block_end><else_stmt><block_start>self.exit_policy=ExitPolicy({"timeout":timeout})<block_end><block_end><def_stmt>set_max_trials self max_trials:int<arrow><none><block_start>"""Update max tuning trials in config."""<try_stmt><block_start>max_trials=int(max_trials)<if_stmt>max_trials<l>0<block_start><raise>ValueError<block_end><block_end><except_stmt>ValueError<block_start><raise>ClientErrorException("The max trials value is not valid. "<concat>"Max trials should be non negative integer." )<block_end><if_stmt>self.exit_policy<block_start>self.exit_policy.max_trials=max_trials<block_end><else_stmt><block_start>self.exit_policy=ExitPolicy({"max_trials":max_trials})<block_end><block_end><def_stmt>set_performance_only self performance_only:Any<arrow><none><block_start>"""Update performance only flag in config."""<try_stmt><block_start>performance_only=parse_bool_value(performance_only)<block_end><except_stmt>ValueError<block_start><raise>ClientErrorException("The performance_only flag value is not valid. "<concat>"Performance_ony should be a boolean." 
)<block_end><if_stmt>self.exit_policy<block_start>self.exit_policy.performance_only=performance_only<block_end><else_stmt><block_start>self.exit_policy=ExitPolicy({"performance_only":performance_only})<block_end><block_end><def_stmt>set_random_seed self random_seed:int<arrow><none><block_start>"""Update random seed value in config."""<try_stmt><block_start>random_seed=int(random_seed)<block_end><except_stmt>ValueError<block_start><raise>ClientErrorException("The random seed value is not valid. "<concat>"Random seed should be an integer." )<block_end>self.random_seed=random_seed<block_end><def_stmt>set_workspace self path:str<arrow><none><block_start>"""Update tuning workspace path in config."""<if_stmt>self.workspace<is><none><block_start>self.workspace=Workspace()<block_end>self.workspace.path=path<block_end><block_end>
import pytest

from dddpy.domain.book import Book, Isbn


class TestBook:
    def test_constructor_should_create_instance(self):
        book = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
        )
        assert book.id == "book_01"
        assert book.isbn == Isbn("978-0321125217")
        assert book.title == "Domain-Driven Design: Tackling Complexity in the Heart of Softwares"
        assert book.page == 560
        assert book.read_page == 0

    def test_book_entity_should_be_identified_by_id(self):
        book_1 = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
            read_page=50,
        )
        book_2 = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
            read_page=120,
        )
        book_3 = Book(
            id="book_02",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
            read_page=50,
        )
        assert book_1 == book_2
        assert book_1 != book_3

    @pytest.mark.parametrize(
        "read_page",
        [(0), (1), (320)],
    )
    def test_read_page_setter_should_update_value(self, read_page):
        book = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
        )
        book.read_page = read_page
        assert book.read_page == read_page

    @pytest.mark.parametrize(
        "read_page, expected",
        [(0, False), (559, False), (560, True)],
    )
    def test_is_already_read_should_true_when_read_page_has_reached_last_page(self, read_page, expected):
        book = Book(
            id="book_01",
            isbn=Isbn("978-0321125217"),
            title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
            page=560,
        )
        book.read_page = read_page
        assert book.is_already_read() == expected
class Button():
    LEFT = 0
    CENTER = 1
    RIGHT = 2


class Key():
    """
    Key codes for InputEmulation.Keyboard object. Can be entered directly or
    concatenated with an existing string, e.g. ``type(Key.TAB)``
    """
    ENTER = "{ENTER}"
    ESC = "{ESC}"
    BACKSPACE = "{BACKSPACE}"
    DELETE = "{DELETE}"
    F1 = "{F1}"
    F2 = "{F2}"
    F3 = "{F3}"
    F4 = "{F4}"
    F5 = "{F5}"
    F6 = "{F6}"
    F7 = "{F7}"
    F8 = "{F8}"
    F9 = "{F9}"
    F10 = "{F10}"
    F11 = "{F11}"
    F12 = "{F12}"
    F13 = "{F13}"
    F14 = "{F14}"
    F15 = "{F15}"
    F16 = "{F16}"
    HOME = "{HOME}"
    END = "{END}"
    LEFT = "{LEFT}"
    RIGHT = "{RIGHT}"
    DOWN = "{DOWN}"
    UP = "{UP}"
    PAGE_DOWN = "{PAGE_DOWN}"
    PAGE_UP = "{PAGE_UP}"
    TAB = "{TAB}"
    CAPS_LOCK = "{CAPS_LOCK}"
    NUM_LOCK = "{NUM_LOCK}"
    SCROLL_LOCK = "{SCROLL_LOCK}"
    INSERT = "{INSERT}"
    SPACE = "{SPACE}"
    PRINTSCREEN = "{PRINTSCREEN}"
    ALT = "{ALT}"
    CMD = "{CMD}"
    CTRL = "{CTRL}"
    META = "{META}"
    SHIFT = "{SHIFT}"
    WIN = "{WIN}"
    PAUSE = "{PAUSE}"
    NUM0 = "{NUM0}"
    NUM1 = "{NUM1}"
    NUM2 = "{NUM2}"
    NUM3 = "{NUM3}"
    NUM4 = "{NUM4}"
    NUM5 = "{NUM5}"
    NUM6 = "{NUM6}"
    NUM7 = "{NUM7}"
    NUM8 = "{NUM8}"
    NUM9 = "{NUM9}"
    SEPARATOR = "{SEPARATOR}"
    ADD = "{ADD}"
    MINUS = "{MINUS}"
    MULTIPLY = "{MULTIPLY}"
    DIVIDE = "{DIVIDE}"


class KeyModifier():
    """
    Can be used with type() to modify another key, e.g.
    ``type(Key.DELETE, Key.CTRL+Key.ALT)``
    """
    CTRL = "{CTRL}"
    SHIFT = "{SHIFT}"
    ALT = "{ALT}"
    META = "{META}"
    CMD = "{CMD}"
    WIN = "{WIN}"
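Since the constants above are plain strings, they concatenate exactly as the docstrings describe. A small sketch of that convention (the type() call itself belongs to the surrounding automation library and is not shown here):

combo = KeyModifier.CTRL + KeyModifier.ALT
assert combo == "{CTRL}{ALT}"

text = "report" + Key.TAB + "2024" + Key.ENTER
assert text == "report{TAB}2024{ENTER}"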
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> <import_stmt>os<import_stmt>time<import_stmt>shutil<import_stmt>bpy<import_stmt>mathutils<def_stmt>write_objc filepath context<block_start>out=open(filepath 'w')<line_sep>current_scene=bpy.context.scene<line_sep>objs=current_scene.objects<line_sep>#i know there has to be an easier way to do this, but i'm too lazy to look it up <for_stmt>next_obj objs<block_start><if_stmt>next_obj.type<eq>'MESH'<block_start>mesh=next_obj<block_end><block_end>print("Writing Object")<for_stmt>i current_scene.objects<block_start>i.select=<false>#deselect all objects <block_end>mesh.select=<true><line_sep>current_scene.objects.active=mesh#set the mesh object to current bpy.ops.object.mode_set(mode='EDIT')#Operators bpy.ops.mesh.select_all(action='SELECT')#select all the face/vertex/edge bpy.ops.mesh.quads_convert_to_tris()#Operators current_scene.update()<line_sep>bpy.ops.object.mode_set(mode='OBJECT')# set it in object mesh=mesh.data<line_sep>objectname=mesh.name<line_sep>basename=objectname.capitalize()<line_sep>out.write('#import "OpenGLCommon.h"\n\n\n')<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('static const TexturedVertexData3D %sVertexData[] = {\n'%basename)<line_sep>#for face in uv: #loop through the faces uv_layer=mesh.active_uv_texture<for_stmt>face mesh.faces<block_start>faceUV=uv_layer.data[face.index]<line_sep>i=0<for_stmt>index face.vertices<block_start><if_stmt>len(face.vertices)<eq>3<block_start>vert=mesh.vertices[index]<line_sep>out.write('\t{/*v:*/{%f, %f, %f}, '%(vert.co.x vert.co.y vert.co.z))<line_sep>out.write('/*n:*/{%f, %f, %f}, '%(vert.normal.x vert.normal.y vert.normal.z))<line_sep>out.write('/*t:*/{%f, %f}'%(faceUV.uv[i][0] faceUV.uv[i][1]))<line_sep>out.write('},\n')<line_sep>i<augadd>1<block_end><block_end><block_end>out.write('};\n\n')<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('static const ColoredVertexData3D %sVertexData[] = {\n'%basename)<line_sep>color_layer=mesh.active_vertex_color<for_stmt>face mesh.faces<block_start><if_stmt>len(face.vertices)<eq>3<block_start>faceC=color_layer.data[face.index]<line_sep>i=0<for_stmt>index face.vertices<block_start>vert=mesh.vertices[index]<line_sep>out.write('\t{/*v:*/{%f, %f, %f}, '%(vert.co.x vert.co.y vert.co.z))<line_sep>out.write('/*n:*/{%f, %f, %f}, '%(vert.normal.x vert.normal.y vert.normal.z))<line_sep>out.write('/*c:*/{%f, %f, %f, %f}'%(faceC.color1[i] faceC.color2[i] faceC.color3[i] faceC.color4[i]))<line_sep>out.write('},\n')<line_sep>i<augadd>1<block_end><block_end><block_end>out.write('};\n\n')<block_end><else_stmt><block_start>out.write<line_sep>out.write('static const VertexData3D %sVertexData[] = {\n'%basename)<for_stmt>face 
mesh.faces<block_start><if_stmt>len(face.vertices)<eq>3<block_start><for_stmt>index face.vertices<block_start>vert=mesh.vertices[index]<line_sep>out.write('\t{/*v:*/{%f, %f, %f}, '%(vert.co.x vert.co.y vert.co.z))<line_sep>out.write('/*n:*/{%f, %f, %f} '%(vert.normal.x vert.normal.y vert.normal.z))<line_sep>out.write('},\n')<block_end><block_end><block_end>out.write('};\n\n')<block_end>#if editmode: Window.EditMode(1) out.write('#define k%sNumberOfVertices\t%i\n'%(basename len(mesh.faces)<times>3))<line_sep>out.write('// Drawing Code:\n')<line_sep>out.write('// glEnableClientState(GL_VERTEX_ARRAY);\n')<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('// glEnableClientState(GL_TEXTURE_COORD_ARRAY);\n')<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('// glEnableClientState(GL_COLOR_ARRAY);\n')<line_sep>out.write('// glEnable(GL_COLOR_MATERIAL)\n')<block_end>out.write('// glEnableClientState(GL_NORMAL_ARRAY);\n')<line_sep>out.write('// glVertexPointer(3, GL_FLOAT, sizeof(')<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('TexturedVertexData3D')<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('ColoredVertexData3D')<block_end><else_stmt><block_start>out.write('VertexData3D')<block_end>out.write('), &%sVertexData[0].vertex);\n'%basename)<line_sep>out.write('// glNormalPointer(GL_FLOAT, sizeof(')<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('TexturedVertexData3D')<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('ColoredVertexData3D')<block_end><else_stmt><block_start>out.write('VertexData3D')<block_end>out.write('), &%sVertexData[0].normal);\n'%basename)<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('// glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &%sVertexData[0].texCoord);\n'%basename)<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('// glColorPointer(4, GL_FLOAT, sizeof(ColoredVertexData3D), &%sVertexData[0].color);\n'%basename)<block_end>out.write('// glDrawArrays(GL_TRIANGLES, 0, k%sNumberOfVertices);\n'%basename)<line_sep>out.write('// glDisableClientState(GL_VERTEX_ARRAY);\n')<if_stmt>len(mesh.uv_textures)<g>0<block_start>out.write('// glDisableClientState(GL_TEXTURE_COORD_ARRAY);\n')<block_end><elif_stmt>len(mesh.vertex_colors)<g>0<block_start>out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n')<line_sep>out.write('// glDisable(GL_COLOR_MATERIAL);\n')<block_end>out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n\n\n')<line_sep>out.close()<block_end><def_stmt>save operator context filepath="" use_triangles=<false> use_edges=<true> use_normals=<false> use_hq_normals=<false> use_uvs=<true> use_materials=<true> copy_images=<false> use_modifiers=<true> use_rotate_x90=<true> use_blen_objects=<true> group_by_object=<false> group_by_material=<false> keep_vertex_order=<false> use_vertex_groups=<false> use_nurbs=<true> use_selection=<true> use_all_scenes=<false> use_animation=<false> <block_start>write_objc(filepath context)<line_sep><return>{'FINISHED'}<block_end>
""" Utilities based on building baseline machine learning models. """<import_from_stmt>typing Union Optional<import_from_stmt>pandas DataFrame Series<import_from_stmt>numpy mean tile empty std square sqrt log<as>nplog reciprocal<import_from_stmt>scipy.stats boxcox normaltest mode<import_from_stmt>sklearn.compose ColumnTransformer<import_from_stmt>sklearn.exceptions ConvergenceWarning DataConversionWarning<import_from_stmt>sklearn.impute SimpleImputer<import_from_stmt>sklearn.linear_model LinearRegression LogisticRegression<import_from_stmt>sklearn.metrics mean_squared_error roc_auc_score<import_from_stmt>sklearn.mixture GaussianMixture<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.pipeline Pipeline<import_from_stmt>sklearn.preprocessing FunctionTransformer OneHotEncoder RobustScaler StandardScaler label_binarize <import_from_stmt>sklearn.utils._testing ignore_warnings<import_from_stmt>.auxiliary infer_dtypes<import_from_stmt>.enum PredictionTask<line_sep>BASELINE_CLASSIFIER=Pipeline([('imputer' SimpleImputer()) ('classifier' LogisticRegression())])<line_sep>BASELINE_REGRESSION=Pipeline([('imputer' SimpleImputer()) ('classifier' LinearRegression())])<line_sep>NUMERIC_TRANSFORMER=Pipeline([('imputer' SimpleImputer()) ('scaler' StandardScaler())])<line_sep>CATEGORICAL_TRANSFORMER=Pipeline([('imputer' SimpleImputer(strategy='most_frequent')) ('encoder' OneHotEncoder(handle_unknown='ignore'))])<line_sep>ORDINAL_TRANSFORMER=<none># Not implemented <def_stmt>get_prediction_task df:DataFrame label:str<block_start>"Heuristics to infer prediction task (classification/regression)."<line_sep><return>'classification'<if>len(set(df[label]))<eq>2<else>'regression'<block_end>@ignore_warnings(category=ConvergenceWarning)<def_stmt>baseline_predictions df:DataFrame label:str task='classification'<block_start>"Train a baseline model and predict for a test set"<line_sep># 0. Infer the prediction task task=get_prediction_task(df=df label=label)<line_sep># 1. Define the baseline model model=BASELINE_CLASSIFIER<if>task<eq>'classification'<else>BASELINE_REGRESSION<line_sep># 2. Train overall model x_orig,y_orig=df.drop(label axis=1) label_binarize(df[label] classes=list(set(df[label])))<line_sep>x_train,x_test,y_train,y_test=train_test_split(x_orig y_orig test_size=0.3 random_state=42)<line_sep>model.fit(x_train.select_dtypes('number') y_train)<line_sep># 3. Predict <if_stmt>task<eq>'regression'<block_start>y_pred=model.predict(x_test.select_dtypes('number'))<block_end><elif_stmt>task<eq>'classification'<block_start>y_pred=model.predict_proba(x_test.select_dtypes('number'))[: 1]<block_end># 4. Return both the predictions and x_test, y_test to analyze the performances <return>y_pred x_test y_test<block_end>@ignore_warnings(category=DataConversionWarning)<def_stmt>baseline_performance df:DataFrame label:str task:PredictionTask=PredictionTask.CLASSIFICATION adjusted_metric:bool=<false><block_start>"""Train a baseline model, predict for a test set and return the performance. Args: - df (DataFrame): original dataset - label (str): name of target feature column - task (PredictionTask): classification, regression - adjusted_metric (bool): if True, return metric as percentage of max achievable performance """<line_sep># 0. Infer the prediction task task=get_prediction_task(df=df label=label)<line_sep># 1. Define the baseline performance metric metric=roc_auc_score<if>task<eq>'classification'<else>mean_squared_error<line_sep># 2. 
Get the baseline predictions y_pred,_,y_test=baseline_predictions(df=df label=label task=task)<line_sep># 3. Get the performance <if_stmt>adjusted_metric<block_start>perf=adjusted_performance(y_test y_pred task=task metric=metric)<block_end><else_stmt><block_start>perf=metric(y_test y_pred)<block_end><return>perf<block_end><def_stmt>adjusted_performance y_true y_pred task:PredictionTask metric:callable<block_start>"""Calculates the adjusted metric as ratio of real to maximum performance. Returns the percentage to the best achievable performance starting from a baseline. """<line_sep>task=PredictionTask(task)<line_sep>y_default=mean(y_true)<if>task<eq>PredictionTask.CLASSIFICATION<else>mode(y_true).mode[0]# define the value y_base=tile(y_default (len(y_true) 1))# create an array with default value best_perf=metric(y_true y_true)<line_sep>base_perf=metric(y_true y_base)<line_sep>real_perf=metric(y_true y_pred)<line_sep><return>(real_perf-base_perf)/(best_perf-base_perf)<block_end>@ignore_warnings(category=DataConversionWarning)<def_stmt>performance_per_feature_values df:DataFrame feature:str label:str task='classification'<block_start>"""Performance achieved per each value of a groupby feature."""<line_sep># 0. Infer the prediction task task=get_prediction_task(df=df label=label)<line_sep># 1. Define the baseline performance metric metric=roc_auc_score<if>task<eq>'classification'<else>mean_squared_error<line_sep># 2. Get the baseline predictions y_pred,x_test,y_test=baseline_predictions(df=df label=label task=task)<line_sep># 3. Get the performances per feature value uniques=set(x_test[feature])<line_sep>results={}<for_stmt>value uniques# for each category <block_start>y_pred_cat=y_pred[x_test[feature]<eq>value]<line_sep>y_true_cat=y_test[x_test[feature]<eq>value]<try_stmt><block_start>results[value]=metric(y_true_cat y_pred_cat)<block_end><except_stmt>ValueError<as>exc<block_start>results[value]=f'[ERROR] Failed performance metric with message: {exc}'<block_end><block_end><return>results<block_end><def_stmt>performance_per_missing_value df:DataFrame feature:str label:str task='classification'<block_start>"""Performance difference between valued and missing values in feature."""<line_sep># 0. Infer the prediction task task=get_prediction_task(df=df label=label)<line_sep># 1. Define the baseline performance metric metric=roc_auc_score<if>task<eq>'classification'<else>mean_squared_error<line_sep># 2. Get the baseline predictions y_pred,x_test,y_test=baseline_predictions(df=df label=label task=task)<line_sep># 3. Get the performance per valued vs missing feature missing_mask=x_test[feature].isna()<line_sep>results={}<line_sep>results['missing']=metric(y_test[missing_mask] y_pred[missing_mask])<line_sep>results['valued']=metric(y_test[~missing_mask] y_pred[~missing_mask])<line_sep><return>results<block_end>@ignore_warnings(category=ConvergenceWarning)<def_stmt>predict_missingness df:DataFrame feature:str<block_start>"Train a baseline model to predict the missingness of a feature value."<line_sep># 0. Preprocessing df=df.copy()# avoid altering the original DataFrame target=f'is_missing_{feature}'<line_sep># 1. Define the baseline model model=BASELINE_CLASSIFIER<line_sep># 2. Create the new target df[target]=df[feature].isna()<line_sep># 3. Train overall model x_orig,y_orig=df.drop([feature target] axis=1) df[target]<line_sep>x_train,x_test,y_train,y_test=train_test_split(x_orig y_orig test_size=0.3 random_state=42)<line_sep>model.fit(x_train.select_dtypes('number') y_train)<line_sep># 4. 
Predict y_pred=model.predict_proba(x_test.select_dtypes('number'))[: 1]<line_sep># 5. Return the area under the roc curve <return>roc_auc_score(y_test y_pred)<block_end><def_stmt>standard_transform df dtypes skip:Optional[list]=<none> robust=<false><block_start>"""Applies standard transformation to the dataset (imputation, centering and scaling), returns transformed data and the fitted transformer. Numerical data is imputed with mean, centered and scaled by 4 standard deviations. Categorical data is imputed with mode. Encoding is not performed in this stage to preserve the same columns. If robust is passed as True, will truncate numerical data before computing statistics. [1]From 1997 <NAME>; Martinez, <NAME>. - Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf """<line_sep>skip=[]<if>skip<is><none><else>skip<line_sep>numerical_features=[key<for>key,value dtypes.items()<if>value<eq>'numerical'<and>key<not><in>skip]<line_sep>categorical_features=[key<for>key,value dtypes.items()<if>value<eq>'categorical'<and>key<not><in>skip]<assert_stmt>len(numerical_features+categorical_features+skip)<eq>len(df.columns) 'the union of dtypes keys with skip should be the same as the df columns'<if_stmt>robust<block_start>numeric_transformer=Pipeline([('imputer' SimpleImputer()) ('scaler' RobustScaler(quantile_range=(5.0 95.0)))])<block_end><else_stmt><block_start>numeric_transformer=NUMERIC_TRANSFORMER<block_end>preprocessor=ColumnTransformer(transformers=[# Numerical vars are scaled by 4sd so that most of the data are fit in the [-1, 1] range ('num' Pipeline(numeric_transformer.steps+[('divby4' FunctionTransformer(<lambda>x:x/4))]) numerical_features) ('cat' Pipeline([('impute' SimpleImputer(strategy='most_frequent'))]) categorical_features)] remainder='passthrough')<line_sep>new_column_order=numerical_features+categorical_features+skip<line_sep>tdf=DataFrame(preprocessor.fit_transform(df) index=df.index columns=new_column_order)<line_sep><return>tdf preprocessor<block_end><def_stmt>performance_one_vs_rest df:DataFrame label_feat:str _class:str dtypes=<none><block_start>"""Train a classifier to predict a class in binary fashion against all other classes. A normalized dataframe should be passed for best results"""<line_sep># 0. Preprocessing df=df.copy()# avoid altering the original DataFrame # 1. Define the baseline model <if_stmt><not>dtypes<block_start>dtypes=infer_dtypes(df)<block_end>categorical_features=[key<for>key,value dtypes.items()<if>value<eq>'categorical'<and>key<ne>label_feat]<line_sep>preprocessor=ColumnTransformer(transformers=[('cat' CATEGORICAL_TRANSFORMER categorical_features)])<line_sep># OHE categorical variables model=Pipeline([('preprocessing' preprocessor) ('classifier' LogisticRegression())])<line_sep># 2. Train overall model x_orig,y_orig=df.drop(label_feat axis=1) label_binarize(df[label_feat] classes=[_class]).squeeze()<line_sep>x_train,x_test,y_train,y_test=train_test_split(x_orig y_orig test_size=0.3 random_state=24)<line_sep>model.fit(x_train y_train)<line_sep># 3. Predict y_pred=model.predict_proba(x_test)[: 1]<line_sep># 4. 
Return the area under the roc curve <return>roc_auc_score(y_test y_pred)<block_end><def_stmt>center_of_mass_statistic column:Series col_dtype:str<arrow>Union[float int str]<block_start>"Returns a center of mass statistic of a column based on its dtype."<line_sep><return>column.mean()<if>col_dtype<eq>'numerical'<else>column.mode()[0]<block_end># only first mode <def_stmt>estimate_centroid df:DataFrame dtypes:dict=<none><block_start>"""Makes a centroid estimation for a given dataframe. Will use provided dtypes or infer in order to use best statistic columnwise"""<if_stmt>dtypes<block_start><if_stmt><not>all((col<in>dtypes<for>col df.columns))<block_start>dtypes=dtypes.update(infer_dtypes(df skip=dtypes.columns))<block_end><block_end><else_stmt><block_start>dtypes=infer_dtypes(df)<block_end>centroid=Series(df.iloc[0])<for_stmt>col centroid.index<block_start>centroid[col]=center_of_mass_statistic(df[col] dtypes[col])<block_end><return>centroid<block_end><def_stmt>heom x_df:DataFrame y_df dtypes<block_start>"""Implements the Heterogeneous Euclidean-Overlap Metric between a sample x and a reference y. The data is assumed to already be preprocessed (normalized and imputed). [1]From 1997 <NAME>; <NAME>. - Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf """<line_sep>distances=DataFrame(empty(x_df.shape) index=x_df.index columns=x_df.columns)<line_sep>distance_funcs={'categorical':<lambda>x y:0<if>x<eq>y<else>1 'numerical':<lambda>x y:abs(x-y)}<line_sep># Here we are assuming the data to be previously scaled <for_stmt>col_idx,column enumerate(distances.columns)<block_start>distances[column]=x_df[column].apply(distance_funcs[dtypes[column]] args=[y_df[col_idx]])<block_end><return>distances<block_end><def_stmt>estimate_sd sample:DataFrame reference=<none> dtypes=<none><block_start>"""Estimates the standard deviation of a sample of records. A reference can be passed in order to avoid new computation of mean or to use distances to another reference point. The reference is expected as a (1, N) array where N is the number of columns in the sample. Returns: std_dev: the standard deviation of the distance vectors of the sample to the reference point std_distances: the distances of the sample points to the reference point scaled by std_dev """<if_stmt>dtypes# Ensure dtypes are compatible with sample <block_start><if_stmt><not>all((col<in>dtypes<for>col sample.columns))<block_start>dtypes=dtypes.update(infer_dtypes(sample skip=dtypes.columns))<block_end><block_end><else_stmt><block_start>dtypes=infer_dtypes(sample)<block_end><if_stmt>reference<is><none><block_start>reference=estimate_centroid(sample dtypes)<block_end><else_stmt><block_start><assert_stmt>len(reference)<eq>len(sample.columns) "The provided reference point does not have the same dimension as the sample records"<block_end>distances=heom(x_df=sample y_df=reference dtypes=dtypes)<line_sep>euclidean_distances=(distances.apply(square).sum(axis=1)/len(sample.columns)).apply(sqrt)<line_sep>std_dev=std(euclidean_distances)<line_sep>std_distances=euclidean_distances/std_dev<line_sep><return>std_dev std_distances<block_end><def_stmt>gmm_clustering data n_gaussians<block_start>"""Produces a GMM model with n_gaussians to cluster provided data."""<line_sep>gmm_=GaussianMixture(n_components=n_gaussians).fit(data)<line_sep><return>gmm_.predict(data) gmm_.aic(data)<block_end><def_stmt>normality_test data suite='full' p_th=5e-3<block_start>"""Performs a normality test on the data. 
Null hypothesis, data comes from normal distribution. A transformations taken from a suite is applied to the data before each run of the normal test. The first transformation in the suite that passes the normalcy test is returned Returns: result: True if any transformation led to a positive normal test, False otherwise test: The first test in the suite to lead to positive normal test"""<line_sep>transforms={<none>:<lambda>x:x 'inverse':reciprocal 'square root':sqrt 'log':nplog 'Box Cox':boxcox}<if_stmt>suite<eq>'full'<block_start>suite=transforms.keys()<block_end><else_stmt><block_start>suite=list(suite)<if>isinstance(suite str)<else>suite<block_end><for_stmt>transform suite<block_start><try_stmt><block_start>transformed_data=transforms[transform](data)<line_sep>_,p_stat=normaltest(transformed_data nan_policy='raise')<block_end><except_stmt>(AttributeError TypeError ZeroDivisionError ValueError)<block_start><continue><block_end><if_stmt>p_stat<g>p_th<block_start><return><true> transform p_stat<block_end><block_end><return><false> <none> <none><block_end>
from suffix_trees import STree


def test_lcs():
    a = ["abeceda", "abecednik", "abeabecedabeabeced", "abecedaaaa", "aaabbbeeecceeeddaaaaabeceda"]
    st = STree.STree(a)
    assert st.lcs() == "abeced", "LCS test"


def test_missing():
    text = "name language w en url http w namelanguage en url http"
    stree = STree.STree(text)
    assert stree.find("law") == -1
    assert stree.find("ptth") == -1
    assert stree.find("name language w en url http w namelanguage en url httpp") == -1


def test_find():
    st = STree.STree("abcdefghab")
    assert st.find("abc") == 0
    assert st.find_all("ab") == {0, 8}
from falcor import *


def render_graph_ForwardRendering():
    loadRenderPassLibrary("DepthPass.dll")
    loadRenderPassLibrary("ForwardLightingPass.dll")
    loadRenderPassLibrary("BlitPass.dll")
    testForwardRendering = RenderGraph("ForwardRenderer")
    DepthPass = createPass("DepthPass", {'depthFormat': ResourceFormat.D32Float})
    testForwardRendering.addPass(DepthPass, "DepthPass")
    SkyBox = createPass("SkyBox")
    testForwardRendering.addPass(SkyBox, "SkyBox")
    ForwardLightingPass = createPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False})
    testForwardRendering.addPass(ForwardLightingPass, "ForwardLightingPass")
    BlitPass = createPass("BlitPass", {'filter': SamplerFilter.Linear})
    testForwardRendering.addPass(BlitPass, "BlitPass")
    testForwardRendering.addEdge("ForwardLightingPass.color", "BlitPass.src")
    testForwardRendering.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
    testForwardRendering.addEdge("DepthPass.depth", "SkyBox.depth")
    testForwardRendering.addEdge("SkyBox.target", "ForwardLightingPass.color")
    testForwardRendering.markOutput("BlitPass.dst")
    testForwardRendering.markOutput("ForwardLightingPass.motionVecs")
    return testForwardRendering


ForwardRendering = render_graph_ForwardRendering()
try:
    m.addGraph(ForwardRendering)
except NameError:
    None
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil
import sys
import tempfile

from observations.r.engel import engel


def test_engel():
    """Test module engel.py by downloading engel.csv and testing shape of
    extracted data has 235 rows and 2 columns
    """
    test_path = tempfile.mkdtemp()
    x_train, metadata = engel(test_path)
    try:
        assert x_train.shape == (235, 2)
    except:
        shutil.rmtree(test_path)
        raise
import enum
from collections.abc import Mapping

from voluptuous import validators

SCHEMA_KWD = "schema"
META_KWD = "meta"


def lockfile_version_schema(value):
    expected = [LOCKFILE_VERSION.V2.value]  # pylint: disable=no-member
    msg = "invalid schema version {}, expected one of {}".format(value, expected)
    return validators.Any(*expected, msg=msg)(value)


class VersionEnum(str, enum.Enum):
    @classmethod
    def all_versions(cls):
        return [v.value for v in cls]


class LOCKFILE_VERSION(VersionEnum):
    V1 = "1.0"
    V2 = "2.0"

    @classmethod
    def from_dict(cls, data):
        # 1) if it's empty or is not a dict, use the latest one (V2).
        # 2) use the `schema` identifier if it exists and is a supported
        #    version
        # 3) if it's not in any of the supported versions, use the latest one
        # 4) if there's no identifier, it's a V1
        if not data or not isinstance(data, Mapping):
            return cls(cls.V2)

        version = data.get(SCHEMA_KWD)
        if version:
            return cls(version if version in cls.all_versions() else cls.V2)
        return cls(cls.V1)
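A hedged sketch of the resolution rules in from_dict (assumes the module above is importable; the module name used in the import is hypothetical):

from lockfile_schema import LOCKFILE_VERSION, SCHEMA_KWD  # hypothetical module name

assert LOCKFILE_VERSION.from_dict({}) is LOCKFILE_VERSION.V2                    # empty -> latest
assert LOCKFILE_VERSION.from_dict({SCHEMA_KWD: "2.0"}) is LOCKFILE_VERSION.V2   # supported schema version
assert LOCKFILE_VERSION.from_dict({SCHEMA_KWD: "9.9"}) is LOCKFILE_VERSION.V2   # unknown version -> latest
assert LOCKFILE_VERSION.from_dict({"stages": {}}) is LOCKFILE_VERSION.V1        # no schema key -> V1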
"""This shows how we can connect to an instance of MongoDB Atlas to read/write market tick data Note, that you will need to get a MongoDB Atlas cloud account, and change the connection string below for it to work """<line_sep>__author__='saeedamen'# <NAME> / <EMAIL> # # Copyright 2020 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro # # See the License for the specific language governing permissions and limitations under the License. # <import_stmt>datetime<import_stmt>time<import_from_stmt>tcapy.util.loggermanager LoggerManager<import_from_stmt>tcapy.conf.constants Constants<import_from_stmt>tcapy.data.datafactory MarketRequest<import_from_stmt>tcapy.data.databasesource DatabaseSourceArctic<import_from_stmt>tcapy.util.mediator Mediator<import_from_stmt>tcapy.util.customexceptions *<import_from_stmt>test.config *<line_sep>logger=LoggerManager().getLogger(__name__)<line_sep>constants=Constants()<line_sep>logger.info('Make sure you have created folder '+constants.csv_folder+' & '+constants.temp_data_folder+' otherwise tests will fail')<line_sep>Mediator.get_volatile_cache().clear_cache()<line_sep>######################################################################################################################## # YOU MAY NEED TO CHANGE THESE start_date='26 Apr 2017'<line_sep>finish_date='05 Jun 2017'<line_sep>ticker='EURUSD'<line_sep># Market data parameters for tables/databases test_harness_arctic_market_data_table='market_data_table_test_harness'<line_sep>test_harness_arctic_market_data_store='arctic-testharness'<line_sep>csv_market_data_store=resource('small_test_market_df.parquet')<line_sep>csv_reverse_market_data_store=resource('small_test_market_df_reverse.parquet')<line_sep># Note, you'll need to get your own connection string! # You can setup your own MongoDB instance on the cloud using MongoDB Atlas https://www.mongodb.com/cloud/atlas # It will give you the connection string to use arctic_connection_string="mongodb+srv://<username>:<password>@cluster0.blah-blah.mongodb.net/?retryWrites=true&w=majority"<def_stmt>write_mongo_db_atlas_arctic <block_start>"""Tests we can write market data to Arctic/MongoDB on Atlas (cloud) """<line_sep>market_loader=Mediator.get_tca_market_trade_loader(version=tcapy_version)<line_sep>### Test we can read data from CSV and dump to Arctic (and when read back it matches CSV) db_start_date='01 Jan 2016'<line_sep>db_finish_date=pd.Timestamp(datetime.datetime.utcnow())<line_sep>database_source=DatabaseSourceArctic(postfix='testharness' arctic_lib_type='CHUNK_STORE' connection_string=arctic_connection_string)<line_sep># Write CSV to Arctic database_source.convert_csv_to_table(csv_market_data_store ticker test_harness_arctic_market_data_table if_exists_table='replace' if_exists_ticker='replace' market_trade_data='market' remove_duplicates=<false>)<line_sep># Read back data from Arctic and compare with CSV market_request=MarketRequest(start_date=db_start_date finish_date=db_finish_date ticker=ticker data_store=database_source # test_harness_arctic_market_data_store, market_data_database_table=test_harness_arctic_market_data_table)<line_sep>market_df_load=market_loader.get_market_data(market_request=market_request)<line_sep>print(market_df_load)<block_end><if_stmt>__name__<eq>'__main__'<block_start>start=time.time()<line_sep>write_mongo_db_atlas_arctic()<line_sep>finish=time.time()<line_sep>print('Status: calculated '+str(round(finish-start 3))+"s")<block_end>
from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution('foundations_rest_api').version
except DistributionNotFound:
    __version__ = None
from allennlp.predictors.sentence_tagger import SentenceTaggerPredictor  # noqa: F401

# This component lives in the main repo because we need it there for tests.
# The MIT License (MIT) # # Copyright (c) 2014 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Transaction Database # # txck - transaction composite key (see below) # txid_hint - hash integer, provides pruning to likely txid # txn - the binary blob of the transaction # # The database is broken up into files about 1.75GB each (so file systems like # FAT32 work). The database filename contains two numbers, a number of # partitions (N) and an index (i) which is in the range [0, N). These files # will be denoted as file(N, i) # # When inserting, we insert into the highest N. Given an id, we insert into # file(N, get_q(txid) % N). The function get_q hash bytes into an integer # # When searching, we must check each partition level, so to search for id, we # start at the highest N, and check: # 1. file(N, get_q(txid) % N) # 2. file(N / 2, get_q(txid) % (N / 2)) # 3. file(N / 4, get_q(txid) % (N / 4)) # and so on, until we reach a k, such that (N / (2 ** k)) < 4. # # We can also, over time migrate values into higher levels. This is a future # todo, if performance becomes an issue. # Composite Keys # # We use composite keys so we can optimize space with the 8-byte rowid we get # by default in a sqlite database as well as the speed gain as they are the # keys in the B-Tree. (see: http://www.sqlite.org/lang_createtable.html#rowid) # # txck (transaction-composite-key: 43 bits) # - (block-id:23 bits) (txn-index:20 bits) # # With these keys, we can support up to 8 million blocks, each block with up # to 1 million transactions. # Hints # # A hint (hash integer) the integer value of a byte string to quickly prune # any obviously non-matching elements. The remaining elements must then be # compared against confirmed values, since the hash may yield false positives. <import_stmt>os<import_stmt>random<import_stmt>sqlite3<import_stmt>struct<import_from_stmt>. database<import_from_stmt>. keys<import_from_stmt>.. coins<import_from_stmt>.. protocol<import_from_stmt>.. script<import_from_stmt>.. 
util<line_sep>__all__=['Database']<def_stmt>get_q txid<block_start>'Compute the index q from a txid.'<line_sep><return>struct.unpack('>I' txid[:4])[0]<block_end>_KEY_DUP='PRIMARY KEY must be unique'<line_sep>_0=chr(0)<times>32<class_stmt>Transaction(object)<block_start><def_stmt>__init__ self database row _transaction=<none><block_start>keys=[n<for>(n t i) database.Columns]<line_sep>self._database=database<line_sep>self._data=dict(zip(keys row))<line_sep># cache for previous outputs' transactions, since it hits the database self._po_cache=dict()<line_sep>self._transaction=_transaction<block_end>version=property(<lambda>s:s.txn.version)<line_sep>inputs=property(<lambda>s:s.txn.tx_in)<line_sep>outputs=property(<lambda>s:s.txn.tx_out)<line_sep>lock_time=property(<lambda>s:s.txn.lock_time)<line_sep>hash=property(<lambda>s:s.txn.hash)<line_sep>index=property(<lambda>s:keys.get_txck_index(s._txck))<def_stmt>__getstate__ self<block_start><return>(self._po_cache dict(txn=str(self._data['txn']) txck=self._data['txck']))<block_end><def_stmt>__setstate__ self state<block_start>self._database=<none><line_sep>(self._po_cache self._data)=state<line_sep>self._transaction=<none><block_end><def_stmt>cache_previous_outputs self<block_start><for_stmt>i xrange(0 len(self.inputs))<block_start>self.previous_transaction(i)<block_end><block_end><def_stmt>previous_transaction self index<block_start>"Returns the previous output's transaction for the input at index."<line_sep># coinbase transaction <if_stmt>self.index<eq>0<and>index<eq>0<block_start><return><none><block_end># look up the previous output's transaction and cache it <if_stmt>index<not><in>self._po_cache<block_start>po_hash=self.inputs[index].previous_output.hash<line_sep>previous_txn=self._database.get(po_hash)<if_stmt>previous_txn<is><none><block_start><raise>KeyError('missing transaction: %s'%po_hash)<block_end>self._po_cache[index]=previous_txn<block_end># return the cache value <return>self._po_cache[index]<block_end><def_stmt>previous_output self index<block_start>'Returns the previous output for the input at index.'<line_sep>previous_txn=self.previous_transaction(index)<if_stmt>previous_txn<is><none><block_start><return><none><block_end>po=self.inputs[index].previous_output<line_sep><return>previous_txn.outputs[po.index]<block_end><def_stmt>__str__ self<block_start><return>"<Transaction hash=0x%s>"%self.hash.encode('hex')<block_end># transaction composite key and database block id; internal use _txck=property(<lambda>s:s._data['txck'])<line_sep>_blockid=property(<lambda>s:keys.get_txck_blockid(s._txck))<def_stmt>_previous_uock self index<block_start>previous_txn=self.previous_transaction(index)<if_stmt>previous_txn<is><none><block_start><return><none><block_end>po=self.inputs[index].previous_output<line_sep><return>keys.get_uock(previous_txn._txck po.index)<block_end>@property<def_stmt>txn self<block_start>'The raw transaction object.'<if_stmt>self._transaction<is><none><block_start>(vl self._transaction)=protocol.Txn.parse(self.txn_binary)<block_end><return>self._transaction<block_end>txn_binary=property(<lambda>s:str(s._data['txn']))<block_end><class_stmt>Database(database.Database)<block_start>MINIMUM_N=4<line_sep>TARGET_SIZE=(1<lshift>30)<times>7<floordiv>4# 1.75GB Columns=[('txck' 'integer primary key' <false>) ('txid_hint' 'integer' <true>) ('txn' 'blob' <false>) ]<line_sep>Name='txns'<def_stmt>__init__ self data_dir=<none> coin=coins.Bitcoin<block_start>database.Database.__init__(self data_dir coin)<line_sep># maps (n, i % n) tuples to 
sqlite connection self._connections=dict()<line_sep># the largest N level on disk self._N=self.load_n()<line_sep># loading/creating a connection loads/creates the entire level n=self._N<while_stmt>n<ge>self.MINIMUM_N<block_start>self.get_connection(n 0 <true>)<line_sep>n<augfloordiv>2<block_end>#self._unspent = unspent.Database(self.data_dir, coin) <block_end><def_stmt>load_n self<block_start>'Determine the highest N for a database directory.'<line_sep>n=self.MINIMUM_N<while_stmt><true><block_start><if_stmt><not>os.path.isfile(self.get_filename(self.get_suffix(n<times>2 0)))<block_start><break><block_end>n<augmul>2<block_end><return>n<block_end><def_stmt>get_suffix self n q<block_start><return>'-%03d-%03d'%(n q%n)<block_end><def_stmt>get_connection self n q allow_create=<false><block_start>'''Get a connection for the database file at (n, q % n). First a connection cache is searched. Then the disk is checked for new files, in which case every file at level n is loaded. If allow_create and the database file does not exist, all partitions at the level n are created.'''<line_sep># the location we want loc=(n q%n)<if_stmt>loc<not><in>self._connections<block_start>locs=[(n i)<for>i xrange(0 n)]<line_sep># doesn't exist; create the files backward <if_stmt><not>os.path.isfile(self.get_filename(self.get_suffix(n 0)))<block_start><if_stmt><not>allow_create<block_start><return><none><block_end>locs.reverse()<block_end><for_stmt>l locs<block_start>suffix=self.get_suffix(l[0] l[1])<line_sep>self._connections[l]=database.Database.get_connection(self suffix)<block_end><block_end><return>self._connections[loc]<block_end><def_stmt>check_size self<block_start>'Checks the sizes of the database level, increasing the size as needed.'<line_sep># if any (statistically selected) database is full, increase our size suffix=self.get_suffix(self._N random.randint(0 self._N-1))<line_sep>filename=self.get_filename(suffix)<if_stmt>os.path.getsize(filename)<g>self.TARGET_SIZE<block_start>self._N<augmul>2<line_sep>self.get_connection(self._N 0 <true>)<block_end><block_end><def_stmt>add self block transactions<block_start>'Add transactions to the database.'<line_sep># expand the database if necessary self.check_size()<line_sep># check the merkle root of the transactions against the block block._check_merkle_root(util.get_merkle_root(transactions))<line_sep># for each transaction... 
connections=dict()<line_sep>block_txns=[]<for_stmt>(txn_index txn) enumerate(transactions)# ...get the database to save to <block_start>txid=txn.hash<line_sep>q=get_q(txid)<line_sep>connection=self.get_connection(self._N q)<line_sep>connections[(self._N q%self._N)]=connection<line_sep># ...insert cursor=connection.cursor()<line_sep>txck=keys.get_txck(block._blockid txn_index)<line_sep>row=(txck keys.get_hint(txid) buffer(txn.binary()))<try_stmt><block_start>cursor.execute(self.sql_insert row)<block_end># (duplicates don't matter) <except_stmt>sqlite3.IntegrityError e<block_start><if_stmt>e.message<ne>_KEY_DUP<block_start><raise>e<block_end><block_end># wrap up the transaction for the returned block block_txns.append(Transaction(self row txn))<block_end># commit the transactions to the databases <for_stmt>connection connections.values()<block_start>connection.commit()<block_end># update the block with the transactions block._update_transactions(block_txns)<line_sep># return the now updated block <return>block<block_end># @TODO optimization: store in each txn db a max_blockid so we can prune <def_stmt>_get self txck<block_start>''<for_stmt>connection self._connections.values()<block_start>cursor=connection.cursor()<line_sep>cursor.execute(self.sql_select+' where txck = ?' (txck ))<line_sep>row=cursor.fetchone()<if_stmt>row<block_start><return>Transaction(self row)<block_end><block_end><return><none><block_end><def_stmt>_get_transactions self blockid<block_start>"Find all transactions for a block, ordered by transaction index. Internal use."<line_sep># the range that this block's composite keys can have [lo, hi) lo=keys.get_txck(blockid 0)<line_sep>hi=keys.get_txck(blockid+1 0)<line_sep># find all transactions across all databases within this range txns=[]<for_stmt>connection self._connections.values()<block_start>cursor=connection.cursor()<line_sep>cursor.execute(self.sql_select+' where txck >= ? and txck < ?' (lo hi))<line_sep>txns.extend((r[0] r)<for>r cursor.fetchall())<block_end># sort by index (actually (blockid, index), but all have same blockid) txns.sort()<line_sep># wrap it up in a helpful wrapper <return>[Transaction(self row)<for>(txck row) txns]<block_end><def_stmt>get self txid default=<none><block_start>'Get a transaction by its txid.'<line_sep># the hint we index by for faster lookup txid_hint=keys.get_hint(txid)<line_sep># search each level (n, n // 2, n // 4, etc) n=self._N<line_sep>q=get_q(txid)<while_stmt>n<ge>self.MINIMUM_N<block_start>connection=self.get_connection(n q)<line_sep>cursor=connection.cursor()<line_sep>cursor.execute(self.sql_select+' where txid_hint = ?' (txid_hint ))<for_stmt>row cursor.fetchall()<block_start>(vl txn)=protocol.Txn.parse(row[2])<if_stmt>txn.hash<eq>txid<block_start><return>Transaction(self row txn)<block_end><block_end>n<augfloordiv>2<block_end># maybe another process grew us, and we didn't know? Try again. new_n=self.load_n()<if_stmt>new_n<ne>self._N<block_start>self._N=new_n<line_sep><return>self._get(txid)<block_end><return>default<block_end>#def __getitem__(self, name): # 'Get a transaction by its txid.' # # txn = self.get(name) # if txn is not None: # return txn # raise KeyError(name) # Useful? Should it return a blockhain.transaction.Transaction or protocol.Txn? #def __iter__(self): # 'Iterate over every transaction. There is no meaningful order.' 
# # for connection in self._connections.values(): # cursor = connection.cursor() # cursor.execute(self.sql_select) # while True: # rows = cursor.fetchmany() # if not rows: break # for row in rows: # #yield Transaction(self, row) # (vl, txn) = protocol.Txn.parse(row[2])[1] # yield txn <block_end>
<import_stmt>sys<line_sep>sys.path.append('../core')<import_stmt>argparse<import_stmt>torch<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>viz sim3_visualization<import_from_stmt>lietorch SO3 SE3 Sim3<import_from_stmt>networks.sim3_net Sim3Net<def_stmt>normalize_images images<block_start>images=images[: : [2 1 0]]<line_sep>mean=torch.as_tensor([0.485 0.456 0.406] device=images.device)<line_sep>std=torch.as_tensor([0.229 0.224 0.225] device=images.device)<line_sep><return>(images/255.0).sub_(mean[: <none> <none>]).div_(std[: <none> <none>])<block_end><def_stmt>load_example i=0<block_start>""" get demo example """<line_sep>DEPTH_SCALE=5.0<if_stmt>i<eq>0<block_start>image1=cv2.imread('assets/image1.png')<line_sep>image2=cv2.imread('assets/image2.png')<line_sep>depth1=np.load('assets/depth1.npy')/DEPTH_SCALE<line_sep>depth2=np.load('assets/depth2.npy')/DEPTH_SCALE<block_end><elif_stmt>i<eq>1<block_start>image1=cv2.imread('assets/image3.png')<line_sep>image2=cv2.imread('assets/image4.png')<line_sep>depth1=np.load('assets/depth3.npy')/DEPTH_SCALE<line_sep>depth2=np.load('assets/depth4.npy')/DEPTH_SCALE<block_end>images=np.stack([image1 image2] 0)<line_sep>images=torch.from_numpy(images).permute(0 3 1 2)<line_sep>depths=np.stack([depth1 depth2] 0)<line_sep>depths=torch.from_numpy(depths).float()<line_sep>intrinsics=np.array([320.0 320.0 320.0 240.0])<line_sep>intrinsics=np.tile(intrinsics[<none>] (2 1))<line_sep>intrinsics=torch.from_numpy(intrinsics).float()<line_sep><return>images[<none>].cuda() depths[<none>].cuda() intrinsics[<none>].cuda()<block_end>@torch.no_grad()<def_stmt>demo model index=0<block_start>images,depths,intrinsics=load_example(index)<line_sep># initial transformation estimate <if_stmt>args.transformation<eq>'SE3'<block_start>Gs=SE3.Identity(1 2 device='cuda')<block_end><elif_stmt>args.transformation<eq>'Sim3'<block_start>Gs=Sim3.Identity(1 2 device='cuda')<line_sep>depths[: 0]<augmul>2<power>(2<times>torch.rand(1)-1.0).cuda()<block_end>images1=normalize_images(images)<line_sep>ests,_=model(Gs images1 depths intrinsics num_steps=12)<line_sep># only care about last transformation Gs=ests[-1]<line_sep>T=Gs[: 0]<times>Gs[: 1].inv()<line_sep>T=T[0].matrix().double().cpu().numpy()<line_sep>sim3_visualization(T images depths intrinsics)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--transformation' default='SE3' help='checkpoint to restore')<line_sep>parser.add_argument('--ckpt' help='checkpoint to restore')<line_sep>args=parser.parse_args()<line_sep>model=Sim3Net(args)<line_sep>model.load_state_dict(torch.load(args.ckpt))<line_sep>model.cuda()<line_sep>model.eval()<line_sep># run two demos demo(model 0)<line_sep>demo(model 1)<block_end>
<import_stmt>subprocess<import_stmt>sys<import_stmt>json<import_stmt>datetime<import_stmt>urllib.parse<def_stmt>main <block_start>files_by_date={}<line_sep>bucket=sys.argv[1]<line_sep>days_to_keep=int(sys.argv[2])<line_sep>print(f"Looking for binaries to delete older than {days_to_keep} days")<line_sep>files_lines=execute_cli(f"b2 ls --long --versions {bucket} nightly").split("\n")<for_stmt>x files_lines<block_start>parts=[y<for>y x.split(' ')<if>y]<if_stmt>parts<and>parts[0]<block_start>date=datetime.datetime.strptime(parts[2] '%Y-%m-%d').replace(hour=0 minute=0 second=0 microsecond=0)<line_sep>now=datetime.datetime.utcnow().replace(hour=0 minute=0 second=0 microsecond=0)<line_sep>delta=now-date<if_stmt>delta.days<g>days_to_keep<block_start>print(f'Deleting {parts[5]}')<line_sep>execute_cli(f'b2 delete-file-version {parts[0]}')<block_end><block_end><block_end><block_end><def_stmt>execute_cli command<block_start>sb=subprocess.Popen(command shell=<true> stdout=subprocess.PIPE)<line_sep><return>sb.stdout.read().decode("utf-8")<line_sep><block_end><if_stmt>__name__<eq>'__main__'<block_start>sys.exit(main())<block_end>
# -*- coding: utf-8 -*- """ pybitcoin ~~~~~ :copyright: (c) 2014 by Halfmoon Labs :license: MIT, see LICENSE for more details. """<import_from_stmt>.opcodes *<import_from_stmt>.utils count_bytes<import_from_stmt>..constants MAX_BYTES_AFTER_OP_RETURN<import_from_stmt>..b58check b58check_decode b58check_encode<import_from_stmt>binascii hexlify unhexlify<import_from_stmt>utilitybelt is_hex<def_stmt>script_to_hex script<block_start>""" Parse the string representation of a script and return the hex version. Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG" """<line_sep>hex_script=''<line_sep>parts=script.split(' ')<for_stmt>part parts<block_start><if_stmt>part[0:3]<eq>'OP_'<block_start><try_stmt><block_start>hex_script<augadd>'%0.2x'%eval(part)<block_end><except_stmt><block_start><raise>Exception('Invalid opcode: %s'%part)<block_end><block_end><elif_stmt>isinstance(part (int))<block_start>hex_script<augadd>'%0.2x'%part<block_end><elif_stmt>is_hex(part)<block_start>hex_script<augadd>'%0.2x'%count_bytes(part)+part<block_end><else_stmt><block_start><raise>Exception('Invalid script - only opcodes and hex characters allowed.')<block_end><block_end><return>hex_script<block_end><def_stmt>make_pay_to_address_script address<block_start>""" Takes in an address and returns the script """<line_sep>hash160=hexlify(b58check_decode(address))<line_sep>script_string='OP_DUP OP_HASH160 %s OP_EQUALVERIFY OP_CHECKSIG'%hash160<line_sep><return>script_to_hex(script_string)<block_end><def_stmt>make_op_return_script data format='bin'<block_start>""" Takes in raw ascii data to be embedded and returns a script. """<if_stmt>format<eq>'hex'<block_start><assert_stmt>(is_hex(data))<line_sep>hex_data=data<block_end><elif_stmt>format<eq>'bin'<block_start>hex_data=hexlify(data)<block_end><else_stmt><block_start><raise>Exception("Format must be either 'hex' or 'bin'")<block_end>num_bytes=count_bytes(hex_data)<if_stmt>num_bytes<g>MAX_BYTES_AFTER_OP_RETURN<block_start><raise>Exception('Data is %i bytes - must not exceed 40.'%num_bytes)<block_end>script_string='OP_RETURN %s'%hex_data<line_sep><return>script_to_hex(script_string)<block_end>
<import_from_stmt>math ceil<import_stmt>numpy<as>np<import_stmt>os<import_stmt>tempfile<import_from_stmt>...import_utils *<if_stmt>is_all_dependency_installed('encoders-video')<block_start><import_stmt>librosa<import_stmt>soundfile<as>sf<import_from_stmt>cv2 cv2<import_from_stmt>moviepy.video.io.ffmpeg_reader ffmpeg_parse_infos<import_from_stmt>moviepy.video.io.VideoFileClip VideoFileClip<block_end><class_stmt>FrameSamplingFilter()<block_start><def_stmt>__init__ self every=<none> hertz=<none> top_n=<none><block_start><if_stmt>every<is><none><and>hertz<is><none><and>top_n<is><none><block_start><raise>ValueError("When initializing the FrameSamplingFilter, "<concat>"one of the 'every', 'hertz', or 'top_n' must "<concat>"be specified.")<block_end>self.every=every<line_sep>self.hertz=hertz<line_sep>self.top_n=top_n<block_end><def_stmt>get_audio_sampling_rate self filename:str<block_start>infos=ffmpeg_parse_infos(filename)<line_sep>fps=infos.get('audio_fps' 44100)<if_stmt>fps<eq>'unknown'<block_start>fps=44100<block_end><return>fps<block_end><def_stmt>load_clip self filename:str<block_start>audio_fps=self.get_audio_sampling_rate(filename)<line_sep>self.clip=VideoFileClip(filename audio_fps)<block_end><def_stmt>initialize_video self filename:str<block_start>self.filename=filename<line_sep>self.load_clip(filename)<line_sep>self.fps=self.clip.fps<line_sep>self.width=self.clip.w<line_sep>self.height=self.clip.h<line_sep>self.frame_index=range(int(ceil(self.fps<times>self.clip.duration)))<line_sep>self.duration=self.clip.duration<line_sep>self.n_frames=len(self.frame_index)<block_end><def_stmt>get_audio_vector self new_sampling_rate:int=16000<block_start>fd,fp=tempfile.mkstemp()<line_sep>audio=f'{fp}.wav'<line_sep>self.clip.audio.to_audiofile(audio)<line_sep>data,sampling_rate=sf.read(audio dtype='float32')<line_sep>os.close(fd)<line_sep>os.remove(audio)<line_sep><return>np.array(librosa.resample(data.T sampling_rate new_sampling_rate))<block_end><def_stmt>transform self filename:str<block_start>self.initialize_video(filename)<if_stmt>(self.every<is><not><none>)<block_start>new_idx=range(self.n_frames)[::self.every]<block_end><elif_stmt>(self.hertz<is><not><none>)<block_start>interval=self.fps/float(self.hertz)<line_sep>new_idx=np.arange(0 self.n_frames interval).astype(int)<line_sep>new_idx=list(new_idx)<block_end><elif_stmt>self.top_n<is><not><none><block_start>diffs=[]<for_stmt>i,img enumerate(range(self.n_frames))<block_start><if_stmt>i<eq>0<block_start>last=img<line_sep><continue><block_end>pixel_diffs=cv2.sumElems(cv2.absdiff(self.get_frame(last) self.get_frame(img)))<line_sep>diffs.append(sum(pixel_diffs))<line_sep>last=img<block_end>new_idx=sorted(range(len(diffs)) key=<lambda>i:diffs[i] reverse=<true>)[:self.top_n]<block_end>result=[]<for_stmt>index new_idx<block_start>result.append(self.get_frame(index))<block_end><return>result<block_end><def_stmt>get_frame self index:int<block_start><return>self.clip.get_frame(index)<block_end><def_stmt>iter_frames self<block_start><for_stmt>i,f enumerate(self.frame_index)<block_start><yield>self.get_frame(f)<block_end><block_end><block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>enum Enum EnumMeta<import_from_stmt>six with_metaclass<class_stmt>_CaseInsensitiveEnumMeta(EnumMeta)<block_start><def_stmt>__getitem__ self name<block_start><return>super().__getitem__(name.upper())<block_end><def_stmt>__getattr__ cls name<block_start>"""Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """<try_stmt><block_start><return>cls._member_map_[name.upper()]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><block_end><class_stmt>DeploymentMode(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The mode that is used to deploy resources. This value can be either Incremental or Complete. In Incremental mode, resources are deployed without deleting existing resources that are not included in the template. In Complete mode, resources are deployed and existing resources in the resource group that are not included in the template are deleted. Be careful when using Complete mode as you may unintentionally delete resources. """<line_sep>INCREMENTAL="Incremental"<line_sep>COMPLETE="Complete"<block_end><class_stmt>OnErrorDeploymentType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. """<line_sep>LAST_SUCCESSFUL="LastSuccessful"<line_sep>SPECIFIC_DEPLOYMENT="SpecificDeployment"<block_end><class_stmt>ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The identity type. """<line_sep>SYSTEM_ASSIGNED="SystemAssigned"<line_sep>USER_ASSIGNED="UserAssigned"<line_sep>SYSTEM_ASSIGNED_USER_ASSIGNED="SystemAssigned, UserAssigned"<line_sep>NONE="None"<block_end>
<import_from_stmt>django.conf.urls.defaults *<line_sep>urlpatterns=patterns('wouso.games.challenge.views' url(r'^$' 'index' name='challenge_index_view') url(r'^(?P<id>\d+)/$' 'challenge' name='view_challenge') url(r'^launch/(?P<to_id>\d+)/$' 'launch' name='challenge_launch') url(r'^refuse/(?P<id>\d+)/$' 'refuse' name='challenge_refuse') url(r'^accept/(?P<id>\d+)/$' 'accept' name='challenge_accept') url(r'^cancel/(?P<id>\d+)/$' 'cancel' name='challenge_cancel') url(r'^setplayed/(?P<id>\d+)/$' 'setplayed' name='setplayed') url(r'^use_artifact/$' 'use_one_more' name='challenge_onemore') url(r'^history/(?P<playerid>\d+)/$' 'history' name='challenge_history') url(r'^playerchallenge/$' 'challenge_player' name='challenge_player') url(r'^randomchallenge/$' 'challenge_random' name='challenge_random') url(r'^stats/$' 'challenge_stats' name='challenge_stats') url(r'^stats/player=(?P<player_id>\d+)/$' 'challenge_stats' name='challenge_stats') url(r'^stats/target=(?P<target_id>\d+)/' 'detailed_challenge_stats' name='detailed_challenge_stats') url(r'^stats/player=(?P<player_id>\d+)/target=(?P<target_id>\d+)/' 'detailed_challenge_stats' name='detailed_challenge_stats') )<line_sep>
""" ================= hlines and vlines ================= This example showcases the functions hlines and vlines. """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<line_sep>t=np.arange(0.0 5.0 0.1)<line_sep>s=np.exp(-t)+np.sin(2<times>np.pi<times>t)+1<line_sep>nse=np.random.normal(0.0 0.3 t.shape)<times>s<line_sep>fig,(vax hax)=plt.subplots(1 2 figsize=(12 6))<line_sep>vax.plot(t s+nse '^')<line_sep>vax.vlines(t [0] s)<line_sep># By using ``transform=vax.get_xaxis_transform()`` the y coordinates are scaled # such that 0 maps to the bottom of the axes and 1 to the top. vax.vlines([1 2] 0 1 transform=vax.get_xaxis_transform() colors='r')<line_sep>vax.set_xlabel('time (s)')<line_sep>vax.set_title('Vertical lines demo')<line_sep>hax.plot(s+nse t '^')<line_sep>hax.hlines(t [0] s lw=2)<line_sep>hax.set_xlabel('time (s)')<line_sep>hax.set_title('Horizontal lines demo')<line_sep>plt.show()<line_sep>
# -*- coding: utf-8 -*- # # JSON osu! map analysis # <import_stmt>numpy<as>np<def_stmt>get_map_timing_array map_json length=-1 divisor=4<block_start><if_stmt>length<eq>-1<block_start>length=map_json["obj"][-1]["time"]+1000<line_sep># it has an extra time interval after the last note <if_stmt>map_json["obj"][-1]["type"]&8# spinner end <block_start>length=map_json["obj"][-1]["spinnerEndTime"]+1000<line_sep><block_end><block_end>uts_a=map_json["timing"]["uts"]<line_sep>out=[]<for_stmt>i,uts enumerate(uts_a)<block_start>begin_time=uts["beginTime"]<line_sep>mspb=uts["tickLength"]<if_stmt>i<l>len(uts_a)-1<block_start>end_time=uts_a[i+1]["beginTime"]<block_end><else_stmt><block_start>end_time=length<line_sep><block_end>arr=np.floor(np.arange(begin_time end_time mspb/divisor))<line_sep>out=out+list(map(<lambda>f:int(f) arr))<line_sep><block_end><return>out<line_sep><block_end><def_stmt>get_tick_len map_json tick<block_start>uts_a=map_json["timing"]["uts"]<if_stmt>tick<l>uts_a[0]["beginTime"]<block_start><return>uts_a[0]["tickLength"]<line_sep><block_end>_out=600<for_stmt>uts uts_a<block_start><if_stmt>tick<ge>uts["beginTime"]<block_start>_out=uts["tickLength"]<block_end><else_stmt><block_start><return>_out<line_sep><block_end><block_end><return>_out<line_sep><block_end><def_stmt>get_slider_len map_json tick<block_start>ts_a=map_json["timing"]["ts"]<if_stmt>tick<l>ts_a[0]["beginTime"]<block_start><return>ts_a[0]["sliderLength"]<line_sep><block_end>_out=100<for_stmt>ts ts_a<block_start><if_stmt>tick<ge>ts["beginTime"]<block_start>_out=ts["sliderLength"]<block_end><else_stmt><block_start><return>_out<line_sep><block_end><block_end><return>_out<line_sep><block_end><def_stmt>get_slider_len_ts ts_a tick<block_start><if_stmt>tick<l>ts_a[0]["beginTime"]<block_start><return>ts_a[0]["sliderLength"]<line_sep><block_end>_out=100<for_stmt>ts ts_a<block_start><if_stmt>tick<ge>ts["beginTime"]<block_start>_out=ts["sliderLength"]<block_end><else_stmt><block_start><return>_out<line_sep><block_end><block_end><return>_out<line_sep><block_end><def_stmt>get_end_time note<block_start><if_stmt>note["type"]&8<block_start><return>note["spinnerEndTime"]<block_end><elif_stmt>note["type"]&2<block_start><return>note["sliderData"]["endTime"]<block_end>#elif note["type"] & 128: # return note["holdEndTime"]; <else_stmt><block_start><return>note["time"]<line_sep><block_end><block_end># edited from uts to ts <def_stmt>get_all_ticks_and_lengths_from_ts uts_array ts_array end_time divisor=4# Returns array of all timestamps, ticklens and sliderlens. 
<block_start>endtimes=([uts["beginTime"]<for>uts uts_array]+[end_time])[1:]<line_sep>timestamps=[np.arange(uts["beginTime"] endtimes[i] uts["tickLength"]/divisor)<for>i,uts enumerate(uts_array)]<line_sep>ticks_from_uts=[list(range(len(timestamp_group)))<for>timestamp_group timestamps]<line_sep>tick_len=[[uts["tickLength"]]<times>len(np.arange(uts["beginTime"] endtimes[i] uts["tickLength"]/divisor))<for>i,uts enumerate(uts_array)]<line_sep># slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)]; slider_len=[get_slider_len_ts(ts_array timestamp)<for>timestamp np.concatenate(timestamps)]<line_sep><return>np.concatenate(ticks_from_uts) np.round(np.concatenate(timestamps)).astype(int) np.concatenate(tick_len) np.array(slider_len)<line_sep><block_end><def_stmt>get_end_point note<block_start><if_stmt>note["type"]&8<block_start><return>np.array([256 192])<block_end><elif_stmt>note["type"]&2<block_start><return>np.array(note["sliderData"]["endpoint"])<block_end><else_stmt><block_start><return>np.array([note["x"] note["y"]])<line_sep><block_end><block_end><def_stmt>get_input_vector note prev_note<block_start><if_stmt>note["type"]&8<block_start><return><none><block_end>#elif note["type"] & 2: # return np.array(note["sliderData"]["dIn"]); <else_stmt><block_start>vec=np.array([note["x"] note["y"]])-get_end_point(prev_note)<line_sep><return>vec/max(0.001 np.sqrt(vec.dot(vec)))<line_sep><block_end><block_end><def_stmt>get_output_vector note prev_note<block_start><if_stmt>note["type"]&8<block_start><return><none><block_end><elif_stmt>note["type"]&2<block_start><return>np.array(note["sliderData"]["dOut"])<block_end><else_stmt><block_start>vec=np.array([note["x"] note["y"]])-get_end_point(prev_note)<line_sep><return>vec/max(0.001 np.sqrt(vec.dot(vec)))<line_sep><block_end><block_end><def_stmt>get_momentum note prev_note slider_len<block_start>""" momentum = distance snap (distance / slider length). for sliders, takes small value between from slider end or slider start to next note. """<line_sep>v1=np.array([note["x"] note["y"]])<line_sep>v0=get_end_point(prev_note)<line_sep>v=v1-v0<if_stmt>note["time"]-get_end_time(prev_note)<eq>0<or>note["time"]-prev_note["time"]<eq>0# it has the same time the previous note ends. 
either a bugged sliderend or a double note <block_start><return>0<line_sep><block_end>end_type_momentum=np.sqrt(v.dot(v))/(note["time"]-get_end_time(prev_note))/slider_len<line_sep># Since slider jumps in maps cause parameters to be learned too high # we try to deal with slider leniency by using the beginning of slider v2=np.array([prev_note["x"] prev_note["y"]])<line_sep>v3=v1-v2<line_sep>start_type_momentum=np.sqrt(v3.dot(v3))/(note["time"]-prev_note["time"])/slider_len<line_sep><return>np.min([end_type_momentum start_type_momentum])<line_sep><block_end><def_stmt>is_uts_begin map_json tick<block_start>uts_a=map_json["timing"]["uts"]<line_sep>begin_times=[uts["beginTime"]<for>uts uts_a]<for_stmt>t begin_times<block_start><if_stmt>tick<g>t-1<and>tick<l>t+5<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>get_map_notes map_json **kwargs<block_start>""" Reads JSON map data and creates a list for every tick Returns: data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3] flow_data = list of data array: [i, tick, note_type, x, y, vec_in_x, vec_in_y, vec_out_x, vec_out_y, end_x, end_y] Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150 """<line_sep>length=kwargs.get("length" -1)<line_sep>divisor=kwargs.get("divisor" 4)<line_sep>tick_times=get_map_timing_array(map_json length=length divisor=divisor)<line_sep>objs=map_json["obj"]<line_sep>obj_times=list(map(<lambda>obj:obj["time"] objs))<line_sep># 1 for circle, 2 for slider, 3 for spinner <def_stmt>get_note_type obj<block_start><if_stmt><not>obj<block_start><return>0<line_sep><block_end><if_stmt>obj["type"]&2<block_start><return>2<block_end><elif_stmt>obj["type"]&8<block_start><return>3<line_sep><block_end><return>1<line_sep><block_end>po=0<line_sep>note_max_wait_time=kwargs.get("note_max_wait_time" 1000)<line_sep>start_time=obj_times[0]-note_max_wait_time<line_sep>last_obj_time=start_time<line_sep>sliding=0<line_sep>slider_end_time=0<line_sep>spinning=0<line_sep>spinner_end_time=0<line_sep>data=[]<line_sep>flow_data=[]<line_sep># constant multipliers and subtractions tlen_mp=1/500<line_sep>tlen_s=1<line_sep>bpm_mp=1/120<line_sep>bpm_s=1<line_sep>slen_mp=1/150<line_sep>slen_s=1<line_sep># tick count from start of uninherited timing section uts_i=0<line_sep># tick is timestamp here <for_stmt>i,tick enumerate(tick_times)<block_start><if_stmt>is_uts_begin(map_json tick)<block_start>uts_i=0<block_end><else_stmt><block_start>uts_i<augadd>1<line_sep><block_end># Attach extra vars at the end of each note data row tlen=get_tick_len(map_json tick)<line_sep>bpm=60000/tlen<line_sep>slen=get_slider_len(map_json tick)<line_sep>ex1=tlen<times>tlen_mp-tlen_s<line_sep>ex2=bpm<times>bpm_mp-bpm_s<line_sep>ex3=slen<times>slen_mp-slen_s<while_stmt>obj_times[po]<l>tick-5<and>po<l>len(obj_times)-1<block_start>po<augadd>1<line_sep><block_end><if_stmt>obj_times[po]<ge>tick-5<and>obj_times[po]<le>tick+5# found note <block_start>last_obj_time=tick<line_sep>note_type=get_note_type(objs[po])<line_sep># calculate momentum <if_stmt>po<ge>1<block_start>momentum=get_momentum(objs[po] objs[po-1] slen/tlen)<block_end><else_stmt><block_start>momentum=0<line_sep><block_end># flow data <if_stmt>po<ge>1<block_start>input_vector=get_input_vector(objs[po] objs[po-1])<line_sep>output_vector=get_output_vector(objs[po] objs[po-1])<block_end><else_stmt><block_start>input_vector=[0 0]<line_sep>output_vector=[0 
0]<line_sep><block_end><if_stmt>input_vector<is><none><or>input_vector[0]<is><none><or>input_vector[1]<is><none><block_start>input_vector=[0 0]<line_sep><block_end><if_stmt>output_vector<is><none><or>output_vector[0]<is><none><or>output_vector[1]<is><none><block_start>output_vector=[0 0]<line_sep><block_end># end point endpoint=get_end_point(objs[po])<line_sep>flow_data.append([uts_i tick note_type objs[po]["x"] objs[po]["y"] input_vector[0] input_vector[1] output_vector[0] output_vector[1] endpoint[0] endpoint[1]])<line_sep># put data <if_stmt>note_type<eq>1<block_start>spinning=0<line_sep>sliding=0<block_end><elif_stmt>note_type<eq>2<block_start>sliding=1<line_sep>slider_end_time=objs[po]["sliderData"]["endTime"]<block_end><elif_stmt>note_type<eq>3<block_start>spinning=1<line_sep>spinner_end_time=objs[po]["spinnerEndTime"]<line_sep># because the spinner sometimes get over 3 secs last_obj_time=spinner_end_time<line_sep><block_end># TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3 data.append([uts_i tick 1 note_type sliding spinning momentum ex1 ex2 ex3])<block_end><elif_stmt>spinning<eq>1<block_start><if_stmt>tick<ge>spinner_end_time-5<block_start>spinning=0<line_sep>data.append([uts_i tick 1 5 0 0 0 ex1 ex2 ex3])<block_end><else_stmt><block_start>data.append([uts_i tick 0 0 0 1 0 ex1 ex2 ex3])<block_end><block_end><elif_stmt>sliding<eq>1<block_start><if_stmt>tick<ge>slider_end_time-5<block_start>sliding=0<line_sep>data.append([uts_i tick 1 4 0 0 0 ex1 ex2 ex3])<block_end><else_stmt><block_start>data.append([uts_i tick 0 0 1 0 0 ex1 ex2 ex3])<block_end><block_end><else_stmt># not found <block_start><if_stmt>tick-last_obj_time<l>note_max_wait_time<and>tick<ge>start_time<block_start>data.append([uts_i tick 0 0 0 0 0 ex1 ex2 ex3])<line_sep><block_end><block_end><block_end><return>data flow_data<line_sep><block_end>
### IMPORTS <import_from_future_stmt> print_function<import_stmt>os<import_stmt>fnmatch<import_stmt>numpy<as>np<import_stmt>skimage.data<import_stmt>cv2<import_stmt>sys<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.patches<as>mpatches<import_from_stmt>PIL Image<import_from_stmt>keras applications<import_from_stmt>keras.preprocessing.image ImageDataGenerator<import_from_stmt>keras optimizers<import_from_stmt>keras.optimizers RMSprop Adagrad<import_from_stmt>keras.models Sequential Model<import_from_stmt>keras.layers Dropout Flatten Dense Input<import_from_stmt>keras.callbacks ModelCheckpoint CSVLogger TensorBoard EarlyStopping<import_stmt>logging<line_sep>FORMAT="[%(lineno)4s : %(funcName)-30s ] %(message)s"<line_sep>logging.basicConfig(level=logging.DEBUG format=FORMAT)<import_from_stmt>selective_search selective_search_bbox<line_sep>### GLOBALS # dimensions of our images. # img_width = 150 # img_height = 150 img_width=224<line_sep>img_height=224<line_sep># dataset_path = 'dataset_dogs_cats' dataset_path='dataset'<line_sep>dataset_train_path=os.path.join(dataset_path 'train')<line_sep>dataset_val_path=os.path.join(dataset_path 'validation')<line_sep>dataset_test_path=os.path.join(dataset_path 'test')<line_sep># path to the model weights files. weights_path='weights/vgg16_weights.h5'<line_sep>#top_model_weights_path = 'output/bottleneck_fc_model.h5' #top_model_weights_path = 'output_6_categ/best-weights-015-0.5636-0.7923.hdf5' #finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5' #finetune_model_weights_path = 'output_6_categ/best-weights-finetune-000-0.2325-0.9062.hdf5' #finetune_model_weights_path = 'output_6_categ_crop/best-weights-finetune-008-0.3453-0.8774.hdf5' #finetune_model_weights_path = 'output/best-weights-finetune-000-1.5646-0.5217.hdf5' #finetune_model_weights_path = 'results_36categ/best-weights-finetune-000-1.5646-0.5217.hdf5' finetune_model_weights_path='output/finetune_bottleneck_fc_model.h5'<line_sep>#epochs = 50 epochs=5<line_sep>#batch_size = 16 #batch_size = 32 batch_size=1<line_sep># Count no. 
of images(.jpg) in a directory <def_stmt>get_images_count_recursive path<block_start>matches=[]<line_sep>logging.debug('path {}'.format(path))<for_stmt>root,dirnames,filenames os.walk(path)<block_start><for_stmt>filename fnmatch.filter(filenames '*.jpg')<block_start>matches.append(os.path.join(root filename))<block_end><block_end># logging.debug('matches {}'.format(matches)) images_count=len(matches)<line_sep><return>images_count<block_end>nb_test_samples=get_images_count_recursive(dataset_test_path)<line_sep>logging.debug('nb_test_samples {}'.format(nb_test_samples))<if_stmt><not>os.path.exists('output')<block_start>os.makedirs('output')<block_end><if_stmt><not>os.path.exists('logs')<block_start>os.makedirs('logs')<block_end># TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512) input_shape=(img_width img_height 3)<line_sep># Sorted subdirectories list <def_stmt>get_subdir_list path<block_start>names=[]<for_stmt>name sorted(os.listdir(path))<block_start><if_stmt>os.path.isdir(os.path.join(path name))<block_start>names.append(name)<block_end><block_end>logging.debug('names {}'.format(names))<line_sep><return>names<block_end>class_names=get_subdir_list(dataset_train_path)<line_sep>logging.debug('class_names {}'.format(class_names))<line_sep># build the VGG16 network base_model=applications.VGG16(weights='imagenet' include_top=<false> input_shape=input_shape)<line_sep>logging.debug('Model loaded.')<line_sep>logging.debug('{}'.format(base_model.output_shape))# (None, None, None, 512) if input_shape not given in applications.VGG16 logging.debug('{}'.format(base_model.output_shape[1:]))# (None, None, 512) ### MODEL 1 # build a classifier model to put on top of the convolutional model # top_model = Sequential() # top_model.add(Flatten(input_shape=base_model.output_shape[1:])) # top_model.add(Dense(256, activation='relu')) # top_model.add(Dropout(0.5)) # top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes # #top_model.add(Dense(1, activation='sigmoid')) # # note that it is necessary to start with a fully-trained # # classifier, including the top classifier, # # in order to successfully do fine-tuning # # top_model.load_weights(top_model_weights_path) # # add the model on top of the convolutional base # # base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add' # model = Model(inputs=base_model.input, outputs=top_model(base_model.output)) # logging.debug('{}'.format(model.summary())) # model.compile(loss='sparse_categorical_crossentropy', # optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), # metrics=['accuracy']) ### MODEL2 inputs=Input(shape=(base_model.output_shape[1:]))<line_sep>x_common=Dense(256 activation='relu')(inputs)<line_sep>## Model Classification x=Flatten()(x_common)<line_sep>#x = Dropout(dropout_rate)(x) predictions_class=Dense(len(class_names) activation='softmax' name='predictions_class')(x)<line_sep>## Model (Regression) IOU score x=Flatten()(x_common)<line_sep># x = Dense(256, activation='relu')(x) # x = Dropout(dropout_rate)(x) predictions_iou=Dense(1 activation='sigmoid' name='predictions_iou')(x)<line_sep># This creates a model that includes the Input layer and three Dense layers #model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou(base_model.output)]) model=Model(inputs=inputs outputs=[predictions_class(base_model.output) predictions_iou])<line_sep>logging.debug('model summary 
{}'.format(model.summary()))<line_sep>model.compile(optimizer=optimizers.SGD(lr=1e-4 momentum=0.9) loss={'predictions_class':'sparse_categorical_crossentropy' 'predictions_iou':'mean_squared_error'} metrics=['accuracy'])<line_sep>model.load_weights(finetune_model_weights_path)<line_sep>logging.debug('weights loaded: {}'.format(finetune_model_weights_path))<def_stmt>evaluate_test_dataset ## Test <block_start>test_datagen=ImageDataGenerator(rescale=1./255)<line_sep>test_generator=test_datagen.flow_from_directory(dataset_test_path target_size=(img_height img_width) batch_size=batch_size class_mode='sparse' # Binary to Multi classification changes save_to_dir=<none> shuffle=<false>)<line_sep>scores=model.evaluate_generator(test_generator nb_test_samples<floordiv>batch_size)<line_sep>logging.debug('model.metrics_names {}'.format(model.metrics_names))<line_sep>logging.debug('scores {}'.format(scores))<block_end><def_stmt>predict_image_dir # Predict # TODO: Hardcoding # Put all images in sample_images/test folder <block_start>dataset_predict_path='sample_images'<line_sep>#dataset_predict_path='temp' logging.debug('dataset_predict_path {}'.format(dataset_predict_path))<line_sep>predict_datagen=ImageDataGenerator(rescale=1./255)<line_sep>predict_generator=predict_datagen.flow_from_directory(dataset_predict_path target_size=(img_height img_width) batch_size=batch_size class_mode='sparse' # Binary to Multi classification changes save_to_dir=<none> shuffle=<false>)<line_sep>nb_predict_samples=get_images_count_recursive(dataset_predict_path)<line_sep>logging.debug('nb_predict_samples {}'.format(nb_predict_samples))<line_sep>prediction=model.predict_generator(predict_generator nb_predict_samples<floordiv>batch_size verbose=1)<line_sep>logging.debug('\n\nprediction \n{}'.format(prediction))<line_sep># Display predictions matches=[]<for_stmt>root,dirnames,filenames os.walk(os.path.join(dataset_predict_path 'test'))<block_start><for_stmt>filename fnmatch.filter(filenames '*.jpg')<block_start>matches.append(os.path.join(root filename))<block_end><block_end><for_stmt>index,preds enumerate(prediction)<block_start>logging.debug('\n{}'.format((matches[index])))<for_stmt>index2,pred enumerate(preds)<block_start>logging.debug('class_names {}'.format(class_names[index2]))<line_sep>logging.debug('pred {0:6f}'.format(float(pred)))<block_end><block_end><block_end><def_stmt>pad_and_crop_image old_im new_width new_height# old_im = Image.open('someimage.jpg') <block_start>old_size=old_im.size<line_sep>new_size=(new_width new_height)<line_sep>new_im=Image.new("RGB" new_size)# this is already black! 
new_im.paste(old_im ((new_size[0]-old_size[0])/2 (new_size[1]-old_size[1])/2))<line_sep># new_im.show() # new_im.save('someimage.jpg') <return>new_im<block_end><def_stmt>predict_image_name image_path_name<block_start>logging.debug('image_path_name {}'.format(image_path_name))<line_sep>candidates=selective_search_bbox(image_path_name)<line_sep>logging.debug('candidates {}'.format(candidates))<line_sep>image_name=image_path_name.split('/')[-1].split('.')[0]<line_sep>logging.debug('image_name {}'.format(image_name))<line_sep># img = Image.open(image_path_name) # logging.debug('{} {} {}'.format(img.format, img.size, img.mode)) #img2 = img.crop((0, 0, 100, 100)) # img2.save("img2.jpg") # img2.show() #crop_img = img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400 # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h] # img = cv2.imread(image_path_name) # fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6)) img_read=Image.open(image_path_name)<line_sep>logging.debug('{} {} {}'.format(img_read.format img_read.size img_read.mode))<line_sep># img_read.show() i=0<for_stmt>x,y,w,h (candidates)# left, upper, right, and lower pixel; The cropped section includes the left column and # the upper row of pixels and goes up to (but doesn't include) the right column and bottom row of pixels <block_start>img_crop=img_read.crop((y x y+w x+h))<line_sep>img_crop.save('temp/test/'+image_name+'_'+str(i)+'_cropped_'+'.jpg')<line_sep>logging.debug('img_crop {} {} {}'.format(img_crop.format img_crop.size img_crop.mode))<line_sep>img_crop_resize=img_crop.resize((img_width img_height))<line_sep>img_crop_resize.save('temp/test/'+image_name+'_'+str(i)+'_cropped_resize'+'.jpg')<line_sep>logging.debug('img_crop_resize {} {} {}'.format(img_crop_resize.format img_crop_resize.size img_crop_resize.mode))<line_sep>i=i+1<line_sep># crop_img = img[x:y, w:h] # Crop from x, y, w, h -> 100, 200, 300, 400 # logging.debug('crop_img {}'.format(crop_img.shape)) # ax.imshow(crop_img) # # cv2.imshow('cropped', crop_img) # # cv2.waitKey(0) # plt.show() # # Convert Image to array # img = PIL.Image.open("foo.jpg").convert("L") # arr = numpy.array(img) # # Convert array to Image # img = PIL.Image.fromarray(arr) # img = cv2.resize(cv2.imread(image_path_name), (224, 224)).astype(np.float32) # img2.save('temp/test/img_'+str(i)+'.jpg') # img3 = img2.thumbnail((img_width, img_height)) # logging.debug('img3 {}'.format(type(img3))) # # img3.save('temp/test/img_'+str(i)+'_resized.jpg') # logging.debug('{} {} {}'.format(img3.format, img3.size, img3.mode)) # img4 = pad_and_crop_image(img3, img_width, img_height) # logging.debug('{} {} {}'.format(img4.format, img4.size, img4.mode)) # img4.save('temp/test/img_'+str(i)+'_resized1.jpg') img=np.array(img_crop_resize).astype(np.float32)<line_sep>img[: : 0]<augsub>103.939<line_sep>img[: : 1]<augsub>116.779<line_sep>img[: : 2]<augsub>123.68<line_sep>#img = img.transpose((2,0,1)) img=np.expand_dims(img axis=0)<line_sep>prediction=model.predict(img batch_size verbose=1)<line_sep>logging.debug('\n\nprediction \n{}'.format(prediction))<for_stmt>index,preds enumerate(prediction)<block_start><for_stmt>pred preds<block_start>logging.debug('pred {0:6f}'.format(float(pred)))<block_end><block_end><block_end><block_end>### MAIN ### #evaluate_test_dataset() #predict_image_dir() # #image='dataset/test/Jeans/img_Distressed_Skinny_Jeans_img_00000004.jpg' # #image='sample_images/test/img_Distressed_Denim_Jeans_img_00000001.jpg' # 
image='sample_images/test/img_Acid_Wash_Denim_Romper_img_00000070.jpg' image='sample_images/test/img_Acid_Wash_-_Skinny_Jeans_img_00000005.jpg'<line_sep>#image='sample_images/test/img_Boxy_Faux_Fur_Jacket_img_00000001.jpg' #image='sample_images/test/img_Athletic_Marled_Knit_Joggers_img_00000009.jpg' predict_image_name(image)<line_sep>
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for rasterize-then-splat functionality with OpenGL rasterization."""<import_from_stmt>tensorflow_graphics.rendering rasterization_backend<import_from_stmt>tensorflow_graphics.rendering.tests splat_test<import_from_stmt>tensorflow_graphics.util test_case<class_stmt>SplatWithOpenGLTest(splat_test.SplatTest)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep># This pattern was chosen instead of a parametrized test to facilitate # running the test cases in pure CPU mode on machines that do not have a # GPU. In this case the opengl rasterizer cannot be added as a dependency to # the binary as CPU-only machines do not have the required libEGL.so # available. This pattern provides a separate build target for the opengl # rasterizer version. self._backend=rasterization_backend.RasterizationBackends.OPENGL<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_case.main()<block_end>
# Neon number --> If the sum of the digits of the squared number is equal to the original number, the number is said to be a Neon number. Example: 9 ch=int(input("Enter 1 to do it with loop and 2 without loop :\n"))<line_sep>n=int(input("Enter the number :\n"))<line_sep><def_stmt>number n<block_start>sq=n<power>2<line_sep>digisum=0<while_stmt>sq<g>0<block_start>r=sq%10<line_sep>digisum=digisum+r<line_sep>sq=sq<floordiv>10<block_end><if_stmt>(n<eq>digisum)<block_start>print("The number is a neon number")<block_end><else_stmt><block_start>print("Not a neon number")<block_end><block_end># Without Loop <def_stmt>number2 n<block_start>sq=n<times>n<line_sep>r=sq%10<line_sep>q=sq<floordiv>10<line_sep>tocheck=r+q<if_stmt>n<eq>tocheck<block_start>print("It is a Neon Number")<block_end><else_stmt><block_start>print("Not a neon number")<block_end><block_end><if_stmt>ch<eq>1<block_start>number(n)<block_end><elif_stmt>ch<eq>2<block_start>number2(n)<block_end><else_stmt><block_start>print("Enter correct choice")<block_end>""" Time complexity - O(1) Space complexity - O(1) I/o-- Enter 1 to do it with loop and 2 without loop : 2 Enter the number : 9 It is a Neon Number Explanation Input n: 9 sq=81 r=1 q=8 tocheck=8+1 =>9 Output if 9 == 9 ==> Neon number """<line_sep>
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # -------------------------------------------------------------------------------------------- # - Generated by tools/entrypoint_compiler.py: do not edit by hand """ SymSgdBinaryClassifier """<line_sep>__all__=["SymSgdBinaryClassifier"]<import_from_stmt>sklearn.base ClassifierMixin<import_from_stmt>..base_predictor BasePredictor<import_from_stmt>..internal.core.linear_model.symsgdbinaryclassifier SymSgdBinaryClassifier<as>core<import_from_stmt>..internal.utils.utils trace<class_stmt>SymSgdBinaryClassifier(core BasePredictor ClassifierMixin)<block_start>""" Train an symbolic SGD model. .. remarks:: Stochastic gradient descent (SGD) is a well known method for regression and classification tasks, and is primarily a sequential algorithm. The ``SymSgdBinaryClassifier`` is an implementation of a parallel SGD algorithm that, to a first-order approximation, retains the sequential semantics of SGD. Each thread learns a local model as well a `model combiner` which allows local models to be combined to to produce what a sequential model would have produced. **Reference** `Parallel Stochastic Gradient Descent with Sound Combiners <https://arxiv.org/pdf/1705.08030.pdf>`_ :param feature: see `Columns </nimbusml/concepts/columns>`_. :param label: see `Columns </nimbusml/concepts/columns>`_. :param normalize: Specifies the type of automatic normalization used: * ``"Auto"``: if normalization is needed, it is performed automatically. This is the default choice. * ``"No"``: no normalization is performed. * ``"Yes"``: normalization is performed. * ``"Warn"``: if normalization is needed, a warning message is displayed, but normalization is not performed. Normalization rescales disparate data ranges to a standard scale. Feature scaling insures the distances between data points are proportional and enables various optimization methods such as gradient descent to converge much faster. If normalization is performed, a ``MaxMin`` normalizer is used. It normalizes values in an interval [a, b] where ``-1 <= a <= 0`` and ``0 <= b <= 1`` and ``b - a = 1``. This normalizer preserves sparsity by mapping zero to zero. :param caching: Whether trainer should cache input training data. :param number_of_iterations: Number of passes over the data. :param learning_rate: Determines the size of the step taken in the direction of the gradient in each step of the learning process. This determines how fast or slow the learner converges on the optimal solution. If the step size is too big, you might overshoot the optimal solution. If the step size is too small, training takes longer to converge to the best solution. :param l2_regularization: L2 regularization. :param number_of_threads: Degree of lock-free parallelism. Determinism not guaranteed. Multi-threading is not supported currently. :param tolerance: Tolerance for difference in average loss in consecutive passes. :param update_frequency: The number of iterations each thread learns a local model until combining it with the global model. Low value means more updated global model and high value means less cache traffic. :param memory_size: Memory size for L-BFGS. Lower=faster, less accurate. The technique used for optimization here is L-BFGS, which uses only a limited amount of memory to compute the next step direction. 
This parameter indicates the number of past positions and gradients to store for the computation of the next step. Must be greater than or equal to ``1``. :param shuffle: Shuffle data?. :param positive_instance_weight: Apply weight to the positive class, for imbalanced data. :param params: Additional arguments sent to compute engine. .. seealso:: :py:class:`LogisticRegressionBinaryClassifier <nimbusml.linear_model.LogisticRegressionBinaryClassifier>`, :py:class:`SgdBinaryClassifier <nimbusml.linear_model.SgdBinaryClassifier>`, :py:class:`FastLinearBinaryClassifier <nimbusml.linear_model.FastLinearBinaryClassifier>` .. index:: models, parallel, SGD, symbolic Example: .. literalinclude:: /../nimbusml/examples/SymSgdBinaryClassifier.py :language: python """<line_sep>@trace<def_stmt>__init__ self normalize='Auto' caching='Auto' number_of_iterations=50 learning_rate=<none> l2_regularization=0.0 number_of_threads=<none> tolerance=0.0001 update_frequency=<none> memory_size=1024 shuffle=<true> positive_instance_weight=1.0 feature=<none> label=<none> **params<block_start><if_stmt>'feature_column_name'<in>params<block_start><raise>NameError("'feature_column_name' must be renamed to 'feature'")<block_end><if_stmt>feature<block_start>params['feature_column_name']=feature<block_end><if_stmt>'label_column_name'<in>params<block_start><raise>NameError("'label_column_name' must be renamed to 'label'")<block_end><if_stmt>label<block_start>params['label_column_name']=label<block_end>BasePredictor.__init__(self type='classifier' **params)<line_sep>core.__init__(self normalize=normalize caching=caching number_of_iterations=number_of_iterations learning_rate=learning_rate l2_regularization=l2_regularization number_of_threads=number_of_threads tolerance=tolerance update_frequency=update_frequency memory_size=memory_size shuffle=shuffle positive_instance_weight=positive_instance_weight **params)<line_sep>self.feature=feature<line_sep>self.label=label<block_end>@trace<def_stmt>predict_proba self X **params<block_start>''' Returns probabilities '''<line_sep><return>self._predict_proba(X **params)<block_end>@trace<def_stmt>decision_function self X **params<block_start>''' Returns score values '''<line_sep><return>self._decision_function(X **params)<block_end><def_stmt>get_params self deep=<false><block_start>""" Get the parameters for this operator. """<line_sep><return>core.get_params(self)<block_end><block_end>
"""The derivative component."""<line_sep>
# -*- coding: utf-8 -*- # @Time: 2020/7/16 11:38 # @Author: GraceKoo # @File: interview_8.py # @Desc: https://www.nowcoder.com/practice/8c82a5b80378478f9484d87d1c5f12a4?tpId=13&rp=1&ru=%2Fta%2Fcoding-interviews&qr # u=%2Fta%2Fcoding-interviews%2Fquestion-ranking <class_stmt>Solution<block_start><def_stmt>climbStairs self n:int<arrow>int<block_start><if_stmt>0<le>n<le>2<block_start><return>n<block_end>dp=[i<for>i range(n)]<line_sep>dp[0]=1<line_sep>dp[1]=2<for_stmt>i range(2 n)<block_start>dp[i]=dp[i-1]+dp[i-2]<block_end><return>dp[-1]<block_end><block_end>so=Solution()<line_sep>print(so.climbStairs(3))<line_sep>
"""Slack notification tests."""<line_sep>
<class_stmt>Solution<block_start><def_stmt>permuteUnique self nums:List[int]<arrow>List[List[int]]<block_start><def_stmt>BackTrack m per:list<block_start><if_stmt>m<eq>n<block_start><if_stmt>per<not><in>permutation<block_start>permutation.append(per)<block_end><return>per<block_end><for_stmt>i range(n)<block_start><if_stmt><not>visited[i]<block_start>per.append(nums[i])<line_sep>visited[i]=<true><line_sep>per=BackTrack(m+1 per)<line_sep>per=per[:-1]<line_sep>visited[i]=<false><block_end><block_end><return>per<block_end>n=len(nums)<line_sep>visited=[<false><for>_ range(n)]<line_sep>per=[]<line_sep>permutation=[]<line_sep>BackTrack(0 [])<line_sep><return>list(set(tuple(k)<for>k permutation))<block_end><block_end>
<import_from_stmt>oem.media.show.identifier EpisodeIdentifier# NOQA <import_from_stmt>oem.media.show.mapper ShowMapper# NOQA <import_from_stmt>oem.media.show.match EpisodeMatch# NOQA
<import_stmt>pytest<import_from_stmt>brownie_tokens MintableForkToken<class_stmt>_MintableTestToken(MintableForkToken)<block_start><def_stmt>__init__ self address<block_start>super().__init__(address)<block_end><block_end>@pytest.fixture(scope="session")<def_stmt>MintableTestToken <block_start><yield>_MintableTestToken<block_end>@pytest.fixture(scope="module")<def_stmt>USDC <block_start><yield>_MintableTestToken("<KEY>")<block_end>@pytest.fixture(scope="module")<def_stmt>ThreeCRV <block_start><yield>_MintableTestToken("0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490")<block_end>@pytest.fixture(scope="module")<def_stmt>SUSD <block_start><yield>_MintableTestToken("0x57ab1ec28d129<PASSWORD>52df4df418<PASSWORD>a2d46d5f51")<block_end>@pytest.fixture(scope="module")<def_stmt>SBTC <block_start><yield>_MintableTestToken("0xfE18be6b3Bd88A2D2A7f928d00292E7a9963CfC6")<block_end>
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for scoring function."""<import_stmt>os<import_from_stmt>bleurt score<import_stmt>tensorflow.compat.v1<as>tf<line_sep>tf.enable_eager_execution()<line_sep>references=["An apple a day keeps the doctor away." "An apple a day keeps the doctor away."]<line_sep>candidates=["An apple a day keeps the doctor away." "An apple a day keeps doctors away."]<line_sep>ref_scores=[0.910811 0.771989]<def_stmt>get_test_checkpoint <block_start>pkg=os.path.abspath(__file__)<line_sep>pkg,_=os.path.split(pkg)<line_sep>ckpt=os.path.join(pkg "test_checkpoint")<assert_stmt>tf.io.gfile.exists(ckpt)<line_sep><return>ckpt<block_end><class_stmt>ScoreTest(tf.test.TestCase)<block_start><def_stmt>test_default_bleurt_score self<block_start>bleurt=score.BleurtScorer()<line_sep>scores=bleurt.score(references=references candidates=candidates)<line_sep>self.assertLen(scores 2)<line_sep>self.assertAllClose(scores ref_scores)<block_end><def_stmt>test_positional_args_error self<block_start>bleurt=score.BleurtScorer()<with_stmt>self.assertRaises(AssertionError)<block_start>_=bleurt.score(references candidates)<block_end><block_end><def_stmt>test_bleurt_nulls self<block_start>bleurt=score.BleurtScorer()<line_sep>test_references=[]<line_sep>test_candidates=[]<line_sep>scores=bleurt.score(references=test_references candidates=test_candidates)<line_sep>self.assertLen(scores 0)<block_end><def_stmt>test_bleurt_empty self<block_start>bleurt=score.BleurtScorer()<line_sep>test_references=[""]<line_sep>test_candidates=[""]<line_sep>scores=bleurt.score(references=test_references candidates=test_candidates)<line_sep>self.assertLen(scores 1)<block_end><def_stmt>test_bleurt_score_with_checkpoint self<block_start>checkpoint=get_test_checkpoint()<line_sep>bleurt=score.BleurtScorer(checkpoint)<line_sep>scores=bleurt.score(references=references candidates=candidates)<line_sep>self.assertLen(scores 2)<line_sep>self.assertAllClose(scores ref_scores)<block_end><def_stmt>test_tf_bleurt_score_eager self# Creates the TF Graph. <block_start>bleurt_ops=score.create_bleurt_ops()<line_sep>tfcandidates=tf.constant(candidates)<line_sep>tfreferences=tf.constant(references)<line_sep>bleurt_out=bleurt_ops(references=tfreferences candidates=tfcandidates)<line_sep># Computes the BLEURT scores. self.assertIn("predictions" bleurt_out)<line_sep>self.assertEqual(bleurt_out["predictions"].shape (2 ))<line_sep>self.assertAllClose(bleurt_out["predictions"] ref_scores)<block_end><def_stmt>test_tf_bleurt_positional_args_error self# Creates the TF Graph. <block_start>bleurt_ops=score.create_bleurt_ops()<line_sep>tfcandidates=tf.constant(candidates)<line_sep>tfreferences=tf.constant(references)<with_stmt>self.assertRaises(AssertionError)<block_start>_=bleurt_ops(tfreferences tfcandidates)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
# -*- coding: utf-8 -*- """ Created on 2017-6-27 @author: cheng.li """<import_from_stmt>typing Dict<import_from_stmt>typing Optional<import_from_stmt>typing Tuple<import_from_stmt>typing Union<import_stmt>numpy<as>np<import_from_stmt>alphamind.portfolio.optimizers QuadraticOptimizer TargetVolOptimizer <import_from_stmt>alphamind.exceptions.exceptions PortfolioBuilderException<def_stmt>_create_bounds lbound ubound bm risk_exposure risk_target<block_start><if_stmt>lbound<is><not><none><block_start>lbound=lbound-bm<block_end><if_stmt>ubound<is><not><none><block_start>ubound=ubound-bm<block_end><if_stmt>risk_exposure<is><not><none><block_start>cons_mat=risk_exposure.T<line_sep>bm_risk=cons_mat@bm<line_sep>clbound=(risk_target[0]-bm_risk).reshape((-1 1))<line_sep>cubound=(risk_target[1]-bm_risk).reshape((-1 1))<block_end><else_stmt><block_start>cons_mat=<none><line_sep>clbound=<none><line_sep>cubound=<none><block_end><return>lbound ubound cons_mat clbound cubound<block_end><def_stmt>_create_result optimizer bm<block_start><if_stmt>optimizer.status()<eq>"optimal"<or>optimizer.status()<eq>"optimal_inaccurate"<block_start><return>optimizer.status() optimizer.feval() optimizer.x_value()+bm<block_end><else_stmt><block_start><raise>PortfolioBuilderException(optimizer.status())<block_end><block_end><def_stmt>mean_variance_builder er:np.ndarray risk_model:Dict[str Union[<none> np.ndarray]] bm:np.ndarray lbound:Union[np.ndarray float <none>] ubound:Union[np.ndarray float <none>] risk_exposure:Optional[np.ndarray] risk_target:Optional[Tuple[np.ndarray np.ndarray]] lam:float=1. linear_solver:str='deprecated'<arrow>Tuple[str float np.ndarray]<block_start>lbound,ubound,cons_mat,clbound,cubound=_create_bounds(lbound ubound bm risk_exposure risk_target)<if_stmt>cons_mat<is><not><none><block_start>cons_matrix=np.concatenate([cons_mat clbound cubound] axis=1)<block_end><else_stmt><block_start>cons_matrix=<none><block_end>cov=risk_model['cov']<line_sep>special_risk=risk_model['idsync']<line_sep>risk_cov=risk_model['factor_cov']<line_sep>risk_exposure=risk_model['factor_loading']<line_sep>prob=QuadraticOptimizer(objective=-er cons_matrix=cons_matrix lbound=lbound ubound=ubound penalty=lam cov=cov factor_cov=risk_cov factor_load=risk_exposure factor_special=special_risk)<if_stmt>prob.status()<eq>"optimal"<or>prob.status()<eq>'optimal_inaccurate'<block_start><return>prob.status() prob.feval() prob.x_value()+bm<block_end><else_stmt><block_start><raise>PortfolioBuilderException(prob.status())<block_end><block_end><def_stmt>target_vol_builder er:np.ndarray risk_model:Dict[str Union[<none> np.ndarray]] bm:np.ndarray lbound:Union[np.ndarray float] ubound:Union[np.ndarray float] risk_exposure:Optional[np.ndarray] risk_target:Optional[Tuple[np.ndarray np.ndarray]] vol_target:float=1. 
linear_solver:str='ma27'<arrow>Tuple[str float np.ndarray]<block_start>lbound,ubound,cons_mat,clbound,cubound=_create_bounds(lbound ubound bm risk_exposure risk_target)<if_stmt>cons_mat<is><not><none><block_start>cons_matrix=np.concatenate([cons_mat clbound cubound] axis=1)<block_end><else_stmt><block_start>cons_matrix=<none><block_end>cov=risk_model['cov']<line_sep>special_risk=risk_model['idsync']<line_sep>risk_cov=risk_model['factor_cov']<line_sep>risk_exposure=risk_model['factor_loading']<line_sep>prob=TargetVolOptimizer(objective=-er cons_matrix=cons_matrix lbound=lbound ubound=ubound target_vol=vol_target factor_cov=risk_cov factor_load=risk_exposure factor_special=special_risk cov=cov)<if_stmt>prob.status()<eq>"optimal"<or>prob.status()<eq>'optimal_inaccurate'<block_start><return>prob.status() prob.feval() prob.x_value()+bm<block_end><else_stmt><block_start><raise>PortfolioBuilderException(prob.status())<block_end><block_end>
<import_stmt>sublime<import_stmt>os<import_from_stmt>..clipboard clipboard<line_sep>plat=sublime.platform()<if_stmt>plat<eq>"osx"<block_start><import_from_stmt>..applescript osascript<line_sep>RSTUDIOAPPLESCRIPT=os.path.join(os.path.dirname(__file__) "rstudio.applescript")<def_stmt>send_to_rstudio cmd<block_start>osascript(RSTUDIOAPPLESCRIPT cmd)<block_end><block_end><elif_stmt>plat<eq>"windows"<block_start><import_from_stmt>.. winauto<def_stmt>send_to_rstudio cmd from_view<block_start>rid=winauto.find_rstudio()<line_sep>clipboard.set_clipboard(cmd)<line_sep>winauto.paste_to_rstudio(rid from_view=from_view)<line_sep>clipboard.reset_clipboard()<block_end><block_end><elif_stmt>plat<eq>"linux"<block_start><import_from_stmt>..xdotool xdotool<def_stmt>send_to_rstudio cmd<block_start>wid=xdotool("search" "--onlyvisible" "--class" "rstudio")<if_stmt>wid<block_start>wid=wid.decode("utf-8").strip().split("\n")[-1]<line_sep>clipboard.set_clipboard(cmd)<line_sep>xdotool("key" "--window" wid "ctrl+v")<line_sep>xdotool("key" "--window" wid "--clearmodifiers" "Return")<line_sep>clipboard.reset_clipboard()<block_end><block_end><block_end>
<import_stmt>re<def_stmt>get_docker_registry image_uri<block_start>""" Explanation: (.+?(?:[:.].+?)\/)? - [GROUP 0] REGISTRY .+? - A registry must start with at least one character (?:[:.].+?)\/ - A registry must have ":" or "." and end with "/" ? - Make a registry optional (.*?) - [GROUP 1] REPOSITORY .*? - Get repository name until separator (?:[@:])? - SEPARATOR ?: - Don't capture separator [@:] - The separator must be either "@" or ":" ? - The separator is optional ((?<=[@:]).*)? - [GROUP 2] TAG / DIGEST (?<=[@:]) - A tag / digest must be preceded by "@" or ":" .* - Capture rest of tag / digest ? - A tag / digest is optional Examples: image - None - image - None example/image - None - example/image - None example/image:tag - None - example/image - tag example.domain.com/example/image:tag - example.domain.com/ - example/image - tag 192.168.127.12:123/example/image:tag - 192.168.127.12:123/ - example/image - tag example.domain.com/example/image@sha256:45b23dee0 - example.domain.com/ - example/image - sha256:45b23dee0 """<line_sep>pattern=re.compile(r"^(.+?(?:[:.].+?)\/)?(.*?)(?:[@:])?((?<=[@:]).*)?$")<line_sep>registry,repository,tag=pattern.match(image_uri).groups()<if_stmt>registry<is><not><none><block_start>registry=registry.rstrip("/")<block_end><return>registry<block_end>
####################################################################### # Name: scoping.__init__.py # Purpose: Meta-model / scope providers. # Author: <NAME> # License: MIT License ####################################################################### <import_stmt>glob<import_stmt>os<import_stmt>errno<import_from_stmt>os.path join exists abspath<def_stmt>metamodel_for_file_or_default_metamodel filename the_metamodel<block_start><import_from_stmt>textx metamodel_for_file<import_from_stmt>textx.exceptions TextXRegistrationError<try_stmt><block_start><return>metamodel_for_file(filename)<block_end><except_stmt>TextXRegistrationError<block_start><return>the_metamodel<block_end><block_end># ----------------------------------------------------------------------------- # Scope helper classes: # ----------------------------------------------------------------------------- <class_stmt>Postponed(object)<block_start>""" Return an object of this class to postpone a reference resolution. If you get circular dependencies in resolution logic, an error is raised. """<block_end><class_stmt>ModelRepository(object)<block_start>""" This class has the responsibility to hold a set of (model-identifiers, model) pairs as dictionary. In case of some scoping providers the model-identifier is the absolute filename of the model. """<def_stmt>__init__ self<block_start>self.name_idx=1<line_sep>self.filename_to_model={}<block_end><def_stmt>has_model self filename<block_start><return>abspath(filename)<in>self.filename_to_model<block_end><def_stmt>add_model self model<block_start><if_stmt>model._tx_filename<block_start>filename=abspath(model._tx_filename)<block_end><else_stmt><block_start>filename='builtin_model_{}'.format(self.name_idx)<line_sep>self.name_idx<augadd>1<block_end>self.filename_to_model[filename]=model<block_end><def_stmt>remove_model self model<block_start>filename=<none><for_stmt>f,m self.filename_to_model.items()<block_start><if_stmt>m<eq>model<block_start>filename=f<block_end><block_end><if_stmt>filename# print("*** delete {}".format(filename)) <block_start><del_stmt>self.filename_to_model[filename]<block_end><block_end><def_stmt>__contains__ self filename<block_start><return>self.has_model(filename)<block_end><def_stmt>__iter__ self<block_start><return>iter(self.filename_to_model.values())<block_end><def_stmt>__len__ self<block_start><return>len(self.filename_to_model)<block_end><def_stmt>__getitem__ self filename<block_start><return>self.filename_to_model[filename]<block_end><def_stmt>__setitem__ self filename model<block_start>self.filename_to_model[filename]=model<block_end><block_end><class_stmt>GlobalModelRepository(object)<block_start>""" This class has the responsibility to hold two ModelRepository objects: - one for model-local visible models - one for all models (globally, starting from some root model). The second `ModelRepository` `all_models` is to cache already loaded models and to prevent to load one model twice. The class allows loading local models visible to the current model. The current model is the model which references this `GlobalModelRepository` as attribute `_tx_model_repository` When loading a new local model, the current `GlobalModelRepository` forwards the embedded `ModelRepository` `all_models` to the new `GlobalModelRepository` of the next model. This is done using the `pre_ref_resolution_callback` to set the necessary information before resolving the references in the new loaded model. 
"""<def_stmt>__init__ self all_models=<none><block_start>""" Create a new repo for a model. Args: all_models: models to be added to this new repository. """<line_sep>self.local_models=ModelRepository()# used for current model <if_stmt>all_models<is><not><none><block_start>self.all_models=all_models# used to reuse already loaded models <block_end><else_stmt><block_start>self.all_models=ModelRepository()<block_end><block_end><def_stmt>remove_model self model<block_start>self.all_models.remove_model(model)<line_sep>self.local_models.remove_model(model)<block_end><def_stmt>remove_models self models<block_start><for_stmt>m models<block_start>self.remove_model(m)<block_end><block_end><def_stmt>load_models_using_filepattern self filename_pattern model glob_args is_main_model=<false> encoding='utf-8' add_to_local_models=<true> model_params=<none><block_start>""" Add a new model to all relevant objects. Args: filename_pattern: models to be loaded model: model holding the loaded models in its _tx_model_repository field (may be None). glob_args: arguments passed to the glob.glob function. Returns: the list of loaded models """<import_from_stmt>textx get_metamodel<if_stmt>model<is><not><none><block_start>self.update_model_in_repo_based_on_filename(model)<line_sep>the_metamodel=get_metamodel(model)# default metamodel <block_end><else_stmt><block_start>the_metamodel=<none><block_end>filenames=glob.glob(filename_pattern **glob_args)<if_stmt>len(filenames)<eq>0<block_start><raise>IOError(errno.ENOENT os.strerror(errno.ENOENT) filename_pattern)<block_end>loaded_models=[]<for_stmt>filename filenames<block_start>the_metamodel=metamodel_for_file_or_default_metamodel(filename the_metamodel)<line_sep>loaded_models.append(self.load_model(the_metamodel filename is_main_model encoding=encoding add_to_local_models=add_to_local_models model_params=model_params))<block_end><return>loaded_models<block_end><def_stmt>load_model_using_search_path self filename model search_path is_main_model=<false> encoding='utf8' add_to_local_models=<true> model_params=<none><block_start>""" Add a new model to all relevant objects Args: filename: models to be loaded model: model holding the loaded models in its _tx_model_repository field (may be None). search_path: list of search directories. 
Returns: the loaded model """<import_from_stmt>textx get_metamodel<if_stmt>model<block_start>self.update_model_in_repo_based_on_filename(model)<block_end><for_stmt>the_path search_path<block_start>full_filename=join(the_path filename)<line_sep># print(full_filename) <if_stmt>exists(full_filename)<block_start><if_stmt>model<is><not><none><block_start>the_metamodel=get_metamodel(model)<block_end><else_stmt><block_start>the_metamodel=<none><block_end>the_metamodel=metamodel_for_file_or_default_metamodel(filename the_metamodel)<line_sep><return>self.load_model(the_metamodel full_filename is_main_model encoding=encoding add_to_local_models=add_to_local_models model_params=model_params)<block_end><block_end><raise>IOError(errno.ENOENT os.strerror(errno.ENOENT) filename)<block_end><def_stmt>load_model self the_metamodel filename is_main_model encoding='utf-8' add_to_local_models=<true> model_params=<none><block_start>""" Load a single model Args: the_metamodel: the metamodel used to load the model filename: the model to be loaded (if not cached) Returns: the loaded/cached model """<assert_stmt>model_params<is><not><none> "model_params needs to be specified"<line_sep>filename=abspath(filename)<if_stmt><not>self.local_models.has_model(filename)<block_start><if_stmt>self.all_models.has_model(filename)# print("CACHED {}".format(filename)) <block_start>new_model=self.all_models[filename]<block_end><else_stmt># print("LOADING {}".format(filename)) # all models loaded here get their references resolved from the # root model <block_start>new_model=the_metamodel.internal_model_from_file(filename pre_ref_resolution_callback=<lambda>other_model:self.pre_ref_resolution_callback(other_model) is_main_model=is_main_model encoding=encoding model_params=model_params)<line_sep>self.all_models[filename]=new_model<block_end># print("ADDING {}".format(filename)) <if_stmt>add_to_local_models<block_start>self.local_models[filename]=new_model<block_end><block_end><else_stmt># print("LOCALLY CACHED {}".format(filename)) <block_start><pass><block_end><assert_stmt>filename<in>self.all_models# to be sure... <return>self.all_models[filename]<block_end><def_stmt>_add_model self model<block_start>filename=self.update_model_in_repo_based_on_filename(model)<line_sep># print("ADDED {}".format(filename)) self.local_models[filename]=model<block_end><def_stmt>update_model_in_repo_based_on_filename self model<block_start>""" Adds a model to the repo (not initially visible) Args: model: the model to be added. 
If the model has no filename, a name is invented Returns: the filename of the model added to the repo """<if_stmt>model._tx_filename<is><none><block_start><for_stmt>fn self.all_models.filename_to_model<block_start><if_stmt>self.all_models.filename_to_model[fn]<eq>model# print("UPDATED/CACHED {}".format(fn)) <block_start><return>fn<block_end><block_end>i=0<while_stmt>self.all_models.has_model("anonymous{}".format(i))<block_start>i<augadd>1<block_end>myfilename="anonymous{}".format(i)<line_sep>self.all_models[myfilename]=model<block_end><else_stmt><block_start>myfilename=abspath(model._tx_filename)<if_stmt>(<not>self.all_models.has_model(myfilename))<block_start>self.all_models[myfilename]=model<block_end><block_end># print("UPDATED/ADDED/CACHED {}".format(myfilename)) <return>myfilename<block_end><def_stmt>pre_ref_resolution_callback self other_model<block_start>""" internal: used to store a model after parsing into the repository Args: other_model: the parsed model Returns: nothing """<line_sep>filename=other_model._tx_filename<line_sep># print("PRE-CALLBACK -> {}".format(filename)) <assert_stmt>(filename)<line_sep>filename=abspath(filename)<line_sep>other_model._tx_model_repository=GlobalModelRepository(self.all_models)<line_sep>self.all_models[filename]=other_model<block_end><block_end><class_stmt>ModelLoader(object)<block_start>""" This class is an interface to mark a scope provider as an additional model loader. """<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>load_models self model<block_start><pass><block_end><block_end><def_stmt>get_all_models_including_attached_models model<block_start>""" get a list of all models stored within a model (including the owning model). @deprecated (BIC): use model_object.get_included_models() Args: model: the owning model Returns: a list of all models """<line_sep><return>get_included_models(model)<block_end><def_stmt>get_included_models model<block_start>""" get a list of all models stored within a model (including the owning model). Args: model: the owning model Returns: a list of all models """<if_stmt>(hasattr(model "_tx_model_repository"))<block_start>models=list(model._tx_model_repository.all_models)<if_stmt>model<not><in>models<block_start>models.append(model)<block_end><block_end><else_stmt><block_start>models=[model]<block_end><return>models<block_end><def_stmt>is_file_included filename model<block_start>""" Determines if a file is included by a model. Also checks for indirect inclusions (files included by included files). Args: filename: the file to be checked (filename is normalized) model: the owning model Returns: True if the file is included, else False (Note: if no _tx_model_repository is present, the function always returns False) """<if_stmt>(hasattr(model "_tx_model_repository"))<block_start>all_entries=model._tx_model_repository.all_models<line_sep><return>all_entries.has_model(filename)<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>remove_models_from_repositories models models_to_be_removed<block_start>""" Remove models from all relevant repositories (_tx_model_repository of models and related metamodel(s), if applicable). Args: models: the list of models from which the models_to_be_removed have to be removed. 
models_to_be_removed: models to be removed Returns: None """<assert_stmt>isinstance(models list)<for_stmt>model models<block_start><if_stmt>hasattr(model._tx_metamodel "_tx_model_repository")<block_start>model._tx_metamodel._tx_model_repository.remove_models(models_to_be_removed)<block_end><if_stmt>hasattr(model "_tx_model_repository")<block_start>model._tx_model_repository.remove_models(models_to_be_removed)<block_end><block_end><block_end>
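A hypothetical sketch of driving GlobalModelRepository directly (grammar and file names are invented; in practice the repository is normally created by textX's scope providers rather than by hand, and load_model asserts that model_params is not None):

from textx import metamodel_from_file

mm = metamodel_from_file("Entity.tx")          # assumed grammar file
main_model = mm.model_from_file("main.ent")    # assumed model file

repo = GlobalModelRepository()
included = repo.load_models_using_filepattern(
    "models/*.ent",                            # assumed glob pattern; must match at least one file
    model=main_model,
    glob_args={},                              # forwarded verbatim to glob.glob
    model_params={},                           # required: load_model asserts it is not None
)
print(len(repo.all_models), "models cached")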
<import_stmt>jax<def_stmt>flush platform<block_start>"""Wait for all pending XLA operations"""<line_sep>devices=jax.devices(platform)<for_stmt>device devices# as suggested in jax#4335 <block_start>noop=jax.device_put(0 device=device)+0<line_sep>noop.block_until_ready()<block_end><block_end>
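A hedged timing sketch showing where such a flush is useful: JAX dispatches work asynchronously, so a wall-clock measurement is only meaningful after the device queue has drained (the platform name is assumed to match the backend the work actually runs on):

import time
import jax.numpy as jnp

x = jnp.ones((1000, 1000))
t0 = time.time()
y = x @ x                 # dispatched asynchronously
flush("cpu")              # use "gpu" / "tpu" if that is where the computation runs
print("elapsed:", time.time() - t0)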
<import_from_stmt>.core read_orc to_orc<line_sep>
""" base package """<line_sep>#print("Package at {0}".format(__path__[0])) <import_stmt>importlib<line_sep>_modules=['globaling' 'excepting' 'interfacing' 'registering' 'storing' 'skedding' 'tasking' 'framing' 'logging' 'serving' 'monitoring' 'acting' 'poking' 'goaling' 'needing' 'traiting' 'fiating' 'wanting' 'completing' 'doing' 'deeding' 'arbiting' 'housing' 'building']<for_stmt>m _modules<block_start>importlib.import_module(".{0}".format(m) package='ioflo.base')<block_end><import_from_stmt>.storing Store Node Share Data Deck<import_from_stmt>.doing doify Doer DoerParam DoerSince DoerLapse<line_sep>
<import_stmt>maya.mel<as>mm<import_stmt>maya.cmds<as>mc<import_stmt>maya.OpenMaya<as>OpenMaya<import_stmt>glTools.utils.base<import_stmt>glTools.utils.mesh<import_stmt>glTools.utils.skinCluster<import_stmt>os.path<def_stmt>writeBurlyWeights mesh skinCluster influence filePath<block_start>''' '''<line_sep># Get basic procedure information burly='dnBurlyDeformer1'<line_sep>vtxCount=mc.polyEvaluate(mesh v=<true>)<line_sep>inf=mc.ls(influence l=<true>)<line_sep># Check skinCluster <if_stmt><not>glTools.utils.skinCluster.isSkinCluster(skinCluster)<block_start><raise>Exception('Object "'+skinCluster+'" is not a valid skinCluster!')<block_end># Get skinCluster Fn skinFn=glTools.utils.skinCluster.getSkinClusterFn(skinCluster)<line_sep># Get influence dag path influencePath=glTools.utils.base.getMDagPath(influence)<line_sep># Get points affected by influence infSelectionList=OpenMaya.MSelectionList()<line_sep>infWeightList=OpenMaya.MFloatArray()<line_sep>skinFn.getPointsAffectedByInfluence(influencePath infSelectionList infWeightList)<line_sep>infObjectPath=OpenMaya.MDagPath()<line_sep>infComponentList=OpenMaya.MObject()<line_sep>infSelectionList.getDagPath(0 infObjectPath infComponentList)<line_sep># Get affect point indices infComponentIndex=OpenMaya.MIntArray()<line_sep>infComponentIndexFn=OpenMaya.MFnSingleIndexedComponent(infComponentList)<line_sep>infComponentIndexFn.getElements(infComponentIndex)<line_sep>infComponentIndex=list(infComponentIndex)<line_sep># Get affect point position and normal arrays infComponentPosArray=OpenMaya.MPointArray()<line_sep>infComponentNormArray=OpenMaya.MVectorArray()<line_sep>infComponentVtxIt=OpenMaya.MItMeshVertex(infObjectPath infComponentList)<line_sep>normal=OpenMaya.MVector()<while_stmt><not>infComponentVtxIt.isDone()<block_start>infComponentPosArray.append(infComponentVtxIt.position(OpenMaya.MSpace.kWorld))<line_sep>infComponentVtxIt.getNormal(normal)<line_sep>infComponentNormArray.append(normal)<line_sep>infComponentVtxIt.next()<block_end># Open file fileId=open(filePath "w")<line_sep># Header header=['<?xml version="1.0" standalone="no" ?>\n' '<dnWeights type="dnBurlyDeformer" version="1.0" name="'+burly+'">\n' '\t<Map name="'+inf[0]+'">\n' '\t\t<Topology vertexCount="'+str(vtxCount)+'"/>\n']<line_sep>fileId.writelines(header)<line_sep># Weights weights=['\t\t<Weights>\n']<for_stmt>i range(len(infComponentIndex))<block_start><if_stmt><not>i%5<block_start>weights.append('\t\t\t')<block_end>weights.append(str(infWeightList[i])+' ')<if_stmt>i%5<eq>4<block_start>weights.append('\n')<block_end><block_end>weights.append('\n\t\t</Weights>\n')<line_sep>fileId.writelines(weights)<line_sep># Indices indices=['\t\t<Indices>\n']<for_stmt>i range(len(infComponentIndex))<block_start><if_stmt><not>i%10<block_start>indices.append('\t\t\t')<block_end>indices.append(str(infComponentIndex[i])+' ')<if_stmt>i%10<eq>9<block_start>indices.append('\n')<block_end><block_end>indices.append('\n\t\t</Indices>\n')<line_sep>fileId.writelines(indices)<line_sep># Position pos=['\t\t<Positions>\n']<for_stmt>i range(len(infComponentIndex))<block_start><if_stmt><not>i%2<block_start>pos.append('\t\t\t')<block_end>pos.append(str(infComponentPosArray[i][0])+' '+str(infComponentPosArray[i][1])+' '+str(infComponentPosArray[i][2])+' ')<if_stmt>i%2<block_start>pos.append('\n')<block_end><block_end>pos.append('\n\t\t</Positions>\n')<line_sep>fileId.writelines(pos)<line_sep># Normals norm=['\t\t<Normals>\n']<for_stmt>i 
range(len(infComponentIndex))<block_start><if_stmt><not>i%2<block_start>norm.append('\t\t\t')<block_end>norm.append(str(infComponentNormArray[i][0])+' '+str(infComponentNormArray[i][1])+' '+str(infComponentNormArray[i][2])+' ')<if_stmt>i%2<block_start>norm.append('\n')<block_end><block_end>norm.append('\n\t\t</Normals>\n')<line_sep>fileId.writelines(norm)<line_sep># Radii radii=['\t\t<Radii>\n']<for_stmt>i range(len(infComponentIndex))<block_start><if_stmt><not>i%6<block_start>radii.append('\t\t\t')<block_end>radii.append('0.01 ')<if_stmt>i%6<eq>5<block_start>radii.append('\n')<block_end><block_end>radii.append('\n\t\t</Radii>\n')<line_sep>fileId.writelines(radii)<line_sep># Footer footer=['\t</Map>' '\n</dnWeights>']<line_sep>fileId.writelines(footer)<line_sep># Close file fileId.close()<block_end><def_stmt>writeBurlyWeights_allInfluences mesh skinCluster directoryPath<block_start>''' '''<line_sep># Check mesh <if_stmt><not>glTools.utils.mesh.isMesh(mesh)<block_start><raise>Exception('Object "'+mesh+'" contains no valid polygon mesh!')<block_end># Check skinCluster <if_stmt><not>glTools.utils.skinCluster.isSkinCluster(skinCluster)<block_start><raise>Exception('Object "'+skinCluster+'" is not a valid skinCluster!')<block_end># Check directory <if_stmt><not>os.path.isdir(directoryPath)<block_start><raise>Exception('Directory path "'+directoryPath+'" does not exist!')<block_end># Get skinCluster influences influenceList=mc.skinCluster(skinCluster q=<true> inf=<true>)<line_sep># Write weights <for_stmt>influence influenceList<block_start>writeBurlyWeights(mesh skinCluster influence directoryPath+influence+'.xml')<block_end><block_end><def_stmt>loadBurlyWeights burlyDeformer directoryPath<block_start>''' '''<line_sep># Check burly deformer <if_stmt><not>mc.objExists(burlyDeformer)<block_start><raise>Exception('Burly deformer "'+burlyDeformer+'" does not exist!')<block_end># Check directory path <if_stmt><not>directoryPath.endswith('/')<block_start>directoryPath<augadd>'/'<block_end><if_stmt><not>os.path.isdir(directoryPath)<block_start><raise>Exception('Directory path "'+directoryPath+'" does not exist!')<block_end># Get directory listing fileList=[i<for>i os.listdir(directoryPath)<if>i.endswith('.xml')]<line_sep># Load weights <for_stmt>filePath fileList<block_start>fileId=directoryPath+filePath<line_sep>influence=filePath.replace('.xml' '')<line_sep>mm.eval('dnBurlyDeformer -loadWeights "'+fileId+'" "'+burlyDeformer+'" "'+influence+'"')<block_end><block_end><def_stmt>convertToBurly skinCluster burlyDeformerName=''<block_start>''' '''<line_sep># Check skinCluster <if_stmt><not>mc.objExists(skinCluster)<block_start><raise>Exception('SkinCluster "'+skinCluster+'" does not exist!')<block_end><if_stmt><not>glTools.utils.skinCluster.isSkinCluster(skinCluster)<block_start><raise>Exception('Object "'+skinCluster+'" is not a valid skinCluster deformer!')<block_end># Get affected mesh #mesh = # Designate temporary path for exported weight files dirPath='/usr/tmp/'<line_sep># Export skinCluster weight files influenceList=mc.skinCluster(skinCluster q=<true> inf=<true>)<line_sep>writeBurlyWeights_allInfluences(mesh skinCluster dirPath)<line_sep># Create burly deformer mm.eval('dnBurlyDeformer_createNamed("'+geo+'","'+burlyDeformerName+'")')<block_end>
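A hypothetical Maya-session sketch tying the two halves together: export every influence of a skinCluster to XML, then load the files onto a dnBurlyDeformer (node names are invented; requires Maya with the dnBurlyDeformer plugin loaded, a skinned mesh, and an existing target directory with a trailing slash):

import maya.cmds as mc

mesh = 'body_geo'                                             # assumed skinned mesh
skin = mc.ls(mc.listHistory(mesh), type='skinCluster')[0]     # first skinCluster in history
writeBurlyWeights_allInfluences(mesh, skin, '/usr/tmp/burlyWeights/')

# Later, on a burly deformer driving the same mesh:
loadBurlyWeights('dnBurlyDeformer1', '/usr/tmp/burlyWeights/')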
# system <import_from_future_stmt> print_function<line_sep># python lib <import_stmt>math<import_from_stmt>copy deepcopy<import_stmt>numpy<as>np<line_sep># tf_render <import_stmt>tensorflow<as>tf<line_sep># self <import_from_stmt>thirdParty.tf_mesh_renderer.mesh_renderer.mesh_renderer phong_shader tone_mapper<import_from_stmt>thirdParty.tf_mesh_renderer.mesh_renderer.rasterize_triangles rasterize_triangles<line_sep># perspective <def_stmt>mesh_renderer_camera_light vertices triangles normals diffuse_colors mtx_camera mtx_perspective_frustrum camera_position image_width image_height<block_start>"""Renders an input scene using phong shading, and returns an output image. Args: vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is an xyz position in world space. triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet should contain vertex indices describing a triangle such that the triangle's normal points toward the viewer if the forward order of the triplet defines a clockwise winding of the vertices. Gradients with respect to this tensor are not available. normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is the xyz vertex normal for its corresponding vertex. Each vector is assumed to be already normalized. diffuse_colors: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for each vertex. mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the camera model view matrix mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the perspective and frustrum matrix camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with shape [3] specifying the XYZ world space camera position. light_intensities: a 3-D tensor with shape [batch_size, light_count, 3]. The RGB intensity values for each light. Intensities may be above one. image_width: int specifying desired output image width in pixels. image_height: int specifying desired output image height in pixels. Returns: A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4] containing the lit RGBA color values for each image at each pixel. RGB colors are the intensity values before tonemapping and can be in the range [0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely reasonable for both viewing and training most scenes. More complex scenes with multiple lights should tone map color values for display only. One simple tonemapping approach is to rescale color values as x/(1+x); gamma compression is another common techinque. Alpha values are zero for background pixels and near one for mesh pixels. Raises: ValueError: An invalid argument to the method is detected. 
"""<if_stmt>len(vertices.shape)<ne>3<block_start><raise>ValueError('Vertices must have shape [batch_size, vertex_count, 3].')<block_end>batch_size=vertices.shape[0].value<if_stmt>len(normals.shape)<ne>3<block_start><raise>ValueError('Normals must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>len(diffuse_colors.shape)<ne>3<block_start><raise>ValueError('vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>camera_position.get_shape().as_list()<eq>[3]<block_start>camera_position=tf.tile(tf.expand_dims(camera_position axis=0) [batch_size 1])<block_end><elif_stmt>camera_position.get_shape().as_list()<ne>[batch_size 3]<block_start><raise>ValueError('Camera_position must have shape [batch_size, 3]')<block_end># TODO: Debug Shape <if_stmt>mtx_camera.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_camera axis=0) [batch_size 1 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>mtx_perspective_frustrum.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_perspective_frustrum axis=0) [batch_size 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end>vertex_attributes=tf.concat([normals vertices diffuse_colors] axis=2)<line_sep>clip_space_transforms=tf.matmul(mtx_perspective_frustrum mtx_camera name="mtx_clip_space_transforms_batch")<line_sep>pixel_attributes,alpha,tri_ids=rasterize_triangles(vertices vertex_attributes triangles clip_space_transforms image_width image_height [-1]<times>vertex_attributes.shape[2].value)<line_sep># Extract the interpolated vertex attributes from the pixel buffer and # supply them to the shader: #pixel_normals = tf.nn.l2_normalize(pixel_attributes[:, :, :, 0:3], dim=3) #pixel_positions = pixel_attributes[:, :, :, 3:6] diffuse_colors=pixel_attributes[: : : 6:9]<line_sep>diffuse_colors=tf.reverse(diffuse_colors axis=[1])<line_sep>#return renders, pixel_mask pixel_mask=alpha<g>0.5<line_sep>pixel_mask=tf.cast(pixel_mask dtype=tf.float32)<line_sep>pixel_mask=tf.reverse(pixel_mask axis=[1])<line_sep># tri_ids=tf.expand_dims(tri_ids -1)<line_sep><return>diffuse_colors pixel_mask tri_ids<block_end><def_stmt>mesh_renderer_camera vertices triangles normals diffuse_colors mtx_camera mtx_perspective_frustrum camera_position light_positions light_intensities image_width image_height specular_colors=<none> shininess_coefficients=<none> ambient_color=<none> background=-1<block_start>"""Renders an input scene using phong shading, and returns an output image. Args: vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is an xyz position in world space. triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet should contain vertex indices describing a triangle such that the triangle's normal points toward the viewer if the forward order of the triplet defines a clockwise winding of the vertices. Gradients with respect to this tensor are not available. normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is the xyz vertex normal for its corresponding vertex. Each vector is assumed to be already normalized. diffuse_colors: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for each vertex. 
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the camera model view matrix mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the perspective and frustrum matrix camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with shape [3] specifying the XYZ world space camera position. light_positions: a 3-D tensor with shape [batch_size, light_count, 3]. The XYZ position of each light in the scene. In the same coordinate space as pixel_positions. light_intensities: a 3-D tensor with shape [batch_size, light_count, 3]. The RGB intensity values for each light. Intensities may be above one. image_width: int specifying desired output image width in pixels. image_height: int specifying desired output image height in pixels. specular_colors: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. The RGB specular reflection in the range [0, 1] for each vertex. If supplied, specular reflections will be computed, and both specular_colors and shininess_coefficients are expected. shininess_coefficients: a 0D-2D float32 tensor with maximum shape [batch_size, vertex_count]. The phong shininess coefficient of each vertex. A 0D tensor or float gives a constant shininess coefficient across all batches and images. A 1D tensor must have shape [batch_size], and a single shininess coefficient per image is used. ambient_color: a 2D tensor with shape [batch_size, 3]. The RGB ambient color, which is added to each pixel in the scene. If None, it is assumed to be black. Returns: A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4] containing the lit RGBA color values for each image at each pixel. RGB colors are the intensity values before tonemapping and can be in the range [0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely reasonable for both viewing and training most scenes. More complex scenes with multiple lights should tone map color values for display only. One simple tonemapping approach is to rescale color values as x/(1+x); gamma compression is another common techinque. Alpha values are zero for background pixels and near one for mesh pixels. Raises: ValueError: An invalid argument to the method is detected. 
"""<if_stmt>len(vertices.shape)<ne>3<block_start><raise>ValueError('Vertices must have shape [batch_size, vertex_count, 3].')<block_end>batch_size=vertices.shape[0].value<if_stmt>len(normals.shape)<ne>3<block_start><raise>ValueError('Normals must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>len(light_positions.shape)<ne>3<block_start><raise>ValueError('Light_positions must have shape [batch_size, light_count, 3].')<block_end><if_stmt>len(light_intensities.shape)<ne>3<block_start><raise>ValueError('Light_intensities must have shape [batch_size, light_count, 3].')<block_end><if_stmt>len(diffuse_colors.shape)<ne>3<block_start><raise>ValueError('vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>(ambient_color<is><not><none><and>ambient_color.get_shape().as_list()<ne>[batch_size 3])<block_start><raise>ValueError('Ambient_color must have shape [batch_size, 3].')<block_end><if_stmt>camera_position.get_shape().as_list()<eq>[3]<block_start>camera_position=tf.tile(tf.expand_dims(camera_position axis=0) [batch_size 1])<block_end><elif_stmt>camera_position.get_shape().as_list()<ne>[batch_size 3]<block_start><raise>ValueError('Camera_position must have shape [batch_size, 3]')<block_end># TODO: Debug Shape <if_stmt>mtx_camera.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_camera axis=0) [batch_size 1 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>mtx_perspective_frustrum.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_perspective_frustrum axis=0) [batch_size 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>specular_colors<is><not><none><and>shininess_coefficients<is><none><block_start><raise>ValueError('Specular colors were supplied without shininess coefficients.')<block_end><if_stmt>shininess_coefficients<is><not><none><and>specular_colors<is><none><block_start><raise>ValueError('Shininess coefficients were supplied without specular colors.')<block_end><if_stmt>specular_colors<is><not><none># Since a 0-D float32 tensor is accepted, also accept a float. 
<block_start><if_stmt>isinstance(shininess_coefficients float)<block_start>shininess_coefficients=tf.constant(shininess_coefficients dtype=tf.float32)<block_end><if_stmt>len(specular_colors.shape)<ne>3<block_start><raise>ValueError('The specular colors must have shape [batch_size, '<concat>'vertex_count, 3].')<block_end><if_stmt>len(shininess_coefficients.shape)<g>2<block_start><raise>ValueError('The shininess coefficients must have shape at most'<concat>'[batch_size, vertex_count].')<block_end># If we don't have per-vertex coefficients, we can just reshape the # input shininess to broadcast later, rather than interpolating an # additional vertex attribute: <if_stmt>len(shininess_coefficients.shape)<l>2<block_start>vertex_attributes=tf.concat([normals vertices diffuse_colors specular_colors] axis=2)<block_end><else_stmt><block_start>vertex_attributes=tf.concat([normals vertices diffuse_colors specular_colors tf.expand_dims(shininess_coefficients axis=2)] axis=2)<block_end><block_end><else_stmt><block_start>vertex_attributes=tf.concat([normals vertices diffuse_colors] axis=2)<block_end># camera_matrices = camera_utils.look_at(camera_position, camera_lookat, # camera_up) # # perspective_transforms = camera_utils.perspective(image_width / image_height, # fov_y, near_clip, far_clip) clip_space_transforms=tf.matmul(mtx_perspective_frustrum mtx_camera name="mtx_clip_space_transforms_batch")<line_sep>pixel_attributes,alpha,tri_ids=rasterize_triangles(vertices vertex_attributes triangles clip_space_transforms image_width image_height [background]<times>vertex_attributes.shape[2].value)<line_sep># Extract the interpolated vertex attributes from the pixel buffer and # supply them to the shader: pixel_normals=tf.nn.l2_normalize(pixel_attributes[: : : 0:3] dim=3)<line_sep>pixel_positions=pixel_attributes[: : : 3:6]<line_sep>diffuse_colors=pixel_attributes[: : : 6:9]<if_stmt>specular_colors<is><not><none><block_start>specular_colors=pixel_attributes[: : : 9:12]<line_sep># Retrieve the interpolated shininess coefficients if necessary, or just # reshape our input for broadcasting: <if_stmt>len(shininess_coefficients.shape)<eq>2<block_start>shininess_coefficients=pixel_attributes[: : : 12]<block_end><else_stmt><block_start>shininess_coefficients=tf.reshape(shininess_coefficients [-1 1 1])<block_end><block_end>pixel_mask=tf.cast(tf.reduce_any(diffuse_colors<ge>0 axis=3) tf.float32)<line_sep>renders=phong_shader(normals=pixel_normals alphas=pixel_mask pixel_positions=pixel_positions light_positions=light_positions light_intensities=light_intensities diffuse_colors=diffuse_colors camera_position=camera_position<if>specular_colors<is><not><none><else><none> specular_colors=specular_colors shininess_coefficients=shininess_coefficients ambient_color=ambient_color)<line_sep>#return renders, pixel_mask pixel_mask=alpha<g>0.5<line_sep>pixel_mask=tf.cast(pixel_mask dtype=tf.float32)<line_sep>pixel_mask=tf.reverse(pixel_mask axis=[1])<line_sep><return>renders pixel_mask<block_end><def_stmt>mesh_depthmap_camera vertices triangles mtx_ext mtx_camera mtx_perspective_frustrum image_width image_height<block_start>"""Renders an input scene using phong shading, and returns an output image. Args: vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is an xyz position in world space. triangles: 2-D int32 tensor with shape [triangle_count, 3]. 
Each triplet should contain vertex indices describing a triangle such that the triangle's normal points toward the viewer if the forward order of the triplet defines a clockwise winding of the vertices. Gradients with respect to this tensor are not available. normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is the xyz vertex normal for its corresponding vertex. Each vector is assumed to be already normalized. mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the camera model view matrix mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the perspective and frustrum matrix camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with shape [3] specifying the XYZ world space camera position. image_width: int specifying desired output image width in pixels. image_height: int specifying desired output image height in pixels. Returns: A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4] containing the lit RGBA color values for each image at each pixel. RGB colors are the intensity values before tonemapping and can be in the range [0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely reasonable for both viewing and training most scenes. More complex scenes with multiple lights should tone map color values for display only. One simple tonemapping approach is to rescale color values as x/(1+x); gamma compression is another common techinque. Alpha values are zero for background pixels and near one for mesh pixels. Raises: ValueError: An invalid argument to the method is detected. """<if_stmt>len(vertices.shape)<ne>3<block_start><raise>ValueError('Vertices must have shape [batch_size, vertex_count, 3].')<block_end>batch_size=vertices.shape[0].value<line_sep># TODO: Debug Shape <if_stmt>mtx_camera.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_camera axis=0) [batch_size 1 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>mtx_perspective_frustrum.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_perspective_frustrum axis=0) [batch_size 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end># vertex attribute of depthmap is only z vertex_attributes=vertices<line_sep>#vertex_attributes = tf_render.expand_dims(vertex_attributes, -1) # camera_matrices = camera_utils.look_at(camera_position, camera_lookat, # camera_up) # # perspective_transforms = camera_utils.perspective(image_width / image_height, # fov_y, near_clip, far_clip) clip_space_transforms=tf.matmul(mtx_perspective_frustrum mtx_camera name="mtx_clip_space_transforms_batch")<line_sep>pixel_attributes,alpha,_=rasterize_triangles(vertices vertex_attributes triangles clip_space_transforms image_width image_height [99999999]<times>vertex_attributes.shape[2].value)<line_sep># Extract the interpolated vertex attributes from the pixel buffer and # supply them to the shader: filler_homo=tf.ones(shape=[pixel_attributes.shape[0] pixel_attributes.shape[1] pixel_attributes.shape[2] 1])<line_sep>pixel_attributes=tf.concat([pixel_attributes filler_homo] axis=3)<line_sep>pixel_attributes=tf.reshape(pixel_attributes shape=[batch_size -1 
4])<line_sep>pixel_attributes=tf.transpose(pixel_attributes perm=[0 2 1])<line_sep>pixel_attributes=tf.matmul(mtx_ext pixel_attributes)<line_sep>pixel_attributes=tf.transpose(pixel_attributes perm=[0 2 1])<line_sep>pixel_attributes=tf.reshape(pixel_attributes shape=[batch_size image_height image_width 4])<line_sep>depth_map=pixel_attributes[: : : 2]<line_sep>pixel_mask=alpha<g>0.5<line_sep>pixel_mask=tf.cast(pixel_mask dtype=tf.float32)<line_sep>depth_map=tf.reverse(depth_map axis=[1])<line_sep>pixel_mask=tf.reverse(pixel_mask axis=[1])<line_sep><return>depth_map pixel_mask<block_end># ortho <def_stmt>mesh_rendererOrtho_camera vertices triangles normals diffuse_colors mtx_camera mtx_perspective_frustrum light_positions light_intensities image_width image_height ambient_color=<none> background=-1<block_start>"""Renders an input scene using phong shading, and returns an output image. Args: vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is an xyz position in world space. triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet should contain vertex indices describing a triangle such that the triangle's normal points toward the viewer if the forward order of the triplet defines a clockwise winding of the vertices. Gradients with respect to this tensor are not available. normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is the xyz vertex normal for its corresponding vertex. Each vector is assumed to be already normalized. diffuse_colors: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for each vertex. mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the camera model view matrix mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the perspective and frustrum matrix camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with shape [3] specifying the XYZ world space camera position. light_positions: a 3-D tensor with shape [batch_size, light_count, 3]. The XYZ position of each light in the scene. In the same coordinate space as pixel_positions. light_intensities: a 3-D tensor with shape [batch_size, light_count, 3]. The RGB intensity values for each light. Intensities may be above one. image_width: int specifying desired output image width in pixels. image_height: int specifying desired output image height in pixels. specular_colors: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. The RGB specular reflection in the range [0, 1] for each vertex. If supplied, specular reflections will be computed, and both specular_colors and shininess_coefficients are expected. shininess_coefficients: a 0D-2D float32 tensor with maximum shape [batch_size, vertex_count]. The phong shininess coefficient of each vertex. A 0D tensor or float gives a constant shininess coefficient across all batches and images. A 1D tensor must have shape [batch_size], and a single shininess coefficient per image is used. ambient_color: a 2D tensor with shape [batch_size, 3]. The RGB ambient color, which is added to each pixel in the scene. If None, it is assumed to be black. Returns: A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4] containing the lit RGBA color values for each image at each pixel. RGB colors are the intensity values before tonemapping and can be in the range [0, infinity]. 
Clipping to the range [0,1] with tf_render.clip_by_value is likely reasonable for both viewing and training most scenes. More complex scenes with multiple lights should tone map color values for display only. One simple tonemapping approach is to rescale color values as x/(1+x); gamma compression is another common techinque. Alpha values are zero for background pixels and near one for mesh pixels. Raises: ValueError: An invalid argument to the method is detected. """<if_stmt>len(vertices.shape)<ne>3<block_start><raise>ValueError('Vertices must have shape [batch_size, vertex_count, 3].')<block_end>batch_size=vertices.shape[0].value<if_stmt>len(normals.shape)<ne>3<block_start><raise>ValueError('Normals must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>len(light_positions.shape)<ne>3<block_start><raise>ValueError('Light_positions must have shape [batch_size, light_count, 3].')<block_end><if_stmt>len(light_intensities.shape)<ne>3<block_start><raise>ValueError('Light_intensities must have shape [batch_size, light_count, 3].')<block_end><if_stmt>len(diffuse_colors.shape)<ne>3<block_start><raise>ValueError('vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')<block_end><if_stmt>(ambient_color<is><not><none><and>ambient_color.get_shape().as_list()<ne>[batch_size 3])<block_start><raise>ValueError('Ambient_color must have shape [batch_size, 3].')<block_end># TODO: Debug Shape <if_stmt>mtx_camera.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_camera axis=0) [batch_size 1 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>mtx_perspective_frustrum.get_shape().as_list()<eq>[4 4]<block_start>mtx_camera=tf.tile(tf.expand_dims(mtx_perspective_frustrum axis=0) [batch_size 1])<block_end><elif_stmt>mtx_camera.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end>vertex_attributes=tf.concat([normals vertices diffuse_colors] axis=2)<line_sep>clip_space_transforms=tf.matmul(mtx_perspective_frustrum mtx_camera name="mtx_clip_space_transforms_batch")<line_sep>pixel_attributes,alpha,tri_ids=rasterize_triangles(vertices vertex_attributes triangles clip_space_transforms image_width image_height [background]<times>vertex_attributes.shape[2].value)<line_sep># Extract the interpolated vertex attributes from the pixel buffer and # supply them to the shader: pixel_normals=tf.nn.l2_normalize(pixel_attributes[: : : 0:3] dim=3)<line_sep>pixel_positions=pixel_attributes[: : : 3:6]<line_sep>diffuse_colors=pixel_attributes[: : : 6:9]<line_sep>pixel_mask=tf.cast(tf.reduce_any(diffuse_colors<ge>0 axis=3) tf.float32)<line_sep>renders=phong_shader(normals=pixel_normals alphas=pixel_mask pixel_positions=pixel_positions light_positions=light_positions light_intensities=light_intensities diffuse_colors=diffuse_colors camera_position=<none> specular_colors=<none> shininess_coefficients=<none> ambient_color=ambient_color)<line_sep>#return renders, pixel_mask pixel_mask=alpha<g>0.5<line_sep>pixel_mask=tf.cast(pixel_mask dtype=tf.float32)<line_sep>pixel_mask=tf.reverse(pixel_mask axis=[1])<line_sep><return>renders pixel_mask<block_end><def_stmt>mesh_depthmapOrtho_camera vertices triangles mtx_ext mtx_perspective_frustrum image_width image_height<block_start>"""Renders an input scene using phong shading, and returns an output image. 
Args: vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is an xyz position in world space. triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet should contain vertex indices describing a triangle such that the triangle's normal points toward the viewer if the forward order of the triplet defines a clockwise winding of the vertices. Gradients with respect to this tensor are not available. normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each triplet is the xyz vertex normal for its corresponding vertex. Each vector is assumed to be already normalized. mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the camera model view matrix mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with shape [4, 4] specifying the perspective and frustrum matrix camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with shape [3] specifying the XYZ world space camera position. image_width: int specifying desired output image width in pixels. image_height: int specifying desired output image height in pixels. Returns: A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4] containing the lit RGBA color values for each image at each pixel. RGB colors are the intensity values before tonemapping and can be in the range [0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely reasonable for both viewing and training most scenes. More complex scenes with multiple lights should tone map color values for display only. One simple tonemapping approach is to rescale color values as x/(1+x); gamma compression is another common techinque. Alpha values are zero for background pixels and near one for mesh pixels. Raises: ValueError: An invalid argument to the method is detected. 
"""<if_stmt>len(vertices.shape)<ne>3<block_start><raise>ValueError('Vertices must have shape [batch_size, vertex_count, 3].')<block_end>batch_size=vertices.shape[0].value<line_sep># TODO: Debug Shape <if_stmt>mtx_ext.get_shape().as_list()<eq>[4 4]<block_start>mtx_ext=tf.tile(tf.expand_dims(mtx_ext axis=0) [batch_size 1 1])<block_end><elif_stmt>mtx_ext.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end><if_stmt>mtx_perspective_frustrum.get_shape().as_list()<eq>[4 4]<block_start>mtx_perspective_frustrum=tf.tile(tf.expand_dims(mtx_perspective_frustrum axis=0) [batch_size 1])<block_end><elif_stmt>mtx_perspective_frustrum.get_shape().as_list()<ne>[batch_size 4 4]<block_start><raise>ValueError('Camera_lookat must have shape [batch_size, 4, 4]')<block_end># vertex attribute of depthmap is only z vertex_attributes=vertices<line_sep>#vertex_attributes = tf_render.expand_dims(vertex_attributes, -1) # camera_matrices = camera_utils.look_at(camera_position, camera_lookat, # camera_up) # # perspective_transforms = camera_utils.perspective(image_width / image_height, # fov_y, near_clip, far_clip) clip_space_transforms=tf.matmul(mtx_perspective_frustrum mtx_ext name="mtx_clip_space_transforms_batch")<line_sep>pixel_attributes,alpha,_=rasterize_triangles(vertices vertex_attributes triangles clip_space_transforms image_width image_height [99999999]<times>vertex_attributes.shape[2].value)<line_sep># Extract the interpolated vertex attributes from the pixel buffer and # supply them to the shader: filler_homo=tf.ones(shape=[pixel_attributes.shape[0] pixel_attributes.shape[1] pixel_attributes.shape[2] 1])<line_sep>pixel_attributes=tf.concat([pixel_attributes filler_homo] axis=3)<line_sep>pixel_attributes=tf.reshape(pixel_attributes shape=[batch_size -1 4])<line_sep>pixel_attributes=tf.transpose(pixel_attributes perm=[0 2 1])<line_sep>pixel_attributes=tf.matmul(mtx_ext pixel_attributes)<line_sep>pixel_attributes=tf.transpose(pixel_attributes perm=[0 2 1])<line_sep>pixel_attributes=tf.reshape(pixel_attributes shape=[batch_size image_height image_width 4])<line_sep>depth_map=pixel_attributes[: : : 2]<line_sep>pixel_mask=alpha<g>0.5<line_sep>pixel_mask=tf.cast(pixel_mask dtype=tf.float32)<line_sep>depth_map=tf.reverse(depth_map axis=[1])<line_sep>pixel_mask=tf.reverse(pixel_mask axis=[1])<line_sep><return>depth_map pixel_mask<block_end>
<import_from_stmt>cupy _util<line_sep># Attributes and Methods for fallback_mode # Auto-execute numpy method when corresponding cupy method is not found # "NOQA" to suppress flake8 warning <import_from_stmt>cupyx.fallback_mode.fallback numpy# NOQA _util.experimental('cupyx.fallback_mode.numpy')<line_sep>
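A sketch of the intended entry point, assuming the proxying described in the comment above (attribute lookups fall through to NumPy when CuPy lacks an equivalent); the namespace is experimental and warns on import:

from cupyx.fallback_mode import numpy as fnp   # experimental, emits a warning

x = fnp.arange(8).reshape(2, 4)    # routed to cupy when a cupy implementation exists
total = fnp.sum(x)
# Functions missing from cupy would instead be auto-dispatched to numpy.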
# License: BSD 3 clause # import tick.base <import_from_stmt>.history History<line_sep>__all__=["History"]<line_sep>
<import_stmt>datetime<as>dt<import_stmt>dateutil.parser<import_stmt>json<import_stmt>random<import_from_stmt>server.models Client db Assignment Backup Course User Version Group <import_from_stmt>server.utils encode_id<import_from_stmt>tests OkTestCase<class_stmt>TestApi(OkTestCase)<block_start><def_stmt>_test_backup self submit delay=10 success=<true><block_start>self.setup_course()<line_sep>email=self.user1.email<line_sep>self.login(email)<line_sep>user=User.lookup(email)<line_sep>course=self.course<line_sep>assignment=self.assignment<line_sep># Offset the due date & lock_dates assignment.due_date=assignment.due_date+dt.timedelta(hours=delay)<line_sep>assignment.lock_date=assignment.lock_date+dt.timedelta(days=delay)<line_sep>okversion=Version(name="ok-client" current_version="v1.5.0" download_link="http://localhost/ok")<line_sep>db.session.add(okversion)<line_sep>db.session.commit()<line_sep>data={'assignment':assignment.name 'messages':{'file_contents':{'hog.py':'print("Hello world!")'}} 'submit':submit }<line_sep>response=self.client.post('/api/v3/backups/?client_version=v1.5.0' data=json.dumps(data) headers=[('Content-Type' 'application/json')])<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>user.id).first()<assert_stmt>backup<is><not><none><if_stmt>success<or><not>submit<block_start><assert_stmt>response.json['data']<eq>{'email':email 'key':encode_id(backup.id) 'course':{'id':course.id 'offering':course.offering 'display_name':course.display_name 'active':course.active 'timezone':'America/Los_Angeles'} 'assignment':assignment.name}<line_sep>self.assert_200(response)<block_end><if_stmt><not>success<block_start>self.assert_403(response)<line_sep>submit=<false><assert_stmt>response.json['data']<eq>{'data':{'backup':<true> 'late':<true>}}<block_end><assert_stmt>backup.assignment<eq>assignment<assert_stmt>backup.submitter_id<eq>user.id<assert_stmt>len(backup.messages)<eq>len(data['messages'])<assert_stmt>backup.submit<eq>submit<block_end><def_stmt>test_backup self<block_start>self._test_backup(<false>)<block_end><def_stmt>test_backup_after_deadline self<block_start>self._test_backup(<false> delay=-2)<block_end><def_stmt>test_submit self<block_start>self._test_backup(<true>)<block_end><def_stmt>test_submit_after_deadline self<block_start>self._test_backup(<true> delay=-2 success=<false>)<block_end><def_stmt>test_api self<block_start>response=self.client.get('/api/v3/')<line_sep>self.assert_200(response)<assert_stmt>response.json['data']<eq>{'version':'v3' 'url':'/api/v3/' 'documentation':'https://okpy.github.io/documentation' 'github':'https://github.com/okpy/ok'}<assert_stmt>response.json['message']<eq>'success'<assert_stmt>response.json['code']<eq>200<block_end><def_stmt>test_no_envelope self<block_start>response=self.client.get('/api/v3/?envelope=false')<line_sep>self.assert_200(response)<assert_stmt>'data'<not><in>response.json<assert_stmt>'message'<not><in>response.json<assert_stmt>'code'<not><in>response.json<assert_stmt>response.json['version']<eq>'v3'<block_end><def_stmt>test_non_existant_api self<block_start>response=self.client.get('/api/v3/doesnotexist')<line_sep>self.assert_404(response)<assert_stmt>response.json['data']<eq>{}<assert_stmt>response.json['code']<eq>404<block_end><def_stmt>test_get_backup self<block_start>self._test_backup(<false>)<line_sep>backup=Backup.query.first()<line_sep>submission_time=(self.assignment.due_date-dt.timedelta(days=random.randrange(0 
10)))<line_sep>backup.custom_submission_time=submission_time<line_sep>response=self.client.get('/api/v3/backups/{}/'.format(backup.hashid))<line_sep>self.assert_200(response)<line_sep>course=backup.assignment.course<line_sep>user_json={"email":backup.submitter.email "id":encode_id(backup.submitter_id) }<line_sep>response_json=response.json['data']<line_sep>time_threshold=dt.timedelta(seconds=5)<line_sep>self.assertAlmostEqual(dateutil.parser.parse(response_json['created']) backup.created delta=time_threshold)<line_sep>self.assertAlmostEqual(dateutil.parser.parse(response_json['submission_time']) submission_time delta=time_threshold)<line_sep>self.assertAlmostEqual(dateutil.parser.parse(response_json['messages'][0]['created']) backup.created delta=time_threshold)<line_sep># Unset timestamps already tested. <del_stmt>response_json['created']<del_stmt>response_json['submission_time']<del_stmt>response_json['messages'][0]['created']<assert_stmt>response_json<eq>{"submitter":user_json "submit":backup.submit "group":[user_json] "is_late":backup.is_late "external_files":[] "assignment":{"name":backup.assignment.name "course":{"id":course.id "active":course.active "display_name":course.display_name "offering":course.offering "timezone":course.timezone.zone } } "id":backup.hashid "messages":[{"kind":"file_contents" "contents":backup.files() } ] }<block_end><def_stmt>test_bad_hashid self<block_start>self.setup_course()<line_sep>response=self.client.get('/api/v3/backups/xyzxyz/')<line_sep>self.assert_401(response)<assert_stmt>response.json['data']<eq>{}<assert_stmt>response.json['code']<eq>401<line_sep>self.login(self.user1.email)<line_sep>response=self.client.get('/api/v3/backups/xyzxyz/')<line_sep>self.assert_404(response)<assert_stmt>response.json['data']<eq>{}<assert_stmt>response.json['code']<eq>404<block_end><def_stmt>test_version_api self<block_start>okversion=Version(name="ok" current_version="v1.5.0" download_link="http://localhost/ok")<line_sep>db.session.add(okversion)<line_sep>ok2version=Version(name="ok2" current_version="v2.5.0" download_link="http://localhost/ok2")<line_sep>db.session.add(ok2version)<line_sep>response=self.client.get('/api/v3/version/')<line_sep>self.assert_200(response)<assert_stmt>response.json['data']<eq>{'results':[{"current_version":"v1.5.0" "download_link":"http://localhost/ok" "name":"ok"} {"current_version":"v2.5.0" "download_link":"http://localhost/ok2" "name":"ok2"}]}<assert_stmt>response.json['message']<eq>'success'<line_sep>response=self.client.get('/api/v3/version/ok')<line_sep>self.assert_200(response)<assert_stmt>response.json['data']<eq>{'results':[{"current_version":"v1.5.0" "download_link":"http://localhost/ok" "name":"ok"}]}<line_sep>self.setup_course()<line_sep>self.login(self.user1.email)<line_sep>response=self.client.post('/api/v3/version/ok' data={'current_version':'v1.5.1' 'download_link':'http://localhost/versions/v1.5.1/ok' })<line_sep>self.assert_403(response)<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.post('/api/v3/version/ok' data={'current_version':'v1.5.1' 'download_link':'http://localhost/versions/v1.5.1/ok' })<line_sep># Staff members do not have permission to edit versions self.assert_403(response)<line_sep>self.login(self.admin.email)<line_sep>response=self.client.post('/api/v3/version/ok' data={'current_version':'v1.5.1' 'download_link':'http://example.com/doesnotexist' })<line_sep>self.assert_400(response)<line_sep>response=self.client.post('/api/v3/version/ok' data={'current_version':'v1.5.1' 
'download_link':'http://example.com' })<line_sep>self.assert_200(response)<line_sep>response=self.client.get('/api/v3/version/')<assert_stmt>response.json['data']<eq>{'results':[{"current_version":"v1.5.1" "download_link":"http://example.com" "name":"ok"} {"current_version":"v2.5.0" "download_link":"http://localhost/ok2" "name":"ok2"}]}<line_sep>response=self.client.get('/api/v3/version/ok')<line_sep>self.assert_200(response)<assert_stmt>response.json['data']<eq>{'results':[{"current_version":"v1.5.1" "download_link":"http://example.com" "name":"ok"}]}<block_end><def_stmt>test_score_anon self<block_start>response=self.client.post('/api/v3/score/')<line_sep>self.assert_401(response)<assert_stmt>response.json['code']<eq>401<block_end><def_stmt>test_score_student self<block_start>self._test_backup(<true>)<line_sep>email=self.user1.email<line_sep>self.login(email)<line_sep>user=User.lookup(email)<line_sep>response=self.client.post('/api/v3/score/')<line_sep>self.assert_400(response)<assert_stmt>response.json['code']<eq>400<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>user.id).first()<line_sep>data={'bid':encode_id(backup.id) 'kind':'Total' 'score':128.2 'message':'wow'}<line_sep>response=self.client.post('/api/v3/score/' data=data)<line_sep>self.assert_401(response)<assert_stmt>response.json['code']<eq>401<block_end><def_stmt>test_export_user self<block_start>self._test_backup(<true>)<line_sep>student=User.lookup(self.user1.email)<line_sep>self.login(self.staff1.email)<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>student.id).first()<line_sep>endpoint='/api/v3/assignment/{0}/export/{1}'.format(self.assignment.name student.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>backups=response.json['data']['backups']<line_sep>self.assertEqual(len(backups) 1)<line_sep>self.assertTrue('submission_time'<in>backups[0])<line_sep>self.assertEqual(backups[0]['submission_time'] backups[0]['created'])<line_sep>self.assertEqual(response.json['data']['count'] 1)<line_sep>self.assertEqual(response.json['data']['limit'] 150)<line_sep>self.assertEqual(response.json['data']['offset'] 0)<line_sep>self.assertEqual(response.json['data']['has_more'] <false>)<line_sep>response=self.client.get(endpoint+"?offset=20&limit=2")<line_sep>self.assert_200(response)<line_sep>backups=response.json['data']['backups']<line_sep>self.assertEqual(len(backups) 0)<line_sep>self.assertEqual(response.json['data']['count'] 1)<line_sep>self.assertEqual(response.json['data']['limit'] 2)<line_sep>self.assertEqual(response.json['data']['offset'] 20)<line_sep>self.assertEqual(response.json['data']['has_more'] <false>)<block_end><def_stmt>test_export_final self<block_start>self._test_backup(<true>)<line_sep>student=User.lookup(self.user1.email)<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>student.id).first()<line_sep>endpoint='/api/v3/assignment/{0}/submissions/'.format(self.assignment.name)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>backups=response.json['data']['backups']<line_sep>self.assertEqual(len(backups) 1)<line_sep>self.assertEqual(backups[0]['is_late'] <false>)<line_sep>self.assertEqual(len(backups[0]['group']) 1)<line_sep>self.assertEqual(backups[0]['group'][0]['email'] self.user1.email)<line_sep>self.assertEqual(len(backups[0]['messages']) 
1)<line_sep>self.assertEqual(response.json['data']['count'] 1)<line_sep>self.assertEqual(response.json['data']['has_more'] <false>)<line_sep>self.assertEqual(response.json['data']['offset'] 0)<line_sep>response=self.client.get(endpoint+'?offset=1')<line_sep>self.assert_200(response)<line_sep>backups=response.json['data']['backups']<line_sep>self.assertEqual(len(backups) 0)<line_sep>self.assertEqual(response.json['data']['count'] 1)<line_sep>self.assertEqual(response.json['data']['has_more'] <false>)<line_sep>self.assertEqual(response.json['data']['offset'] 1)<block_end><def_stmt>test_assignment_api self<block_start>self._test_backup(<true>)<line_sep>student=User.lookup(self.user1.email)<line_sep>endpoint='/api/v3/assignment/{0}'.format(self.assignment.name)<line_sep># View a public assignment response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep># Change assignment to be hidden self.assignment.visible=<false><line_sep>db.session.commit()<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.assignment.visible=<true><line_sep>db.session.commit()<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.assertEqual(response.json['data']['name'] self.assignment.name)<line_sep># Hidden assignment, but should be visible to staff self.assignment.visible=<false><line_sep>db.session.commit()<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.login(self.user1.email)<line_sep>self.assignment.visible=<false><line_sep>db.session.commit()<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<block_end><def_stmt>test_group_api self<block_start>self._test_backup(<true>)<line_sep>self.logout()<line_sep>student=User.lookup(self.user1.email)<line_sep>Group.invite(self.user1 self.user2 self.assignment)<line_sep>group=Group.lookup(self.user1 self.assignment)<line_sep>group.accept(self.user2)<line_sep>base_api='/api/v3/assignment/{0}/group/{1}'<line_sep>endpoint=base_api.format(self.assignment.name self.user1.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_401(response)<line_sep>self.login(self.user1.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>members=response.json['data']['members']<line_sep>self.assertEqual(len(members) 2)<assert_stmt>'email'<in>members[0]['user']<line_sep># Make sure user2 can access user1's endpoint self.login(self.user2.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>members=response.json['data']['members']<line_sep>self.assertEqual(len(members) 2)<assert_stmt>'email'<in>members[1]['user']<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>members=response.json['data']['members']<line_sep>self.assertEqual(len(members) 2)<assert_stmt>'email'<in>members[0]['user']<line_sep># Login as some random user self.login(self.user3.email)<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep># Check for existence of email response=self.client.get(base_api.format(self.assignment.name '<EMAIL>'))<line_sep>self.assert_403(response)<line_sep>self.login(self.admin.email)<line_sep>response=self.client.get(base_api.format(self.assignment.name '<EMAIL>'))<line_sep>self.assert_404(response)<block_end><def_stmt>test_score_staff 
self<block_start>self._test_backup(<true>)<line_sep>user=User.lookup(self.user1.email)<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.post('/api/v3/score/')<line_sep>self.assert_400(response)<assert_stmt>response.json['code']<eq>400<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>user.id).first()<line_sep>data={'bid':encode_id(backup.id) 'kind':'Total' 'score':128.2 'message':'wow'}<line_sep>response=self.client.post('/api/v3/score/' data=data)<line_sep>self.assert_200(response)<assert_stmt>response.json['code']<eq>200<line_sep>self.logout()<line_sep>self.login(self.admin.email)<line_sep>data={'bid':encode_id(backup.id) 'kind':'Total' 'score':128.2 'message':'wow'}<line_sep>response=self.client.post('/api/v3/score/' data=data)<line_sep>self.assert_200(response)<assert_stmt>response.json['code']<eq>200<block_end><def_stmt>test_comment_staff self<block_start>self._test_backup(<true>)<line_sep>user=User.lookup(self.user1.email)<line_sep>self.login(self.staff1.email)<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>user.id).first()<line_sep>comment_url="/api/v3/backups/{}/comment/".format(encode_id(backup.id))<line_sep>response=self.client.post(comment_url)<line_sep>self.assert_400(response)# Not all fields present <assert_stmt>response.json['code']<eq>400<line_sep>data={'line':2 'filename':'fizzbuzz.py' 'message':'wow'}<line_sep>response=self.client.post(comment_url data=data)<line_sep>self.assert_200(response)<assert_stmt>response.json['code']<eq>200<line_sep>self.logout()<line_sep>self.login(self.admin.email)<line_sep>data={'line':2 'filename':'fizzbuzz.py' 'message':'wow'}<line_sep>response=self.client.post(comment_url data=data)<line_sep>self.assert_200(response)<assert_stmt>response.json['code']<eq>200<line_sep># Check that another student is not able to comment self.login(self.user2.email)<line_sep>data={'line':2 'filename':'fizzbuzz.py' 'message':'wow'}<line_sep>response=self.client.post(comment_url data=data)<line_sep>self.assert_403(response)<assert_stmt>response.json['code']<eq>403<def_stmt>test_get_comments self<block_start>self._test_backup(<true>)<line_sep>user=User.lookup(self.user1.email)<line_sep>staff=User.lookup(self.staff1.email)<line_sep>backup=Backup.query.filter(Backup.submitter_id<eq>user.id).first()<line_sep>comment_url="/api/v3/backups/{}/comment/".format(encode_id(backup.id))<line_sep>comment1=Comment(backupid=backup author_id=staff.id filename='fizzbuzz.py' line=2 message='hello world')<line_sep>comment2=Comment(backupid=backup author_id=staff.id filename='fizzbuzz.py' line=5 message='wow')<line_sep>db.session.add(comment1)<line_sep>db.session.add(comment2)<line_sep>#check to see if student can view comments on own backup's comments self.login(self.user1.email)<line_sep>response=self.client.get(comment_url)<line_sep>self.assert_200(response)<line_sep>self.assertEqual(len(response['data']['comments']) 2)<line_sep>self.assertEqual(response['data']['comments'][0].message 'hello world')<line_sep>self.assertEqual(response['data']['comments'][1].message 'wow')<line_sep>self.logout()<line_sep>#check to see if staff can access comments self.login(self.staff1.email)<line_sep>response=self.client.get(comment_url)<line_sep>self.assert_200(response)<line_sep>self.logout()<line_sep>#check to see another student can't see others' backup's comments self.login(self.user2.email)<line_sep>response=self.client.get(comment_url)<line_sep>self.assert_403(response)<line_sep>self.logout()<block_end><block_end><def_stmt>test_create_assignment 
self<block_start>self.setup_course()<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.post("/api/v3/assignment/"+self.course.offering+"/newassignment" json={'display_name':'API Test Assignment' 'due_date':'2016-11-07T06:59:59' 'lock_date':'2016-11-08T06:59:59' })<line_sep>self.assert200(response)<line_sep>assignment=Assignment.query.filter_by(name=self.course.offering+'/newassignment').one()<line_sep>self.assertEqual(assignment.display_name 'API Test Assignment')<line_sep>self.assertEqual(assignment.due_date.day 7)<line_sep>response=self.client.post("/api/v3/assignment/"+self.course.offering+"/newassignment" json={'display_name':'API Test Assignment' 'due_date':'2016-11-10T06:59:59' 'lock_date':'2016-11-11T06:59:59' })<line_sep>self.assert200(response)<line_sep>assignment=Assignment.query.filter_by(name=self.course.offering+'/newassignment').one()<line_sep>self.assertEqual(assignment.due_date.day 10)<line_sep>self.login(self.user1.email)<line_sep>response=self.client.post("/api/v3/assignment/"+self.course.offering+"/newassignment2" json={'display_name':'API Test Assignment' 'due_date':'2016-11-07T06:59:59' 'lock_date':'2016-11-08T06:59:59' })<line_sep>self.assert403(response)<line_sep>assignment=Assignment.query.filter_by(name=self.course.offering+'/newassignment2').one_or_none()<line_sep>self.assertEqual(assignment <none>)<block_end><def_stmt>test_user_api self<block_start>self._test_backup(<true>)<line_sep>self.logout()<line_sep>student=User.lookup(self.user1.email)<def_stmt>test_both_endpoints user<block_start>base_api='/api/v3/user/{0}'<line_sep>user1_endpoint=base_api.format(user.email)<line_sep>current_user_endpoint=base_api.format('')<line_sep>current=self.client.get(current_user_endpoint)<line_sep>specific=self.client.get(user1_endpoint)<line_sep><return>current specific<block_end>current,specific=test_both_endpoints(student)<line_sep>self.assert_401(current)<line_sep>self.assert_401(specific)<line_sep># Should be able to view self self.login(self.user1.email)<line_sep>current,specific=test_both_endpoints(student)<line_sep>self.assert_200(current)<line_sep>self.assert_200(specific)<line_sep>members=current.json['data']['participations']<line_sep>self.assertEqual(len(members) 1)<line_sep>self.assertEqual(current.json['data'] specific.json['data'])<line_sep># Staff don't get permission self.login(self.staff1.email)<line_sep>current,specific=test_both_endpoints(student)<line_sep>self.assert_200(current)<line_sep>self.assert_403(specific)<line_sep># Login as some random user self.login(self.user3.email)<line_sep>current,specific=test_both_endpoints(student)<line_sep>self.assert_200(current)<line_sep>self.assert_403(specific)<line_sep># Admins should have acess self.login(self.admin.email)<line_sep>current,specific=test_both_endpoints(student)<line_sep>self.assert_200(current)<line_sep>self.assert_200(specific)<line_sep>self.assertEqual(specific.json['data']['email'] student.email)<line_sep># Lab Assistants don't have access self.login(self.lab_assistant1.email)<line_sep>current,specific=test_both_endpoints(student)<line_sep>self.assert_200(current)<line_sep>self.assert_403(specific)<block_end><def_stmt>test_course_enrollment 
self<block_start>self._test_backup(<true>)<line_sep>student=User.lookup(self.user1.email)<line_sep>courses=student.enrollments()<line_sep>course=courses[0]<line_sep>student_endpoint='/api/v3/course/cal/cs61a/sp16/enrollment'<line_sep>self.login(self.staff1.email)<line_sep>response=self.client.get(student_endpoint)<line_sep>self.assert_200(response)<line_sep>student_emails=[s['email']<for>s response.json['data']['student']]<line_sep>self.assertEqual(self.user1.email<in>student_emails <true>)<line_sep>self.login(self.user1.email)<line_sep>response=self.client.get(student_endpoint)<line_sep>self.assert_403(response)<block_end><def_stmt>test_course_assignments self<block_start>self._test_backup(<true>)<line_sep>student=User.lookup(self.user1.email)<line_sep>courses=student.enrollments()<line_sep>course=courses[0]<line_sep>student_endpoint='/api/v3/course/cal/cs61a/sp16/assignments'<line_sep>anon_response=self.client.get(student_endpoint)<line_sep>self.assert_200(anon_response)<line_sep>active_assignments=len([a<for>a self.course.assignments<if>a.active])<line_sep>self.assertEqual(active_assignments len(anon_response.json['data']['assignments']))<line_sep>self.login(self.staff1.email)<line_sep>auth_response=self.client.get(student_endpoint)<line_sep>self.assert_200(auth_response)<line_sep>self.assertEqual(anon_response.json['data'] auth_response.json['data'])<block_end><def_stmt>test_client self<block_start>self.setup_course()<line_sep>self.login(self.staff1.email)<line_sep>db.session.add(Client(name='Test Client' description='' user=self.staff1 client_id='test_client' client_secret='secret' redirect_uris=[] default_scopes=['all'] is_confidential=<false>))<line_sep>response=self.client.get('/api/v3/client/test_client')<line_sep>self.assertEqual(response.json['data'] {'allowed_redirects':[] 'client_id':'test_client' 'client_name':'<NAME>' 'description':'' 'is_confidential':<false> 'owner_email':'<EMAIL>'})<line_sep>response=self.client.post('/api/v3/client/test_client/redirect_urls' json={'url':'test'})<line_sep>self.assert_200(response)<line_sep>response=self.client.get('/api/v3/client/test_client')<line_sep>self.assertEqual(response.json['data']['allowed_redirects'] ['test'])<line_sep>self.login(self.admin.email)<line_sep>response=self.client.post('/api/v3/client/test_client/redirect_urls' json={'url':'test2'})<line_sep>self.assert_200(response)<line_sep>response=self.client.get('/api/v3/client/test_client')<line_sep>self.assertEqual(response.json['data']['allowed_redirects'] ['test' 'test2'])<line_sep>self.login(self.staff2.email)<line_sep>response=self.client.post('/api/v3/client/test_client/redirect_urls' json={'url':'test3'})<line_sep>self.assert_403(response)<line_sep>response=self.client.get('/api/v3/client/test_client')<line_sep>self.assert_403(response)<block_end><def_stmt>test_course_grades 
self<block_start>self._test_backup(<true>)<line_sep>self.login(self.staff1.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/grades'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.login(self.staff2.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/grades'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.login(self.user1.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/grades'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.login(self.user6.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/grades'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.login(self.admin.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/grades'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<block_end><def_stmt>test_course_roster self<block_start>self._test_backup(<true>)<line_sep>self.login(self.staff1.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/roster'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.login(self.staff2.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/roster'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<line_sep>self.login(self.user1.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/roster'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.login(self.user6.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/roster'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_403(response)<line_sep>self.login(self.admin.email)<line_sep>endpoint='/api/v3/course/cal/cs61a/sp16/roster'<line_sep>response=self.client.get(endpoint)<line_sep>self.assert_200(response)<block_end><block_end>
# NOTE: pyrollbar requires both `Flask` and `blinker` packages to be installed first <import_from_stmt>flask Flask<import_from_stmt>flask got_request_exception<import_stmt>rollbar<import_stmt>rollbar.contrib.flask<line_sep>app=Flask(__name__)<line_sep>@app.before_first_request<def_stmt>init_rollbar <block_start>rollbar.init('ACCESS_TOKEN' environment='development')<line_sep># send exceptions from `app` to rollbar, using flask's signal system. got_request_exception.connect(rollbar.contrib.flask.report_exception app)<block_end>@app.route('/')<def_stmt>root <block_start>foo()<line_sep><return>'<html><body>Hello World</body></html>'<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run()<block_end>
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<import_stmt>sys<import_stmt>yaml<class_stmt>TestOutputValidator<block_start>""" Functions that validate the output of the test """<def_stmt>__init__ self config test_name analyzer_log<block_start>self._config=config<line_sep>self._models=config['profile_models']<line_sep>self._analyzer_log=analyzer_log<line_sep>check_function=self.__getattribute__(f'check_{test_name}')<if_stmt>check_function()<block_start>sys.exit(0)<block_end><else_stmt><block_start>sys.exit(1)<block_end><block_end><def_stmt>check_steps_stability self<block_start>""" Makes sure that there were the same number of configurations tried in each search iteration. """<with_stmt>open(self._analyzer_log 'r+')<as>f<block_start>log_contents=f.read()<block_end>logs_for_iteration=log_contents.split('Profiling server only metrics...')[1:]<line_sep>logs_for_model=logs_for_iteration[0].split("config search for model:")[1:]<line_sep>expected_step_counts=[]<for_stmt>model_log logs_for_model<block_start>expected_step_counts.append(model_log.count('[Search Step]'))<block_end><for_stmt>i range(1 4)<block_start>logs_for_model=logs_for_iteration[i].split("config search for model:")[1:]<for_stmt>j,model_log enumerate(logs_for_model)<block_start>actual_step_count=model_log.count('[Search Step]')<if_stmt>abs(actual_step_count-expected_step_counts[j])<g>1<block_start>print("\n***\n*** Expected number of search steps for "<concat>f"{self._models[j]} : {expected_step_counts[j]}."<concat>f"Took {actual_step_count}. \n***")<line_sep><return><false><block_end><block_end><block_end><return><true><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('-f' '--config-file' type=str required=<true> help='The path to the config yaml file.')<line_sep>parser.add_argument('-l' '--analyzer-log-file' type=str required=<true> help='The full path to the analyzer log.')<line_sep>parser.add_argument('-t' '--test-name' type=str required=<true> help='The name of the test to be run.')<line_sep>args=parser.parse_args()<with_stmt>open(args.config_file 'r')<as>f<block_start>config=yaml.safe_load(f)<block_end>TestOutputValidator(config args.test_name args.analyzer_log_file)<block_end>
<import_from_stmt>homeassistant.util dt<def_stmt>orbit_time_to_local_time timestamp:str<block_start><if_stmt>timestamp<is><not><none><block_start><return>dt.as_local(dt.parse_datetime(timestamp))<block_end><return><none><block_end><def_stmt>anonymize device<block_start>device["address"]="REDACTED"<line_sep>device["full_location"]="REDACTED"<line_sep>device["location"]="REDACTED"<line_sep><return>device<block_end>
""" Initialization script for cx_Freeze which behaves similarly to the one for console based applications but must handle the case where Python has already been initialized and another DLL of this kind has been loaded. As such it does not block the path unless sys.frozen is not already set. """<import_stmt>sys<if_stmt><not>hasattr(sys "frozen")<block_start>sys.frozen=<true><line_sep>sys.path=sys.path[:4]<block_end><def_stmt>run <block_start><pass><block_end>
<def_stmt>permutations_with_dups string<block_start>hash_table={}<line_sep>permutations=[]<for_stmt>character string<block_start><if_stmt>character<in>hash_table<block_start>hash_table[character]<augadd>1<block_end><else_stmt><block_start>hash_table[character]=1<block_end><block_end>helper('' hash_table permutations)<line_sep><return>permutations<block_end><def_stmt>helper string hash_table permutations<block_start><if_stmt>sum(hash_table.values())<le>0<block_start>permutations.append(string)<block_end><else_stmt><block_start><for_stmt>character hash_table<block_start>local_hash_table=hash_table.copy()<if_stmt>local_hash_table[character]<le>1<block_start>local_hash_table.pop(character <none>)<block_end><else_stmt><block_start>local_hash_table[character]<augsub>1<block_end>helper(string+character local_hash_table permutations)<block_end><block_end><block_end>
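# Hedged usage sketch (an assumption, not part of the original snippet): for the input 'aab' the distinct orderings produced by permutations_with_dups above are expected to be ['aab', 'aba', 'baa']. <if_stmt>__name__<eq>'__main__'<block_start>print(sorted(permutations_with_dups('aab')))<block_end>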
<import_stmt>os<import_stmt>click<import_stmt>csv<import_stmt>random<import_stmt>sys<import_from_stmt>osp.common config<import_from_stmt>osp.common.utils query_bar<import_from_stmt>osp.corpus.corpus Corpus<import_from_stmt>osp.corpus.models Document<import_from_stmt>osp.corpus.models Document_Format<import_from_stmt>osp.corpus.models Document_Text<import_from_stmt>osp.corpus.jobs ext_format<import_from_stmt>osp.corpus.jobs ext_text<import_from_stmt>peewee create_model_tables<import_from_stmt>prettytable PrettyTable<line_sep>@click.group()<def_stmt>cli <block_start><pass><block_end>@cli.command()<def_stmt>init_db <block_start>""" Create the database tables. """<line_sep>create_model_tables([Document Document_Format Document_Text] fail_silently=<true>)<block_end>@cli.command()<def_stmt>insert_documents <block_start>""" Insert documents in the database. """<line_sep>Document.insert_documents()<block_end>@cli.command()<def_stmt>queue_format <block_start>""" Queue format extraction tasks in the worker. """<for_stmt>doc query_bar(Document.select())<block_start>config.rq.enqueue(ext_format doc.id)<block_end><block_end>@cli.command()<def_stmt>queue_text <block_start>""" Queue text extraction tasks in the worker. """<for_stmt>doc query_bar(Document.select())<block_start>config.rq.enqueue(ext_text doc.id)<block_end><block_end>@cli.command()<def_stmt>format_counts <block_start>""" Print a table of file format -> count. """<line_sep>t=PrettyTable(['File Type' 'Doc Count'])<line_sep>t.align='l'<for_stmt>c Document_Format.format_counts()<block_start>t.add_row(c)<block_end>click.echo(t)<block_end>@cli.command()<def_stmt>file_count <block_start>""" Print the total number of files. """<line_sep>corpus=Corpus.from_env()<line_sep>click.echo(corpus.file_count)<block_end>
''' convert pysph .npz output to vtk file format '''<import_from_future_stmt> print_function<import_stmt>os<import_stmt>re<import_from_stmt>enthought.tvtk.api tvtk write_data<import_from_stmt>numpy array c_ ravel load zeros_like<def_stmt>write_vtk data filename scalars=<none> vectors={'V':('u' 'v' 'w')} tensors={} coords=('x' 'y' 'z') dims=<none> **kwargs<block_start>''' write data in to vtk file Parameters ---------- data : dict mapping of variable name to their numpy array filename : str the file to write to (can be any recognized vtk extension) if extension is missing .vts extension is appended scalars : list list of arrays to write as scalars (defaults to data.keys()) vectors : dict mapping of vector name to vector component names to take from data tensors : dict mapping of tensor name to tensor component names to take from data coords : list the name of coordinate data arrays (default=('x','y','z')) dims : 3 tuple the size along the dimensions for (None means x.shape) **kwargs : extra arguments for the file writer example file_type=binary/ascii '''<line_sep>x=data[coords[0]]<line_sep>y=data.get(coords[1] zeros_like(x))<line_sep>z=data.get(coords[2] zeros_like(x))<if_stmt>dims<is><none><block_start>dims=array([1 1 1])<line_sep>dims[:x.ndim]=x.shape<block_end><else_stmt><block_start>dims=array(dims)<block_end>sg=tvtk.StructuredGrid(points=c_[x.flat y.flat z.flat] dimensions=array(dims))<line_sep>pd=tvtk.PointData()<if_stmt>scalars<is><none><block_start>scalars=[i<for>i data.keys()<if>i<not><in>coords]<block_end><for_stmt>v scalars<block_start>pd.scalars=ravel(data[v])<line_sep>pd.scalars.name=v<line_sep>sg.point_data.add_array(pd.scalars)<block_end><for_stmt>vec,vec_vars vectors.items()<block_start>u,v,w=[data[i]<for>i vec_vars]<line_sep>pd.vectors=c_[ravel(u) ravel(v) ravel(w)]<line_sep>pd.vectors.name=vec<line_sep>sg.point_data.add_array(pd.vectors)<block_end><for_stmt>ten,ten_vars tensors.items()<block_start>vars=[data[i]<for>i ten_vars]<line_sep>tensors=c_[[ravel(i)<for>i vars]].T<line_sep>pd.tensors=tensors<line_sep>pd.tensors.name=ten<line_sep>sg.point_data.add_array(pd.tensors)<block_end>write_data(sg filename **kwargs)<block_end><def_stmt>detect_vectors_tensors keys<block_start>''' detect the vectors and tensors from given array names Vectors are identified as the arrays with common prefix followed by 0,1 and 2 in their names Tensors are identified as the arrays with common prefix followed by two character codes representing ij indices (00,01,02,11,12,22) for a symmetric tensor (00,01,02,10,11,12,20,21,22) for a tensor Arrays not belonging to vectors or tensors are returned as scalars Returns scalars,vectors,tensors in a format suitable to be used as arguments for :py:func:`write_vtk` '''<line_sep>d={}<for_stmt>k keys<block_start>d[len(k)]=d.get(len(k) [])<line_sep>d[len(k)].append(k)<block_end>scalars=[]<line_sep>vectors={}<line_sep>tensors={}<for_stmt>n,l d.items()<block_start><if_stmt>n<l>2<block_start><continue><block_end>l.sort()<line_sep>idx=-1<while_stmt>idx<l>len(l)-1<block_start>idx<augadd>1<line_sep>k=l[idx]<line_sep># check if last char is 0 <if_stmt>k[-1]<eq>'0'# check for tensor <block_start><if_stmt>k[-2]<eq>'0'# check for 9 tensor <block_start>ten=[]<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start>ten.append(k[:-2]+str(j)+str(i))<block_end><block_end>ten.sort()<if_stmt>l[idx:idx+9]<eq>ten<block_start>tensors[k[:-2]]=ten<line_sep>idx<augadd>8<line_sep><continue><block_end># check for symm 6 tensor ten2=[]<for_stmt>i range(3)<block_start><for_stmt>j 
range(i+1)<block_start>ten2.append(k[:-2]+str(j)+str(i))<block_end><block_end>ten2.sort()<if_stmt>l[idx:idx+6]<eq>ten2<block_start>ten=[]<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start>ten.append(k[:-2]+str(min(i j))+str(max(i j)))<block_end><block_end>tensors[k[:-2]]=ten<line_sep>idx<augadd>5<line_sep><continue><block_end><block_end># check for vector vec=[]<for_stmt>i range(3)<block_start>vec.append(k[:-1]+str(i))<block_end><if_stmt>l[idx:idx+3]<eq>vec<block_start>vectors[k[:-1]]=vec<line_sep>idx<augadd>2<line_sep><continue><block_end><block_end>scalars.append(k)<block_end><block_end><return>scalars vectors tensors<block_end><def_stmt>get_output_details path<block_start>solvers={}<if_stmt><not>os.path.isdir(path)<block_start>path=os.path.dirname(path)<block_end>files=os.listdir(path)<line_sep>files.sort()<line_sep>pat=re.compile(r'(?P<solver>.+)_(?P<rank>\d+)_(?P<entity>.+)_(?P<time>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?).npz')<line_sep>matches=[(f pat.match(f))<for>f files]<line_sep>files=[]<for_stmt>filename,match matches<block_start><if_stmt>match<is><none><block_start><continue><block_end>files.append(filename)<line_sep>groups=match.groupdict()<line_sep>solvername=groups['solver']<line_sep>solver=solvers.get(solvername)<if_stmt>solver<is><none><block_start>solver=[set([]) set([]) set([])]<line_sep>solvers[solvername]=solver<block_end>solver[0].add(groups['rank'])<line_sep>solver[1].add(groups['entity'])<line_sep>solver[2].add(groups['time'])<block_end># {solver:(entities,procs,times)} <return>solvers<block_end><def_stmt>pysph_to_vtk path merge_procs=<false> skip_existing=<true> binary=<true><block_start>''' convert pysph output .npz files into vtk format Parameters ---------- path : str directory where .npz files are located merge_procs : bool whether to merge the data from different procs into a single file (not yet implemented) skip_existing : bool skip files where corresponding vtk already exist this is useful if you've converted vtk files while a solver is running only want to convert the newly added files binary : bool whether to use binary format in vtk file The output vtk files are stored in a directory `solver_name` _vtk within the `path` directory '''<if_stmt>binary<block_start>data_mode='binary'<block_end><else_stmt><block_start>data_mode='ascii'<block_end><if_stmt>merge_procs<is><true># FIXME: implement <block_start><raise>NotImplementedError('merge_procs=True not implemented yet')<block_end>solvers=get_output_details(path)<for_stmt>solver,(procs entities times) solvers.items()<block_start>print('converting solver:' solver)<line_sep>dir=os.path.join(path solver+'_vtk')<if_stmt><not>os.path.exists(dir)<block_start>os.mkdir(dir)<block_end>procs=sorted(procs)<line_sep>entities=sorted(entities)<line_sep>times=sorted(times key=float)<line_sep>times_file=open(os.path.join(dir 'times') 'w')<for_stmt>entity entities<block_start>print(' entity:' entity)<for_stmt>proc procs<block_start>print(' proc:' proc)<line_sep>print(' timesteps:' len(times))<line_sep>f='%s_%s_%s_'%(solver proc entity)<line_sep>of=os.path.join(dir f)<for_stmt>i,time enumerate(times)<block_start>print('\r' i )<if_stmt>skip_existing<and>os.path.exists(f+str(i))<block_start><continue><block_end>d=load(os.path.join(path f+time+'.npz'))<line_sep>arrs={}<for_stmt>nam,val d.items()<block_start><if_stmt>val.ndim<g>0<block_start>arrs[nam]=val<block_end><block_end>d.close()<line_sep>scalars,vectors,tensors=detect_vectors_tensors(arrs)<line_sep>vectors['V']=['u' 'v' 
'w']<line_sep>z=zeros_like(arrs['x'])<if_stmt>'v'<not><in>arrs<block_start>arrs['v']=z<block_end><if_stmt>'w'<not><in>arrs<block_start>arrs['w']=z<block_end>write_vtk(arrs of+str(i) scalars=scalars vectors=vectors tensors=tensors data_mode=data_mode)<line_sep>times_file.write('%d\t%s\n'%(i time))<block_end><block_end><block_end>times_file.close()<block_end><block_end><def_stmt>extract_text path particle_idx props=['x' 'y' 'u' 'v' 'p' 'rho' 'sigma00' 'sigma01' 'sigma11'] ent=<none> solvers=<none><block_start><if_stmt>solvers<block_start><raise>NotImplementedError<block_end><else_stmt><block_start>solvers=get_output_details(path)<block_end><for_stmt>solver,(procs entities times) solvers.items()<block_start>print('converting solver:' solver)<line_sep>dir=os.path.join(path solver+'_vtk')<if_stmt><not>os.path.exists(dir)<block_start>os.mkdir(dir)<block_end>procs=sorted(procs)<line_sep>entities=sorted(entities)<line_sep>times=sorted(times key=float)<line_sep>times_file=open(os.path.join(dir 'times') 'w')<line_sep>e=ent<if_stmt>ent<is><none><block_start>e=entities<block_end><for_stmt>entity entities<block_start><if_stmt>entity<not><in>e<block_start><continue><block_end>print(' entity:' entity)<for_stmt>proc procs<block_start>print(' proc:' proc)<line_sep>print(' timesteps:' len(times))<line_sep>f='%s_%s_%s_'%(solver proc entity)<line_sep>of=os.path.join(dir f)<line_sep>files=[open(os.path.join(path f+'%d.dat'%particle_id) 'w')<for>particle_id particle_idx]<line_sep>print(files)<for_stmt>file files<block_start>file.write('i\tt\t'+'\t'.join(props))<block_end><for_stmt>i,time enumerate(times)<block_start>print('\r' i )<line_sep>d=load(os.path.join(path f+time+'.npz'))<line_sep>s='\n%d\t%s'%(i time)<for_stmt>j,file enumerate(files)<block_start>file.write(s)<for_stmt>prop props<block_start>file.write('\t')<line_sep>file.write(str(d[prop][particle_idx[j]]))<block_end><block_end>d.close()<block_end><for_stmt>file files<block_start>file.close()<block_end><block_end><block_end><block_end><block_end><def_stmt>test <block_start>l=['x'+str(i)<for>i range(3)]<line_sep>l.append('a0')<line_sep>l.append('a1')<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start><if_stmt>i<eq>j<block_start>l.append('XX%d'%i)<block_end><if_stmt>i<le>j<block_start>l.append('S%d%d'%(i j))<block_end>l.append('T%d%d'%(i j))<block_end><block_end>scalars,vectors,tensors=detect_vectors_tensors(l)<assert_stmt>set(scalars)<eq>set(['a0' 'a1'])<assert_stmt>set(vectors)<eq>set(['x' 'XX'])<assert_stmt>set(tensors)<eq>set(['S' 'T'])<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<line_sep>pysph_to_vtk(path=sys.argv[1])<block_end>
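# Hedged usage sketch (an assumption, not part of the original module): one way the write_vtk helper documented above could be called with plain numpy arrays; it relies on the same enthought tvtk dependency and writes demo.vts into the working directory. <def_stmt>_demo_write_vtk <block_start><import_stmt>numpy<line_sep>x=numpy.linspace(0.0 1.0 10)<line_sep>data={'x':x 'u':numpy.zeros_like(x) 'v':numpy.zeros_like(x) 'w':numpy.zeros_like(x) 'p':x+1.0}<line_sep>write_vtk(data 'demo.vts' scalars=['p'] vectors={'V':('u' 'v' 'w')})<block_end>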
<import_stmt>tkinter<as>tk<line_sep>win=tk.Tk()<line_sep>current_index=tk.StringVar()<line_sep>text=tk.Text(win bg="white" fg="black")<line_sep>lab=tk.Label(win textvar=current_index)<def_stmt>update_index event=<none><block_start>cursor_position=text.index(tk.INSERT)<line_sep>cursor_position_pieces=str(cursor_position).split('.')<line_sep>cursor_line=cursor_position_pieces[0]<line_sep>cursor_char=cursor_position_pieces[1]<line_sep>current_index.set('line: '+cursor_line+' char: '+cursor_char+' index: '+str(cursor_position))<block_end>text.pack(side=tk.TOP fill=tk.BOTH expand=1)<line_sep>lab.pack(side=tk.BOTTOM fill=tk.X expand=1)<line_sep>text.bind('<KeyRelease>' update_index)<line_sep>win.mainloop()<line_sep>
# Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None <class_stmt>Solution(object)# def __init__(self): # self.curr_head = None # # def isPalindrome(self, head): # """ # :type head: ListNode # :rtype: bool # """ # self.curr_head = head # return self.check(head) # # def check(self, node): # if node is None: # return True # isPal = self.check(node.next) and (self.curr_head.val == node.val) # self.curr_head = self.curr_head.next # return isPal <block_start><def_stmt>isPalindrome self head# p2 is 2 times faster than p3 # p1 and pre is used to reverse the first half of the list # so when the first while is over # p1 is in the middle # p3 is in middle + 1 # p2 is in the end <block_start><if_stmt>head<is><none><block_start><return><true><block_end>p1,p2=head head<line_sep>p3,pre=p1.next p1<while_stmt>p2.next<is><not><none><and>p2.next.next<is><not><none><block_start>p2=p2.next.next<line_sep>pre=p1<line_sep>p1=p3<line_sep>p3=p3.next<line_sep>p1.next=pre<block_end><if_stmt>p2.next<is><none><block_start>p1=p1.next<block_end><while_stmt>p3<is><not><none><block_start><if_stmt>p1.val<ne>p3.val<block_start><return><false><block_end>p1=p1.next<line_sep>p3=p3.next<block_end><return><true><block_end><block_end>
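# Hedged usage sketch (an assumption, not part of the original snippet): a minimal ListNode matching the commented-out definition above, used to exercise Solution.isPalindrome; expected output is True for [1, 2, 2, 1] and False for [1, 2, 3]. <class_stmt>ListNode(object)<block_start><def_stmt>__init__ self x<block_start>self.val=x<line_sep>self.next=<none><block_end><block_end><def_stmt>build_list values<block_start>head=<none><for_stmt>v reversed(values)<block_start>node=ListNode(v)<line_sep>node.next=head<line_sep>head=node<block_end><return>head<block_end>print(Solution().isPalindrome(build_list([1 2 2 1])))<line_sep>print(Solution().isPalindrome(build_list([1 2 3])))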
<import_stmt>sys<import_stmt>types<import_stmt>inspect<def_stmt>isstring s# if we use Python 3 <block_start><if_stmt>(sys.version_info[0]<ge>3)<block_start><return>isinstance(s str)<block_end># we use Python 2 <return>isinstance(s basestring)<block_end><def_stmt>normalize_func func# return None for builtins <block_start><if_stmt>(inspect.isbuiltin(func))<block_start><return><none><block_end><return>func<block_end><def_stmt>get_doc func<block_start>doc=inspect.getdoc(func)<if_stmt>doc<is><none><block_start>func=normalize_func(func)<if_stmt>func<is><none><block_start><return><none><block_end><else_stmt><block_start>doc=inspect.getdoc(func)<block_end><block_end><return>doc<block_end><def_stmt>get_property_doc target prop<block_start><for_stmt>name,obj inspect.getmembers(type(target) inspect.isdatadescriptor)<block_start><if_stmt>(isinstance(obj property)<and>name<eq>prop)<block_start><return>inspect.getdoc(obj.fget)<block_end><block_end><return><none><block_end><def_stmt>get_argspec func<block_start><try_stmt><block_start><if_stmt>sys.version_info[0]<ge>3<block_start><return>inspect.getfullargspec(func)<block_end><else_stmt><block_start><return>inspect.getargspec(func)<block_end><block_end><except_stmt>TypeError<block_start><return><none><block_end><block_end><def_stmt>get_arguments func<block_start>func=normalize_func(func)<if_stmt>func<is><none><block_start><return><none><block_end>argspec=get_argspec(func)<if_stmt>argspec<is><none><block_start><return><none><block_end>args=argspec.args<if_stmt>'self'<in>args<block_start>args.remove('self')<block_end><return>args<block_end><def_stmt>get_r_representation default<block_start><if_stmt>callable(default)<and>hasattr(default '__name__')<block_start>arg_value=default.__name__<block_end><else_stmt><block_start><if_stmt>default<is><none><block_start>arg_value="NULL"<block_end><elif_stmt>type(default)<eq>type(<true>)<block_start><if_stmt>default<eq><true><block_start>arg_value="TRUE"<block_end><else_stmt><block_start>arg_value="FALSE"<block_end><block_end><elif_stmt>isstring(default)<block_start>arg_value="\"%s\""%default<block_end><elif_stmt>isinstance(default int)<block_start>arg_value="%rL"%default<block_end><elif_stmt>isinstance(default float)<block_start>arg_value="%r"%default<block_end><elif_stmt>isinstance(default list)<block_start>arg_value="c("<for_stmt>i,item enumerate(default)<block_start><if_stmt>i<is>(len(default)-1)<block_start>arg_value<augadd>"%s)"%get_r_representation(item)<block_end><else_stmt><block_start>arg_value<augadd>"%s, "%get_r_representation(item)<block_end><block_end><block_end><elif_stmt>isinstance(default (tuple set))<block_start>arg_value="list("<for_stmt>i,item enumerate(default)<block_start><if_stmt>i<is>(len(default)-1)<block_start>arg_value<augadd>"%s)"%get_r_representation(item)<block_end><else_stmt><block_start>arg_value<augadd>"%s, "%get_r_representation(item)<block_end><block_end><block_end><elif_stmt>isinstance(default dict)<block_start>arg_value="list("<for_stmt>i range(len(default))<block_start>i_arg_value="%s = %s"%(default.keys()[i] get_r_representation(default.values()[i]))<if_stmt>i<is>(len(default)-1)<block_start>arg_value<augadd>"%s)"%i_arg_value<block_end><else_stmt><block_start>arg_value<augadd>"%s, "%i_arg_value<block_end><block_end><block_end><else_stmt><block_start>arg_value="%r"%default<block_end><block_end># if the value starts with "tf." then convert to $ usage <if_stmt>(arg_value.startswith("tf."))<block_start>arg_value=arg_value.replace("." 
"$")<block_end><return>(arg_value)<block_end><def_stmt>generate_signature_for_function func<block_start>"""Given a function, returns a string representing its args."""<line_sep>func=normalize_func(func)<if_stmt>func<is><none><block_start><return><none><block_end>args_list=[]<line_sep>argspec=get_argspec(func)<if_stmt>argspec<is><none><block_start><return><none><block_end>first_arg_with_default=(len(argspec.args<or>[])-len(argspec.defaults<or>[]))<for_stmt>arg argspec.args[:first_arg_with_default]<block_start><if_stmt>arg<eq>"self"# Python documentation typically skips `self` when printing method # signatures. <block_start><continue><block_end>args_list.append(arg)<block_end><if_stmt>argspec.varargs<eq>"args"<and>hasattr(argspec 'keywords')<and>argspec.keywords<eq>"kwds"<block_start>original_func=func.__closure__[0].cell_contents<line_sep><return>generate_signature_for_function(original_func)<block_end><if_stmt>argspec.defaults<block_start><for_stmt>arg,default zip(argspec.args[first_arg_with_default:] argspec.defaults)<block_start>arg_value=get_r_representation(default)<line_sep>args_list.append("%s = %s"%(arg arg_value))<block_end><block_end><if_stmt>argspec.varargs<block_start>args_list.append("...")<block_end><if_stmt>hasattr(argspec 'keywords')<and>argspec.keywords<block_start>args_list.append("...")<block_end><return>"("+", ".join(args_list)+")"<block_end>
""" Find serial devices and update serial device IDs """<import_from_stmt>..util log<line_sep>CONNECT_MESSAGE=""" Connect just one Serial device (AllPixel) and press enter..."""<def_stmt>run args<block_start><import_from_stmt>..drivers.serial.driver Serial<import_from_stmt>..drivers.serial.devices Devices<import_stmt>serial<line_sep>run=<true><line_sep>log.printer("Press Ctrl+C any time to exit.")<try_stmt><block_start><while_stmt>run<block_start><try_stmt><block_start>input(CONNECT_MESSAGE)<line_sep>devices=Devices(args.hardware_id args.baud)<line_sep>ports=devices.find_serial_devices()<if_stmt><not>ports<block_start>log.printer("No serial devices found. Please connect one.")<line_sep><continue><block_end>port=sorted(ports.items())[0][1][0]<line_sep>id=devices.get_device_id(port)<line_sep>log.printer("Device ID of {}: {}".format(port id))<line_sep>newID=input("Input new ID (enter to skip): ")<if_stmt>newID<ne>''<block_start><try_stmt><block_start>newID=int(newID)<if_stmt>newID<l>0<or>newID<g>255<block_start><raise>ValueError()<block_end>devices.set_device_id(port newID)<line_sep>id=devices.get_device_id(port)<line_sep>log.printer("Device ID set to: %s"%id)<block_end><except_stmt>ValueError<block_start>log.printer("Please enter a number between 0 and 255.")<block_end><block_end><block_end><except_stmt>serial.SerialException<as>e<block_start>log.printer("Problem connecting to serial device. %s"%e)<block_end><except_stmt>Exception<as>e<block_start>log.printer('Programmer error with exception %s'%e)<block_end><block_end><block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><block_end><def_stmt>add_arguments parser<block_start>parser.set_defaults(run=run)<line_sep>parser.add_argument('--hardware-id' default='1D50:60AB' help='USB Vendor ID : Product ID of device. '<concat>'Defaults to VID:PID for AllPixel')<line_sep>parser.add_argument('--baud' default=921600 type=int help='Serial baud rate.')<block_end>
# -*- coding: utf-8 -*- <import_stmt>aiohttp<line_sep>session=aiohttp.ClientSession()<async_keyword><def_stmt>geocode place<block_start>params={'sensor':'false' 'address':place}<async_keyword><with_stmt>session.get('https://maps.googleapis.com/maps/api/geocode/json' params=params)<as>response<block_start>result=<await>response.json()<line_sep><return>result['results']<block_end><block_end>
# -*- coding: utf-8 -*- # @File : client.py # @Date : 2019/8/28 # @Desc : # @license : Copyright(C), funnywolf # @Author: funnywolf # @Contact : github.com/FunnyWolf <import_stmt>argparse<import_stmt>struct<import_stmt>threading<import_stmt>time<import_from_stmt>socket AF_INET SOCK_STREAM<import_from_stmt>threading Thread<import_stmt>ipaddr<import_from_stmt>config *<try_stmt><block_start><import_stmt>requests<import_from_stmt>requests.packages.urllib3.exceptions InsecureRequestWarning<block_end><except_stmt>Exception<as>E<block_start><import_stmt>urllib3<line_sep>urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)<block_end><global>globalClientCenter<class_stmt>ClientCenter(threading.Thread)<block_start><def_stmt>__init__ self<block_start>self.headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36' "Connection":"keep-alive" 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' "Accept-Language":"zh-CN,zh;q=0.8" 'Accept-Encoding':'gzip' }<line_sep>self.proxy=<none><line_sep>self.CACHE_CONNS={}<line_sep>self.MIRROR_CHCHE_CONNS={}<line_sep># { # "conn": self.request, # "targetaddr": TARGET_ADDR, # "new": True, # } # socket参数 self.LOCAL_ADDR=<none><line_sep>self.READ_BUFF_SIZE=11200<line_sep>self.POST_RETRY_COUNT=10# post请求重试最大次数 # 日志参数 self.LOG_LEVEL="INFO"<line_sep>self.logger=get_logger(level=self.LOG_LEVEL name="StreamLogger")<line_sep># webshell参数 self.WEBSHELL=<none><line_sep>self.REMOTE_SERVER=<none><line_sep>self.SINGLE_MODE=<false><line_sep># mirror self.SOCKET_TIMEOUT=DEFAULT_SOCKET_TIMEOUT<line_sep>self.TARGET_IP="127.0.0.1"<line_sep>self.TARGET_PORT=60020<line_sep># 缓存变量 self.die_client_address=[]<line_sep>self.mirror_die_client_address=[]<line_sep>self.session=requests.session()<line_sep>self.session.verify=<false><line_sep># 多线程变量 self.post_send_data={}<line_sep>self.post_return_data={}<line_sep>threading.Thread.__init__(self)<block_end><def_stmt>custom_header self inputstr<block_start><try_stmt><block_start>str_headers=inputstr.split(",")<for_stmt>str_header str_headers<block_start>header_type=str_header.split(":")[0].strip()<line_sep>header_value=str_header.split(":")[1].strip()<line_sep>self.headers[header_type]=header_value<block_end><block_end><except_stmt>Exception<as>E<block_start>self.logger.exception(E)<line_sep><return><false><block_end>self.logger.info("------------ Custom Http Request Header ------------")<line_sep>self.logger.info(self.headers)<line_sep>self.logger.info("\n")<line_sep><return><true><block_end><def_stmt>custom_proxy self proxy<block_start>self.proxy={'http':proxy 'https':proxy}<line_sep>self.session.proxies=self.proxy<line_sep>self.logger.info("------------ Custom Http Request Proxy ------------")<line_sep>self.logger.info(self.proxy)<line_sep>self.logger.info("\n")<line_sep><return><true><block_end><def_stmt>recv_socks_data self client_address<block_start>"""socks数据接收"""<line_sep>client_socket_conn=self.CACHE_CONNS.get(client_address).get("conn")<try_stmt><block_start>tcp_recv_data=client_socket_conn.recv(self.READ_BUFF_SIZE)<line_sep>self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(client_address tcp_recv_data))<if_stmt>len(tcp_recv_data)<g>0<block_start>has_data=<true><line_sep>self.logger.info("CLIENT_ADDRESS:{} TCP_RECV_LEN:{}".format(client_address len(tcp_recv_data)))<block_end><block_end><except_stmt>Exception<as>err<block_start>tcp_recv_data=b""<line_sep>self.logger.debug("TCP_RECV_NONE")<block_end># 
编码问题,data数据(tcp传输的数据)需要额外再base64编码一次 client_socket_targetaddr=self.CACHE_CONNS.get(client_address).get("targetaddr")<line_sep># 每一个client_address的数据结构体 client_address_one_data={"data":base64.b64encode(tcp_recv_data) "targetaddr":client_socket_targetaddr }<line_sep>self.post_send_data[client_address]=client_address_one_data<block_end><def_stmt>send_socks_data self client_address# 将返回的数据发送到client Tcp连接中 # 读取server返回的数据 <block_start><try_stmt><block_start>client_socket_conn=self.CACHE_CONNS.get(client_address).get("conn")<line_sep>server_tcp_send_data=base64.b64decode(self.post_return_data.get(client_address).get("data"))<block_end><except_stmt>Exception<as>E<block_start><if_stmt>self.SINGLE_MODE<is><true><block_start>self.logger.warning("CLIENT_ADDRESS:{} server socket not in client socket list".format(client_address))<line_sep>self.logger.warning("SINGLE_MODE: {} ,remove is conn from server".format(self.SINGLE_MODE))<line_sep>self.die_client_address.append(client_address)<block_end><return><block_end><if_stmt>server_tcp_send_data<eq>""# 无数据返回继续下一个连接 <block_start><return><block_end># 将返回的数据发送到client Tcp连接中 <try_stmt><block_start>client_socket_conn.send(server_tcp_send_data)<line_sep>self.logger.debug("CLIENT_ADDRESS:{} TCP_SEND_DATA:{}".format(client_address server_tcp_send_data))<block_end><except_stmt>Exception<as>E<block_start>self.logger.warning("CLIENT_ADDRESS:{} Client socket send failed".format(client_address))<line_sep>self.die_client_address.append(client_address)<try_stmt><block_start>self.CACHE_CONNS.pop(client_address)<line_sep>client_socket_conn.close()<block_end><except_stmt>Exception<as>E<block_start><pass><block_end><block_end><block_end><def_stmt>_post_data self url data={}<block_start>"""发送数据到webshell"""<line_sep>payload={"Remoteserver":self.REMOTE_SERVER "Endpoint":url "SENDDATA":diyEncode(data)}<line_sep>self.logger.debug(payload)<for_stmt>i range(self.POST_RETRY_COUNT)<block_start><try_stmt># timeout 要大于脚本中post的超时时间 <block_start>r=self.session.post(self.WEBSHELL data=payload verify=<false> timeout=15 headers=self.headers)<block_end><except_stmt>Exception<as>E<block_start>self.logger.warning("Post data to WEBSHELL failed")<line_sep>self.logger.exception(E)<line_sep>time.sleep(3)# 错误后延时 <continue><block_end><try_stmt><block_start>web_return_data=diyDecode(r.content)<if_stmt>isinstance(web_return_data dict)<and>web_return_data.get(ERROR_CODE)<is><not><none><block_start>self.logger.error(web_return_data.get(ERROR_CODE))<line_sep>self.logger.warning(r.content)<line_sep><return><none><block_end><else_stmt><block_start><return>web_return_data<block_end><block_end><except_stmt>Exception<as>E<block_start>self.logger.warning("WEBSHELL return wrong data")<line_sep>self.logger.debug(r.content)<line_sep>time.sleep(3)# 错误后延时 <continue><block_end><block_end># 超过重试次数后,退出 <return><none><block_end><def_stmt>run self<block_start>self.logger.warning("LoopThread start")<while_stmt><true><block_start>self._sync_data()<block_end><block_end><def_stmt>_sync_data self<block_start>has_data=<false><line_sep># 清除无效的client <for_stmt>client_address self.die_client_address<block_start><try_stmt><block_start>one=self.CACHE_CONNS.pop(client_address)<line_sep>one.get("conn").close()<line_sep>self.logger.warning("CLIENT_ADDRESS:{} close client in die_client_address".format(client_address))<block_end><except_stmt>Exception<as>E<block_start>self.logger.warning("CLIENT_ADDRESS:{} close client close client in die_client_address error".format(client_address))<block_end><block_end># 从tcp中读取数据 
thread_list=[]<line_sep>self.post_send_data={}<for_stmt>client_address list(self.CACHE_CONNS.keys())<block_start>temp=Thread(target=self.recv_socks_data args=(client_address ))<line_sep>thread_list.append(temp)<block_end><for_stmt>temp thread_list<block_start>temp.start()<block_end><for_stmt>temp thread_list<block_start>temp.join()<block_end># 从tcp中读取数据(mirror) mirror_post_send_data={}<for_stmt>mirror_client_address list(self.MIRROR_CHCHE_CONNS.keys())<block_start>client_socket_conn=self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")<try_stmt><block_start>tcp_recv_data=client_socket_conn.recv(self.READ_BUFF_SIZE)<line_sep>self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(mirror_client_address tcp_recv_data))<if_stmt>len(tcp_recv_data)<g>0<block_start>has_data=<true><line_sep>self.logger.info("MIRROR_CLIENT_ADDRESS:{} CLIENT_TCP_RECV_LEN:{}".format(mirror_client_address len(tcp_recv_data)))<block_end><block_end><except_stmt>Exception<as>err<block_start>tcp_recv_data=b""<line_sep>self.logger.debug("TCP_RECV_NONE")<block_end># 每一个client_address的数据结构体 client_address_one_data={# 编码问题,data数据(tcp传输的数据)需要额外再base64编码一次 "data":base64.b64encode(tcp_recv_data) }<line_sep>mirror_post_send_data[mirror_client_address]=client_address_one_data<block_end># 组装数据 payload={}<line_sep>payload[DATA_TAG]=self.post_send_data# 发送的数据 payload[DIE_CLIENT_ADDRESS_TAG]=self.die_client_address# 需要清除的连接 payload[MIRROR_DATA_TAG]=mirror_post_send_data# 发送的数据 payload[MIRROR_DIE_CLIENT_ADDRESS_TAG]=self.mirror_die_client_address# 需要清除的连接 # 发送读取的数据到webshell return_data=self._post_data(URL_STINGER_SYNC data=payload)<if_stmt>return_data<is><none># 获取数据失败,退出此次同步 <block_start><return><block_end># 处理post返回数据 # 读取server返回的数据 self.post_return_data=return_data.get(RETURN_DATA)<line_sep>self.die_client_address=[]<line_sep>thread_list=[]<for_stmt>client_address list(self.post_return_data.keys())<block_start>temp=Thread(target=self.send_socks_data args=(client_address ))<line_sep>thread_list.append(temp)<block_end><for_stmt>temp thread_list<block_start>temp.start()<block_end><for_stmt>temp thread_list<block_start>temp.join()<block_end># 检查没有在server返回列表中的client <for_stmt>client_address list(self.CACHE_CONNS.keys())<block_start><if_stmt>self.post_return_data.get(client_address)<is><none><block_start><if_stmt>self.CACHE_CONNS.get(client_address).get("new")<is><true><block_start>self.CACHE_CONNS[client_address]["new"]=<false><line_sep><pass><block_end><else_stmt><block_start>self.logger.warning("CLIENT_ADDRESS:{} remove client not in server CHCHE_CONNS".format(client_address))<line_sep>self.logger.warning("CLIENT_ADDRESS:{} append in die_client_address".format(client_address))<line_sep>self.die_client_address.append(client_address)<block_end><block_end><block_end># mirror处理 mirror_post_return_data=return_data.get(MIRROR_RETURN_DATA)<line_sep>self.mirror_die_client_address=[]<for_stmt>mirror_client_address list(mirror_post_return_data.keys())# 处理socket连接 <block_start><if_stmt>self.MIRROR_CHCHE_CONNS.get(mirror_client_address)<is><none># 新建链接 <block_start><try_stmt><block_start>server_socket_conn=socket.socket(AF_INET SOCK_STREAM)<line_sep>server_socket_conn.settimeout(self.SOCKET_TIMEOUT)<line_sep>server_socket_conn.connect((self.TARGET_IP self.TARGET_PORT) )# json不支持元组,自动转化为list self.MIRROR_CHCHE_CONNS[mirror_client_address]={"conn":server_socket_conn}<line_sep>self.logger.info("MIRROR_CLIENT_ADDRESS:{} Create new tcp socket, TARGET_ADDRESS:{}:{}".format(mirror_client_address self.TARGET_IP 
self.TARGET_PORT))<block_end><except_stmt>Exception<as>E<block_start>self.logger.warning("MIRROR_CLIENT_ADDRESS:{} TARGET_ADDR:{}:{} Create new socket failed. {}".format(mirror_client_address self.TARGET_IP self.TARGET_PORT E))<line_sep>self.mirror_die_client_address.append(mirror_client_address)<line_sep><continue><block_end><block_end><else_stmt><block_start>server_socket_conn=self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")<block_end># 读取server返回的数据 <try_stmt><block_start>server_tcp_send_data=base64.b64decode(mirror_post_return_data.get(mirror_client_address).get("data"))<line_sep>server_socket_conn.send(server_tcp_send_data)<line_sep>self.logger.debug("MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_DATA:{}".format(mirror_client_address server_tcp_send_data))<if_stmt>len(server_tcp_send_data)<g>0<block_start>self.logger.info("MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_LEN:{}".format(mirror_client_address len(server_tcp_send_data)))<block_end><block_end><except_stmt>Exception<as>E<block_start>self.logger.info("MIRROR_CLIENT_ADDRESS:{} socket send data failed. {}".format(mirror_client_address E))<line_sep>self.mirror_die_client_address.append(mirror_client_address)<line_sep>one=self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)<line_sep>one.get("conn").close()<line_sep><continue><block_end><block_end># 检查没有在server返回列表中的client <for_stmt>mirror_client_address list(self.MIRROR_CHCHE_CONNS.keys())<block_start><if_stmt>mirror_post_return_data.get(mirror_client_address)<is><none><block_start>self.logger.warning("MIRROR_CLIENT_ADDRESS:{} remove client not in server MIRROR_CHCHE_CONNS".format(mirror_client_address))<line_sep># self.mirror_die_client_address.append(mirror_client_address) one=self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)<line_sep>one.get("conn").close()<block_end><block_end># 等待时间 <if_stmt>has_data<block_start>wait=0<block_end><else_stmt><block_start>wait=return_data.get(WAIT_TIME)<block_end>time.sleep(wait)<block_end><def_stmt>setc_webshell self WEBSHELL<block_start><try_stmt><block_start>r=requests.get(WEBSHELL verify=<false> timeout=3 headers=self.headers proxies=self.proxy)<if_stmt>b"UTF-8"<in>r.content<block_start>self.WEBSHELL=WEBSHELL<line_sep><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><except_stmt>requests.exceptions.ProxyError<as>proxyError<block_start>self.logger.error("Connet to proxy failed : {}".format(self.proxy))<line_sep><return><false><block_end><except_stmt>Exception<as>E<block_start>self.logger.exception(E)<line_sep><return><false><block_end><block_end><def_stmt>setc_remoteserver self remote_server=<none><block_start><if_stmt>remote_server<is><none><block_start><for_stmt>port CONTROL_PORT<block_start><for_stmt>i range(2)<block_start>self.REMOTE_SERVER="http://{}:{}".format(LOCALADDR port)<line_sep>result=self._post_data(URL_CHECK)<if_stmt>result<is><none># 失败回退 <block_start>self.REMOTE_SERVER=<none><line_sep><continue><block_end><else_stmt><block_start><return>result<block_end><block_end><block_end><return><none><block_end>self.REMOTE_SERVER=remote_server<line_sep>result=self._post_data(URL_CHECK)<if_stmt>result<is><none># 失败回退 <block_start>self.REMOTE_SERVER=<none><block_end><return>result<block_end><def_stmt>setc_localaddr self ip port<block_start><if_stmt>port_is_used(port ip)<block_start><return><false><block_end><else_stmt><block_start>self.LOCAL_ADDR="{}:{}".format(ip port)<block_end><return><true><block_end><def_stmt>sets_config self tag data<block_start>payload={CONFIG_TAG:tag 
CONFIG_DATA:data}<line_sep>web_return_data=self._post_data(URL_SET_CONFIG payload)<line_sep><return>web_return_data<block_end><def_stmt>send_cmd self tag data=<none><block_start>payload={CONFIG_TAG:tag CONFIG_DATA:data}<line_sep>web_return_data=self._post_data(URL_CMD payload)<line_sep><return>web_return_data<block_end><block_end><class_stmt>ClientRequest(object)<block_start>'''Represents a client SOCKS4 request'''<def_stmt>__init__ self data<block_start>'''Construct a new ClientRequeset from the given raw SOCKS request'''<line_sep>self.invalid=<false><line_sep># Client requests must be at least 9 bytes to hold all necessary data <if_stmt>len(data)<l>9<block_start>self.invalid=<true><line_sep><return><block_end># Version number (VN) self.parse_vn(data)<line_sep># SOCKS command code (CD) self.parse_cd(data)<line_sep># Destination port self.parse_dst_port(data)<line_sep># Destination IP / Domain name (if specified) self.parse_ip(data)<line_sep># Userid self.parse_userid(data)<block_end>@classmethod<def_stmt>parse_fixed cls data<block_start>'''Parse and return the fixed-length part of a SOCKS request Returns a tuple containing (vn, cd, dst_port, dst_ip) given the raw socks request '''<line_sep><return>struct.unpack('>BBHL' data[:8])<block_end><def_stmt>parse_vn self data<block_start>'''Parse and store the version number given the raw SOCKS request'''<line_sep>vn,_,_,_=ClientRequest.parse_fixed(data)<if_stmt>(vn<ne>CLIENT_VN)<block_start>self.invalid=<true><block_end><block_end><def_stmt>parse_dst_port self data<block_start>'''Parse and store the destination port given the raw SOCKS request'''<line_sep>_,_,dst_port,_=ClientRequest.parse_fixed(data)<line_sep>self.dst_port=dst_port<block_end><def_stmt>parse_cd self data<block_start>'''Parse and store the request code given the raw SOCKS request'''<line_sep>_,cd,_,_=ClientRequest.parse_fixed(data)<if_stmt>(cd<eq>REQUEST_CD_CONNECT<or>cd<eq>REQUEST_CD_BIND)<block_start>self.cd=cd<block_end><else_stmt><block_start>self.invalid=<true><block_end><block_end><def_stmt>parse_ip self data<block_start>'''Parse and store the destination ip given the raw SOCKS request If the IP is of the form 0.0.0.(1-255), attempt to resolve the domain name specified, then store the resolved ip as the destination ip. 
'''<line_sep>_,_,_,dst_ip=ClientRequest.parse_fixed(data)<line_sep>ip=ipaddr.IPv4Address(dst_ip)<line_sep>o1,o2,o3,o4=ip.packed<line_sep># Invalid ip address specifying that we must resolve the domain # specified in data (As specified in SOCKS4a) <if_stmt>(o1 o2 o3)<eq>(0 0 0)<and>o4<ne>0<block_start><try_stmt># Variable length part of the request containing the userid # and domain (8th byte onwards) <block_start>userid_and_domain=data[8:]<line_sep># Extract the domain to resolve _,domain,_=userid_and_domain.split(b'\x00')<block_end><except_stmt>ValueError# Error parsing request <block_start>self.invalid=<true><line_sep><return><block_end><try_stmt><block_start>resolved_ip=socket.gethostbyname(domain)<block_end><except_stmt>socket.gaierror# Domain name not found <block_start>self.invalid=<true><line_sep><return><block_end>self.dst_ip=resolved_ip<block_end><else_stmt><block_start>self.dst_ip=ip.exploded<block_end><block_end><def_stmt>parse_userid self data<block_start>'''Parse and store the userid given the raw SOCKS request'''<try_stmt><block_start>index=data.index(b'\x00')<line_sep>self.userid=data[8:index]<block_end><except_stmt>ValueError<block_start>self.invalid=<true><block_end><except_stmt>IndexError<block_start>self.invalid=<true><block_end><block_end><def_stmt>isInvalid self<block_start>'''Returns true if this request is invalid, false otherwise'''<line_sep><return>self.invalid<block_end><block_end><class_stmt>Socks4aProxy(threading.Thread)<block_start>'''A SOCKS4a Proxy'''<def_stmt>__init__ self host="127.0.0.1" port=-1 timeout=0.05 bufsize=BUFSIZE<block_start>'''Create a new SOCKS4 proxy on the specified port'''<line_sep>self._host=host<line_sep>self._port=port<line_sep>self._bufsize=bufsize<line_sep>self._backlog=BACKLOG<line_sep>self._timeout=timeout<line_sep>self.logger=logging.getLogger("StreamLogger")<line_sep>threading.Thread.__init__(self)<block_end>@staticmethod<def_stmt>build_socks_reply cd dst_port=0x0000 dst_ip='0.0.0.0'<block_start>''' Build a SOCKS4 reply with the specified reply code, destination port and destination ip. 
'''<line_sep># dst_ip_bytes = ipaddress.IPv4Address(dst_ip).packed dst_ip_bytes=ipaddr.IPv4Address(dst_ip).packed<line_sep>dst_ip_raw,=struct.unpack('>L' dst_ip_bytes)<line_sep><return>struct.pack('>BBHL' SERVER_VN cd dst_port dst_ip_raw)<block_end><def_stmt>run self<block_start><try_stmt><block_start>s=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>s.setsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR 1)<line_sep>s.bind((self._host self._port))<line_sep>s.listen(self._backlog)<line_sep>self.logger.warning("socks4a server start on {}:{}".format(self._host self._port))<block_end><except_stmt>Exception<as>E<block_start>self.logger.exception(E)<line_sep>self.logger.error("start socks4a server failed on {}:{}, maybe port is using by other process".format(self._host self._port))<line_sep><return><false><block_end>self.logger.warning("Socks4a ready to accept")<while_stmt><true><block_start><try_stmt><block_start>conn,addr=s.accept()<line_sep>conn.settimeout(self._timeout)<line_sep>data=conn.recv(self._bufsize)<line_sep># Got a connection, handle it with process_request() self._process_request(data conn addr)<line_sep>self.logger.info("Socks4a process_request finish")<block_end><except_stmt>KeyboardInterrupt<as>ki<block_start>self.logger.warning('Caught KeyboardInterrupt, exiting')<line_sep>s.close()<line_sep>sys.exit(0)<block_end><except_stmt>Exception<as>E<block_start>self.logger.exception(E)<try_stmt><block_start>conn.close()<block_end><except_stmt>Exception<as>E<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>_process_request self data client_conn addr<block_start>'''Process a general SOCKS request'''<line_sep>client_request=ClientRequest(data)<line_sep># Handle invalid requests <if_stmt>client_request.isInvalid()<block_start>client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_REJECTED))<line_sep>client_conn.close()<line_sep><return><block_end><if_stmt>client_request.cd<eq>REQUEST_CD_CONNECT<block_start>globalClientCenter.logger.warning('Got connection from {}'.format(addr))<line_sep>key="{}:{}".format(addr[0] addr[1])<line_sep>globalClientCenter.CACHE_CONNS[key]={"conn":client_conn "targetaddr":(client_request.dst_ip client_request.dst_port) "new":<true> # 新的连接,第一次检查略过 }<line_sep>client_conn.settimeout(self._timeout)<line_sep>client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_GRANTED))# 处理完成,开始正式连接 <block_end><else_stmt><block_start>self.logger.warning("Socks4a do not support bind request")<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description="Make sure the stinger_server is running on webserver "<concat>"(stinger_server will listen 127.0.0.1:60010 127.0.0.1:60020)")<line_sep>parser.add_argument('-w' '--webshell' metavar='http://192.168.3.10:8080/proxy.jsp' help="webshell url" required=<true>)<line_sep>parser.add_argument('--header' metavar='Authorization: XXX,Cookie: XXX' help="custom http request header" default=<none>)<line_sep>parser.add_argument('--proxy' metavar='socks5://127.0.0.1:1080' help="Connect webshell through proxy" default=<none>)<line_sep>parser.add_argument('-l' '--locallistenaddress' metavar='127.0.0.1/0.0.0.0' help="local listen address for socks4" default='127.0.0.1')<line_sep>parser.add_argument('-p' '--locallistenport' default=10800 metavar='N' type=int help="local listen port for socks4" )<line_sep>parser.add_argument('-st' '--sockettimeout' default=0.2 metavar="N" type=float help="socket timeout value,the biger the timeout, the slower the transmission 
speed" )<line_sep>parser.add_argument('-ti' '--targetipaddress' metavar='127.0.0.1' help="reverse proxy target ipaddress" required=<false>)<line_sep>parser.add_argument('-tp' '--targetport' metavar='60020' help="reverse proxy target port" required=<false>)<line_sep>parser.add_argument('-c' '--cleansockst' default=<false> nargs='?' metavar="true" type=bool help="clean server exist socket(this will kill other client connect)" )<line_sep>parser.add_argument('-sm' '--singlemode' default=<false> nargs='?' metavar="true" type=bool help="clean server exist socket(this will kill other client connect)" )<line_sep>args=parser.parse_args()<line_sep>WEBSHELL=args.webshell<line_sep>LISTEN_ADDR=args.locallistenaddress<line_sep>LISTEN_PORT=args.locallistenport<line_sep>CLEAN_SOCKET=args.cleansockst<if_stmt>CLEAN_SOCKET<is><not><false><block_start>CLEAN_SOCKET=<true><block_end><else_stmt><block_start>CLEAN_SOCKET=<false><block_end># 处理header参数 globalClientCenter=ClientCenter()<line_sep>header=args.header<if_stmt>header<is><not><none><block_start>flag=globalClientCenter.custom_header(header)<if_stmt>flag<is><not><true><block_start>sys.exit(1)<block_end><block_end># 处理proxy参数 proxy=args.proxy<if_stmt>proxy<is><not><none><block_start>flag=globalClientCenter.custom_proxy(proxy)<if_stmt>flag<is><not><true><block_start>sys.exit(1)<block_end><block_end># 处理singlemode参数 SINGLE_MODE=args.singlemode<if_stmt>SINGLE_MODE<is><not><false><block_start>SINGLE_MODE=<true><line_sep>globalClientCenter.SINGLE_MODE=SINGLE_MODE<line_sep>globalClientCenter.logger.info("SINGLE_MODE : {}".format(SINGLE_MODE))<block_end><else_stmt><block_start>SINGLE_MODE=<false><block_end># 本地端口检查 globalClientCenter.logger.info("------------------- Local check -------------------")<line_sep>flag=globalClientCenter.setc_localaddr(LISTEN_ADDR LISTEN_PORT)<if_stmt>flag<block_start>globalClientCenter.logger.info("Local listen check : pass")<block_end><else_stmt><block_start>globalClientCenter.logger.error("Local listen check failed, please check if {}:{} is available".format(LISTEN_ADDR LISTEN_PORT))<line_sep>globalClientCenter.logger.error(WEBSHELL)<line_sep>sys.exit(1)<block_end># 检查webshell是否可用 webshell_alive=globalClientCenter.setc_webshell(WEBSHELL)<if_stmt>webshell_alive<block_start>globalClientCenter.logger.info("WEBSHELL check : pass")<line_sep>globalClientCenter.logger.info("WEBSHELL: {}".format(WEBSHELL))<block_end><else_stmt><block_start>globalClientCenter.logger.error("WEBSHELL check failed!")<line_sep>globalClientCenter.logger.error(WEBSHELL)<line_sep>sys.exit(1)<block_end># 检查stinger_server是否可用 result=globalClientCenter.setc_remoteserver()<if_stmt>result<is><none><block_start>globalClientCenter.logger.error("Read REMOTE_SERVER failed,please check whether server is running")<line_sep>sys.exit(1)<block_end><else_stmt><block_start>MIRROR_LISTEN="127.0.0.1:60020"<line_sep>globalClientCenter.logger.info("REMOTE_SERVER check : pass")<line_sep>globalClientCenter.logger.info("\n")<line_sep>globalClientCenter.logger.info("------------------- Get Sever Config -------------------")<for_stmt>key result<block_start>globalClientCenter.logger.info("{} : {}".format(key result.get(key)))<if_stmt>key<eq>"MIRROR_LISTEN"<block_start>MIRROR_LISTEN=result.get(key)<block_end><block_end>globalClientCenter.logger.info("\n")<block_end>globalClientCenter.logger.info("------------------- Set Server Config -------------------")<line_sep># 是否清理已有连接 
<if_stmt>CLEAN_SOCKET<block_start>flag=globalClientCenter.send_cmd("CLEAN_SOCKET")<line_sep>globalClientCenter.logger.info("CLEAN_SOCKET cmd : {}".format(flag))<block_end># server建立内网tcp连接的超时时间 sockettimeout=args.sockettimeout<if_stmt>sockettimeout<ne>DEFAULT_SOCKET_TIMEOUT<block_start>flag=globalClientCenter.sets_config("SOCKET_TIMEOUT" sockettimeout)<line_sep>globalClientCenter.logger.info("Set server SOCKET_TIMEOUT => {}".format(flag))<line_sep>globalClientCenter.SOCKET_TIMEOUT=sockettimeout<block_end>globalClientCenter.logger.info("\n")<line_sep># 映射到本地的地址 TARGET_IP=args.targetipaddress<if_stmt>TARGET_IP<is><none><block_start>globalClientCenter.TARGET_IP=MIRROR_LISTEN.split(":")[0]<block_end><else_stmt><block_start>globalClientCenter.TARGET_IP=TARGET_IP<block_end># 映射到本地的端口 TARGET_PORT=args.targetport<if_stmt>TARGET_PORT<is><none><block_start>globalClientCenter.TARGET_PORT=int(MIRROR_LISTEN.split(":")[1])<block_end><else_stmt><block_start>globalClientCenter.TARGET_PORT=int(TARGET_PORT)<block_end>globalClientCenter.logger.info("------------------! RAT Config !------------------")<line_sep>globalClientCenter.logger.info("Socks4a on {}:{}".format(LISTEN_ADDR LISTEN_PORT))<line_sep>globalClientCenter.logger.info("Handler/LISTENER should listen on {}:{}".format(globalClientCenter.TARGET_IP globalClientCenter.TARGET_PORT))<line_sep>globalClientCenter.logger.info("Payload should connect to {}".format(MIRROR_LISTEN))<line_sep>globalClientCenter.logger.info("------------------! RAT Config !------------------\n")<line_sep># 设置线程为守护线程 globalClientCenter.setDaemon(<true>)<line_sep>t2=Socks4aProxy(host=args.locallistenaddress port=args.locallistenport timeout=sockettimeout)<line_sep>t2.setDaemon(<true>)<line_sep># 启动服务 globalClientCenter.start()<line_sep>t2.start()<line_sep># 保持程序运行,处理结束信号 <while_stmt><true><block_start><try_stmt><block_start>time.sleep(10)<block_end><except_stmt>KeyboardInterrupt<as>ki<block_start>print('Caught KeyboardInterrupt, exiting')<line_sep>sys.exit(1)<block_end><block_end><block_end>
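# A hedged sketch of the wire format that the ClientRequest class above expects,
# assuming the usual SOCKS4 constants (VN=4, CD=1 for CONNECT). A SOCKS4/4a request
# packs VN, CD, DSTPORT and DSTIP as '>BBHL', followed by a NUL-terminated userid
# and, for SOCKS4a, a NUL-terminated hostname. The helper name and example values
# are illustrative, not part of stinger.
import struct

def build_socks4a_connect(port, userid=b"", hostname=b"example.com"):
    # DSTIP 0.0.0.1 tells the proxy to resolve `hostname` itself (SOCKS4a extension)
    fixed = struct.pack(">BBHL", 4, 1, port, 1)
    return fixed + userid + b"\x00" + hostname + b"\x00"

# ClientRequest(build_socks4a_connect(80)) would be parsed as a CONNECT request,
# with dst_port == 80 and dst_ip set to the resolved address of example.com.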
# # Collective Knowledge (caffe CK front-end) # # See CK LICENSE.txt for licensing details # See CK COPYRIGHT.txt for copyright details # # Developer: cTuning foundation, <EMAIL>, http://cTuning.org # cfg={}# Will be updated by CK (meta description of this module) work={}# Will be updated by CK (temporal data) ck=<none># Will be updated by CK (initialized CK kernel) # Local settings ############################################################################## # Initialize module <def_stmt>init i<block_start>""" Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """<line_sep><return>{'return':0}<block_end>############################################################################## # crowd-benchmark caffe <def_stmt>crowdbench i<block_start>""" Input: { } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """<line_sep>i['action']='crowdsource'<line_sep>i['module_uoa']=cfg['module_deps']['experiment.bench.caffe']<line_sep><return>ck.access(i)<block_end>############################################################################## # TBD: classification demo using webcam + benchmarking/tuning via CK <def_stmt>demo i<block_start>""" Input: { (camera_id) - camera ID (delay) - delay } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """<line_sep># Deps <import_stmt>time<import_stmt>cv2<import_stmt>os<line_sep># Prepare tmp entry if doesn't exist duoa=cfg['demo']['data_uoa']<line_sep>image_name=cfg['demo']['image_name']<line_sep>r=ck.access({'action':'load' 'module_uoa':cfg['module_deps']['tmp'] 'data_uoa':duoa})<if_stmt>r['return']<g>0<block_start><if_stmt>r['return']<ne>16<block_start><return>r<block_end>r=ck.access({'action':'add' 'module_uoa':cfg['module_deps']['tmp'] 'data_uoa':duoa})<if_stmt>r['return']<g>0<block_start><return>r<block_end><block_end>p=r['path']<line_sep>pf=os.path.join(p image_name)<line_sep># Initialize web cam ci=int(i.get('camera_id' 0))<line_sep>dl=int(i.get('delay' 1))<line_sep>wcam=cv2.VideoCapture(ci)<line_sep># Permanent loop <while_stmt><true><block_start>ck.out('Obtaining picture from webcam ...')<line_sep>s,img=wcam.read()<if_stmt>s# frame captured without any errors # cv2.namedWindow("cam-test") # cv2.imshow("cam-test",img) # destroyWindow("cam-test") <block_start>cv2.imwrite(pf img)<block_end>time.sleep(dl)<block_end><return>{'return':0}<block_end>############################################################################## # autotune Caffe workloads <def_stmt>autotune i<block_start>""" Input: { } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """<line_sep>i['module_uoa']=cfg['module_deps']['program']<line_sep>i['data_uoa']='caffe'<line_sep>i['explore']='yes'<line_sep>i['extra_tags']='dnn'<line_sep>i['skip_collaborative']='yes'<line_sep>i['skip_pruning']='yes'<line_sep>i['iterations']=-1<line_sep>i['new']='yes'<line_sep>i['cmd_keys']=['time_cpu' 'time_gpu']<line_sep><return>ck.access(i)<block_end>
<import_from_stmt>rest_framework.test APITestCase<import_from_stmt>tests.testapp.models TimeSeries<import_from_stmt>wq.io load_file<class_stmt>ExcelTestCase(APITestCase)<block_start><def_stmt>setUp self<block_start>data=(('2014-01-01' 0.5) ('2014-01-02' 0.4) ('2014-01-03' 0.6) ('2014-01-04' 0.2) ('2014-01-05' 0.1) )<for_stmt>date,value data<block_start>TimeSeries.objects.create(date=date value=value)<block_end><block_end><def_stmt>test_xls self<block_start>response=self.client.get("/timeseries.xls")<line_sep>self.assertEqual('attachment; filename="Time Series.xls"' response['content-disposition'] )<line_sep>xlfile=open('tests/output.xls' 'wb')<line_sep>xlfile.write(response.content)<line_sep>xlfile.close()<line_sep>data=load_file("tests/output.xls")<line_sep>self.assertEqual(len(data) 5)<line_sep>self.assertEqual(data[0].date.year 2014)<line_sep>self.assertEqual(data[0].value 0.5)<block_end><def_stmt>test_xlsx self<block_start>response=self.client.get("/timeseries.xlsx")<line_sep>self.assertEqual('attachment; filename="Time Series.xlsx"' response['content-disposition'] )<line_sep>xlfile=open('tests/output.xlsx' 'wb')<line_sep>xlfile.write(response.content)<line_sep>xlfile.close()<line_sep>data=load_file("tests/output.xlsx")<line_sep>self.assertEqual(len(data) 5)<line_sep>self.assertEqual(data[0].date.year 2014)<line_sep>self.assertEqual(data[0].value 0.5)<block_end><block_end>
<import_stmt>elasticsearch<import_from_stmt>kibana_dashboard_api Visualization Dashboard<import_from_stmt>kibana_dashboard_api VisualizationsManager DashboardsManager<import_from_stmt>..exceptions KibanaConfigNotFoundError<def_stmt>generate_dashboard es_conn sensor_names index_name timefield='time' update=<true><block_start>""" Generate a Kibana dashboard given a list of sensor names """<line_sep>es_conn.index(index='.kibana' doc_type="index-pattern" id=index_name body={"title":index_name "timeFieldName":"time"})<line_sep>dashboards=DashboardsManager(es_conn)<line_sep>dashboard=Dashboard()<line_sep>dashboard.id="%s-dashboard"%index_name<line_sep>dashboard.title="%s dashboard"%index_name<line_sep>dashboard.panels=[]<line_sep>dashboard.options={"darkTheme":<true>}<line_sep>dashboard.time_from="now-15m"<line_sep>dashboard.refresh_interval_value=5000<line_sep>dashboard.search_source={"filter":[{"query":{"query_string":{"analyze_wildcard":<true> "query":"*"}}}]}<line_sep>visualizations=VisualizationsManager(es_conn)<line_sep>vis_list=visualizations.get_all()# list all visualizations panels=[]<line_sep>i=0<for_stmt>sensor sensor_names<block_start>viz_id="%s-%s"%(index_name sensor)<line_sep># Check if visualization exists viz=next((v<for>v vis_list<if>v.id<eq>viz_id) <none>)<if_stmt><not>viz# If not, create it <block_start>viz=Visualization()<line_sep>viz.id=viz_id<line_sep>viz.title="%s-%s"%(index_name sensor)<line_sep>viz.search_source={"index":index_name "query":{"query_string":{"analyze_wildcard":<true> "query":"*"}} "filter":[]}<line_sep>viz.vis_state={"title":"%s-%s"%(index_name sensor) "type":"line" "params":{"addLegend":<true> "addTimeMarker":<true> "addTooltip":<true> "defaultYExtents":<true> "drawLinesBetweenPoints":<true> "interpolate":"linear" "radiusRatio":9 "scale":"linear" "setYExtents":<false> "shareYAxis":<true> "showCircles":<true> "smoothLines":<true> "times":[] "yAxis":{}} "aggs":[{"id":"1" "type":"avg" "schema":"metric" "params":{"field":sensor "customLabel":sensor.replace('_' ' ')}} {"id":"2" "type":"max" "schema":"radius" "params":{"field":"SCORE_%s"%sensor}} {"id":"3" "type":"date_histogram" "schema":"segment" "params":{"field":timefield "interval":"custom" "customInterval":"5s" "min_doc_count":1 "extended_bounds":{}}}] "listeners":{}}<try_stmt><block_start>res=visualizations.add(viz)<assert_stmt>res['_id']<eq>viz_id<block_end><except_stmt>elasticsearch.exceptions.ConflictError<block_start><if_stmt>update<block_start>res=visualizations.update(viz)<block_end><block_end><block_end>panel={"id":viz_id "panelIndex":i "row":i "col":i "size_x":7 "size_y":4 "type":"visualization"}<line_sep>panels.append(panel)<line_sep>ret=dashboard.add_visualization(viz)<line_sep>i<augadd>1<block_end># Create the index if it does not exist <if_stmt><not>es_conn.indices.exists(index_name)<block_start>index_properties={"time":{"type":"date"}}<line_sep>body={"mappings":{index_name:{"properties":index_properties}}}<line_sep>es_conn.indices.create(index=index_name body=body)<block_end><try_stmt><block_start>ret=dashboards.add(dashboard)<block_end><except_stmt>elasticsearch.exceptions.ConflictError# Dashboard already exists, let's update it if we have to <block_start><if_stmt>update<block_start>ret=dashboards.update(dashboard)<block_end><block_end># Create the index pattern es_conn.index(index='.kibana' doc_type="index-pattern" id=index_name body={"title":index_name "timeFieldName":"time"})<line_sep># Search for kibana config kibana_config=es_conn.search(index='.kibana' sort={'_uid':{'order':'desc'}} 
doc_type='config')<try_stmt><block_start>kibana_id=kibana_config['hits']['hits'][0]['_id']<block_end><except_stmt>(IndexError KeyError)<block_start><raise>KibanaConfigNotFoundError()<block_end>es_conn.update(index='.kibana' doc_type='config' id=kibana_id body={"doc":{"defaultIndex":index_name}})<line_sep><return>ret<block_end>
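# A hedged usage sketch for generate_dashboard() above; the Elasticsearch host,
# index name and sensor list are illustrative assumptions, not values taken from
# this project, and a reachable cluster with a .kibana index is required.
import elasticsearch

if __name__ == "__main__":
    es = elasticsearch.Elasticsearch(["http://localhost:9200"])
    generate_dashboard(es, sensor_names=["temperature", "humidity"],
                       index_name="sensors-demo", timefield="time", update=True)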
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_stmt>logging<if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig()<block_end>_log=logging.getLogger(__name__)<import_stmt>sys<import_stmt>pyxb<import_stmt>unittest<class_stmt>TestTrac0132(unittest.TestCase)<block_start>message='bad character \u2620'<def_stmt>testDecode self<block_start>e=pyxb.PyXBException(self.message)<if_stmt>sys.version_info[:2]<g>(2 4)<block_start>self.assertEqual(self.message e.args[0])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>django.db.models Q<import_stmt>sal.plugin<line_sep>TITLES={'ok':'Machines with Gatekeeper enabled' 'alert':'Machines without Gatekeeper enabled' 'unknown':'Machines with unknown Gatekeeper status'}<line_sep>PLUGIN_Q=Q(pluginscriptsubmission__plugin='Gatekeeper')<line_sep>SCRIPT_Q=Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name='Gatekeeper')<class_stmt>Gatekeeper(sal.plugin.Widget)<block_start>supported_os_families=[sal.plugin.OSFamilies.darwin]<def_stmt>get_context self queryset **kwargs<block_start>queryset=queryset.filter(os_family='Darwin')<line_sep>context=self.super_get_context(queryset **kwargs)<line_sep>context['ok']=self._filter(queryset 'ok').count()<line_sep>context['alert']=self._filter(queryset 'alert').count()<line_sep>context['unknown']=queryset.count()-context['ok']-context['alert']<line_sep><return>context<block_end><def_stmt>filter self machines data<block_start><if_stmt>data<not><in>TITLES<block_start><return><none> <none><block_end><return>self._filter(machines data) TITLES[data]<block_end><def_stmt>_filter self machines data<block_start>machines=machines.filter(os_family='Darwin')<if_stmt>data<eq>'ok'<block_start>machines=(machines.filter(PLUGIN_Q SCRIPT_Q pluginscriptsubmission__pluginscriptrow__pluginscript_data='Enabled'))<block_end><elif_stmt>data<eq>'alert'<block_start>machines=(machines.filter(PLUGIN_Q SCRIPT_Q pluginscriptsubmission__pluginscriptrow__pluginscript_data='Disabled'))<block_end><elif_stmt>data<eq>'unknown'<block_start>machines=(machines.exclude(pk__in=self._filter(machines 'ok').values('pk')).exclude(pk__in=self._filter(machines 'alert').values('pk')))<block_end><return>machines<block_end><block_end>
<import_from_stmt>.save_images SaveGeneratedImageExtension SaveRawImageExtension<line_sep>
<import_stmt>copy<def_stmt>merge target *args<block_start>"""Merges arbitrary data - copied from http://blog.impressiver.com/post/31434674390/deep-merge-multiple-python-dicts :param target: the data structure to fill :param args: a list of data structures to merge into target :return: target, with all data in args merged into it :rtype: whatever type was originally passed in """<if_stmt>len(args)<g>1<block_start><for_stmt>item args<block_start>merge(target item)<block_end><return>target<block_end>item=args[0]<if_stmt><not>isinstance(item dict)<block_start><return>item<block_end><for_stmt>key,value item.items()<block_start><if_stmt>key<in>target<and>isinstance(target[key] dict)<block_start>merge(target[key] value)<block_end><else_stmt><block_start><if_stmt>key<not><in>target<block_start>target[key]=copy.deepcopy(value)<block_end><block_end><block_end><return>target<block_end># vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
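# A minimal usage sketch for merge() above (illustrative dictionaries). Note that
# existing non-dict values in the target are kept, so later arguments only fill in
# missing keys and deep-merge nested dicts.
base = {"db": {"host": "localhost"}, "debug": False}
extra = {"db": {"port": 5432}, "debug": True, "name": "demo"}
result = merge({}, base, extra)
assert result == {"db": {"host": "localhost", "port": 5432}, "debug": False, "name": "demo"}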
<import_from_stmt>unittest TestCase<import_from_stmt>sklearn_evaluation table<class_stmt>TestMissingInput(TestCase)<block_start><def_stmt>test_feature_importances self<block_start><with_stmt>self.assertRaisesRegex(ValueError "needed to tabulate")<block_start>table.feature_importances(<none>)<block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>json<import_stmt>jieba.analyse<import_stmt>jieba<line_sep>CURRENT_PATH=os.path.dirname(os.path.abspath(__file__))<line_sep>sentiment_path=os.path.join(CURRENT_PATH 'data' 'sentimentDict.json')<line_sep>stopwords_path=os.path.join(CURRENT_PATH 'data' 'stopwords.txt.json')<line_sep>degree_path=os.path.join(CURRENT_PATH 'data' 'degreeDict.json')<line_sep>not_path=os.path.join(CURRENT_PATH 'data' 'notDict.json')<line_sep>jieba_dic_path=os.path.join(CURRENT_PATH 'data' 'jieba.dic')<line_sep># 加载情感词典 jieba.load_userdict(jieba_dic_path)<class_stmt>SentimentAnalysis()<block_start><def_stmt>__init__ self<block_start>self.sentiment_score_dic=self.load_json(sentiment_path)<line_sep>self.degree_score=self.load_json(degree_path)<line_sep>self.notwords=self.load_json(not_path)<block_end><def_stmt>load_json self json_file_path<block_start><with_stmt>open(json_file_path 'r' encoding='utf-8')<as>f<block_start><return>json.loads(f.read() encoding='utf-8')<block_end><block_end><def_stmt>analysis self sentence<block_start>words=jieba.lcut(sentence)<line_sep>score=self.sentiment_score_dic.get(words[0] 0)<if_stmt>len(words)<g>1<block_start>score<augadd>self.sentiment_score_dic.get(words[1] 0)<times>self.notwords.get(words[0] 1)<times>self.degree_score.get(words[0] 1)<if_stmt>len(words)<g>2<block_start><for_stmt>i range(2 len(words))<block_start>score<augadd>self.sentiment_score_dic.get(words[i] 0)<times>self.notwords.get(words[i-1] 1)<times>self.degree_score.get(words[i-1] 1)<times>self.degree_score.get(words[i-2] 1)<times>self.notwords.get(words[i-2] 1)<block_end><block_end><block_end><if_stmt>score<l>0<block_start><return>{'negative':score}<block_end><if_stmt>score<g>0<block_start><return>{'positive':score}<block_end><return>{'middle':score}<block_end><block_end>
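# A hedged usage sketch for SentimentAnalysis above: each word's sentiment score is
# flipped by preceding negation words and scaled by preceding degree adverbs, then
# summed over the jieba-segmented sentence. The example sentence ("very happy today")
# is an assumption; the exact score depends on the bundled dictionaries.
if __name__ == '__main__':
    analyzer = SentimentAnalysis()
    result = analyzer.analysis("今天很开心")
    print(result)  # one of {'positive': score}, {'negative': score} or {'middle': 0}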
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>cctbx.sgtbx.direct_space_asu direct_space_asu<import_from_stmt>cctbx.sgtbx.direct_space_asu.short_cuts *<import_from_stmt>six.moves range<def_stmt>asu_01 # p_1 (s.g. 1) <block_start><return>(direct_space_asu('P 1')&x0&+x1&y0&+y1&z0&+z1)<block_end><def_stmt>asu_02 # p_2 (s.g. 3) <block_start><return>(direct_space_asu('P 2')&x0(y2)&x2(y2)&y0&+y1&z0&+z1)<block_end><def_stmt>asu_03 # p_m (s.g. 6) <block_start><return>(direct_space_asu('P -2x')&x0&x2&y0&+y1&z0&+z1)<block_end><def_stmt>asu_04 # p_g (s.g. 7) <block_start><return>(direct_space_asu('P -2xb')&x0(+y2)&x2(+y2)&y0&+y1&z0&+z1)<block_end><def_stmt>asu_05 # c_m (s.g. 8) <block_start><return>(direct_space_asu('C -2x')&x0&x2&y0&+y2&z0&+z1)<block_end><def_stmt>asu_06 # p_2_m_m (s.g. 25) <block_start><return>(direct_space_asu('P 2 -2')&x0&x2&y0&y2&z0&+z1)<block_end><def_stmt>asu_07 # p_2_m_g (s.g. 28) <block_start><return>(direct_space_asu('P 2 -2a')&x0(y2)&x4&y0&+y1&z0&+z1)<block_end><def_stmt>asu_08 # p_2_g_g (s.g. 32) <block_start><return>(direct_space_asu('P 2 -2ab')&x0&x2(-y0)&y0&+y2&z0&+z1)<block_end><def_stmt>asu_09 # c_2_m_m (s.g. 35) <block_start><return>(direct_space_asu('C 2 -2')&x0&x4(y4)&y0&y2&z0&+z1)<block_end><def_stmt>asu_10 # p_4 (s.g. 75) <block_start><return>(direct_space_asu('P 4')&x0(-y0)&x2&y0&y2(-x2)&z0&+z1)<block_end><def_stmt>asu_11 # p_4_m_m (s.g. 99) <block_start><return>(direct_space_asu('P 4 -2')&x0&y2&-p0&z0&+z1)<block_end><def_stmt>asu_12 # p_4_g_m (s.g. 100) <block_start><return>(direct_space_asu('P 4 -2ab')&x0(-y0)&y0&m2&z0&+z1)<block_end><def_stmt>asu_13 # p_3 (s.g. 143) <block_start><return>(direct_space_asu('P 3')&x0(-y0)&y0&k1&m1(-h1|-k1)&h1&z0&+z1)<block_end><def_stmt>asu_14 # p_3_m_1 (s.g. 156) <block_start><return>(direct_space_asu('P 3 -2"')&h0&m1&k0&z0&+z1)<block_end><def_stmt>asu_15 # p_3_1_m (s.g. 157) <block_start><return>(direct_space_asu('P 3 -2')&y0&k1&m1(y3)&p0&z0&+z1)<block_end><def_stmt>asu_16 # p_6 (s.g. 168) <block_start><return>(direct_space_asu('P 6')&y0&k1&m1(y3)&p0(-y0)&z0&+z1)<block_end><def_stmt>asu_17 # p_6_m_m (s.g. 183) <block_start><return>(direct_space_asu('P 6 -2')&y0&k1&-h0&z0&+z1)<block_end><def_stmt>get_asu point_group_number<block_start><return>eval("asu_%02d"%point_group_number)()<block_end><if_stmt>(__name__<eq>"__main__")<block_start><for_stmt>i range(1 17+1)<block_start>get_asu(i).show_summary()<block_end><block_end>
# -*- coding: utf-8 -*- """ Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order. You may assume no duplicates in the array. Here are a few examples. [1,3,5,6], 5 → 2 [1,3,5,6], 2 → 1 [1,3,5,6], 7 → 4 [1,3,5,6], 0 → 0 """<class_stmt>Solution(object)<block_start><def_stmt>searchInsert self nums target<block_start>""" :type nums: List[int] :type target: int :rtype: int """<line_sep>n=len(nums)<if_stmt><not>nums<block_start><return>0<block_end><else_stmt><block_start>left=0<line_sep>right=n-1<while_stmt>left<le>right<block_start>mid=(left+right)<floordiv>2<if_stmt>nums[mid]<eq>target<block_start><return>mid<block_end><elif_stmt>(mid<l>n-1<and>nums[mid]<l>target<and>nums[mid+1]<g>target)<block_start><return>mid+1<block_end><elif_stmt>target<l>nums[mid]<block_start>right=mid-1<block_end><else_stmt><block_start>left=mid+1<block_end><block_end><if_stmt>left<g>n-1<block_start><return>n<block_end><elif_stmt>right<l>0<block_start><return>0<block_end><block_end><block_end><block_end>a=[1 3 5 6]<line_sep>s=Solution()<line_sep>print(s.searchInsert(a 5))<line_sep>print(s.searchInsert(a 2))<line_sep>print(s.searchInsert(a 7))<line_sep>print(s.searchInsert(a 0))<line_sep>
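# For comparison (hedged note): on a sorted list without duplicates, the standard
# library's bisect_left returns the same insertion index as searchInsert above,
# reusing the `a` list and `s` solver defined there.
import bisect
assert all(bisect.bisect_left(a, t) == s.searchInsert(a, t) for t in (0, 2, 5, 7))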
<import_from_stmt>thespian.system.transport ResultCallback<import_from_stmt>datetime datetime timedelta<import_from_stmt>time sleep<class_stmt>TestUnitResultCallback(object)<block_start><def_stmt>_good self result value<block_start><if_stmt><not>hasattr(self 'goods')<block_start>self.goods=[]<block_end>self.goods.append((result value))<block_end><def_stmt>_fail self result value<block_start><if_stmt><not>hasattr(self 'fails')<block_start>self.fails=[]<block_end>self.fails.append((result value))<block_end><def_stmt>testGoodCallback self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<true> 5)<assert_stmt>self.goods<eq>[(<true> 5)]<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testFailCallback self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<false> 9)<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 9)]<block_end><def_stmt>testGoodCallbackReCall self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<true> 5)<assert_stmt>self.goods<eq>[(<true> 5)]<assert_stmt>self.fails<eq>[]<line_sep>rc.resultCallback(<true> 4)<assert_stmt>self.goods<eq>[(<true> 5)]<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testFailCallbackReCall self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<false> 9)<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 9)]<line_sep>rc.resultCallback(<false> 8)<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 9)]<block_end><def_stmt>testGoodCallbackReCallFail self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<true> 5)<assert_stmt>self.goods<eq>[(<true> 5)]<assert_stmt>self.fails<eq>[]<line_sep>rc.resultCallback(<false> 4)<assert_stmt>self.goods<eq>[(<true> 5)]<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testFailCallbackReCallGood self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc.resultCallback(<false> 9)<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 9)]<line_sep>rc.resultCallback(<true> 8)<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 9)]<block_end><def_stmt>testManyGoodCallbacks self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=[ResultCallback(self._good self._fail)<for>N range(20)]<for_stmt>num,each enumerate(rc)<block_start>each.resultCallback(<true> num)<block_end><assert_stmt>self.goods<eq>[(<true> N)<for>N range(20)]<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testManyFailCallbacks self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=[ResultCallback(self._good self._fail)<for>N range(20)]<for_stmt>num,each enumerate(rc)<block_start>each.resultCallback(<false> num)<block_end><assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> N)<for>N range(20)]<block_end><def_stmt>testManyGoodAndFailCallbacks self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=[ResultCallback(self._good self._fail)<for>N range(20)]<for_stmt>num,each enumerate(rc)<block_start>each.resultCallback(0<eq>num%3 num)<block_end><assert_stmt>self.goods<eq>[(<true> N)<for>N range(20)<if>N%3<eq>0]<assert_stmt>self.fails<eq>[(<false> N)<for>N range(20)<if>N%3]<block_end><def_stmt>testChainedGoodCallbacks 
self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc3.resultCallback(<true> 'good')<assert_stmt>self.goods<eq>[(<true> 'good')]<times>3<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testChainedFailCallbacks self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc3.resultCallback(<false> 'oops')<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 'oops')]<times>3<block_end><def_stmt>testChainedGoodCallbacksDoNotDuplicate self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc2.resultCallback(<true> 'ok')<assert_stmt>self.goods<eq>[(<true> 'ok') (<true> 'ok')]<assert_stmt>self.fails<eq>[]<line_sep>rc3.resultCallback(<true> 'good')<assert_stmt>self.goods<eq>[(<true> 'ok') (<true> 'ok') (<true> 'good')]<assert_stmt>self.fails<eq>[]<block_end><def_stmt>testChainedFailCallbacksDoNotDuplicate self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc2.resultCallback(<false> 'bad')<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 'bad') (<false> 'bad')]<line_sep>rc3.resultCallback(<false> 'oops')<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 'bad') (<false> 'bad') (<false> 'oops')]<block_end><def_stmt>testChainedGoodCallbacksDoNotDuplicateOnFail self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc2.resultCallback(<true> 'ok')<assert_stmt>self.goods<eq>[(<true> 'ok') (<true> 'ok')]<assert_stmt>self.fails<eq>[]<line_sep>rc3.resultCallback(<false> 'bad')<assert_stmt>self.goods<eq>[(<true> 'ok') (<true> 'ok')]<assert_stmt>self.fails<eq>[(<false> 'bad')]<block_end><def_stmt>testChainedFailCallbacksDoNotDuplicateOnGood self<block_start>self.goods=[]<line_sep>self.fails=[]<line_sep>rc=ResultCallback(self._good self._fail)<line_sep>rc2=ResultCallback(self._good self._fail rc)<line_sep>rc3=ResultCallback(self._good self._fail rc2)<line_sep>rc2.resultCallback(<false> 'bad')<assert_stmt>self.goods<eq>[]<assert_stmt>self.fails<eq>[(<false> 'bad') (<false> 'bad')]<line_sep>rc3.resultCallback(<true> 'yippee')<assert_stmt>self.goods<eq>[(<true> 'yippee')]<assert_stmt>self.fails<eq>[(<false> 'bad') (<false> 'bad')]<block_end><block_end>
<import_from_stmt>.pykeyvi_autowrap_conversion_providers *<import_from_stmt>autowrap.ConversionProvider special_converters<def_stmt>register_converters <block_start>special_converters.append(MatchIteratorPairConverter())<block_end>
<import_from_stmt>.sender TelemetrySender<line_sep>
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module includes classical defense algorithms in defencing adversarial examples and enhancing model security and trustworthy. """<import_from_stmt>.adversarial_defense AdversarialDefense<import_from_stmt>.adversarial_defense AdversarialDefenseWithAttacks<import_from_stmt>.adversarial_defense EnsembleAdversarialDefense<import_from_stmt>.natural_adversarial_defense NaturalAdversarialDefense<import_from_stmt>.projected_adversarial_defense ProjectedAdversarialDefense<line_sep>__all__=['AdversarialDefense' 'AdversarialDefenseWithAttacks' 'NaturalAdversarialDefense' 'ProjectedAdversarialDefense' 'EnsembleAdversarialDefense']<line_sep>
expected_output={"service_instance":{501:{"interfaces":{"TenGigabitEthernet0/3/0":{"state":"Up" "type":"Static"} "TenGigabitEthernet0/1/0":{"state":"Up" "type":"Static"} }} 502:{"interfaces":{"TenGigabitEthernet0/3/0":{"state":"Up" "type":"Static"}}} }}<line_sep>
<import_from_stmt>setuptools setup find_packages<line_sep>setup(name="d4rl_pybullet" version="0.1" license="MIT" description="Datasets for data-driven deep reinforcement learning with Pybullet environments" url="https://github.com/takuseno/d4rl-pybullet" install_requires=["gym" "pybullet" "h5py"] packages=["d4rl_pybullet"])<line_sep>
# Copyright 2021 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> annotations<import_stmt>logging<import_from_stmt>typing Any<import_from_stmt>typing Dict<import_from_stmt>typing Optional<import_from_stmt>repokid CONFIG<import_from_stmt>repokid.types RepokidConfig<line_sep>logger=logging.getLogger("repokid")<class_stmt>RepokidPlugin<block_start><def_stmt>__init__ self config:Optional[RepokidConfig]=<none><block_start><if_stmt>config<block_start>self.config=config<block_end><else_stmt><block_start>self.config=CONFIG<block_end><block_end><block_end><class_stmt>M_A(type)<block_start><pass><block_end><class_stmt>Singleton(M_A)<block_start>_instances:Dict[str Singleton]={}<def_stmt>__call__ cls *args:Any **kwargs:Any<arrow>Singleton<block_start><if_stmt>cls.__name__<not><in>cls._instances<block_start>cls._instances[cls.__name__]=super(Singleton cls).__call__(*args **kwargs)<block_end><return>cls._instances[cls.__name__]<block_end><block_end>
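# A hedged usage sketch for the Singleton metaclass above: classes created with it
# return one shared instance, cached under the class name. The Settings class and
# its argument are illustrative, not part of repokid.
class Settings(metaclass=Singleton):
    def __init__(self, value: int = 0) -> None:
        self.value = value

first = Settings(1)
second = Settings(2)   # constructor arguments after the first call are ignored
assert first is second and first.value == 1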
# This file is generated by profile_coder.py. DO NOT EDIT! <import_from_future_stmt> annotations<import_from_stmt>gaphor.core.modeling.properties association attribute relation_many relation_one <import_from_stmt>gaphor.UML Actor Package<class_stmt>C4Container(Package)<block_start>description:attribute[str]<line_sep>location:attribute[str]<line_sep>ownerContainer:relation_one[C4Container]<line_sep>owningContainer:relation_many[C4Container]<line_sep>technology:attribute[str]<line_sep>type:attribute[str]<block_end><class_stmt>C4Database(C4Container)<block_start><pass><block_end><class_stmt>C4Person(Actor)<block_start>description:attribute[str]<line_sep>location:attribute[str]<block_end>C4Container.description=attribute("description" str)<line_sep>C4Container.location=attribute("location" str)<line_sep>C4Container.ownerContainer=association("ownerContainer" C4Container upper=1 opposite="owningContainer")<line_sep>C4Container.owningContainer=association("owningContainer" C4Container composite=<true> opposite="ownerContainer")<line_sep>C4Container.technology=attribute("technology" str)<line_sep>C4Container.type=attribute("type" str)<line_sep>C4Person.description=attribute("description" str)<line_sep>C4Person.location=attribute("location" str)<line_sep>C4Container.namespace.subsets.add(C4Container.ownerContainer)# type: ignore[attr-defined] C4Container.ownedMember.subsets.add(C4Container.owningContainer)# type: ignore[attr-defined]
# Copyright 2021 The Layout Parser team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>typing List Union Dict Dict Any Optional Tuple<import_stmt>numpy<as>np<import_from_stmt>PIL Image<def_stmt>cvt_coordinates_to_points coords:Tuple[float float float float]<arrow>np.ndarray<block_start>x_1,y_1,x_2,y_2=coords<line_sep><return>np.array([[x_1 y_1] # Top Left [x_2 y_1] # Top Right [x_2 y_2] # Bottom Right [x_1 y_2] # Bottom Left ])<block_end><def_stmt>cvt_points_to_coordinates points:np.ndarray<arrow>Tuple[float float float float]<block_start>x_1=points[: 0].min()<line_sep>y_1=points[: 1].min()<line_sep>x_2=points[: 0].max()<line_sep>y_2=points[: 1].max()<line_sep><return>(x_1 y_1 x_2 y_2)<block_end><def_stmt>perspective_transformation M:np.ndarray points:np.ndarray is_inv:bool=<false><arrow>np.ndarray<block_start><if_stmt>is_inv<block_start>M=np.linalg.inv(M)<block_end>src_mid=np.hstack([points np.ones((points.shape[0] 1))]).T# 3x4 dst_mid=np.matmul(M src_mid)<line_sep>dst=(dst_mid/dst_mid[-1]).T[: :2]# 4x2 <return>dst<block_end><def_stmt>vertice_in_polygon vertice:np.ndarray polygon_points:np.ndarray<arrow>bool# The polygon_points are ordered clockwise # The implementation is based on the algorithm from # https://demonstrations.wolfram.com/AnEfficientTestForAPointToBeInAConvexPolygon/ <block_start>points=polygon_points-vertice# shift the coordinates origin to the vertice edges=np.append(points points[0:1 :] axis=0)<line_sep><return>all([np.linalg.det([e1 e2])<ge>0<for>e1,e2 zip(edges edges[1:])])<line_sep># If the points are ordered clockwise, the det should <=0 <block_end><def_stmt>polygon_area xs:np.ndarray ys:np.ndarray<arrow>float<block_start>"""Calculate the area of polygons using `Shoelace Formula <https://en.wikipedia.org/wiki/Shoelace_formula>`_. Args: xs (`np.ndarray`): The x coordinates of the points ys (`np.ndarray`): The y coordinates of the points """<line_sep># Refer to: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates # The formula is equivalent to the original one indicated in the wikipedia # page. <return>0.5<times>np.abs(np.dot(xs np.roll(ys 1))-np.dot(ys np.roll(xs 1)))<block_end>
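# A small hedged check of the helpers above: a unit square round-trips between the
# coordinate and point representations, and the shoelace formula gives area 1.
quad = cvt_coordinates_to_points((0, 0, 1, 1))   # 4x2 array of corner points
assert cvt_points_to_coordinates(quad) == (0, 0, 1, 1)
assert polygon_area(quad[:, 0], quad[:, 1]) == 1.0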
<import_stmt>sexpr<import_stmt>sys<import_stmt>os<import_from_stmt>pprint pprint<import_from_stmt>subprocess Popen PIPE<line_sep>fname=sys.argv[1]<line_sep>name=os.path.basename(fname).split('.')[0]<line_sep>file=open(fname)<line_sep>source=""<for_stmt>line file.readlines()<block_start><if_stmt>line[0]<ne>"#"<block_start>source<augadd>line<block_end><block_end>sexpr.input(source)<line_sep>s=sexpr.parse()<while_stmt>len(s)<eq>1<block_start>s=s[0]<block_end>table={}<for_stmt>x s<block_start>table[x[0]]=x[1:]<block_end><class_stmt>Element()<block_start><def_stmt>__init__ self name<block_start>self.name=name<line_sep>self.cfg=[]<line_sep>self.inputs=[]<line_sep>self.outputs=[]<block_end><def_stmt>canelide self<block_start><if_stmt>len(self.cfg)<eq>0<block_start><if_stmt>len(self.inputs)<eq>0<and>len(self.outputs)<eq>1<block_start><return>self.outputs[0]<eq>self.name<block_end><elif_stmt>len(self.inputs)<eq>1<and>len(self.outputs)<eq>0<block_start><return>self.inputs[0]<eq>self.name<block_end><block_end><return><false><block_end><block_end><class_stmt>Primitive()<block_start><def_stmt>__init__ self sexpr<block_start>self.name=sexpr[1]<line_sep>#pprint(sexpr) input,output=Element("input") Element("output")<line_sep>self.elements=[input output]<line_sep>self.connections={}# (e0,outputpin,e1,inputpin) => true <for_stmt>i sexpr[4:]<block_start><if_stmt>i[0]<eq>"pin"<block_start><if_stmt>i[3]<eq>"input"<block_start>input.outputs.append(i[2])<line_sep>self.connections[("input" i[2] i[1] i[2])]=<true><block_end><else_stmt><block_start>output.inputs.append(i[2])<line_sep>self.connections[(i[1] i[2] "output" i[2])]=<true><block_end><block_end><elif_stmt>i[0]<eq>"element"<block_start>e=Element(i[1])<line_sep>self.elements.append(e)<for_stmt>ii i[2:]<block_start><if_stmt>isinstance(ii list)<block_start><if_stmt>ii[0]<eq>"pin"<block_start>getattr(e ii[2]+"s").append(ii[1])<block_end><elif_stmt>ii[0]<eq>"conn"<block_start><if_stmt>ii[3]<eq>"==>"<block_start>self.connections[(ii[1] ii[2] ii[4] ii[5])]=<true><block_end><else_stmt><block_start>self.connections[(ii[4] ii[5] ii[1] ii[2])]=<true><block_end><block_end><elif_stmt>ii[0]<eq>"cfg"<block_start>e.cfg=ii[1:]<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>save self<block_start>print("Saving %s"%self.name)<line_sep>p=Popen(["dot" "-Tpdf" "-o" "%s_%s.pdf"%(self.name name)] stdin=PIPE)<line_sep>f=p.stdin<def_stmt>write s<block_start>f.write(s)<if_stmt>self.name<eq>"PCIE_3_0"<block_start>sys.stdout.write(s)<block_end><block_end>write("digraph G {\n")<line_sep>write(" graph [rankdir = LR];\n")<line_sep>write(" node[shape=record];\n")<for_stmt>e self.elements<block_start><def_stmt>namefmt xs<block_start><return>"|".join(["<%s>%s"%(x x)<for>x xs])<block_end><def_stmt>quote x<block_start><return>""" \\"%s\\" """%x.replace("<" "\\<").replace(">" "\\>").replace("|" "\\|")<block_end>cfgstring='\\n'.join([quote(x)<for>x e.cfg])<if_stmt>e.canelide()<block_start>write(""" %s[label="<%s>%s"];\n"""%(e.name e.name e.name))<block_end><else_stmt><block_start>write(""" %s[label="{ {%s} | %s\\n%s | {%s} }"];\n"""%(e.name namefmt(e.inputs) e.name cfgstring namefmt(e.outputs)))<block_end><block_end><for_stmt>t self.connections.keys()<block_start>write(" %s:%s -> %s:%s;\n"%t)<block_end>write("}")<line_sep>f.close()<if_stmt>p.wait()<ne>0<block_start><raise><block_end><block_end><block_end><for_stmt>i 
table["primitive_defs"]<block_start><if_stmt>i[0]<eq>"primitive_def"<block_start>p=Primitive(i)<try_stmt><block_start>p.save()<block_end><except_stmt><block_start>print("Failed to save %s"%p.name)<block_end><block_end><block_end>
<import_from_stmt>tkinter ttk<import_stmt>tkinter<import_stmt>logging<import_stmt>gevent<import_stmt>click<import_stmt>sys<import_from_stmt>microraiden Session<import_from_stmt>microraiden utils<line_sep>log=logging.getLogger(__name__)<class_stmt>ETHTickerClient(ttk.Frame)<block_start><def_stmt>__init__ self sender_privkey:str session:Session=<none> poll_interval:float=5<arrow><none><block_start>self.poll_interval=poll_interval<line_sep>self.root=tkinter.Tk()<line_sep>ttk.Frame.__init__(self self.root)<line_sep>self.root.title('µRaiden ETH Ticker')<line_sep>self.root.protocol('WM_DELETE_WINDOW' self.close)<line_sep>self.pack()<line_sep>self.pricevar=tkinter.StringVar(value='0.00 USD')<line_sep>ttk.Label(self textvariable=self.pricevar font=('Helvetica' '72')).pack()<if_stmt>session<is><none><block_start>self.session=Session(private_key=sender_privkey close_channel_on_exit=<true> endpoint_url='http://localhost:5000')<block_end><else_stmt><block_start>self.session=session<block_end>self.active_query=<false><line_sep>self.running=<false><block_end><def_stmt>run self<block_start>self.running=<true><line_sep>self.root.after(0 self.query_price)<line_sep>self.root.mainloop()<block_end><def_stmt>query_price self<block_start><if_stmt><not>self.running<block_start><return><block_end>self.active_query=<true><line_sep>response=self.session.get('http://localhost:5000/ETHUSD')<if_stmt>response<block_start>price=float(response.json()['last_price'])<line_sep>log.info('New price received: {:.2f} USD'.format(price))<line_sep>self.pricevar.set('{:.2f} USD'.format(price))<block_end><else_stmt><block_start>log.warning('No response.')<block_end><if_stmt>self.running<block_start>self.root.after(int(self.poll_interval<times>1000) self.query_price)<block_end>self.active_query=<false><block_end><def_stmt>close self<block_start>log.info('Shutting down gracefully.')<line_sep>self.running=<false><line_sep>self.root.destroy()<line_sep># Sloppy handling of thread joining but works for this small demo. <while_stmt>self.active_query<block_start>gevent.sleep(1)<block_end>self.session.close()<block_end><block_end>@click.command()@click.option('--private-key' required=<true> help='Path to private key file of the proxy' type=click.Path(exists=<true> dir_okay=<false> resolve_path=<true>))@click.option('--private-key-password-file' default=<none> help='Path to file containing password for the JSON-encoded private key' type=click.Path(exists=<true> dir_okay=<false> resolve_path=<true>))<def_stmt>main private_key private_key_password_file <block_start>private_key=utils.get_private_key(private_key private_key_password_file)<if_stmt>private_key<is><none><block_start>sys.exit(1)<block_end>ticker=<none><try_stmt><block_start>ticker=ETHTickerClient(private_key)<line_sep>ticker.run()<block_end><except_stmt>KeyboardInterrupt<block_start><if_stmt>ticker<block_start>ticker.close()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>gevent monkey<line_sep>monkey.patch_all()<line_sep>logging.basicConfig(level=logging.INFO)<line_sep>main()<block_end>
"""OpenAPI core contrib flask responses module"""<import_from_stmt>werkzeug.datastructures Headers<import_from_stmt>openapi_core.validation.response.datatypes OpenAPIResponse<class_stmt>FlaskOpenAPIResponseFactory<block_start>@classmethod<def_stmt>create cls response<block_start>header=Headers(response.headers)<line_sep><return>OpenAPIResponse(data=response.data status_code=response._status_code headers=header mimetype=response.mimetype )<block_end><block_end>
<import_stmt>logging<import_stmt>requests<import_stmt>json<import_from_stmt>celery shared_task<import_from_stmt>system.models Users<import_from_stmt>seal settings<line_sep>logger=logging.getLogger('system_celery')<line_sep>@shared_task<def_stmt>system_demo one## Because timezone support is enabled, Django stores UTC in the database and converts it to UTC+8 when accessed; Celery recognizes the timezone automatically <block_start><import_from_stmt>django.utils timezone<for_stmt>i Users.objects.all()<block_start>print(i.last_login)## Reading the field directly gives unconverted UTC time; take care if you need to process it print(timezone.localtime(i.last_login).strftime("%Y-%m-%d %H:%M:%S"))## Format the time as normal local time <block_end>print("celery scheduled task demo, runs once per minute" one)<line_sep><return><block_end>@shared_task<def_stmt>ding_ding_to_info content type=<none><block_start>""" DingTalk webhook interface, call asynchronously: ding_ding_to_info.delay("alert 1") :param content: text content :param type: :return: """<line_sep>web_hook_url=getattr(settings 'web_hook_url') <line_sep>headers={'content-type':'application/json'}<line_sep>data={"msgtype":"text" "text":{"content":content} "at":{"atMobiles":[] }}<try_stmt><block_start>r=requests.post(web_hook_url[0] data=json.dumps(data) headers=headers)<line_sep>print(r.text)<block_end><except_stmt>Exception<as>E<block_start>logger.error(E)<block_end><block_end>
"""Contains DeepSpeech2 model."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<import_stmt>time<import_stmt>logging<import_stmt>gzip<import_stmt>copy<import_stmt>numpy<as>np<import_stmt>inspect<import_from_stmt>utils.decoder.swig_wrapper Scorer<import_from_stmt>utils.decoder.swig_wrapper ctc_greedy_decoder<import_from_stmt>utils.decoder.swig_wrapper ctc_beam_search_decoder_batch<class_stmt>LM_decoder(object)<block_start><def_stmt>__init__ self beam_alpha beam_beta language_model_path vocab_list<block_start>"""Initialize the external scorer. :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param language_model_path: Filepath for language model. If it is empty, the external scorer will be set to None, and the decoding method will be pure beam search without scorer. :type language_model_path: basestring|None :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list """<if_stmt>language_model_path<ne>''<block_start>print("begin to initialize the external scorer "<concat>"for decoding")<line_sep>self._ext_scorer=Scorer(beam_alpha beam_beta language_model_path vocab_list)<line_sep>lm_char_based=self._ext_scorer.is_character_based()<line_sep>lm_max_order=self._ext_scorer.get_max_order()<line_sep>lm_dict_size=self._ext_scorer.get_dict_size()<line_sep>print("language model: "<concat>"is_character_based = %d,"%lm_char_based+" max_order = %d,"%lm_max_order+" dict_size = %d"%lm_dict_size)<line_sep>print("end initializing scorer")<block_end><else_stmt><block_start>self._ext_scorer=<none><line_sep>print("no language model provided, "<concat>"decoding by pure beam search without scorer.")<block_end><block_end><def_stmt>decode_batch_beam_search self probs_split beam_alpha beam_beta beam_size cutoff_prob cutoff_top_n vocab_list num_processes<block_start>"""Decode by beam search for a batch of probs matrix input. :param probs_split: List of 2-D probability matrix, and each consists of prob vectors for one speech utterancce. :param probs_split: List of matrix :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param beam_size: Width for Beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in vocabulary will be used in beam search, default 40. :type cutoff_top_n: int :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list :param num_processes: Number of processes (CPU) for decoder. :type num_processes: int :return: List of transcription texts. 
:rtype: List of basestring """<if_stmt>self._ext_scorer<ne><none><block_start>self._ext_scorer.reset_params(beam_alpha beam_beta)<block_end># beam search decode num_processes=min(num_processes np.shape(probs_split)[0])<line_sep>beam_search_results=ctc_beam_search_decoder_batch(probs_split=probs_split vocabulary=vocab_list beam_size=beam_size num_processes=num_processes ext_scoring_func=self._ext_scorer cutoff_prob=cutoff_prob cutoff_top_n=cutoff_top_n)<line_sep>results=[result[0][1]<for>result beam_search_results]<line_sep><return>results<block_end><def_stmt>_adapt_feeding_dict self feeding_dict<block_start>"""Adapt feeding dict according to network struct. To remove impacts from padding part, we add scale_sub_region layer and sub_seq layer. For sub_seq layer, 'sequence_offset' and 'sequence_length' fields are appended. For each scale_sub_region layer 'convN_index_range' field is appended. :param feeding_dict: Feeding is a map of field name and tuple index of the data that reader returns. :type feeding_dict: dict|list :return: Adapted feeding dict. :rtype: dict|list """<line_sep>adapted_feeding_dict=copy.deepcopy(feeding_dict)<if_stmt>isinstance(feeding_dict dict)<block_start>adapted_feeding_dict["sequence_offset"]=len(adapted_feeding_dict)<line_sep>adapted_feeding_dict["sequence_length"]=len(adapted_feeding_dict)<for_stmt>i xrange(self._num_conv_layers)<block_start>adapted_feeding_dict["conv%d_index_range"%i]=len(adapted_feeding_dict)<block_end><block_end><elif_stmt>isinstance(feeding_dict list)<block_start>adapted_feeding_dict.append("sequence_offset")<line_sep>adapted_feeding_dict.append("sequence_length")<for_stmt>i xrange(self._num_conv_layers)<block_start>adapted_feeding_dict.append("conv%d_index_range"%i)<block_end><block_end><else_stmt><block_start><raise>ValueError("Type of feeding_dict is %s, not supported."%type(feeding_dict))<block_end><return>adapted_feeding_dict<block_end><def_stmt>_adapt_data self data<block_start>"""Adapt data according to network struct. For each convolution layer in the conv_group, to remove impacts from padding data, we can multiply zero to the padding part of the outputs of each batch normalization layer. We add a scale_sub_region layer after each batch normalization layer to reset the padding data. For rnn layers, to remove impacts from padding data, we can truncate the padding part before output data feeded into the first rnn layer. We use sub_seq layer to achieve this. :param data: Data from data_provider. :type data: list|function :return: Adapted data. 
:rtype: list|function """<def_stmt>adapt_instance instance<block_start><if_stmt>len(instance)<l>2<or>len(instance)<g>3<block_start><raise>ValueError("Size of instance should be 2 or 3.")<block_end>padded_audio=instance[0]<line_sep>text=instance[1]<line_sep># no padding part <if_stmt>len(instance)<eq>2<block_start>audio_len=padded_audio.shape[1]<block_end><else_stmt><block_start>audio_len=instance[2]<block_end>adapted_instance=[padded_audio text]<line_sep># Stride size for conv0 is (3, 2) # Stride size for conv1 to convN is (1, 2) # Same as the network, hard-coded here padded_conv0_h=(padded_audio.shape[0]-1)<floordiv>2+1<line_sep>padded_conv0_w=(padded_audio.shape[1]-1)<floordiv>3+1<line_sep>valid_w=(audio_len-1)<floordiv>3+1<line_sep>adapted_instance<augadd>[[0] # sequence offset, always 0 [valid_w] # valid sequence length # Index ranges for channel, height and width # Please refer scale_sub_region layer to see details [1 32 1 padded_conv0_h valid_w+1 padded_conv0_w]]<line_sep>pre_padded_h=padded_conv0_h<for_stmt>i xrange(self._num_conv_layers-1)<block_start>padded_h=(pre_padded_h-1)<floordiv>2+1<line_sep>pre_padded_h=padded_h<line_sep>adapted_instance<augadd>[[1 32 1 padded_h valid_w+1 padded_conv0_w]]<block_end><return>adapted_instance<block_end><if_stmt>isinstance(data list)<block_start><return>map(adapt_instance data)<block_end><elif_stmt>inspect.isgeneratorfunction(data)<block_start><def_stmt>adapted_reader <block_start><for_stmt>instance data()<block_start><yield>map(adapt_instance instance)<block_end><block_end><return>adapted_reader<block_end><else_stmt><block_start><raise>ValueError("Type of data is %s, not supported."%type(data))<block_end><block_end><def_stmt>_create_parameters self model_path=<none><block_start>"""Load or create model parameters."""<if_stmt>model_path<is><none><block_start>self._parameters=paddle.parameters.create(self._loss)<block_end><else_stmt><block_start>self._parameters=paddle.parameters.Parameters.from_tar(gzip.open(model_path))<block_end><block_end><def_stmt>_create_network self vocab_size num_conv_layers num_rnn_layers rnn_layer_size use_gru share_rnn_weights<block_start>"""Create data layers and model network."""<line_sep># paddle.data_type.dense_array is used for variable batch input. # The size 161 * 161 is only an placeholder value and the real shape # of input batch data will be induced during training. audio_data=paddle.layer.data(name="audio_spectrogram" type=paddle.data_type.dense_array(161<times>161))<line_sep>text_data=paddle.layer.data(name="transcript_text" type=paddle.data_type.integer_value_sequence(vocab_size))<line_sep>seq_offset_data=paddle.layer.data(name='sequence_offset' type=paddle.data_type.integer_value_sequence(1))<line_sep>seq_len_data=paddle.layer.data(name='sequence_length' type=paddle.data_type.integer_value_sequence(1))<line_sep>index_range_datas=[]<for_stmt>i xrange(num_rnn_layers)<block_start>index_range_datas.append(paddle.layer.data(name='conv%d_index_range'%i type=paddle.data_type.dense_vector(6)))<block_end>self._log_probs,self._loss=deep_speech_v2_network(audio_data=audio_data text_data=text_data seq_offset_data=seq_offset_data seq_len_data=seq_len_data index_range_datas=index_range_datas dict_size=vocab_size num_conv_layers=num_conv_layers num_rnn_layers=num_rnn_layers rnn_size=rnn_layer_size use_gru=use_gru share_rnn_weights=share_rnn_weights)<block_end><block_end>
<import_from_stmt>hashlib sha256<import_from_stmt>remerkleable.byte_arrays Bytes32<import_from_stmt>typing Union<line_sep>ZERO_BYTES32=b'\x00'<times>32<def_stmt>hash x:Union[bytes bytearray memoryview]<arrow>Bytes32<block_start><return>Bytes32(sha256(x).digest())<block_end>
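A minimal usage sketch for the helper above, combining two zero leaves the way a binary Merkle tree combines children; ZERO_BYTES32 is redefined here so the snippet stands alone.

from hashlib import sha256

ZERO_BYTES32 = b'\x00' * 32
parent = sha256(ZERO_BYTES32 + ZERO_BYTES32).digest()   # 32-byte digest of the 64-byte concatenation
print(parent.hex())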
# SPDX-License-Identifier: MIT # Copyright © 2020 <NAME> """Functions to fix various known issues with exported TFJS models"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>base64<import_from_stmt>typing Any Dict List Optional<import_stmt>tfjs_graph_converter.common<as>common<def_stmt>_find_if_has_key obj:Dict[str Any] key:str of_type:Optional[type]=<none><arrow>List[Any]<block_start>""" Recursively find all objects with a given key in a dictionary Args: obj: Dictionary to search key: Key to find of_type: [optional] Type of the referenced item Returns: List of all objects that contain an item with the given key and matching type """<def_stmt>get_children item:Any<arrow>List[Any]<block_start><return>[val<for>val item.values()<if>isinstance(val dict)]<block_end>found=[]<line_sep>stack=get_children(obj)<while_stmt>len(stack)<g>0<block_start>item=stack.pop()<if_stmt>key<in>item<and>(of_type<is><none><or>isinstance(item[key] of_type))<block_start>found.append(item)<block_end>stack.extend(get_children(item))<block_end><return>found<block_end><def_stmt>_convert_string_attrs node:Dict[str Any]<arrow><none><block_start>""" Deep search string attributes (labelled "s" in GraphDef proto) and convert ascii code lists to base64-encoded strings if necessary """<line_sep>attr_key=common.TFJS_NODE_ATTR_KEY<line_sep>str_key=common.TFJS_ATTR_STRING_VALUE_KEY<line_sep># some layers (e.g. PReLU) don't contain the `attr` key, # so test for its presence attrs:list=[]<if_stmt>attr_key<in>node<block_start>attrs=_find_if_has_key(node[attr_key] key=str_key of_type=list)<block_end><for_stmt>attr attrs<block_start>array=attr[str_key]<line_sep># check if conversion is actually necessary <if_stmt>(len(array)<g>0)<and>isinstance(array list)<and>isinstance(array[0] int)<block_start>string=''.join(map(chr array))<line_sep>binary=string.encode('utf8')<line_sep>attr[str_key]=base64.encodebytes(binary)<block_end><elif_stmt>len(array)<eq>0<block_start>attr[str_key]=<none><block_end><block_end><block_end><def_stmt>_fix_dilation_attrs node:Dict[str Any]<arrow><none><block_start>""" Search dilations-attribute and convert misaligned dilation rates if necessary see https://github.com/patlevin/tfjs-to-tf/issues/1 """<line_sep>path=['attr' 'dilations' 'list']<line_sep>values=node<line_sep>found=<true><for_stmt>key path<block_start><if_stmt>key<in>values<block_start>values=values[key]<block_end><else_stmt><block_start>found=<false><line_sep><break><block_end><block_end># if dilations are present, they're stored in 'values' now ints=common.TFJS_ATTR_INT_VALUE_KEY<if_stmt>found<and>ints<in>values<and>isinstance(values[ints] list)<block_start>value=values[ints]<if_stmt>len(value)<ne>4# must be NCHW-formatted 4D tensor or else TF can't handle it <block_start><raise>ValueError("Unsupported 'dilations'-attribute in node "<concat>f'{node[common.TFJS_NAME_KEY]}')<block_end># check for [>1,>1,1,1], which is likely a mistranslated [1,>1,>1,1] <if_stmt>int(value[0] 10)<g>1<block_start>values[ints]=['1' value[0] value[1] '1']<block_end><block_end><block_end><def_stmt>fix_node_attributes message_dict:Dict[str Any]<arrow>Dict[str Any]<block_start>""" Fix various known issues found "in the wild": • Node attributes in deserialised JSON may contain strings as lists of ascii codes when the TF GraphDef proto expects base64 encoded strings • 'dilation' attributes may be misaligned in a way unsupported by TF Further fixes will be 
added as issues are reported. Args: message_dict: Graph model formatted as parsed JSON dictionary Returns: Updated message dictionary with fixes applied if necessary """<if_stmt>common.TFJS_NODE_KEY<in>message_dict<block_start>nodes=message_dict[common.TFJS_NODE_KEY]<for_stmt>node nodes<block_start>_convert_string_attrs(node)<line_sep>_fix_dilation_attrs(node)<block_end><block_end><return>message_dict<block_end>
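A standalone sketch of the string-attribute repair that _convert_string_attrs performs: some TFJS exports store a GraphDef string attribute as a list of ascii codes, while the proto expects base64-encoded bytes. The node attribute below is invented.

import base64

attr = {"s": [83, 65, 77, 69]}                       # ascii codes for "SAME"
decoded = "".join(map(chr, attr["s"]))
attr["s"] = base64.encodebytes(decoded.encode("utf8"))
print(attr["s"])                                     # b'U0FNRQ==\n'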
""" @author: Heerozh (<NAME>) @copyright: Copyright 2019-2020, Heerozh. All rights reserved. @license: Apache 2.0 @email: <EMAIL> """<import_stmt>math<def_stmt>sign x<block_start><return>math.copysign(1 x)<block_end><class_stmt>PriceTracker<block_start><def_stmt>__init__ self current_price recorder=max<block_start>self.last_price=current_price<line_sep>self.recorder=recorder<line_sep>self.recorded_price=current_price<line_sep>self.tracking_position=<none><block_end><def_stmt>update_price self last_price<block_start>self.recorded_price=self.recorder(self.recorded_price last_price)<line_sep>self.last_price=last_price<block_end><def_stmt>process_split self inverse_ratio:float<block_start>self.recorded_price<augdiv>inverse_ratio<block_end><block_end># ----------------------------------------------------------------------------- <class_stmt>StopTracker(PriceTracker)<block_start><def_stmt>__init__ self current_price stop_price callback<block_start>super().__init__(current_price <lambda>_ x:x)<line_sep>self._stop_price=stop_price<line_sep>self.stop_loss=stop_price<l>current_price<line_sep>self.callback=callback<block_end>@property<def_stmt>stop_price self<block_start><return>self._stop_price<block_end><def_stmt>fire self *args<block_start><if_stmt>callable(self.callback)<block_start><return>self.callback(*args)<block_end><else_stmt><block_start><return>self.callback<block_end><block_end><def_stmt>check_trigger self *args<block_start><if_stmt>self.stop_loss<block_start><if_stmt>self.last_price<le>self.stop_price<block_start><return>self.fire(*args)<block_end><block_end><else_stmt><block_start><if_stmt>self.last_price<ge>self.stop_price<block_start><return>self.fire(*args)<block_end><block_end><return><false><block_end><block_end><class_stmt>StopModel<block_start><def_stmt>__init__ self ratio:float callback=<none><block_start>self.ratio=ratio<line_sep>self.callback=callback<block_end><def_stmt>new_tracker self current_price inverse<block_start><if_stmt>inverse<block_start>stop_price=current_price<times>(1-self.ratio)<block_end><else_stmt><block_start>stop_price=current_price<times>(1+self.ratio)<block_end><return>StopTracker(current_price stop_price self.callback)<block_end><block_end># ----------------------------------------------------------------------------- <class_stmt>TrailingStopTracker(StopTracker)<block_start><def_stmt>__init__ self current_price ratio callback<block_start>self.ratio=ratio<line_sep>stop_price=current_price<times>(1+self.ratio)<line_sep>StopTracker.__init__(self current_price stop_price callback=callback)<line_sep>PriceTracker.__init__(self current_price recorder=max<if>ratio<l>0<else>min)<block_end>@property<def_stmt>stop_price self<block_start><return>self.recorded_price<times>(1+self.ratio)<block_end><block_end><class_stmt>TrailingStopModel(StopModel)<block_start>""" Unlike trailing stop order, the ratio in this model is relative to the highest / lowest price, so -0.1 means stop price is 90% of the highest price from now to the future; 0.1 means stop price is 110% of the lowest price from now to the future. 
"""<def_stmt>new_tracker self current_price inverse<block_start>ratio=-self.ratio<if>inverse<else>self.ratio<line_sep><return>TrailingStopTracker(current_price ratio self.callback)<block_end><block_end># ----------------------------------------------------------------------------- <class_stmt>DecayTrailingStopTracker(TrailingStopTracker)<block_start><def_stmt>__init__ self current_price ratio target decay_rate max_decay callback<block_start>self.initial_ratio=ratio<line_sep>self.max_decay=max_decay<line_sep>self.decay_rate=decay_rate<line_sep>self.target=target<line_sep>super().__init__(current_price ratio callback)<block_end>@property<def_stmt>current self<block_start><raise>NotImplementedError("abstractmethod")<block_end>@property<def_stmt>stop_price self<block_start>decay=max(self.decay_rate<power>(self.current/self.target) self.max_decay)<line_sep>self.ratio=self.initial_ratio<times>decay<line_sep><return>self.recorded_price<times>(1+self.ratio)<block_end><block_end><class_stmt>PnLDecayTrailingStopTracker(DecayTrailingStopTracker)<block_start>@property<def_stmt>current self<block_start>pos=self.tracking_position<line_sep>pnl=(self.recorded_price/pos.average_price-1)<times>sign(pos.shares)<line_sep>pnl=max(pnl 0)<if>self.target<g>0<else>min(pnl 0)<line_sep><return>pnl<block_end><block_end><class_stmt>PnLDecayTrailingStopModel(StopModel)<block_start>""" Exponential decay to the stop ratio: `ratio * decay_rate ^ (PnL% / PnL_target%)`. If it's stop gain model, `PnL_target` should be Loss Target (negative). So, the lower the `ratio` when PnL% approaches the target, and if PnL% exceeds PnL_target%, any small opposite changes will trigger stop. """<def_stmt>__init__ self ratio:float pnl_target:float callback=<none> decay_rate=0.05 max_decay=0<block_start>super().__init__(ratio callback)<line_sep>self.decay_rate=decay_rate<line_sep>self.pnl_target=pnl_target<line_sep>self.max_decay=max_decay<block_end><def_stmt>new_tracker self current_price inverse<block_start>ratio=-self.ratio<if>inverse<else>self.ratio<line_sep><return>PnLDecayTrailingStopTracker(current_price ratio self.pnl_target self.decay_rate self.max_decay self.callback)<block_end><block_end><class_stmt>TimeDecayTrailingStopTracker(DecayTrailingStopTracker)<block_start>@property<def_stmt>current self<block_start>pos=self.tracking_position<line_sep><return>pos.period<block_end><block_end><class_stmt>TimeDecayTrailingStopModel(StopModel)<block_start><def_stmt>__init__ self ratio:float period_target:'pd.Timedelta' callback=<none> decay_rate=0.05 max_decay=0<block_start>super().__init__(ratio callback)<line_sep>self.decay_rate=decay_rate<line_sep>self.period_target=period_target<line_sep>self.max_decay=max_decay<block_end><def_stmt>new_tracker self current_price inverse<block_start>ratio=-self.ratio<if>inverse<else>self.ratio<line_sep><return>TimeDecayTrailingStopTracker(current_price ratio self.period_target self.decay_rate self.max_decay self.callback)<block_end><block_end>
<import_from_stmt>conans ConanFile tools RunEnvironment<import_stmt>os logging<class_stmt>AndroidSdkToolsConanFile(ConanFile)<block_start>name="android-sdk-tools"<line_sep>version="4.0"<line_sep>user="aac-sdk"<line_sep>channel="stable"<line_sep>no_copy_source=<true><line_sep>exports_sources=["cmake-wrapper.cmd" "cmake-wrapper"]<line_sep>settings="os" "arch" "compiler" "build_type"<line_sep>requires=["zulu-openjdk/11.0.8"]<line_sep>options={"sdk_version":"ANY" "ndk_version":"ANY" "android_stl":["c++_shared" "c++_static"]}<line_sep>default_options={"sdk_version":"7302050" "ndk_version":"20.0.5594570" "android_stl":"c++_shared"}<line_sep>@staticmethod<def_stmt>chmod_plus_x filename<block_start><if_stmt>os.name<eq>"posix"<block_start>os.chmod(filename os.stat(filename).st_mode|0o111)<block_end><block_end><def_stmt>fix_permissions self root_folder<block_start><if_stmt>os.name<ne>"posix"<block_start><return><block_end><for_stmt>root,_,files os.walk(root_folder)<block_start><for_stmt>filename files<block_start>filename=os.path.join(root filename)<with_stmt>open(filename "rb")<as>f<block_start>sig=f.read(4)<if_stmt>type(sig)<is>str<block_start>sig=[ord(s)<for>s sig]<block_end><else_stmt><block_start>sig=[s<for>s sig]<block_end><if_stmt>len(sig)<g>2<and>sig[0]<eq>0x23<and>sig[1]<eq>0x21<block_start>logging.info(f"chmod on script file: {filename}")<line_sep>self.chmod_plus_x(filename)<block_end><elif_stmt>sig<eq>[0x7F 0x45 0x4C 0x46]<block_start>logging.info(f"chmod on ELF file: {filename}")<line_sep>self.chmod_plus_x(filename)<block_end><elif_stmt>sig<eq>[0xCA 0xFE 0xBA 0xBE]<or>sig<eq>[0xBE 0xBA 0xFE 0xCA]<or>sig<eq>[0xFE 0xED 0xFA 0xCF]<or>sig<eq>[0xCF 0xFA 0xED 0xFE]<or>sig<eq>[0xFE 0xEF 0xFA 0xCE]<or>sig<eq>[0xCE 0xFA 0xED 0xFE]<block_start>logging.info(f"chmod on Mach-O file: {filename}")<line_sep>self.chmod_plus_x(filename)<block_end><block_end><block_end><block_end><block_end>@property<def_stmt>_build_os self<block_start>settings_build=getattr(self "settings_build" <none>)<line_sep><return>settings_build.os<if>settings_build<else>self.settings.os<block_end><def_stmt>source self<block_start><if_stmt>self._build_os<eq>"Macos"<block_start>package=f"commandlinetools-mac-{self.options.sdk_version}_latest"<block_end><elif_stmt>self._build_os<eq>"Linux"<block_start>package=f"commandlinetools-linux-{self.options.sdk_version}_latest"<block_end><else_stmt><block_start><raise>Exception(f"settings.os not supported: {self._build_os}")<block_end>#download the command line tools package tools.get(f"https://dl.google.com/android/repository/{package}.zip")<block_end><def_stmt>package self<block_start>self.copy("*" src="cmdline-tools" dst="cmdline-tools")<line_sep>self.copy("cmake-wrapper.cmd")<line_sep>self.copy("cmake-wrapper")<line_sep># fix executable permisions for command line tools self.fix_permissions(self.package_folder)<line_sep># check the license -- needs to be accepted once sdk_manager=os.path.join(self.package_folder "cmdline-tools" "bin" "sdkmanager")<line_sep>auto_accept_licenses=os.getenv("BUILDER_ACCEPT_LICENSES" "False").lower()<eq>"true"<line_sep>env_run=RunEnvironment(self)<with_stmt>tools.environment_append(env_run.vars)# check the license -- needs to be accepted once <block_start>check_yes_opt=f"yes | {sdk_manager}"<if>auto_accept_licenses<else>sdk_manager<line_sep>self.run(f"{check_yes_opt} --sdk_root={self.package_folder} --licenses" run_environment=<true>)<line_sep># install android sdk self.run(f"{sdk_manager} --sdk_root={self.package_folder} 'platform-tools' 
'platforms;android-{self.settings_target.os.api_level}'" run_environment=<true>)<line_sep># install android ndk self.run(f"{sdk_manager} --sdk_root={self.package_folder} --install 'ndk;{self.options.ndk_version}'" run_environment=<true>)<block_end><block_end>@property<def_stmt>_platform self<block_start><return>{"Windows":"windows" "Macos":"darwin" "Linux":"linux"}.get(str(self._build_os))<block_end>@property<def_stmt>_android_abi self<block_start><return>{"x86":"x86" "x86_64":"x86_64" "armv7hf":"armeabi-v7a" "armv8":"arm64-v8a"}.get(str(self.settings_target.arch))<block_end>@property<def_stmt>_llvm_triplet self<block_start>arch={'armv7hf':'arm' 'armv8':'aarch64' 'x86':'i686' 'x86_64':'x86_64'}.get(str(self.settings_target.arch))<line_sep>abi='androideabi'<if>self.settings_target.arch<eq>'armv7hf'<else>'android'<line_sep><return>f"{arch}-linux-{abi}"<block_end>@property<def_stmt>_clang_triplet self<block_start>arch={'armv7hf':'armv7a' 'armv8':'aarch64' 'x86':'i686' 'x86_64':'x86_64'}.get(str(self.settings_target.arch))<line_sep>abi='androideabi'<if>self.settings_target.arch<eq>'armv7hf'<else>'android'<line_sep><return>f"{arch}-linux-{abi}"<block_end>@property<def_stmt>_sdk_home self<block_start><return>os.path.join(self.package_folder)<block_end>@property<def_stmt>_ndk_home self<block_start><return>os.path.join(self.package_folder "ndk" str(self.options.ndk_version))<block_end>@property<def_stmt>_ndk_root self<block_start><return>os.path.join(self._ndk_home "toolchains" "llvm" "prebuilt" f"{self._platform}-x86_64")<block_end><def_stmt>_tool_name self tool<block_start><if_stmt>'clang'<in>tool<block_start>suffix='.cmd'<if>self._build_os<eq>'Windows'<else>''<line_sep><return>f"{self._clang_triplet}{self.settings_target.os.api_level}-{tool}{suffix}"<block_end><else_stmt><block_start>suffix='.exe'<if>self._build_os<eq>'Windows'<else>''<line_sep><return>f"{self._llvm_triplet}-{tool}{suffix}"<block_end><block_end><def_stmt>_define_tool_var self name value<block_start>ndk_bin=os.path.join(self._ndk_root 'bin')<line_sep>path=os.path.join(ndk_bin self._tool_name(value))<line_sep>logging.info(f"Creating {name} environment variable: {path}")<line_sep><return>path<block_end><def_stmt>package_info self# set the android sdk environment variables <block_start>logging.info(f"Creating ANDROID_SDK_ROOT environment variable: {self._sdk_home}")<line_sep>self.env_info.ANDROID_SDK_ROOT=self._sdk_home<line_sep># test shall pass, so this runs also in the build as build requirement context # ndk-build: https://developer.android.com/ndk/guides/ndk-build self.env_info.PATH.append(self._ndk_home)<line_sep># You should use the ANDROID_NDK_ROOT environment variable to indicate where the NDK is located. # That's what most NDK-related scripts use (inside the NDK, and outside of it). # https://groups.google.com/g/android-ndk/c/qZjhOaynHXc logging.info(f"Creating ANDROID_NDK_ROOT environment variable: {self._ndk_home}")<line_sep>self.env_info.ANDROID_NDK_ROOT=self._ndk_home<line_sep># Gradle is complaining about the ANDROID_NDK_HOME environment variable: # WARNING: Support for ANDROID_NDK_HOME is deprecated and will be removed in the future. # Use android.ndkVersion in build.gradle instead. 
# logging.info(f"Creating ANDROID_NDK_HOME environment variable: {self._ndk_home}") # self.env_info.ANDROID_NDK_HOME = self._ndk_home logging.info(f"Creating NDK_ROOT environment variable: {self._ndk_root}")<line_sep>self.env_info.NDK_ROOT=self._ndk_root<line_sep>logging.info(f"Creating CHOST environment variable: {self._llvm_triplet}")<line_sep>self.env_info.CHOST=self._llvm_triplet<line_sep>ndk_sysroot=os.path.join(self._ndk_root 'sysroot')<line_sep>logging.info(f"Creating CONAN_CMAKE_FIND_ROOT_PATH environment variable: {ndk_sysroot}")<line_sep>self.env_info.CONAN_CMAKE_FIND_ROOT_PATH=ndk_sysroot<line_sep>logging.info(f"Creating SYSROOT environment variable: {ndk_sysroot}")<line_sep>self.env_info.SYSROOT=ndk_sysroot<line_sep>logging.info(f"Creating self.cpp_info.sysroot: {ndk_sysroot}")<line_sep>self.cpp_info.sysroot=ndk_sysroot<line_sep>logging.info(f"Creating ANDROID_NATIVE_API_LEVEL environment variable: {self.settings_target.os.api_level}")<line_sep>self.env_info.ANDROID_NATIVE_API_LEVEL=str(self.settings_target.os.api_level)<line_sep>self.chmod_plus_x(os.path.join(self.package_folder "cmake-wrapper"))<line_sep>cmake_wrapper="cmake-wrapper.cmd"<if>self._build_os<eq>"Windows"<else>"cmake-wrapper"<line_sep>cmake_wrapper=os.path.join(self.package_folder cmake_wrapper)<line_sep>logging.info(f"Creating CONAN_CMAKE_PROGRAM environment variable: {cmake_wrapper}")<line_sep>self.env_info.CONAN_CMAKE_PROGRAM=cmake_wrapper<line_sep>toolchain=os.path.join(self._ndk_home "build" "cmake" "android.toolchain.cmake")<line_sep>logging.info(f"Creating CONAN_CMAKE_TOOLCHAIN_FILE environment variable: {toolchain}")<line_sep>self.env_info.CONAN_CMAKE_TOOLCHAIN_FILE=toolchain<line_sep>self.env_info.CC=self._define_tool_var('CC' 'clang')<line_sep>self.env_info.CXX=self._define_tool_var('CXX' 'clang++')<line_sep>self.env_info.LD=self._define_tool_var('LD' 'ld')<line_sep>self.env_info.AR=self._define_tool_var('AR' 'ar')<line_sep>self.env_info.AS=self._define_tool_var('AS' 'as')<line_sep>self.env_info.RANLIB=self._define_tool_var('RANLIB' 'ranlib')<line_sep>self.env_info.STRIP=self._define_tool_var('STRIP' 'strip')<line_sep>self.env_info.ADDR2LINE=self._define_tool_var('ADDR2LINE' 'addr2line')<line_sep>self.env_info.NM=self._define_tool_var('NM' 'nm')<line_sep>self.env_info.OBJCOPY=self._define_tool_var('OBJCOPY' 'objcopy')<line_sep>self.env_info.OBJDUMP=self._define_tool_var('OBJDUMP' 'objdump')<line_sep>self.env_info.READELF=self._define_tool_var('READELF' 'readelf')<line_sep>self.env_info.ELFEDIT=self._define_tool_var('ELFEDIT' 'elfedit')<line_sep>self.env_info.ANDROID_PLATFORM=f"android-{self.settings_target.os.api_level}"<line_sep>self.env_info.ANDROID_TOOLCHAIN="clang"<line_sep>self.env_info.ANDROID_ABI=self._android_abi<line_sep>self.env_info.ANDROID_STL=f"{self.options.android_stl}"<line_sep># set the stl shared lib path if specified by the android_stl option <if_stmt>self.options.android_stl<eq>"c++_shared"<block_start>self.env_info.ANDROID_STL_SHARED_LIB=f"{os.path.join(ndk_sysroot 'usr' 'lib' self._llvm_triplet 'libc++_shared.so')}"<line_sep>logging.info(f"Creating ANDROID_STL_SHARED_LIB environment variable: {self.env_info.ANDROID_STL_SHARED_LIB}")<block_end>self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PROGRAM="BOTH"<line_sep>self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY="BOTH"<line_sep>self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE="BOTH"<line_sep>self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE="BOTH"<block_end><block_end>
<import_stmt>unittest<import_from_stmt>Skoarcery langoids terminals nonterminals dragonsets parsetable emissions<import_from_stmt>Skoarcery.langoids Terminal Nonterminal<class_stmt>Code_Parser_Py(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>terminals.init()<line_sep>nonterminals.init()<line_sep>langoids.init()<line_sep>dragonsets.init()<line_sep>parsetable.init()<line_sep>emissions.init()<block_end><def_stmt>test_PY_rdpp self<block_start><import_from_stmt>Skoarcery.dragonsets FIRST FOLLOW<import_from_stmt>Skoarcery.terminals Empty<line_sep>fd=open("SkoarPyon/rdpp.py" "w")<line_sep>PY=emissions.PY<line_sep>PY.fd=fd<line_sep># Header # Imports # class SkoarParseException # class SkoarParser: # __init__ # fail self.code_start()<line_sep>PY.tab<augadd>1<line_sep>N=nonterminals.nonterminals.values()<line_sep># precompute desirables PY.method("init_desirables")<for_stmt>A N<block_start>R=A.production_rules<line_sep>PY.nl()<line_sep>PY.cmt(str(A))<line_sep># each production <for_stmt>P R<block_start><if_stmt>P.derives_empty<block_start><continue><block_end># A -> alpha alpha=P.production<line_sep>desires=FIRST(alpha)<if_stmt>Empty<in>desires<block_start>desires.discard(Empty)<line_sep>desires.update(FOLLOW(A))<block_end>i=0<line_sep>n=len(desires)<line_sep>PY.dict_set("self.desirables" str(P) "[" end="")<for_stmt>toke desires<block_start>PY.raw(toke.toker_name)<line_sep>i<augadd>1<if_stmt>i<ne>n<block_start><if_stmt>i%5<eq>0<block_start>PY.raw(",\n")<line_sep>PY.stmt(" " end="")<block_end><else_stmt><block_start>PY.raw(", ")<block_end><block_end><block_end><else_stmt><block_start>PY.raw("]\n")<block_end><block_end><block_end>PY.end()<line_sep># write each nonterminal as a function <for_stmt>A N<block_start>R=A.production_rules<line_sep>#PY.cmt(str(A)) PY.stmt("def "+A.name+"(self, parent):")<line_sep>PY.tab<augadd>1<line_sep>PY.stmt("self.tab += 1")<if_stmt>A.intermediate<block_start>PY.var("noad" "parent")<block_end><else_stmt><block_start>PY.var("noad" PY.v_new("SkoarNoad" PY.v_sym(A.name) "parent"))<block_end>PY.nl()<line_sep>#PY.code_line("print('" + A.name + "')") <for_stmt>P R<block_start><if_stmt>P.derives_empty<block_start><continue><block_end># A -> alpha alpha=P.production<line_sep>PY.stmt("desires = "+PY.v_dict_get("self.desirables" str(P)))<line_sep>PY.if_("self.toker.sees(desires)")<line_sep>#PY.print(str(P)) <for_stmt>x alpha<block_start><if_stmt>isinstance(x Terminal)<block_start>PY.stmt("noad.add_toke('"+x.toker_name+"', self.toker.burn("+x.toker_name+"))")<line_sep>#PY.print("burning: " + x.name) <block_end><else_stmt><block_start><if_stmt>x.intermediate<block_start>PY.stmt("self."+x.name+"(noad)")<block_end><else_stmt><block_start>PY.stmt("noad.add_noad(self."+x.name+"(noad))")<block_end><block_end><block_end><else_stmt><block_start>PY.return_("noad")<line_sep>PY.tab<augsub>1<line_sep>PY.nl()<block_end><block_end><if_stmt>A.derives_empty<block_start>PY.cmt("<e>")<line_sep>#PY.print("burning empty") PY.return_("noad")<block_end><else_stmt><block_start>PY.cmt("Error State")<line_sep>PY.stmt("self.fail()")<block_end>PY.tab<augsub>1<line_sep>PY.nl()<block_end>PY.tab<augsub>1<line_sep>fd.close()<block_end><def_stmt>code_start self<block_start><import_from_stmt>Skoarcery.terminals Empty<line_sep>PY=emissions.PY<line_sep>PY.file_header("rdpp" "PyRDPP - Create Recursive Descent Predictive Parser")<line_sep>s="from Skoarcery.SkoarPyon.apparatus import SkoarNoad\n"<concat>"from Skoarcery.SkoarPyon.lex import 
"<line_sep>T=terminals.tokens.values()<line_sep>n=len(T)<line_sep>i=0<for_stmt>t T<block_start><if_stmt>t<eq>Empty<block_start>n<augsub>1<line_sep><continue><block_end>s<augadd>t.toker_name<line_sep>i<augadd>1<if_stmt>i<l>n<block_start><if_stmt>i%5<eq>0<block_start>s<augadd>", \\\n "<block_end><else_stmt><block_start>s<augadd>", "<block_end><block_end><block_end>PY.raw(s+""" class SkoarParseException(Exception): pass class SkoarParser: def __init__(self, runtime): self.runtime = runtime self.toker = runtime.toker self.tab = 0 self.desirables = dict() self.init_desirables() def fail(self): self.toker.dump() raise SkoarParseException @property def tabby(self): if self.tab == 0: return "" return ("{:>" + str(self.tab * 2) + "}").format(" ") def print(self, line, end): print(self.tabby + line, end=end) """)<block_end><block_end>
<import_from_stmt>httpolice.citation RFC<import_from_stmt>httpolice.parse auto empty fill_names literal maybe_str octet_range pivot string string1 string_times subst <import_from_stmt>httpolice.syntax.common ALPHA DIGIT HEXDIG<line_sep>pct_encoded='%'+HEXDIG+HEXDIG<g>auto<line_sep>sub_delims=(literal('!')|'$'|'&'|"'"|'('|')'|'*'|'+'|','|';'|'=')<g>auto<line_sep>unreserved=ALPHA|DIGIT|'-'|'.'|'_'|'~'<g>auto<line_sep>pchar=unreserved|sub_delims|':'|'@'|pct_encoded<g>auto<line_sep>segment=string(pchar)<g>auto<line_sep>segment_nz=string1(pchar)<g>auto<line_sep>segment_nz_nc=string1(unreserved|sub_delims|'@'|pct_encoded)<g>auto<line_sep>scheme=ALPHA+string(ALPHA|DIGIT|'+'|'-'|'.')<g>pivot<line_sep>userinfo=string(unreserved|sub_delims|':'|pct_encoded)<g>pivot<line_sep>dec_octet=(DIGIT|octet_range(0x31 0x39)+DIGIT|'1'+DIGIT+DIGIT|'2'+octet_range(0x30 0x34)+DIGIT|'25'+octet_range(0x30 0x35))<g>auto<line_sep>IPv4address=(dec_octet+'.'+dec_octet+'.'+dec_octet+'.'+dec_octet)<g>pivot<line_sep>h16=string_times(1 4 HEXDIG)<g>auto<line_sep>ls32=(h16+':'+h16)|IPv4address<g>auto<line_sep>IPv6address=(string_times(6 6 h16+':')+ls32|'::'+string_times(5 5 h16+':')+ls32|maybe_str(h16)+'::'+string_times(4 4 h16+':')+ls32|maybe_str(string_times(0 1 h16+':')+h16)+'::'+string_times(3 3 h16+':')+ls32|maybe_str(string_times(0 2 h16+':')+h16)+'::'+string_times(2 2 h16+':')+ls32|maybe_str(string_times(0 3 h16+':')+h16)+'::'+h16+':'+ls32|maybe_str(string_times(0 4 h16+':')+h16)+'::'+ls32|maybe_str(string_times(0 5 h16+':')+h16)+'::'+h16|maybe_str(string_times(0 6 h16+':')+h16)+'::')<g>pivot<line_sep>IPvFuture=('v'+string1(HEXDIG)+'.'+string1(unreserved|sub_delims|':'))<g>pivot<line_sep># As updated by RFC 6874 ZoneID=string1(unreserved|pct_encoded)<g>pivot<line_sep>IPv6addrz=IPv6address+'%25'+ZoneID<g>pivot<line_sep>IP_literal='['+(IPv6address|IPv6addrz|IPvFuture)+']'<g>pivot<line_sep>reg_name=string(unreserved|sub_delims|pct_encoded)<g>pivot<line_sep>host=IP_literal|IPv4address|reg_name<g>pivot<line_sep>port=string(DIGIT)<g>pivot<line_sep>authority=maybe_str(userinfo+'@')+host+maybe_str(':'+port)<g>pivot<line_sep>path_abempty=string('/'+segment)<g>auto<line_sep>path_absolute='/'+maybe_str(segment_nz+string('/'+segment))<g>auto<line_sep>path_noscheme=segment_nz_nc+string('/'+segment)<g>auto<line_sep>path_rootless=segment_nz+string('/'+segment)<g>auto<line_sep>path_empty=subst(u'')<lshift>empty<g>auto<line_sep>hier_part=('//'+authority+path_abempty|path_absolute|path_rootless|path_empty)<g>pivot<line_sep>query=string(pchar|'/'|'?')<g>pivot<line_sep>fragment=string(pchar|'/'|'?')<g>pivot<line_sep>absolute_URI=scheme+':'+hier_part+maybe_str('?'+query)<g>pivot<line_sep>relative_part=('//'+authority+path_abempty|path_absolute|path_noscheme|path_empty)<g>pivot<line_sep>URI=(scheme+':'+hier_part+maybe_str('?'+query)+maybe_str('#'+fragment))<g>pivot<line_sep>relative_ref=(relative_part+maybe_str('?'+query)+maybe_str('#'+fragment))<g>pivot<line_sep>URI_reference=URI|relative_ref<g>pivot<line_sep>fill_names(globals() RFC(3986))<line_sep>
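For intuition only: the standard library's urlsplit carves a URI into the same top-level pieces the RFC 3986 rules above define (scheme, authority as userinfo@host:port, path, query, fragment); it is not a substitute for the combinator grammar, and the URI is a made-up example.

from urllib.parse import urlsplit

parts = urlsplit("https://user@example.com:8080/a/b;p?x=1#frag")
print(parts.scheme, parts.netloc, parts.path, parts.query, parts.fragment)
# https user@example.com:8080 /a/b;p x=1 frag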
<import_stmt>argparse<import_stmt>json<line_sep># import pandas as pd <import_stmt>os<line_sep># import sys # import re <import_stmt>yaml<import_stmt>itertools<line_sep># from bokeh.layouts import column, row, layout, gridplot # from bokeh.plotting import figure, output_file, show # from bokeh.sampledata.autompg import autompg # from bokeh.transform import jitter <import_from_stmt>bokeh.palettes Category10<import_from_stmt>bokeh.models HoverTool Div Range1d HoverTool<import_from_stmt>bokeh.plotting figure output_file show<line_sep># from bokeh.models import Legend # from bokeh.models import ColumnDataSource, CategoricalTicker, Div # from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn # from bokeh.transform import jitter <import_from_stmt>collections defaultdict<import_from_stmt>datetime datetime<as>dt<import_from_stmt>torchbenchmark.util.data load_data_dir load_data_files<import_from_stmt>torchbenchmark.score.compute_score TorchBenchScore<line_sep>TORCHBENCH_SCORE_VERSION="v1"<if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description=__doc__)<line_sep>parser.add_argument("data_dir" nargs='+' help="One or more directories containing benchmark json files. "<concat>"Each directory will be plotted as a separate series. "<concat>"By default, the first file in the first directory will be used"<concat>" to generate a score configuration with a target of 1000,"<concat>" and everything else will be relative to that.")<line_sep>parser.add_argument("--output_html" default='plot.html' help="html file to write")<line_sep>parser.add_argument("--plot_all" action='store_true' help="Plots the scores for each configuration")<line_sep>parser.add_argument("--reference_json" required=<true> help="file defining score norm values, usually first json in first data_dir")<line_sep>args=parser.parse_args()<line_sep>plot_height=800<line_sep>plot_width=1000<assert_stmt>len(args.data_dir)<g>0 "Must provide at least one data directory"<line_sep>compare_datasets=[load_data_dir(d most_recent_files=-1)<for>d args.data_dir]<with_stmt>open(args.reference_json)<as>f<block_start>ref_data=json.load(f)<block_end>plot_all=args.plot_all<line_sep>score_config=TorchBenchScore(ref_data=ref_data version=TORCHBENCH_SCORE_VERSION)<line_sep>p=figure(plot_width=plot_width plot_height=plot_height x_axis_type='datetime')<line_sep>xs=[]<line_sep>ys=[]<line_sep>zs=[]<line_sep>max_score=0<for_stmt>d compare_datasets<block_start>scores={}<line_sep>scores_db=defaultdict(list)<for_stmt>i range(len(d._json_raw))<block_start>data=d._json_raw[i]<line_sep>pytorch_ver=data['machine_info']['pytorch_version']<line_sep># Slice the portion after '+' pytorch_ver_cuda_loc=pytorch_ver.rfind('+')<line_sep>pytorch_ver=pytorch_ver[:pytorch_ver_cuda_loc]<line_sep>date=dt.strptime(pytorch_ver[pytorch_ver.index("dev")+len("dev"):] "%Y%m%d")<line_sep>score=score_config.compute_score(data)<line_sep>scores[date]=score<block_end>dates=[]<line_sep>total_scores=[]<line_sep>all_scores=[]<for_stmt>date sorted(scores.keys())<block_start>dates.append(date)<line_sep>total_scores.append(scores[date]["total"])<line_sep>max_score=max(max_score max(total_scores))<line_sep>all_scores.append(scores[date])<block_end>xs.append(dates)<line_sep>ys.append(total_scores)<if_stmt>plot_all<block_start>zs.append(all_scores)<block_end><block_end>colors=itertools.cycle(Category10[10])<line_sep>basenames=map(os.path.basename args.data_dir)<if_stmt>plot_all<block_start><for_stmt>x,z zip(xs 
zs)<block_start>basename=next(basenames)<line_sep>color=next(colors)<line_sep>configs=z[0].keys()<for_stmt>config configs<block_start><if_stmt><not>("subscore"<in>config<or>"total"<in>config)<block_start><continue><block_end>color=next(colors)<line_sep>scores=[]<for_stmt>s z<block_start>scores.append(s[config])<block_end>p.line(x scores color=color line_width=2 legend_label=basename+'-'+config)<block_end><block_end>p.legend.click_policy="hide"<block_end><else_stmt><block_start><for_stmt>x,y,color zip(xs ys colors)<block_start>p.line(x y color=color line_width=2 legend_label=next(basenames))<block_end><for_stmt>x,y,color zip(xs ys colors)<block_start>p.circle(x y color=color)<block_end><block_end>p.legend.location="bottom_right"<line_sep>p.y_range=Range1d(0 max_score<times>1.25)<line_sep>p.add_tools(HoverTool(tooltips=[('date' '@x{%F}') ('score' '@y{0.00 a}') ] formatters={'@x':'datetime' '@y':'numeral' } ))<line_sep>output_file(args.output_html)<line_sep>show(p)<block_end>
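A small sketch of the version-to-date parsing inside the loop above: nightly PyTorch versions carry the build date right after "dev", and any "+cuXXX" suffix is stripped first. The version string here is invented.

from datetime import datetime as dt

ver = "1.8.0.dev20201118+cu101"
plus = ver.rfind("+")
base = ver[:plus] if plus != -1 else ver
date = dt.strptime(base[base.index("dev") + len("dev"):], "%Y%m%d")
print(date.date())   # 2020-11-18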
"""Imports that should be exposed outside the package"""<import_from_stmt>.hello_world write<as>write_hello_world<line_sep>
""" Execution: python symbol_graph.py filename.txt delimiter Data files: https://algs4.cs.princeton.edu/41graph/routes.txt https://algs4.cs.princeton.edu/41graph/movies.txt https://algs4.cs.princeton.edu/41graph/moviestiny.txt https://algs4.cs.princeton.edu/41graph/moviesG.txt https://algs4.cs.princeton.edu/41graph/moviestopGrossing.txt % python symbol_graph.py routes.txt " " JFK MCO ATL ORD LAX PHX LAS % python symbol_graph.py movies.txt "/" Tin Men (1987) Hershey, Barbara Geppi, Cindy <NAME> (II) Herr, Marcia ... Blumenfeld, Alan DeBoy, David Bacon, Kevin Woodsman, The (2004) Wild Things (1998) Where the Truth Lies (2005) Tremors (1990) ... Apollo 13 (1995) Animal House (1978) Assumes that input file is encoded using UTF-8. % iconv -f ISO-8859-1 -t UTF-8 movies-iso8859.txt > movies.txt """<import_from_stmt>algs4.st ST<import_from_stmt>algs4.graph Graph<class_stmt>SymbolGraph<block_start><def_stmt>__init__ self stream sp<block_start>self.st=ST()<for_stmt>line open(stream)<block_start>a=line.strip().split(sp)<for_stmt>i range(len(a))<block_start><if_stmt><not>self.st.contains(a[i])<block_start>self.st.put(a[i] self.st.size())<block_end><block_end><block_end>self.keys=[""<for>_ range(self.st.size())]<for_stmt>key self.st.keys()<block_start>self.keys[self.st.get(key)]=key<block_end>self.G=Graph(self.st.size())<for_stmt>line open(stream)<block_start>a=line.strip().split(sp)<line_sep>v=self.st.get(a[0])<for_stmt>i range(1 len(a))<block_start>self.G.add_edge(v self.st.get(a[i]))<block_end><block_end><block_end><def_stmt>contains self s<block_start><return>self.st.contains(s)<block_end><def_stmt>index self s<block_start><return>self.st.get(s)<block_end><def_stmt>name self v<block_start><return>self.keys[v]<block_end><def_stmt>graph self<block_start><return>self.G<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>sys<line_sep>filename,delimiter=sys.argv[1] sys.argv[2]<line_sep>sg=SymbolGraph(filename delimiter)<line_sep>graph=sg.graph()<for_stmt>line sys.stdin<block_start>source=line.strip()<if_stmt>sg.contains(source)<block_start>s=sg.index(source)<for_stmt>v graph.adj[s]<block_start>print(" " sg.name(v) end='')<block_end><block_end><else_stmt><block_start>print("input not contains source: " source)<block_end><block_end><block_end>
""" Copyright (c) 2019 Cisco Systems, Inc. All rights reserved. License at https://github.com/cisco/mercury/blob/master/LICENSE """<import_stmt>os<import_stmt>sys<import_stmt>functools<import_from_stmt>socket AF_INET AF_INET6 inet_ntop<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__)))<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')<import_from_stmt>pmercury.protocols.protocol Protocol<line_sep>MAX_CACHED_RESULTS=2<power>24<class_stmt>DHCP(Protocol)<block_start><def_stmt>__init__ self fp_database=<none> config=<none># populate fingerprint databases <block_start>self.fp_db=<none><line_sep>DHCP.static_data=set([0x35 0x37])<line_sep>DHCP.contextual_data={0x03:('router' <lambda>x:inet_ntop(AF_INET x)) 0x06:('domain_name_server' <lambda>x:inet_ntop(AF_INET x)) 0x0c:('hostname' <lambda>x:x.decode()) 0x0f:('domain_name' <lambda>x:x.decode()) 0x32:('requested_ip' <lambda>x:inet_ntop(AF_INET x)) 0x3c:('vendor_class_id' <lambda>x:x.decode())}<block_end>@staticmethod<def_stmt>proto_identify data offset data_len<block_start><if_stmt>data_len<l>230<block_start><return><false><block_end><if_stmt>(data[offset]<ne>0x01<or>data[offset+236]<ne>0x63<or>data[offset+237]<ne>0x82<or>data[offset+238]<ne>0x53<or>data[offset+239]<ne>0x63)<block_start><return><false><block_end><return><true><block_end>@staticmethod<def_stmt>fingerprint data offset data_len<block_start>hardware_address_length=data[offset+2]<line_sep>cmac=data[offset+28:offset+28+hardware_address_length].hex()<line_sep>context=[{'name':'client_mac_address' 'data':'%s'%':'.join(a+b<for>a,b zip(cmac[::2] cmac[1::2]))}]<line_sep>offset<augadd>240<line_sep>fp_='('<while_stmt>offset<l>data_len<block_start>kind=data[offset]<if_stmt>kind<eq>0xff<or>kind<eq>0x00# End / Padding <block_start>fp_<augadd>'(%02x)'%kind<line_sep><break><block_end>length=data[offset+1]<if_stmt>kind<in>DHCP.contextual_data<block_start>name_,transform_=DHCP.contextual_data[kind]<line_sep>context.append({'name':name_ 'data':transform_(data[offset+2:offset+2+length])})<block_end><if_stmt>offset+length+2<ge>data_len<block_start><return><none><block_end><if_stmt>kind<not><in>DHCP.static_data<block_start>fp_<augadd>'(%02x)'%kind<line_sep>offset<augadd>length+2<line_sep><continue><block_end>fp_<augadd>'(%s)'%data[offset:offset+2+length].hex()<line_sep>offset<augadd>length+2<block_end>fp_<augadd>')'<line_sep><return>fp_ context<block_end><block_end>
r""" Deep Learning for Astronomers with Tensorflow """<import_from_stmt>pkg_resources get_distribution<line_sep>version=__version__=get_distribution('astroNN').version<line_sep>
# Copyright (c) Facebook, Inc. and its affiliates. <import_stmt>numpy<as>np<line_sep># vertices: frames x meshVerNum x 3 # trifaces: facePolygonNum x 3 = 22800 x 3 <def_stmt>ComputeNormal vertices trifaces<block_start><if_stmt>vertices.shape[0]<g>5000<block_start>print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape))<line_sep><return><block_end>#compute vertex Normals for all frames U=vertices[: trifaces[: 1] :]-vertices[: trifaces[: 0] :]#frames x faceNum x 3 V=vertices[: trifaces[: 2] :]-vertices[: trifaces[: 1] :]#frames x faceNum x 3 originalShape=U.shape#remember: frames x faceNum x 3 U=np.reshape(U [-1 3])<line_sep>V=np.reshape(V [-1 3])<line_sep>faceNormals=np.cross(U V)#frames x 13776 x 3 <import_from_stmt>sklearn.preprocessing normalize<if_stmt>np.isnan(np.max(faceNormals))<block_start>print('ComputeNormal: Warning nan is detected {0}')<line_sep><return><block_end>faceNormals=normalize(faceNormals)<line_sep>faceNormals=np.reshape(faceNormals originalShape)<if_stmt><false>#Slow version <block_start>vertex_normals=np.zeros(vertices.shape)#(frames x 11510) x 3 <for_stmt>fIdx,vIdx enumerate(trifaces[: 0])<block_start>vertex_normals[: vIdx :]<augadd>faceNormals[: fIdx :]<block_end><for_stmt>fIdx,vIdx enumerate(trifaces[: 1])<block_start>vertex_normals[: vIdx :]<augadd>faceNormals[: fIdx :]<block_end><for_stmt>fIdx,vIdx enumerate(trifaces[: 2])<block_start>vertex_normals[: vIdx :]<augadd>faceNormals[: fIdx :]<block_end><block_end><else_stmt>#Faster version # Computing vertex normals, much faster (and obscure) replacement <block_start>index=np.vstack((np.ravel(trifaces) np.repeat(np.arange(len(trifaces)) 3))).T<line_sep>index_sorted=index[index[: 0].argsort()]<line_sep>vertex_normals=np.add.reduceat(faceNormals[: index_sorted[: 1] :][0] np.concatenate(([0] np.cumsum(np.unique(index_sorted[: 0] return_counts=<true>)[1])[:-1])))[<none> :]<line_sep>vertex_normals=vertex_normals.astype(np.float64)<block_end>originalShape=vertex_normals.shape<line_sep>vertex_normals=np.reshape(vertex_normals [-1 3])<line_sep>vertex_normals=normalize(vertex_normals)<line_sep>vertex_normals=np.reshape(vertex_normals originalShape)<line_sep><return>vertex_normals<block_end><def_stmt>ComputeNormal_gpu vertices trifaces<block_start><import_stmt>torch<import_stmt>torch.nn.functional<as>F<if_stmt>vertices.shape[0]<g>5000<block_start>print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape))<line_sep><return><block_end>#compute vertex Normals for all frames #trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda() vertices_cuda=torch.from_numpy(vertices.astype(np.float32)).cuda()<line_sep>U_cuda=vertices_cuda[: trifaces[: 1] :]-vertices_cuda[: trifaces[: 0] :]#frames x faceNum x 3 V_cuda=vertices_cuda[: trifaces[: 2] :]-vertices_cuda[: trifaces[: 1] :]#frames x faceNum x 3 originalShape=list(U_cuda.size())#remember: frames x faceNum x 3 U_cuda=torch.reshape(U_cuda [-1 3])#.astype(np.float32) V_cuda=torch.reshape(V_cuda [-1 3])#.astype(np.float32) faceNormals=U_cuda.cross(V_cuda)<line_sep>faceNormals=F.normalize(faceNormals dim=1)<line_sep>faceNormals=torch.reshape(faceNormals originalShape)<line_sep># trifaces has duplicated vertex index, so cannot be parallazied # vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3 # for fIdx, vIdx in enumerate(trifaces[:,0]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # for fIdx, vIdx in enumerate(trifaces[:,1]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # for fIdx, 
vIdx in enumerate(trifaces[:,2]): # vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:] # Computing vertex normals, much faster (and obscure) replacement index=np.vstack((np.ravel(trifaces) np.repeat(np.arange(len(trifaces)) 3))).T<line_sep>index_sorted=index[index[: 0].argsort()]<line_sep>vertex_normals=np.add.reduceat(faceNormals[: index_sorted[: 1] :][0] np.concatenate(([0] np.cumsum(np.unique(index_sorted[: 0] return_counts=<true>)[1])[:-1])))[<none> :]<line_sep>vertex_normals=torch.from_numpy(vertex_normals).float().cuda()<line_sep>vertex_normals=F.normalize(vertex_normals dim=2)<line_sep>vertex_normals=vertex_normals.data.cpu().numpy()#(batch, chunksize, dim) <return>vertex_normals<block_end>
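The np.add.reduceat accumulation in ComputeNormal (the "faster and obscure replacement") scatter-adds each face normal onto the face's three vertices without a Python loop. A tiny reproduction with two triangles sharing an edge and toy "normals" so the sums are easy to check; per-frame batching and normalization are left out.

import numpy as np

trifaces = np.array([[0, 1, 2],
                     [1, 3, 2]])
face_normals = np.array([[1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]])

index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T   # (vertex, face) pairs
index_sorted = index[index[:, 0].argsort()]                                         # group rows by vertex
starts = np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0], return_counts=True)[1])[:-1]))
vertex_normals = np.add.reduceat(face_normals[index_sorted[:, 1]], starts)

print(vertex_normals)
# [[1. 0. 0.]    vertex 0: face 0 only
#  [1. 1. 0.]    vertex 1: faces 0 and 1
#  [1. 1. 0.]    vertex 2: faces 0 and 1
#  [0. 1. 0.]]   vertex 3: face 1 only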
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== <import_from_future_stmt> absolute_import division print_function<import_stmt>numpy<as>np<import_from_stmt>rlgraph get_backend<import_from_stmt>rlgraph.agents Agent<import_from_stmt>rlgraph.components Component Synchronizable Memory ValueFunction ContainerMerger PrioritizedReplay<import_from_stmt>rlgraph.components.loss_functions.sac_loss_function SACLossFunction<import_from_stmt>rlgraph.spaces FloatBox BoolBox IntBox ContainerSpace<import_from_stmt>rlgraph.spaces.space_utils sanity_check_space<import_from_stmt>rlgraph.utils RLGraphError<import_from_stmt>rlgraph.utils.decorators rlgraph_api graph_fn<import_from_stmt>rlgraph.utils.ops flatten_op DataOpTuple<import_from_stmt>rlgraph.utils.util strip_list force_list<if_stmt>get_backend()<eq>"tf"<block_start><import_stmt>tensorflow<as>tf<block_end><elif_stmt>get_backend()<eq>"pytorch"<block_start><import_stmt>torch<block_end><class_stmt>SyncSpecification(object)<block_start>"""Describes a synchronization schedule, used to update the target value weights. The target values are gradually updates using exponential moving average as suggested by the paper."""<def_stmt>__init__ self sync_interval=<none> sync_tau=<none><block_start>""" Arguments: sync_interval: How often to update the target. sync_tau: The smoothing constant to use in the averaging. Setting to 1 replaces the values each iteration. """<line_sep>self.sync_interval=sync_interval<line_sep>self.sync_tau=sync_tau<block_end><block_end><class_stmt>SACAgentComponent(Component)<block_start><def_stmt>__init__ self agent policy q_function preprocessor memory discount initial_alpha target_entropy optimizer vf_optimizer alpha_optimizer q_sync_spec num_q_functions=2<block_start>super(SACAgentComponent self).__init__(nesting_level=0)<line_sep>self.agent=agent<line_sep>self._policy=policy<line_sep>self._preprocessor=preprocessor<line_sep>self._memory=memory<line_sep>self._q_functions=[q_function]<line_sep>self._q_functions<augadd>[q_function.copy(scope="{}-{}".format(q_function.scope i+1) trainable=<true>)<for>i range(num_q_functions-1)]<line_sep># Set number of return values for get_q_values graph_fn. self.graph_fn_num_outputs["_graph_fn_get_q_values"]=num_q_functions<for_stmt>q self._q_functions# TODO: is there a better way to do this? <block_start><if_stmt>"synchronizable"<not><in>q.sub_components<block_start>q.add_components(Synchronizable() expose_apis="sync")<block_end><block_end>self._target_q_functions=[q.copy(scope="target-"+q.scope trainable=<true>)<for>q self._q_functions]<for_stmt>target_q self._target_q_functions# TODO: is there a better way to do this? 
<block_start><if_stmt>"synchronizable"<not><in>target_q.sub_components<block_start>target_q.add_components(Synchronizable() expose_apis="sync")<block_end><block_end>self._optimizer=optimizer<line_sep>self.vf_optimizer=vf_optimizer<line_sep>self.alpha_optimizer=alpha_optimizer<line_sep>self.initial_alpha=initial_alpha<line_sep>self.log_alpha=<none><line_sep>self.target_entropy=target_entropy<line_sep>self.loss_function=SACLossFunction(target_entropy=target_entropy discount=discount num_q_functions=num_q_functions)<line_sep>memory_items=["states" "actions" "rewards" "next_states" "terminals"]<line_sep>self._merger=ContainerMerger(*memory_items)<line_sep>q_names=["q_{}".format(i)<for>i range(len(self._q_functions))]<line_sep>self._q_vars_merger=ContainerMerger(*q_names scope="q_vars_merger")<line_sep>self.add_components(policy preprocessor memory self._merger self.loss_function optimizer vf_optimizer self._q_vars_merger)<line_sep># , self._q_vars_splitter) self.add_components(*self._q_functions)<line_sep>self.add_components(*self._target_q_functions)<if_stmt>self.alpha_optimizer<is><not><none><block_start>self.add_components(self.alpha_optimizer)<block_end>self.steps_since_last_sync=<none><line_sep>self.q_sync_spec=q_sync_spec<line_sep>self.env_action_space=<none><line_sep>self.episode_reward=<none><block_end><def_stmt>check_input_spaces self input_spaces action_space=<none><block_start><for_stmt>s ["states" "actions" "env_actions" "preprocessed_states" "rewards" "terminals"]<block_start>sanity_check_space(input_spaces[s] must_have_batch_rank=<true>)<block_end>self.env_action_space=input_spaces["env_actions"].flatten()<block_end><def_stmt>create_variables self input_spaces action_space=<none><block_start>self.steps_since_last_sync=self.get_variable("steps_since_last_sync" dtype="int" initializer=0)<line_sep>self.log_alpha=self.get_variable("log_alpha" dtype="float" initializer=np.log(self.initial_alpha))<line_sep>self.episode_reward=self.get_variable("episode_reward" shape=() initializer=0.0)<block_end>@rlgraph_api<def_stmt>get_policy_weights self<block_start><return>self._policy.variables()<block_end>@rlgraph_api<def_stmt>get_q_weights self<block_start>merged_weights=self._q_vars_merger.merge(*[q.variables()<for>q self._q_functions])<line_sep><return>merged_weights<block_end>@rlgraph_api(must_be_complete=<false>)<def_stmt>set_policy_weights self weights<block_start><return>self._policy.sync(weights)<block_end>""" TODO: need to define the input space @rlgraph_api(must_be_complete=False) def set_q_weights(self, q_weights): split_weights = self._q_vars_splitter.call(q_weights) assert len(split_weights) == len(self._q_functions) update_ops = [q.sync(q_weights) for q_weights, q in zip(split_weights, self._q_functions)] update_ops.extend([q.sync(q_weights) for q_weights, q in zip(split_weights, self._target_q_functions)]) return tuple(update_ops) """<line_sep>@rlgraph_api<def_stmt>preprocess_states self states<block_start><return>self._preprocessor.preprocess(states)<block_end>@rlgraph_api<def_stmt>insert_records self preprocessed_states env_actions rewards next_states terminals<block_start>records=self._merger.merge(preprocessed_states env_actions rewards next_states terminals)<line_sep><return>self._memory.insert_records(records)<block_end>@rlgraph_api<def_stmt>update_from_memory self batch_size=64 time_percentage=<none><block_start>records,sample_indices,importance_weights=self._memory.get_records(batch_size)<line_sep>result=self.update_from_external_batch(records["states"] records["actions"] 
records["rewards"] records["terminals"] records["next_states"] importance_weights time_percentage)<if_stmt>isinstance(self._memory PrioritizedReplay)<block_start>update_pr_step_op=self._memory.update_records(sample_indices result["critic_loss_per_item"])<line_sep>result["update_pr_step_op"]=update_pr_step_op<block_end><return>result<block_end>@rlgraph_api<def_stmt>update_from_external_batch self preprocessed_states env_actions rewards terminals next_states importance_weights time_percentage=<none><block_start>actions=self._graph_fn_one_hot(env_actions)<line_sep>actor_loss,actor_loss_per_item,critic_loss,critic_loss_per_item,alpha_loss,alpha_loss_per_item=self.get_losses(preprocessed_states actions rewards terminals next_states importance_weights)<line_sep>policy_vars=self._policy.variables()<line_sep>q_vars=[q_func.variables()<for>q_func self._q_functions]<line_sep>merged_q_vars=self._q_vars_merger.merge(*q_vars)<line_sep>critic_step_op=self.vf_optimizer.step(merged_q_vars critic_loss critic_loss_per_item time_percentage)<line_sep>actor_step_op=self._optimizer.step(policy_vars actor_loss actor_loss_per_item time_percentage)<if_stmt>self.target_entropy<is><not><none><block_start>alpha_step_op=self._graph_fn_update_alpha(alpha_loss alpha_loss_per_item time_percentage)<block_end><else_stmt><block_start>alpha_step_op=self._graph_fn_no_op()<block_end># TODO: optimizer for alpha sync_op=self.sync_targets()<line_sep># Increase the global training step counter. alpha_step_op=self._graph_fn_training_step(alpha_step_op)<line_sep><return>dict(actor_step_op=actor_step_op critic_step_op=critic_step_op sync_op=sync_op alpha_step_op=alpha_step_op actor_loss=actor_loss actor_loss_per_item=actor_loss_per_item critic_loss=critic_loss critic_loss_per_item=critic_loss_per_item alpha_loss=alpha_loss alpha_loss_per_item=alpha_loss_per_item)<block_end>@graph_fn(flatten_ops=<true> split_ops=<true> add_auto_key_as_first_param=<true>)<def_stmt>_graph_fn_one_hot self key env_actions<block_start><if_stmt>isinstance(self.env_action_space[key] IntBox)<block_start>env_actions=tf.one_hot(env_actions depth=self.env_action_space[key].num_categories axis=-1)<block_end><return>env_actions<block_end>@graph_fn(requires_variable_completeness=<true>)<def_stmt>_graph_fn_update_alpha self alpha_loss alpha_loss_per_item time_percentage=<none><block_start>alpha_step_op=self.alpha_optimizer.step(DataOpTuple([self.log_alpha]) alpha_loss alpha_loss_per_item time_percentage)<line_sep><return>alpha_step_op<block_end>@rlgraph_api# `returns` are determined in ctor <def_stmt>_graph_fn_get_q_values self preprocessed_states actions target=<false><block_start>backend=get_backend()<line_sep>flat_actions=flatten_op(actions)<line_sep>actions=[]<for_stmt>flat_key,action_component self._policy.action_space.flatten().items()<block_start>actions.append(flat_actions[flat_key])<block_end><if_stmt>backend<eq>"tf"<block_start>actions=tf.concat(actions axis=-1)<block_end><elif_stmt>backend<eq>"pytorch"<block_start>actions=torch.cat(actions dim=-1)<block_end>q_funcs=self._q_functions<if>target<is><false><else>self._target_q_functions<line_sep># We do not concat states yet because we might pass states through a conv stack before merging it # with actions. 
<return>tuple(q.state_action_value(preprocessed_states actions)<for>q q_funcs)<block_end>@rlgraph_api<def_stmt>get_losses self preprocessed_states actions rewards terminals next_states importance_weights# TODO: internal states <block_start>samples_next=self._policy.get_action_and_log_likelihood(next_states deterministic=<false>)<line_sep>next_sampled_actions=samples_next["action"]<line_sep>log_probs_next_sampled=samples_next["log_likelihood"]<line_sep>q_values_next_sampled=self.get_q_values(next_states next_sampled_actions target=<true>)<line_sep>q_values=self.get_q_values(preprocessed_states actions)<line_sep>samples=self._policy.get_action_and_log_likelihood(preprocessed_states deterministic=<false>)<line_sep>sampled_actions=samples["action"]<line_sep>log_probs_sampled=samples["log_likelihood"]<line_sep>q_values_sampled=self.get_q_values(preprocessed_states sampled_actions)<line_sep>alpha=self._graph_fn_compute_alpha()<line_sep><return>self.loss_function.loss(alpha log_probs_next_sampled q_values_next_sampled q_values log_probs_sampled q_values_sampled rewards terminals)<block_end>@rlgraph_api<def_stmt>get_preprocessed_state_and_action self states deterministic=<false><block_start>preprocessed_states=self._preprocessor.preprocess(states)<line_sep><return>self.action_from_preprocessed_state(preprocessed_states deterministic)<block_end>@rlgraph_api<def_stmt>action_from_preprocessed_state self preprocessed_states deterministic=<false><block_start>out=self._policy.get_action(preprocessed_states deterministic=deterministic)<line_sep><return>out["action"] preprocessed_states<block_end>@rlgraph_api(requires_variable_completeness=<true>)<def_stmt>reset_targets self<block_start>ops=(target_q.sync(q.variables())<for>q,target_q zip(self._q_functions self._target_q_functions))<line_sep><return>tuple(ops)<block_end>@rlgraph_api(requires_variable_completeness=<true>)<def_stmt>sync_targets self<block_start>should_sync=self._graph_fn_get_should_sync()<line_sep><return>self._graph_fn_sync(should_sync)<block_end>@rlgraph_api<def_stmt>get_memory_size self<block_start><return>self._memory.get_size()<block_end>@graph_fn<def_stmt>_graph_fn_compute_alpha self<block_start>backend=get_backend()<if_stmt>backend<eq>"tf"<block_start><return>tf.exp(self.log_alpha)<block_end><elif_stmt>backend<eq>"pytorch"<block_start><return>torch.exp(self.log_alpha)<block_end><block_end># TODO: Move this into generic AgentRootComponent. 
@graph_fn<def_stmt>_graph_fn_training_step self other_step_op=<none><block_start><if_stmt>self.agent<is><not><none><block_start>add_op=tf.assign_add(self.agent.graph_executor.global_training_timestep 1)<line_sep>op_list=[add_op]+[other_step_op]<if>other_step_op<is><not><none><else>[]<with_stmt>tf.control_dependencies(op_list)<block_start><return>tf.no_op()<if>other_step_op<is><none><else>other_step_op<block_end><block_end><else_stmt><block_start><return>tf.no_op()<if>other_step_op<is><none><else>other_step_op<block_end><block_end>@graph_fn(returns=1 requires_variable_completeness=<true>)<def_stmt>_graph_fn_get_should_sync self<block_start><if_stmt>get_backend()<eq>"tf"<block_start>inc_op=tf.assign_add(self.steps_since_last_sync 1)<line_sep>should_sync=inc_op<ge>self.q_sync_spec.sync_interval<def_stmt>reset_op <block_start>op=tf.assign(self.steps_since_last_sync 0)<with_stmt>tf.control_dependencies([op])<block_start><return>tf.no_op()<block_end><block_end>sync_op=tf.cond(pred=inc_op<ge>self.q_sync_spec.sync_interval true_fn=reset_op false_fn=tf.no_op)<with_stmt>tf.control_dependencies([sync_op])<block_start><return>tf.identity(should_sync)<block_end><block_end><else_stmt><block_start><raise>NotImplementedError("TODO")<block_end><block_end>@graph_fn(returns=1 requires_variable_completeness=<true>)<def_stmt>_graph_fn_sync self should_sync<block_start>assign_ops=[]<line_sep>tau=self.q_sync_spec.sync_tau<if_stmt>tau<ne>1.0<block_start>all_source_vars=[source.get_variables(collections=<none> custom_scope_separator="-")<for>source self._q_functions]<line_sep>all_dest_vars=[destination.get_variables(collections=<none> custom_scope_separator="-")<for>destination self._target_q_functions]<for_stmt>source_vars,dest_vars zip(all_source_vars all_dest_vars)<block_start><for_stmt>(source_key source_var),(dest_key dest_var) zip(sorted(source_vars.items()) sorted(dest_vars.items()))<block_start>assign_ops.append(tf.assign(dest_var tau<times>source_var+(1.0-tau)<times>dest_var))<block_end><block_end><block_end><else_stmt><block_start>all_source_vars=[source.variables()<for>source self._q_functions]<for_stmt>source_vars,destination zip(all_source_vars self._target_q_functions)<block_start>assign_ops.append(destination.sync(source_vars))<block_end><block_end><assert_stmt>len(assign_ops)<g>0<line_sep>grouped_op=tf.group(assign_ops)<def_stmt>assign_op # Make sure we are returning no_op as opposed to reference <block_start><with_stmt>tf.control_dependencies([grouped_op])<block_start><return>tf.no_op()<block_end><block_end>cond_assign_op=tf.cond(should_sync true_fn=assign_op false_fn=tf.no_op)<with_stmt>tf.control_dependencies([cond_assign_op])<block_start><return>tf.no_op()<block_end><block_end>@graph_fn<def_stmt>_graph_fn_no_op self<block_start><return>tf.no_op()<block_end>@rlgraph_api<def_stmt>get_global_timestep self<block_start><return>self.read_variable(self.agent.graph_executor.global_timestep)<block_end>@rlgraph_api<def_stmt>_graph_fn_update_global_timestep self increment<block_start><if_stmt>get_backend()<eq>"tf"<block_start>add_op=tf.assign_add(self.agent.graph_executor.global_timestep increment)<line_sep><return>add_op<block_end><elif_stmt>get_backend<eq>"pytorch"<block_start>self.agent.graph_executor.global_timestep<augadd>increment<line_sep><return>self.agent.graph_executor.global_timestep<block_end><block_end>@rlgraph_api<def_stmt>_graph_fn_get_episode_reward self<block_start><return>self.episode_reward<block_end>@rlgraph_api<def_stmt>_graph_fn_set_episode_reward self 
episode_reward<block_start><return>tf.assign(self.episode_reward episode_reward)<block_end><block_end><class_stmt>SACAgent(Agent)<block_start><def_stmt>__init__ self state_space action_space discount=0.98 preprocessing_spec=<none> network_spec=<none> internal_states_space=<none> policy_spec=<none> value_function_spec=<none> execution_spec=<none> optimizer_spec=<none> value_function_optimizer_spec=<none> observe_spec=<none> update_spec=<none> summary_spec=<none> saver_spec=<none> auto_build=<true> name="sac-agent" double_q=<true> initial_alpha=1.0 gumbel_softmax_temperature=1.0 target_entropy=<none> memory_spec=<none> value_function_sync_spec=<none><block_start>""" This is an implementation of the Soft Actor-Critic algorithm. Paper: http://arxiv.org/abs/1801.01290 Args: state_space (Union[dict,Space]): Spec dict for the state Space or a direct Space object. action_space (Union[dict,Space]): Spec dict for the action Space or a direct Space object. preprocessing_spec (Optional[list,PreprocessorStack]): The spec list for the different necessary state preprocessing steps or a PreprocessorStack object itself. discount (float): The discount factor (gamma). network_spec (Optional[list,NeuralNetwork]): Spec list for a NeuralNetwork Component or the NeuralNetwork object itself. internal_states_space (Optional[Union[dict,Space]]): Spec dict for the internal-states Space or a direct Space object for the Space(s) of the internal (RNN) states. policy_spec (Optional[dict]): An optional dict for further kwargs passing into the Policy c'tor. value_function_spec (list, dict, ValueFunction): Neural network specification for baseline or instance of ValueFunction. execution_spec (Optional[dict,Execution]): The spec-dict specifying execution settings. optimizer_spec (Optional[dict,Optimizer]): The spec-dict to create the Optimizer for this Agent. value_function_optimizer_spec (dict): Optimizer config for the value function optimizer. If None, the optimizer spec for the policy is used (same learning rate and optimizer type). observe_spec (Optional[dict]): Spec-dict to specify `Agent.observe()` settings. update_spec (Optional[dict]): Spec-dict to specify `Agent.update()` settings. May contain `sync_interval` or `sync_tau` for the target value-network update. summary_spec (Optional[dict]): Spec-dict to specify summary settings. saver_spec (Optional[dict]): Spec-dict to specify saver settings. auto_build (Optional[bool]): If True (default), immediately builds the graph using the agent's graph builder. If False, users must separately call agent.build(). Useful for debugging or analyzing components before building. name (str): Some name for this Agent object. double_q (bool): Whether to train two q networks independently. initial_alpha (float): "The temperature parameter α determines the relative importance of the entropy term against the reward". gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used for discrete actions. memory_spec (Optional[dict,Memory]): The spec for the Memory to use for the SAC algorithm. """<line_sep># If VF spec is a network spec, wrap with SAC vf type. The VF must concatenate actions and states, # which can require splitting the network in the case of e.g. conv-inputs.
<if_stmt>isinstance(value_function_spec list)<block_start>value_function_spec=dict(type="sac_value_function" network_spec=value_function_spec)<line_sep>self.logger.info("Using default SAC value function.")<block_end><elif_stmt>isinstance(value_function_spec ValueFunction)<block_start>self.logger.info("Using value function object {}".format(value_function_spec))<block_end><if_stmt>policy_spec<is><none># Continuous action space: Use squashed normal. # Discrete: Gumbel-softmax. <block_start>policy_spec=dict(deterministic=<false> distributions_spec=dict(bounded_distribution_type="squashed" discrete_distribution_type="gumbel_softmax" gumbel_softmax_temperature=gumbel_softmax_temperature))<block_end>super(SACAgent self).__init__(state_space=state_space action_space=action_space discount=discount preprocessing_spec=preprocessing_spec network_spec=network_spec internal_states_space=internal_states_space policy_spec=policy_spec value_function_spec=value_function_spec execution_spec=execution_spec optimizer_spec=optimizer_spec value_function_optimizer_spec=value_function_optimizer_spec observe_spec=observe_spec update_spec=update_spec summary_spec=summary_spec saver_spec=saver_spec auto_build=auto_build name=name)<line_sep>self.double_q=double_q<line_sep>self.target_entropy=target_entropy<line_sep>self.initial_alpha=initial_alpha<line_sep># Assert that the sync interval is a multiple of the update_interval. <if_stmt>"sync_interval"<in>self.update_spec<block_start><if_stmt>self.update_spec["sync_interval"]/self.update_spec["update_interval"]<ne>self.update_spec["sync_interval"]<floordiv>self.update_spec["update_interval"]<block_start><raise>RLGraphError("ERROR: sync_interval ({}) must be a multiple of update_interval "<concat>"({})!".format(self.update_spec["sync_interval"] self.update_spec["update_interval"]))<block_end><block_end><elif_stmt>"sync_tau"<in>self.update_spec<block_start><if_stmt>self.update_spec["sync_tau"]<le>0<or>self.update_spec["sync_tau"]<g>1.0<block_start><raise>RLGraphError("sync_tau ({}) must be in interval (0.0, 1.0]!".format(self.update_spec["sync_tau"]))<block_end><block_end><else_stmt><block_start>self.update_spec["sync_tau"]=0.005<block_end># The value mentioned in the paper # Extend input Space definitions to this Agent's specific API-methods.
preprocessed_state_space=self.preprocessed_state_space.with_batch_rank()<line_sep>reward_space=FloatBox(add_batch_rank=<true>)<line_sep>terminal_space=BoolBox(add_batch_rank=<true>)<line_sep>#self.iterations = self.update_spec["num_iterations"] self.batch_size=self.update_spec["batch_size"]<line_sep>float_action_space=self.action_space.with_batch_rank().map(mapping=<lambda>flat_key space:space.as_one_hot_float_space()<if>isinstance(space IntBox)<else>space)<line_sep>self.input_spaces.update(dict(env_actions=self.action_space.with_batch_rank() actions=float_action_space preprocessed_states=preprocessed_state_space rewards=reward_space terminals=terminal_space next_states=preprocessed_state_space states=self.state_space.with_batch_rank(add_batch_rank=<true>) batch_size=int importance_weights=FloatBox(add_batch_rank=<true>) deterministic=bool weights="variables:{}".format(self.policy.scope)))<if_stmt>value_function_sync_spec<is><none><block_start>value_function_sync_spec=SyncSpecification(sync_interval=self.update_spec["sync_interval"]<floordiv>self.update_spec["update_interval"] sync_tau=self.update_spec["sync_tau"]<if>"sync_tau"<in>self.update_spec<else>5e-3)<block_end>self.memory=Memory.from_spec(memory_spec)<line_sep>self.alpha_optimizer=self.optimizer.copy(scope="alpha-"+self.optimizer.scope)<if>self.target_entropy<is><not><none><else><none><line_sep>self.root_component=SACAgentComponent(agent=self policy=self.policy q_function=self.value_function preprocessor=self.preprocessor memory=self.memory discount=self.discount initial_alpha=self.initial_alpha target_entropy=target_entropy optimizer=self.optimizer vf_optimizer=self.value_function_optimizer alpha_optimizer=self.alpha_optimizer q_sync_spec=value_function_sync_spec num_q_functions=2<if>self.double_q<is><true><else>1)<line_sep>extra_optimizers=[self.value_function_optimizer]<if_stmt>self.alpha_optimizer<is><not><none><block_start>extra_optimizers.append(self.alpha_optimizer)<block_end>self.build_options=dict(optimizers=extra_optimizers)<if_stmt>self.auto_build<block_start>self._build_graph([self.root_component] self.input_spaces optimizer=self.optimizer batch_size=self.update_spec["batch_size"] build_options=self.build_options)<line_sep>self.graph_built=<true><block_end><block_end><def_stmt>set_weights self policy_weights value_function_weights=<none># TODO: Overrides parent but should this be policy of value function? <block_start><return>self.graph_executor.execute((self.root_component.set_policy_weights policy_weights))<block_end><def_stmt>get_weights self<block_start><return>dict(policy_weights=self.graph_executor.execute(self.root_component.get_policy_weights))<block_end><def_stmt>get_action self states internals=<none> use_exploration=<true> apply_preprocessing=<true> extra_returns=<none> time_percentage=<none># TODO: common pattern - move to Agent <block_start>""" Args: extra_returns (Optional[Set[str],str]): Optional string or set of strings for additional return values (besides the actions). Possible values are: - 'preprocessed_states': The preprocessed states after passing the given states through the preprocessor stack. - 'internal_states': The internal states returned by the RNNs in the NN pipeline. - 'used_exploration': Whether epsilon- or noise-based exploration was used or not. 
Returns: tuple or single value depending on `extra_returns`: - action - the preprocessed states """<line_sep>extra_returns={extra_returns}<if>isinstance(extra_returns str)<else>(extra_returns<or>set())<line_sep># States come in without preprocessing -> use state space. <if_stmt>apply_preprocessing<block_start>call_method=self.root_component.get_preprocessed_state_and_action<line_sep>batched_states,remove_batch_rank=self.state_space.force_batch(states)<block_end><else_stmt><block_start>call_method=self.root_component.action_from_preprocessed_state<line_sep>batched_states=states<line_sep>remove_batch_rank=<false><block_end>#remove_batch_rank = batched_states.ndim == np.asarray(states).ndim + 1 # Increase timesteps by the batch size (number of states in batch). batch_size=len(batched_states)<line_sep>self.timesteps<augadd>batch_size<line_sep># Control which return values to "pull" (depending on `extra_returns`). return_ops=[0 1]<if>"preprocessed_states"<in>extra_returns<else>[0]<line_sep>ret=force_list(self.graph_executor.execute((call_method [batched_states <not>use_exploration] # deterministic = not use_exploration # 0=preprocessed_states, 1=action return_ops)))<line_sep># Convert Gumbel (relaxed one-hot) sample back into int type for all discrete composite actions. <if_stmt>isinstance(self.action_space ContainerSpace)<block_start>ret[0]=ret[0].map(mapping=<lambda>key action:np.argmax(action axis=-1).astype(action.dtype)<if>isinstance(self.flat_action_space[key] IntBox)<else>action)<block_end><elif_stmt>isinstance(self.action_space IntBox)<block_start>ret[0]=np.argmax(ret[0] axis=-1).astype(self.action_space.dtype)<block_end><if_stmt>remove_batch_rank<block_start>ret[0]=strip_list(ret[0])<block_end><if_stmt>"preprocessed_states"<in>extra_returns<block_start><return>ret[0] ret[1]<block_end><else_stmt><block_start><return>ret[0]<block_end><block_end><def_stmt>_observe_graph self preprocessed_states actions internals rewards next_states terminals<block_start>self.graph_executor.execute((self.root_component.insert_records [preprocessed_states actions rewards next_states terminals]))<block_end><def_stmt>update self batch=<none> time_percentage=<none> **kwargs<block_start><if_stmt>batch<is><none><block_start>size=self.graph_executor.execute(self.root_component.get_memory_size)<line_sep># TODO: is this necessary? <if_stmt>size<l>self.batch_size<block_start><return>0.0 0.0 0.0 0.0<block_end>ret=self.graph_executor.execute((self.root_component.update_from_memory [self.batch_size time_percentage]))<block_end><else_stmt><block_start>ret=self.graph_executor.execute((self.root_component.update_from_external_batch [batch["states"] batch["actions"] batch["rewards"] batch["terminals"] batch["next_states"] batch["importance_weights"] time_percentage]))<block_end><return>ret["actor_loss"] ret["actor_loss_per_item"] ret["critic_loss"] ret["alpha_loss"]<block_end><def_stmt>reset self<block_start>""" Resets our preprocessor, but only if it contains stateful PreprocessLayer Components (meaning the PreprocessorStack has at least one variable defined). """<if_stmt>self.preprocessing_required<and>len(self.preprocessor.variables)<g>0<block_start>self.graph_executor.execute("reset_preprocessor")<block_end>self.graph_executor.execute(self.root_component.reset_targets)<block_end><def_stmt>__repr__ self<block_start><return>"SACAgent(double-q={}, initial-alpha={}, target-entropy={})".format(self.double_q self.initial_alpha self.target_entropy)<block_end><block_end>
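# Illustrative usage sketch (not part of the original agent code): SACAgent.update()
# accepts either no batch (it then samples from the agent's own memory) or an external
# batch dict; the keys below are exactly the ones read by update_from_external_batch above.
# The `agent` object, the state dimension (4), the action dimension (2) and the batch size
# are hypothetical placeholders for an already-built SACAgent with a continuous action
# space, so the agent calls are left commented out.
import numpy as np

batch_size = 32
external_batch = {
    "states": np.random.randn(batch_size, 4).astype(np.float32),
    "actions": np.random.uniform(-1.0, 1.0, size=(batch_size, 2)).astype(np.float32),
    "rewards": np.random.randn(batch_size).astype(np.float32),
    "terminals": np.zeros(batch_size, dtype=np.bool_),
    "next_states": np.random.randn(batch_size, 4).astype(np.float32),
    "importance_weights": np.ones(batch_size, dtype=np.float32),
}

# Single-state action query; get_action() adds a batch rank and strips it again.
# action = agent.get_action(np.random.randn(4).astype(np.float32))

# One update step; update() returns (actor_loss, actor_loss_per_item, critic_loss, alpha_loss).
# actor_loss, actor_loss_per_item, critic_loss, alpha_loss = agent.update(batch=external_batch)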
<import_stmt>datetime<import_from_stmt>decimal Decimal<import_stmt>unittest<import_from_stmt>qstrader.event FillEvent OrderEvent SignalEvent<import_from_stmt>qstrader.portfolio_handler PortfolioHandler<import_from_stmt>qstrader.price_handler.base AbstractTickPriceHandler<import_from_stmt>qstrader.compat queue<class_stmt>PriceHandlerMock(AbstractTickPriceHandler)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>get_best_bid_ask self ticker<block_start>prices={"MSFT":(Decimal("50.28") Decimal("50.31")) "GOOG":(Decimal("705.46") Decimal("705.46")) "AMZN":(Decimal("564.14") Decimal("565.14")) }<line_sep><return>prices[ticker]<block_end><block_end><class_stmt>PositionSizerMock(object)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>size_order self portfolio initial_order<block_start>""" This PositionSizerMock object simply modifies the quantity to be 100 of any share transacted. """<line_sep>initial_order.quantity=100<line_sep><return>initial_order<block_end><block_end><class_stmt>RiskManagerMock(object)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>refine_orders self portfolio sized_order<block_start>""" This RiskManagerMock object simply lets the sized order through, creates the corresponding OrderEvent object and adds it to a list. """<line_sep>order_event=OrderEvent(sized_order.ticker sized_order.action sized_order.quantity)<line_sep><return>[order_event]<block_end><block_end><class_stmt>TestSimpleSignalOrderFillCycleForPortfolioHandler(unittest.TestCase)<block_start>""" Tests a simple Signal, Order and Fill cycle for the PortfolioHandler. This is, in effect, a sanity check. """<def_stmt>setUp self<block_start>""" Set up the PortfolioHandler object supplying it with $500,000.00 USD in initial cash. """<line_sep>initial_cash=Decimal("500000.00")<line_sep>events_queue=queue.Queue()<line_sep>price_handler=PriceHandlerMock()<line_sep>position_sizer=PositionSizerMock()<line_sep>risk_manager=RiskManagerMock()<line_sep># Create the PortfolioHandler object from the rest self.portfolio_handler=PortfolioHandler(initial_cash events_queue price_handler position_sizer risk_manager)<block_end><def_stmt>test_create_order_from_signal_basic_check self<block_start>""" Tests the "_create_order_from_signal" method as a basic sanity check. """<line_sep>signal_event=SignalEvent("MSFT" "BOT")<line_sep>order=self.portfolio_handler._create_order_from_signal(signal_event)<line_sep>self.assertEqual(order.ticker "MSFT")<line_sep>self.assertEqual(order.action "BOT")<line_sep>self.assertEqual(order.quantity 0)<block_end><def_stmt>test_place_orders_onto_queue_basic_check self<block_start>""" Tests the "_place_orders_onto_queue" method as a basic sanity check. """<line_sep>order=OrderEvent("MSFT" "BOT" 100)<line_sep>order_list=[order]<line_sep>self.portfolio_handler._place_orders_onto_queue(order_list)<line_sep>ret_order=self.portfolio_handler.events_queue.get()<line_sep>self.assertEqual(ret_order.ticker "MSFT")<line_sep>self.assertEqual(ret_order.action "BOT")<line_sep>self.assertEqual(ret_order.quantity 100)<block_end><def_stmt>test_convert_fill_to_portfolio_update_basic_check self<block_start>""" Tests the "_convert_fill_to_portfolio_update" method as a basic sanity check. 
"""<line_sep>fill_event_buy=FillEvent(datetime.datetime.utcnow() "MSFT" "BOT" 100 "ARCA" Decimal("50.25") Decimal("1.00"))<line_sep>self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_buy)<line_sep># Check the Portfolio values within the PortfolioHandler port=self.portfolio_handler.portfolio<line_sep>self.assertEqual(port.cur_cash Decimal("494974.00"))<line_sep># TODO: Finish this off and check it works via Interactive Brokers fill_event_sell=FillEvent(datetime.datetime.utcnow() "MSFT" "SLD" 100 "ARCA" Decimal("50.25") Decimal("1.00"))<line_sep>self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_sell)<block_end><def_stmt>test_on_signal_basic_check self<block_start>""" Tests the "on_signal" method as a basic sanity check. """<line_sep>signal_event=SignalEvent("MSFT" "BOT")<line_sep>self.portfolio_handler.on_signal(signal_event)<line_sep>ret_order=self.portfolio_handler.events_queue.get()<line_sep>self.assertEqual(ret_order.ticker "MSFT")<line_sep>self.assertEqual(ret_order.action "BOT")<line_sep>self.assertEqual(ret_order.quantity 100)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>