Commit ·
795c5a5
1
Parent(s): efabd0e
Upload folder using huggingface_hub
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +4 -0
- .idea/.gitignore +3 -0
- .idea/OffsetCorrection.iml +10 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +7 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- .idea/workspace.xml +198 -0
- Clustering.py +35 -0
- DDGP_torch.py +96 -0
- Gradio_Test.py +81 -0
- LinearRegressionExample.py +224 -0
- LogisticRegressionExample.py +14 -0
- MyEnv.py +241 -0
- OffsetCorrectionDS.xlsx +0 -0
- Oreilly_DecTree.py +37 -0
- Oreilly_Ensamble_RandomForest.py +74 -0
- Oreilly_Example1.py +376 -0
- Oreilly_Example2.py +255 -0
- Oreilly_Example3.py +34 -0
- Oreilly_LogisticReg.py +24 -0
- Oreilly_PolynomialReg.py +27 -0
- Oreilly_SVM.py +81 -0
- Panda.py +177 -0
- Q-Learning Example.py +134 -0
- README.md +3 -9
- RoboEnv_View.py +153 -0
- SimplifiedOffsetCorrectionDS.xlsx +0 -0
- data_X.csv +3 -0
- data_Y.csv +0 -0
- data_visualization.py +75 -0
- datasets/housing/housing.csv +0 -0
- datasets/housing/housing.tgz +3 -0
- dqn_lunar.zip +3 -0
- flagged/Polynomial SVM Classifier/tmp1qkjy2sm.json +1 -0
- flagged/log.csv +102 -0
- iris_tree.dot +13 -0
- iris_tree.png +0 -0
- kaggle_example.py +52 -0
- main.py +43 -0
- meshes/katana/katana_base_link.stl +0 -0
- meshes/katana/katana_gripper_l_finger.stl +0 -0
- meshes/katana/katana_gripper_link.stl +0 -0
- meshes/katana/katana_gripper_r_finger.stl +0 -0
- meshes/katana/katana_internal_controlbox.stl +0 -0
- meshes/katana/katana_motor1_pan_link.stl +0 -0
- meshes/katana/katana_motor2_lift_link.stl +0 -0
- meshes/katana/katana_motor3_lift_link.stl +0 -0
- meshes/katana/katana_motor4_lift_link.stl +0 -0
- meshes/katana/katana_motor5_wrist_roll_link.stl +0 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
data_X.csv filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
swig/Doc/Manual/SWIGDocumentation.pdf filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
swig/swig.exe filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
venv/Lib/site-packages/Box2D/_Box2D.cp38-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
.idea/.gitignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Default ignored files
|
| 2 |
+
/shelf/
|
| 3 |
+
/workspace.xml
|
.idea/OffsetCorrection.iml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<module type="PYTHON_MODULE" version="4">
|
| 3 |
+
<component name="NewModuleRootManager">
|
| 4 |
+
<content url="file://$MODULE_DIR$">
|
| 5 |
+
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
| 6 |
+
</content>
|
| 7 |
+
<orderEntry type="jdk" jdkName="Python 3.8 (3)" jdkType="Python SDK" />
|
| 8 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
| 9 |
+
</component>
|
| 10 |
+
</module>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<settings>
|
| 3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
| 4 |
+
<version value="1.0" />
|
| 5 |
+
</settings>
|
| 6 |
+
</component>
|
.idea/misc.xml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (3)" project-jdk-type="Python SDK" />
|
| 4 |
+
<component name="PythonCompatibilityInspectionAdvertiser">
|
| 5 |
+
<option name="version" value="3" />
|
| 6 |
+
</component>
|
| 7 |
+
</project>
|
.idea/modules.xml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectModuleManager">
|
| 4 |
+
<modules>
|
| 5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/OffsetCorrection.iml" filepath="$PROJECT_DIR$/.idea/OffsetCorrection.iml" />
|
| 6 |
+
</modules>
|
| 7 |
+
</component>
|
| 8 |
+
</project>
|
.idea/vcs.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="VcsDirectoryMappings">
|
| 4 |
+
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
|
| 5 |
+
</component>
|
| 6 |
+
</project>
|
.idea/workspace.xml
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="AutoImportSettings">
|
| 4 |
+
<option name="autoReloadType" value="SELECTIVE" />
|
| 5 |
+
</component>
|
| 6 |
+
<component name="ChangeListManager">
|
| 7 |
+
<list default="true" id="dfea19b1-4908-4385-95c7-1034aa4a3734" name="Changes" comment="" />
|
| 8 |
+
<option name="SHOW_DIALOG" value="false" />
|
| 9 |
+
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
| 10 |
+
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
| 11 |
+
<option name="LAST_RESOLUTION" value="IGNORE" />
|
| 12 |
+
</component>
|
| 13 |
+
<component name="FileTemplateManagerImpl">
|
| 14 |
+
<option name="RECENT_TEMPLATES">
|
| 15 |
+
<list>
|
| 16 |
+
<option value="Python Script" />
|
| 17 |
+
</list>
|
| 18 |
+
</option>
|
| 19 |
+
</component>
|
| 20 |
+
<component name="Git.Settings">
|
| 21 |
+
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$/../.." />
|
| 22 |
+
</component>
|
| 23 |
+
<component name="MarkdownSettingsMigration">
|
| 24 |
+
<option name="stateVersion" value="1" />
|
| 25 |
+
</component>
|
| 26 |
+
<component name="ProjectId" id="2R5tSkfENsWTRYKIGSNHXfkC3Ne" />
|
| 27 |
+
<component name="ProjectViewState">
|
| 28 |
+
<option name="hideEmptyMiddlePackages" value="true" />
|
| 29 |
+
<option name="showLibraryContents" value="true" />
|
| 30 |
+
</component>
|
| 31 |
+
<component name="PropertiesComponent">{
|
| 32 |
+
"keyToString": {
|
| 33 |
+
"RunOnceActivity.ShowReadmeOnStart": "true",
|
| 34 |
+
"ToolWindowRun.ShowToolbar": "false",
|
| 35 |
+
"git-widget-placeholder": "master",
|
| 36 |
+
"settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
|
| 37 |
+
}
|
| 38 |
+
}</component>
|
| 39 |
+
<component name="RunManager" selected="Python.data_visualization">
|
| 40 |
+
<configuration name="Oreilly_Ensamble_RandomForest" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
| 41 |
+
<module name="OffsetCorrection" />
|
| 42 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 43 |
+
<option name="PARENT_ENVS" value="true" />
|
| 44 |
+
<envs>
|
| 45 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 46 |
+
</envs>
|
| 47 |
+
<option name="SDK_HOME" value="" />
|
| 48 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 49 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 50 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 51 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 52 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/Oreilly_Ensamble_RandomForest.py" />
|
| 53 |
+
<option name="PARAMETERS" value="" />
|
| 54 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 55 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 56 |
+
<option name="MODULE_MODE" value="false" />
|
| 57 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 58 |
+
<option name="INPUT_FILE" value="" />
|
| 59 |
+
<method v="2" />
|
| 60 |
+
</configuration>
|
| 61 |
+
<configuration name="Oreilly_Example1" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
| 62 |
+
<module name="OffsetCorrection" />
|
| 63 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 64 |
+
<option name="PARENT_ENVS" value="true" />
|
| 65 |
+
<envs>
|
| 66 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 67 |
+
</envs>
|
| 68 |
+
<option name="SDK_HOME" value="" />
|
| 69 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 70 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 71 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 72 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 73 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/Oreilly_Example1.py" />
|
| 74 |
+
<option name="PARAMETERS" value="" />
|
| 75 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 76 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 77 |
+
<option name="MODULE_MODE" value="false" />
|
| 78 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 79 |
+
<option name="INPUT_FILE" value="" />
|
| 80 |
+
<method v="2" />
|
| 81 |
+
</configuration>
|
| 82 |
+
<configuration name="Oreilly_LogisticReg" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
| 83 |
+
<module name="OffsetCorrection" />
|
| 84 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 85 |
+
<option name="PARENT_ENVS" value="true" />
|
| 86 |
+
<envs>
|
| 87 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 88 |
+
</envs>
|
| 89 |
+
<option name="SDK_HOME" value="" />
|
| 90 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 91 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 92 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 93 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 94 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/Oreilly_LogisticReg.py" />
|
| 95 |
+
<option name="PARAMETERS" value="" />
|
| 96 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 97 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 98 |
+
<option name="MODULE_MODE" value="false" />
|
| 99 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 100 |
+
<option name="INPUT_FILE" value="" />
|
| 101 |
+
<method v="2" />
|
| 102 |
+
</configuration>
|
| 103 |
+
<configuration name="Oreilly_SVM" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
| 104 |
+
<module name="OffsetCorrection" />
|
| 105 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 106 |
+
<option name="PARENT_ENVS" value="true" />
|
| 107 |
+
<envs>
|
| 108 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 109 |
+
</envs>
|
| 110 |
+
<option name="SDK_HOME" value="" />
|
| 111 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 112 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 113 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 114 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 115 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/Oreilly_SVM.py" />
|
| 116 |
+
<option name="PARAMETERS" value="" />
|
| 117 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 118 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 119 |
+
<option name="MODULE_MODE" value="false" />
|
| 120 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 121 |
+
<option name="INPUT_FILE" value="" />
|
| 122 |
+
<method v="2" />
|
| 123 |
+
</configuration>
|
| 124 |
+
<configuration name="data_visualization" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
| 125 |
+
<module name="OffsetCorrection" />
|
| 126 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 127 |
+
<option name="PARENT_ENVS" value="true" />
|
| 128 |
+
<envs>
|
| 129 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 130 |
+
</envs>
|
| 131 |
+
<option name="SDK_HOME" value="" />
|
| 132 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 133 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 134 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 135 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 136 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/data_visualization.py" />
|
| 137 |
+
<option name="PARAMETERS" value="" />
|
| 138 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 139 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 140 |
+
<option name="MODULE_MODE" value="false" />
|
| 141 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 142 |
+
<option name="INPUT_FILE" value="" />
|
| 143 |
+
<method v="2" />
|
| 144 |
+
</configuration>
|
| 145 |
+
<configuration name="main" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
|
| 146 |
+
<module name="OffsetCorrection" />
|
| 147 |
+
<option name="INTERPRETER_OPTIONS" value="" />
|
| 148 |
+
<option name="PARENT_ENVS" value="true" />
|
| 149 |
+
<envs>
|
| 150 |
+
<env name="PYTHONUNBUFFERED" value="1" />
|
| 151 |
+
</envs>
|
| 152 |
+
<option name="SDK_HOME" value="" />
|
| 153 |
+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
|
| 154 |
+
<option name="IS_MODULE_SDK" value="true" />
|
| 155 |
+
<option name="ADD_CONTENT_ROOTS" value="true" />
|
| 156 |
+
<option name="ADD_SOURCE_ROOTS" value="true" />
|
| 157 |
+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/main.py" />
|
| 158 |
+
<option name="PARAMETERS" value="" />
|
| 159 |
+
<option name="SHOW_COMMAND_LINE" value="false" />
|
| 160 |
+
<option name="EMULATE_TERMINAL" value="false" />
|
| 161 |
+
<option name="MODULE_MODE" value="false" />
|
| 162 |
+
<option name="REDIRECT_INPUT" value="false" />
|
| 163 |
+
<option name="INPUT_FILE" value="" />
|
| 164 |
+
<method v="2" />
|
| 165 |
+
</configuration>
|
| 166 |
+
<recent_temporary>
|
| 167 |
+
<list>
|
| 168 |
+
<item itemvalue="Python.data_visualization" />
|
| 169 |
+
<item itemvalue="Python.Oreilly_Ensamble_RandomForest" />
|
| 170 |
+
<item itemvalue="Python.Oreilly_Example1" />
|
| 171 |
+
<item itemvalue="Python.Oreilly_SVM" />
|
| 172 |
+
<item itemvalue="Python.Oreilly_LogisticReg" />
|
| 173 |
+
</list>
|
| 174 |
+
</recent_temporary>
|
| 175 |
+
</component>
|
| 176 |
+
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
|
| 177 |
+
<component name="TaskManager">
|
| 178 |
+
<task active="true" id="Default" summary="Default task">
|
| 179 |
+
<changelist id="dfea19b1-4908-4385-95c7-1034aa4a3734" name="Changes" comment="" />
|
| 180 |
+
<created>1686551771847</created>
|
| 181 |
+
<option name="number" value="Default" />
|
| 182 |
+
<option name="presentableId" value="Default" />
|
| 183 |
+
<updated>1686551771847</updated>
|
| 184 |
+
</task>
|
| 185 |
+
<servers />
|
| 186 |
+
</component>
|
| 187 |
+
<component name="XDebuggerManager">
|
| 188 |
+
<breakpoint-manager>
|
| 189 |
+
<breakpoints>
|
| 190 |
+
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
|
| 191 |
+
<url>file://$PROJECT_DIR$/RoboEnv_View.py</url>
|
| 192 |
+
<line>135</line>
|
| 193 |
+
<option name="timeStamp" value="1" />
|
| 194 |
+
</line-breakpoint>
|
| 195 |
+
</breakpoints>
|
| 196 |
+
</breakpoint-manager>
|
| 197 |
+
</component>
|
| 198 |
+
</project>
|
Clustering.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Demo: build four synthetic blob datasets that illustrate classic clustering
# difficulties (anisotropy, unequal variance, uneven cluster sizes) and plot
# the ground-truth labels in a 2x2 grid.
import numpy as np
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

n_samples = 1500
random_state = 170
# Fixed linear map used to shear the isotropic blobs into anisotropic ones.
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]

X, y = make_blobs(n_samples=n_samples, random_state=random_state)
X_aniso = np.dot(X, transformation)  # Anisotropic blobs
X_varied, y_varied = make_blobs(
    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state
)  # Unequal variance
# Keep 500/100/10 points from clusters 0/1/2 to create uneven cluster sizes.
X_filtered = np.vstack(
    (X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])
)  # Unevenly sized blobs
y_filtered = [0] * 500 + [1] * 100 + [2] * 10


fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))

axs[0, 0].scatter(X[:, 0], X[:, 1], c=y)
axs[0, 0].set_title("Mixture of Gaussian Blobs")

axs[0, 1].scatter(X_aniso[:, 0], X_aniso[:, 1], c=y)
axs[0, 1].set_title("Anisotropically Distributed Blobs")

axs[1, 0].scatter(X_varied[:, 0], X_varied[:, 1], c=y_varied)
axs[1, 0].set_title("Unequal Variance")

axs[1, 1].scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_filtered)
axs[1, 1].set_title("Unevenly Sized Blobs")

plt.suptitle("Ground truth clusters").set_y(0.95)
plt.show()
DDGP_torch.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gymnasium as gym
|
| 2 |
+
from stable_baselines3 import PPO
|
| 3 |
+
from stable_baselines3 import A2C # Actor-Critic
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
|
| 7 |
+
def iterate_models(model_no):
    """Train one RL algorithm on BipedalWalker-v3, checkpointing periodically.

    model_no 1 -> PPO, 99 rounds of 10k timesteps; model_no 2 -> A2C,
    29 rounds with a human render window. Any other value is a no-op.
    Checkpoints go to ../models/<ALGO>-<timestamp>/, tensorboard logs to
    ../logs/<ALGO>-<timestamp>/.
    """
    if model_no == 1:
        _train_checkpointed(PPO, "PPO", iterations=99, render=False)
    elif model_no == 2:
        _train_checkpointed(A2C, "A2C", iterations=29, render=True)


def _train_checkpointed(algo_cls, name, iterations, render):
    """Run `iterations` learning rounds of 10k timesteps, saving after each."""
    # Compute the timestamp once so the model and log dirs share one suffix
    # (the original computed it twice and could straddle a second boundary).
    stamp = int(time.time())
    models_dir = f"../models/{name}-{stamp}"
    log_dir = f"../logs/{name}-{stamp}"
    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # gymnasium selects the render backend at construction time; the old
    # env.render("human") call signature raises TypeError on current versions.
    env = gym.make("BipedalWalker-v3", render_mode="human" if render else None)
    env.reset()

    model = algo_cls("MlpPolicy", env, verbose=1, tensorboard_log=log_dir)  # Mlp = MultilayerPerceptron

    TIMESTEPS = 10_000
    for i in range(1, iterations + 1):
        model.learn(total_timesteps=TIMESTEPS, reset_num_timesteps=False, tb_log_name=name)
        if render:
            env.render()
        model.save(f"{models_dir}/{TIMESTEPS * i}")

    env.close()
| 48 |
+
|
| 49 |
+
|
| 50 |
+
"""import gymnasium as gym
|
| 51 |
+
|
| 52 |
+
from stable_baselines3 import DQN
|
| 53 |
+
from stable_baselines3 import PPO
|
| 54 |
+
|
| 55 |
+
from stable_baselines3.common.evaluation import evaluate_policy
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Create environment
|
| 59 |
+
env = gym.make("LunarLander-v2", render_mode="rgb_array")
|
| 60 |
+
|
| 61 |
+
# Instantiate the agent
|
| 62 |
+
model = PPO("MlpPolicy", env, verbose=1)
|
| 63 |
+
# Train the agent and display a progress bar
|
| 64 |
+
model.learn(total_timesteps=int(2e5), progress_bar=True)
|
| 65 |
+
# Save the agent
|
| 66 |
+
model.save("ppo_lunar")
|
| 67 |
+
del model # delete trained model to demonstrate loading
|
| 68 |
+
|
| 69 |
+
# Load the trained agent
|
| 70 |
+
# NOTE: if you have loading issue, you can pass `print_system_info=True`
|
| 71 |
+
# to compare the system on which the model was trained vs the current one
|
| 72 |
+
# model = DQN.load("dqn_lunar", env=env, print_system_info=True)
|
| 73 |
+
model = PPO.load("ppo_lunar", env=env)
|
| 74 |
+
|
| 75 |
+
# Evaluate the agent
|
| 76 |
+
# NOTE: If you use wrappers with your environment that modify rewards,
|
| 77 |
+
# this will be reflected here. To evaluate with original rewards,
|
| 78 |
+
# wrap environment in a "Monitor" wrapper before other wrappers.
|
| 79 |
+
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
|
| 80 |
+
|
| 81 |
+
# Enjoy trained agent
|
| 82 |
+
vec_env = model.get_env()
|
| 83 |
+
obs = vec_env.reset()
|
| 84 |
+
for i in range(1000):
|
| 85 |
+
action, _states = model.predict(obs, deterministic=True)
|
| 86 |
+
obs, rewards, dones, info = vec_env.step(action)
|
| 87 |
+
vec_env.render("human")"""
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def teach(model_no):
    """Run iterate_models for each model id from 1 through model_no."""
    model_ids = range(1, model_no + 1)
    for model_id in model_ids:
        iterate_models(model_id)
    # NOTE(review): scrape lost indentation — assuming the completion message
    # prints once after the loop; confirm against the original file.
    print(f"Teaching of {model_no} is done.")
| 94 |
+
|
| 95 |
+
|
| 96 |
+
teach(2)
|
Gradio_Test.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#Libs
|
| 2 |
+
"""
|
| 3 |
+
We can import libraries when they necessary in if clauses.
|
| 4 |
+
"""
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from matplotlib import pyplot as plt
|
| 8 |
+
import gradio as gr
|
| 9 |
+
import sklearn.datasets
|
| 10 |
+
from sklearn.metrics import confusion_matrix, mean_squared_error, roc_auc_score, precision_recall_curve
|
| 11 |
+
from sklearn.metrics import roc_curve, accuracy_score, f1_score, precision_score, recall_score
|
| 12 |
+
import os.path
|
| 13 |
+
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedShuffleSplit
|
| 14 |
+
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold, cross_val_predict
|
| 15 |
+
from sklearn.ensemble import RandomForestRegressor
|
| 16 |
+
from sklearn.impute import SimpleImputer
|
| 17 |
+
from sklearn.preprocessing import OneHotEncoder
|
| 18 |
+
from sklearn.pipeline import Pipeline
|
| 19 |
+
from sklearn.preprocessing import StandardScaler
|
| 20 |
+
from sklearn.compose import ColumnTransformer
|
| 21 |
+
from sklearn.linear_model import LinearRegression, SGDClassifier
|
| 22 |
+
from sklearn.preprocessing import PolynomialFeatures
|
| 23 |
+
from sklearn.tree import DecisionTreeRegressor
|
| 24 |
+
from sklearn.multiclass import OneVsOneClassifier
|
| 25 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 26 |
+
from sklearn.datasets import make_moons
|
| 27 |
+
from sklearn.svm import SVC
|
| 28 |
+
|
| 29 |
+
# Gradio callback: trains a polynomial-kernel SVM on synthetic moons data and
# renders its decision boundary alongside a preview of the uploaded CSV.
def polynomial_svm(file,n_sample,noise,degree,C,coef0,head_value):
    """Plot a polynomial SVM decision boundary and preview a CSV.

    file: gradio file handle — only its `.name` path is used, read via pandas.
    n_sample, noise: make_moons parameters for the training data.
    degree, C, coef0: SVC polynomial-kernel hyperparameters.
    head_value: number of CSV rows returned for the preview textbox.
    Returns (matplotlib figure, DataFrame head).
    """
    df = pd.read_csv(file.name)
    # NOTE(review): the uploaded CSV is only previewed; the classifier is
    # trained on make_moons data, not the CSV contents.
    X, y = make_moons(n_samples=n_sample, noise=noise, random_state=44)
    polynomial_svm_clf = Pipeline([
        ("scaler", StandardScaler()),
        ("svm_clf", SVC(kernel="poly", degree=degree, C=C, coef0=coef0,probability=True)),
    ])
    polynomial_svm_clf.fit(X, y)
    fig = plt.figure()
    # Create a meshgrid of points covering the input space
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    # Generate predictions for each point in the meshgrid
    Z = polynomial_svm_clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Create a color map
    cmap = plt.cm.Pastel1
    # Plot the decision boundary and the data points
    plt.contourf(xx, yy, Z, alpha=0.8, cmap=cmap)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=cmap)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.title('Polynomial SVM Classifier Decision Boundary')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())

    return fig,df.head(head_value)
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Gradio UI wiring: one input widget per polynomial_svm parameter, in the
# same order as the function signature.
inputs = [
    gr.File(label='Import a CSV File',file_count='single',file_types=['csv']),
    gr.Slider(100,3000,1500,label='Number of Samples'),
    gr.Slider(0,1,0.15,label='Noise'),
    gr.Slider(1,5,3,step=1,label='Degree'),
    gr.Slider(1,10,5,step=1,label='C'),
    gr.Slider(1,10,3,step=1,label='coefficient 0'),
    gr.Slider(1,20,10,step=1,label='Head'),
]
# Two outputs: the decision-boundary figure and the CSV head preview.
outputs = [gr.Plot(label='Polynomial SVM Classifier'),
           gr.Textbox(label='Head')]

demo = gr.Interface(
    fn = polynomial_svm,
    inputs=inputs,
    outputs=outputs,
)
if __name__ == "__main__":
    # share=True exposes a public gradio link in addition to localhost.
    demo.launch(share=True)
|
LinearRegressionExample.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Linear Regression Example
|
| 2 |
+
"""# Code source: Jaques Grobler
|
| 3 |
+
# License: BSD 3 clause
|
| 4 |
+
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import numpy as np
|
| 7 |
+
from sklearn import datasets, linear_model
|
| 8 |
+
from sklearn.metrics import mean_squared_error, r2_score
|
| 9 |
+
import pandas as pd
|
| 10 |
+
# Load the diabetes dataset
|
| 11 |
+
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
|
| 12 |
+
print(f"DATASET\n{datasets.load_diabetes()}")
|
| 13 |
+
|
| 14 |
+
# Use only one feature
|
| 15 |
+
diabetes_X = diabetes_X[:, np.newaxis, 2]
|
| 16 |
+
|
| 17 |
+
# Split the data into training/testing sets
|
| 18 |
+
diabetes_X_train = diabetes_X[:-20]
|
| 19 |
+
diabetes_X_test = diabetes_X[-20:]
|
| 20 |
+
|
| 21 |
+
# Split the targets into training/testing sets
|
| 22 |
+
diabetes_y_train = diabetes_y[:-20]
|
| 23 |
+
diabetes_y_test = diabetes_y[-20:]
|
| 24 |
+
|
| 25 |
+
# Create linear regression object
|
| 26 |
+
regr = linear_model.LinearRegression()
|
| 27 |
+
|
| 28 |
+
# Train the model using the training sets
|
| 29 |
+
regr.fit(diabetes_X_train, diabetes_y_train)
|
| 30 |
+
|
| 31 |
+
# Make predictions using the testing set
|
| 32 |
+
diabetes_y_pred = regr.predict(diabetes_X_test)
|
| 33 |
+
|
| 34 |
+
# The coefficients
|
| 35 |
+
print("Coefficients: \n", regr.coef_)
|
| 36 |
+
# The mean squared error
|
| 37 |
+
print("Mean squared error: %.2f" % mean_squared_error(diabetes_y_test, diabetes_y_pred))
|
| 38 |
+
# The coefficient of determination: 1 is perfect prediction
|
| 39 |
+
print("Coefficient of determination: %.2f" % r2_score(diabetes_y_test, diabetes_y_pred))
|
| 40 |
+
|
| 41 |
+
# Plot outputs
|
| 42 |
+
plt.scatter(diabetes_X_test, diabetes_y_test, color="black")
|
| 43 |
+
plt.plot(diabetes_X_test, diabetes_y_pred, color="blue", linewidth=3)
|
| 44 |
+
|
| 45 |
+
plt.xticks(())
|
| 46 |
+
plt.yticks(())
|
| 47 |
+
|
| 48 |
+
plt.show()"""
|
| 49 |
+
|
| 50 |
+
import numpy as np
|
| 51 |
+
import pybullet as p
|
| 52 |
+
from scipy.optimize import minimize
|
| 53 |
+
|
| 54 |
+
p.connect(p.GUI)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class OPAX:
    """Planner that picks control inputs maximizing expected information gain.

    Wraps a dynamics model (predicts the next state from controls) and an
    observation model (scores how informative that next state would be).
    """

    def __init__(self, dynamics_model, observation_model):
        self.dynamics_model = dynamics_model
        self.observation_model = observation_model

    def optimize(self, initial_state):
        """Return the control vector that maximizes information gain.

        Minimizes the negated gain with scipy, starting from a zero vector
        of length `dynamics_model.input_dim`.
        """
        def negated_gain(controls):
            next_state = self.dynamics_model.predict(controls)
            gain = self.observation_model.info_gain(initial_state, next_state)
            return -gain

        start = np.zeros(self.dynamics_model.input_dim)
        return minimize(negated_gain, start).x
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class RobotDynamicsModel:
    """Forward model: applies controls to a robot and reports the new state."""

    def __init__(self, robot):
        self.robot = robot
        # Six control inputs, one per degree of freedom.
        self.input_dim = 6

    def predict(self, control_inputs):
        """Apply `control_inputs` to the wrapped robot, return its new state."""
        self.robot.apply_control(control_inputs)
        return self.robot.get_state()
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class RobotObservationModel:
    """Scores predicted states by progress toward a target point, with a
    large bonus when the robot's end effector contacts the tracked box."""

    def __init__(self, robot, box_id, target_point):
        self.robot = robot
        self.box_id = box_id
        self.target_point = target_point

    def info_gain(self, initial_state, predicted_state):
        """Return the reduction in distance-to-target, plus 1000 on contact.

        Only the first three components of each state (the position part)
        are compared against the target point.
        """
        dist_before = np.linalg.norm(self.target_point - initial_state[:3])
        dist_after = np.linalg.norm(self.target_point - predicted_state[:3])
        gain = dist_before - dist_after
        # Contact with the box is treated as highly informative (a reward).
        if self.robot.check_collision(self.box_id):
            gain += 1000
        return gain
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class MyRobot:
    """Thin wrapper around a PyBullet-loaded URDF arm.

    A "state" here is the 6-vector [x, y, z, roll, pitch, yaw] of the robot
    BASE (not the joints), and control is applied as per-joint torques on
    the first six joints.
    """

    def __init__(self):
        # Load the robot model into PyBullet.
        # NOTE(review): hard-coded absolute Windows path — breaks on any
        # other machine; consider making it a parameter.
        self.robot_id = p.loadURDF("C:/IT/Assembly/urdf/Assembly.urdf")
        self.end_effector_index = 6  # Assuming the last link is the end effector

    def check_collision(self, other_body_id):
        # Contact query: list of closest points between the end-effector
        # link and other_body_id within distance 0 — i.e. actual contacts
        # only. An empty list means no collision.
        return p.getClosestPoints(self.robot_id, other_body_id, distance=0, linkIndexA=self.end_effector_index)

    def set_state(self, state):
        # Set the position and orientation of the robot's base.
        # state[:3] is the xyz position; state[3:] are Euler angles,
        # converted to the quaternion form PyBullet expects.
        position = state[:3]
        orientation = p.getQuaternionFromEuler(state[3:])
        p.resetBasePositionAndOrientation(self.robot_id, position, orientation)

    def apply_control(self, control_inputs):
        # Apply control inputs to the robot.
        # Here, we assume that control_inputs is an array of joint torques.
        for i in range(6):  # 6 DOF
            p.setJointMotorControl2(self.robot_id, i, p.TORQUE_CONTROL, force=control_inputs[i])

        # Step the simulation forward one tick so the torques take effect.
        p.stepSimulation()

    def get_state(self):
        # Get the current state of the robot.
        # Here, we'll just get the position and orientation of the robot's base.
        position, orientation = p.getBasePositionAndOrientation(self.robot_id)

        # Convert the orientation from a quaternion to Euler angles for simplicity.
        orientation = p.getEulerFromQuaternion(orientation)

        # Concatenate position and orientation into a single 6-element array.
        state = np.concatenate([position, orientation])

        return state
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# Initialize PyBullet and the robot
# NOTE(review): p.connect(p.GUI) was already called near the top of the file;
# this second connect opens an additional DIRECT physics server and later
# calls use the most recent client — confirm which backend is intended.
p.connect(p.DIRECT)
robot = MyRobot()  # Replace with your robot class

# Define the target point in 3D space
target_point = np.array([1, 1, 1])
# Load a box at the target point
box_id = p.loadURDF("C:/IT/Assembly/urdf/target.urdf", target_point)
# Create the dynamics model and observation model
dynamics_model = RobotDynamicsModel(robot)
# NOTE(review): target_point is redefined here, so rewards are measured
# against [1, 2, 3] while the box sits at [1, 1, 1] — confirm this mismatch
# is intentional.
target_point = np.array([1,2,3])
observation_model = RobotObservationModel(robot, box_id,target_point)

# Create an instance of OPAX
opax = OPAX(dynamics_model, observation_model)

# Use OPAX to find the optimal control inputs
initial_state = robot.get_state()
optimal_control_inputs = opax.optimize(initial_state)
import time

# ... (rest of your code)

# Number of episodes for training
NUM_EPISODES = 10_000

# Range of possible initial states: [x, y, z, roll, pitch, yaw] lower and
# upper bounds for uniform sampling.
initial_state_range = np.array([[0, 0, 0, -np.pi, -np.pi, -np.pi],
                                [1, 1, 1, np.pi, np.pi, np.pi]])
rewards = []
import matplotlib.pyplot as plt
for episode in range(NUM_EPISODES):
    # Choose a random initial state within the specified range
    initial_state = np.random.uniform(initial_state_range[0], initial_state_range[1])

    # Set the robot's state to the initial state
    robot.set_state(initial_state)

    # Use OPAX to find the optimal control inputs
    optimal_control_inputs = opax.optimize(initial_state)

    # Apply the control inputs to the robot
    robot.apply_control(optimal_control_inputs)

    # Step the simulation forward
    p.stepSimulation()

    # Only visualize every 1000th episode
    if episode % 1000 == 0:
        for _ in range(1000):
            p.stepSimulation()
            time.sleep(0.01)
    # Reset the state of the robot
    # NOTE(review): the reset/optimize/apply sequence below repeats the work
    # already done earlier in this same iteration — confirm the duplication
    # is intentional (it doubles the per-episode cost).
    robot.set_state(initial_state)

    # Optimize the control inputs
    optimal_control_inputs = opax.optimize(initial_state)

    # Apply the optimal control inputs to the robot
    robot.apply_control(optimal_control_inputs)

    # Get the new state of the robot
    new_state = robot.get_state()
    reward = observation_model.info_gain(initial_state,new_state)
    rewards.append(reward)

    # Check for collision between the robot's end effector and the box
    collision_points = robot.check_collision(box_id)
    if collision_points:
        print(f"Success in episode {episode} {reward}!")

    else:
        print(f"Failure in episode {episode} {reward}.")

# Learning-curve plot: per-episode information-gain reward.
plt.plot(rewards)
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.title('Learning Curve')
plt.show()
|
| 224 |
+
|
LogisticRegressionExample.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np

# Tumor sizes in centimeters, reshaped into a single-feature column vector —
# the 2-D layout scikit-learn estimators require.
X = np.array([3.78, 2.44, 2.09, 0.14, 1.72, 1.65, 4.92, 4.37, 4.96, 4.52, 3.69, 5.88]).reshape(-1,1)

# Labels: 1 if the tumor is cancerous, 0 otherwise.
y = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
model.fit(X, y)

# Predict whether a 3.46 cm tumor is cancerous and print the result.
predicted = model.predict(np.array([3.46]).reshape(-1,1))
print(predicted)
|
MyEnv.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gym
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
import tensorflow as tf
|
| 5 |
+
import keras
|
| 6 |
+
from keras.layers import Dense
|
| 7 |
+
from keras.optimizers import Adam
|
| 8 |
+
from keras.metrics import MSE
|
| 9 |
+
|
| 10 |
+
class ReplayBuffer:
    """Fixed-size circular buffer of (state, action, reward, next_state,
    done) transitions for off-policy RL training.

    Oldest entries are overwritten once ``max_size`` transitions have been
    stored.
    """

    def __init__(self, max_size, input_shape, n_actions):
        self.mem_size = max_size
        self.mem_cntr = 0  # total number of transitions ever stored
        self.state_memory = np.zeros((self.mem_size, *input_shape))
        self.new_state_memory = np.zeros((self.mem_size, *input_shape))
        self.action_memory = np.zeros((self.mem_size, n_actions))
        self.reward_memory = np.zeros(self.mem_size)
        # np.bool was deprecated and removed in NumPy 1.24; the builtin
        # bool is the correct dtype.
        self.terminal_memory = np.zeros(self.mem_size, dtype=bool)

    def store_transition(self, state, action, reward, new_state, done):
        """Insert one transition, overwriting the oldest slot when full."""
        index = self.mem_cntr % self.mem_size

        self.state_memory[index] = state
        self.new_state_memory[index] = new_state
        self.action_memory[index] = action
        self.reward_memory[index] = reward
        self.terminal_memory[index] = done
        # BUG FIX: the counter was never advanced, so the buffer always
        # reported itself empty — Agent.learn() never triggered and
        # sample_buffer() would fail on an empty range.
        self.mem_cntr += 1

    def sample_buffer(self, batch_size):
        """Return a uniform random batch (without replacement) of stored
        transitions as (states, actions, rewards, next_states, dones)."""
        max_mem = min(self.mem_cntr, self.mem_size)
        batch = np.random.choice(max_mem, batch_size, replace=False)

        states = self.state_memory[batch]
        states_ = self.new_state_memory[batch]
        actions = self.action_memory[batch]
        rewards = self.reward_memory[batch]
        dones = self.terminal_memory[batch]

        return states, actions, rewards, states_, dones
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class CriticNetwork(keras.Model):
    """State-action value network Q(s, a) for DDPG.

    Two ReLU hidden layers over the concatenated (state, action) input,
    followed by a linear scalar head.
    """

    def __init__(self, n_actions, fc1_dims=512, fc2_dims=512, name='critic', chkpt_dir='tmp/ddpg'):
        super(CriticNetwork, self).__init__()
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions

        self.model_name = name
        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, self.model_name + '_ddpg.h5')

        self.fc1 = Dense(self.fc1_dims, activation='relu')
        self.fc2 = Dense(self.fc2_dims, activation='relu')
        self.q = Dense(1, activation=None)  # linear head: unbounded Q-value

    def call(self, state, action):
        """Return Q(state, action) for a batch; inputs are concatenated
        along the feature axis."""
        action_value = self.fc1(tf.concat([state, action], axis=1))
        # BUG FIX: the second hidden layer previously applied self.fc1 a
        # second time; self.fc2 was constructed but never used.
        action_value = self.fc2(action_value)

        q = self.q(action_value)

        return q
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class ActorNetwork(keras.Model):
    """Deterministic policy network mu(s) for DDPG.

    Two ReLU hidden layers and a tanh output head, so raw actions lie in
    [-1, 1]; scale outside this class if the env's bounds differ.
    """

    def __init__(self, fc1_dims=512, fc2_dims=512, n_actions=2, name='actor', chkpt_dir='tmp/ddpg'):
        super(ActorNetwork, self).__init__()
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions

        self.model_name = name
        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, self.model_name + '_ddpg.h5')

        self.fc1 = Dense(self.fc1_dims, activation='relu')
        self.fc2 = Dense(self.fc2_dims, activation='relu')
        self.mu = Dense(self.n_actions, activation='tanh')

    def call(self, state):
        """Map a batch of states to bounded deterministic actions."""
        hidden = self.fc2(self.fc1(state))
        # if action bounds not +/- 1, can multiply here
        return self.mu(hidden)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class Agent:
    """DDPG agent: deterministic actor, Q-critic, slow-moving target copies
    of both, and a uniform replay buffer."""

    def __init__(self, input_dims, alpha=0.001, beta=0.002, env=None,
                 gamma=0.99, n_actions=2, max_size=1_000_000, tau=0.005,
                 fc1=400, fc2=300, batch_size=64, noise=0.1):
        # alpha / beta: actor / critic learning rates.
        # tau: Polyak soft-update coefficient for the target networks.
        # noise: stddev of Gaussian exploration noise added to actions.
        self.gamma = gamma
        self.tau = tau
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.noise = noise
        # Assumes a Box action space whose bounds are identical for every
        # dimension (only element [0] is used) — TODO confirm for envs
        # other than Pendulum.
        self.max_action = env.action_space.high[0]
        self.min_action = env.action_space.low[0]

        # NOTE(review): the fc1/fc2 arguments are accepted but never
        # forwarded, so the networks fall back to their 512-unit defaults.
        self.actor = ActorNetwork(n_actions=n_actions, name='actor')
        self.critic = CriticNetwork(n_actions=n_actions, name='critic')
        self.target_actor = ActorNetwork(n_actions=n_actions, name='target_actor')
        self.target_critic = CriticNetwork(n_actions=n_actions, name='target_critic')

        # Target networks are never trained directly, but Keras requires
        # compile() before weights can be set/saved.
        self.actor.compile(optimizer=Adam(learning_rate=alpha))
        self.critic.compile(optimizer=Adam(learning_rate=beta))
        self.target_actor.compile(optimizer=Adam(learning_rate=alpha))
        self.target_critic.compile(optimizer=Adam(learning_rate=beta))

        # tau=1 performs a hard copy so targets start identical to the
        # online networks.
        self.update_network_parameters(tau=1)

    def update_network_parameters(self, tau=None):
        """Polyak-average online weights into the targets:
        target <- tau * online + (1 - tau) * target."""
        if tau is None:
            tau = self.tau

        weights = []
        targets = self.target_actor.weights
        for i, weight in enumerate(self.actor.weights):
            weights.append(weight * tau + targets[i] * (1 - tau))
        self.target_actor.set_weights(weights)

        weights = []
        targets = self.target_critic.weights
        for i, weight in enumerate(self.critic.weights):
            weights.append(weight * tau + targets[i] * (1 - tau))
        self.target_critic.set_weights(weights)

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def save_models(self):
        """Persist the weights of all four networks to their checkpoints."""
        print('...Saving Models...')
        self.actor.save_weights(self.actor.checkpoint_file)
        self.critic.save_weights(self.critic.checkpoint_file)
        self.target_actor.save_weights(self.target_actor.checkpoint_file)
        self.target_critic.save_weights(self.target_critic.checkpoint_file)

    def load_models(self):
        """Restore the weights of all four networks from their checkpoints."""
        print('...Loading Models...')
        self.actor.load_weights(self.actor.checkpoint_file)
        self.critic.load_weights(self.critic.checkpoint_file)
        self.target_actor.load_weights(self.target_actor.checkpoint_file)
        self.target_critic.load_weights(self.target_critic.checkpoint_file)

    def choose_action(self, observation, evaluate=False):
        """Return an action for ``observation``.

        Adds Gaussian exploration noise unless evaluate=True, then clips
        to the environment's action bounds.
        """
        state = tf.convert_to_tensor([observation], dtype=tf.float32)
        actions = self.actor(state)

        if not evaluate:
            actions += tf.random.normal(shape=[self.n_actions],
                                        mean=0.0, stddev=self.noise)

        # Clip because added noise can push the tanh output out of bounds.
        actions = tf.clip_by_value(actions, self.min_action, self.max_action)

        return actions[0]

    def learn(self):
        """One DDPG update from a sampled minibatch.

        No-op until the buffer holds at least ``batch_size`` transitions.
        """
        if self.memory.mem_cntr < self.batch_size:
            return

        state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)

        states = tf.convert_to_tensor(state, dtype=tf.float32)
        states_ = tf.convert_to_tensor(new_state, dtype=tf.float32)
        actions = tf.convert_to_tensor(action, dtype=tf.float32)
        rewards = tf.convert_to_tensor(reward, dtype=tf.float32)

        with tf.GradientTape() as tape:
            # Critic target: r + gamma * Q'(s', mu'(s')), masked at terminals.
            target_actions = self.target_actor(states_)
            critic_value_ = tf.squeeze(self.target_critic(states_, target_actions), 1)
            critic_value = tf.squeeze(self.critic(states, actions), 1)
            # NOTE(review): uses the raw numpy `reward`/`done` arrays rather
            # than the `rewards` tensor built above (which goes unused);
            # TF broadcasting makes this work, but verify dtypes line up.
            target = reward + self.gamma * critic_value_ * (1 - done)
            critic_lose = MSE(target, critic_value)
        critic_network_gradient = tape.gradient(critic_lose, self.critic.trainable_variables)
        self.critic.optimizer.apply_gradients(zip(critic_network_gradient, self.critic.trainable_variables))

        with tf.GradientTape() as tape:
            # Actor loss: maximize Q(s, mu(s)) via gradient descent on -Q.
            new_policy_actions = self.actor(states)
            actor_loss = -self.critic(states, new_policy_actions)  # Gradient descent rather than gradient ascent
            actor_loss = tf.math.reduce_mean(actor_loss)

        actor_network_gradient = tape.gradient(actor_loss, self.actor.trainable_variables)
        self.actor.optimizer.apply_gradients(zip(actor_network_gradient, self.actor.trainable_variables))

        # Drift the target networks toward the online networks.
        self.update_network_parameters()
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
if __name__ == '__main__':
    # NOTE(review): written against the pre-0.26 gym API (reset() returns
    # only the observation; step() returns a 4-tuple) — confirm the
    # installed gym version, newer gym/gymnasium changed both signatures.
    env = gym.make('Pendulum-v1')
    agent = Agent(input_dims=env.observation_space.shape, env=env,
                  n_actions=env.action_space.shape[0])

    n_games = 500

    # Intended location for a learning-curve plot (currently unused).
    figure_file = 'plots/pendulum.png'

    # Start from the worst possible score so any real average beats it.
    best_score = env.reward_range[0]
    score_history = []

    load_checkpoint = False

    if load_checkpoint:
        # Keras weights can only be loaded after the networks are built,
        # so fill the buffer with random steps and run one learn() pass
        # to materialize all layers before calling load_models().
        n_steps = 0
        while n_steps <= agent.batch_size:
            observation = env.reset()
            action = env.action_space.sample()
            observation_, reward, done, info = env.step(action)
            agent.remember(observation, action, reward, observation_, done)
            n_steps += 1
        agent.learn()
        agent.load_models()
        evaluate = True
    else:
        evaluate = False

    for i in range(n_games):
        observation = env.reset()
        done = False
        score = 0
        while not done:
            action = agent.choose_action(observation, evaluate)
            observation_, reward, done, info = env.step(action)
            score += reward
            agent.remember(observation, action, reward, observation_, done)
            if not load_checkpoint:
                agent.learn()
            observation = observation_

        score_history.append(score)
        # Rolling mean over the last 100 episodes.
        avg_score = np.mean(score_history[-100:])

        if avg_score > best_score:
            best_score = avg_score
            if not load_checkpoint:
                agent.save_models()

        print('episode', i, 'score %.1f' % score, 'avg score %.1f' % avg_score)
|
OffsetCorrectionDS.xlsx
ADDED
|
Binary file (12 kB). View file
|
|
|
Oreilly_DecTree.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Train a shallow decision tree on the iris dataset and export it for
# visualization with Graphviz.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X = iris.data[:,2:] #petal length and width
y = iris.target
# Depth 2 keeps the tree small enough to read in the rendered diagram.
tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X,y)


from sklearn.tree import export_graphviz
# Write the fitted tree as a Graphviz .dot file (render with the command
# quoted below).
export_graphviz(
    tree_clf,
    out_file="iris_tree.dot",
    feature_names=iris.feature_names[2:],
    class_names=iris.target_names,
    rounded=True,
    filled=True
)
"""
cmd -> C:\IT\PythonProject\OffsetCorrection>dot -Tpng iris_tree.dot -o iris_tree.png
"""

"""
Model Interpretation: White Box Versus Black Box
Decision Trees are fairly intuitive and their decisions are easy to inter‐
pret. Such models are often called white box models. In contrast, as we will see, Ran‐
dom Forests or neural networks are generally considered black box models. They
make great predictions, and you can easily check the calculations that they performed
to make these predictions; nevertheless, it is usually hard to explain in simple terms
why the predictions were made. For example, if a neural network says that a particu‐
lar person appears on a picture, it is hard to know what actually contributed to this
prediction: did the model recognize that person’s eyes? Her mouth? Her nose? Her
shoes? Or even the couch that she was sitting on? Conversely, Decision Trees provide
nice and simple classification rules that can even be applied manually if need be (e.g.,
for flower classification).
"""
|
Oreilly_Ensamble_RandomForest.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Ensemble demo on the two-moons dataset: soft-voting classifier, then a
# bagging ensemble of decision trees.
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier,VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
# probability=True is required for soft voting: it gives SVC a
# predict_proba() method (estimated via internal cross-validation).
svm_clf = SVC(probability=True)

voting_clf = VotingClassifier(
    estimators=[('lr',log_clf),('rf',rnd_clf),('svc',svm_clf)],
    voting='soft'
)

import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
import pandas as pd

moons = make_moons(n_samples=2000,noise=0.30,random_state=15)
print(moons[0][:,-2].shape)
# Pack features and labels into a DataFrame for convenient slicing.
df = pd.DataFrame()
df['X_1'] = moons[0][:,0]
df['X_2'] = moons[0][:,1]
df['target'] = moons[1]
print(df)

X = df.iloc[:,0:2]
print(X)
y = df.iloc[:,-1]

X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.33,random_state=42)
print('Y_train\n',y_train)
voting_clf.fit(X_train, y_train)

from sklearn.metrics import accuracy_score

# Compare each individual classifier against the soft-voting ensemble.
for clf in (log_clf,rnd_clf,svm_clf,voting_clf):
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print(clf.__class__.__name__,accuracy_score(y_test,y_pred))

"""
If all classifiers are able to estimate class probabilities (i.e., they have a pre
dict_proba() method), then you can tell Scikit-Learn to predict the class with the
highest class probability, averaged over all the individual classifiers. This is called so
voting. It often achieves higher performance than hard voting because it gives more
weight to highly confident votes. All you need to do is replace voting="hard" with
voting="soft" and ensure that all classifiers can estimate class probabilities. This is
not the case of the SVC class by default, so you need to set its probability hyperpara‐
meter to True (this will make the SVC class use cross-validation to estimate class prob‐
abilities, slowing down training, and it will add a predict_proba() method)
"""

from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

bag_clf = BaggingClassifier(DecisionTreeClassifier(), n_estimators=500,n_jobs=-1,max_samples=100,bootstrap=True)
bag_clf.fit(X_train,y_train)
y_pred = bag_clf.predict(X_test)

# BUG FIX: score() was called as score(X_test, y_pred) — i.e. the model's
# own predictions were used as ground truth, which always reports 1.0.
# Evaluate against the true test labels instead.
print(bag_clf.score(X_test, y_test))

# Plot bounds padded by 0.5 on each side of the data range.
x_min,x_max = X.iloc[:,0].min() - 0.5, X.iloc[:,0].max() + 0.5
y_min,y_max = X.iloc[:,1].min() - 0.5, X.iloc[:,1].max() + 0.5
print(x_min,x_max)
print(y_min,y_max)
cm_bright = plt.cm.RdBu
plt.scatter(X_train.iloc[:,0],X_train.iloc[:,1],c=y_train,cmap=cm_bright,edgecolors='k')
# NOTE(review): no plt.show()/savefig() follows, so the scatter never
# renders when run as a plain script — confirm whether one is missing.
|
Oreilly_Example1.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# to extract files from rar and save.
|
| 2 |
+
"""import os
|
| 3 |
+
import tarfile
|
| 4 |
+
from six.moves import urllib
|
| 5 |
+
|
| 6 |
+
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
|
| 7 |
+
HOUSING_PATH = os.path.join("datasets", "housing")
|
| 8 |
+
|
| 9 |
+
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
|
| 13 |
+
if not os.path.isdir(housing_path):
|
| 14 |
+
os.makedirs(housing_path)
|
| 15 |
+
|
| 16 |
+
tgz_path = os.path.join(housing_path, "housing.tgz")
|
| 17 |
+
urllib.request.urlretrieve(housing_url, tgz_path)
|
| 18 |
+
housing_tgz = tarfile.open(tgz_path)
|
| 19 |
+
housing_tgz.extractall(path=housing_path)
|
| 20 |
+
housing_tgz.close()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
fetch_housing_data()
|
| 24 |
+
"""
|
| 25 |
+
import os.path
|
| 26 |
+
|
| 27 |
+
import matplotlib.pyplot as plt
|
| 28 |
+
import pandas as pd
|
| 29 |
+
|
| 30 |
+
HOUSING_PATH = "datasets/housing"
|
| 31 |
+
|
| 32 |
+
pd.set_option("display.max_columns",15) #to set max column value.
|
| 33 |
+
pd.set_option("display.max_rows",15) #to set max row value.
|
| 34 |
+
pd.set_option("display.width",1200) #to set width.
|
| 35 |
+
|
| 36 |
+
#for more info about set_option -> https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.set_option.html
|
| 37 |
+
def load_housing_data(housing_path=HOUSING_PATH):
|
| 38 |
+
csv_path = os.path.join(housing_path, "housing.csv")
|
| 39 |
+
return pd.read_csv(csv_path)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
housing_data = load_housing_data("datasets/housing")
|
| 43 |
+
print(housing_data.head(15))
|
| 44 |
+
print(housing_data.columns)
|
| 45 |
+
print(housing_data.info)
|
| 46 |
+
print(housing_data.count())
|
| 47 |
+
print(housing_data["total_bedrooms"].count()) #20433 rows, means there are missing values to be handled.
|
| 48 |
+
print(housing_data.dtypes)
|
| 49 |
+
print(housing_data["total_bedrooms"].dtype)
|
| 50 |
+
print(housing_data["ocean_proximity"].dtype) # ocean_proximity is not float64.
|
| 51 |
+
|
| 52 |
+
print(housing_data["ocean_proximity"].value_counts())
|
| 53 |
+
print(type(housing_data["ocean_proximity"].describe()))
|
| 54 |
+
|
| 55 |
+
print(housing_data.describe()) #to get count, mean, standard deviation, min, 25%,50%,75%,max.
|
| 56 |
+
#histogram of the data
|
| 57 |
+
|
| 58 |
+
housing_data.hist()
|
| 59 |
+
|
| 60 |
+
plt.show()
|
| 61 |
+
|
| 62 |
+
housing_data["ocean_proximity"].hist()
|
| 63 |
+
plt.show()
|
| 64 |
+
|
| 65 |
+
import matplotlib.pyplot as plt
|
| 66 |
+
|
| 67 |
+
housing_data.hist(bins=100,figsize=(10,12),grid=False) #bins is the value of X being shown on plot.
|
| 68 |
+
"""
|
| 69 |
+
When a feature has a bell-shaped normal
|
| 70 |
+
distribution (also called a Gaussian distribution), which is very common, the “68-95-99.7” rule applies: about
|
| 71 |
+
68% of the values fall within 1σ of the mean, 95% within 2σ, and 99.7% within 3σ.
|
| 72 |
+
"""
|
| 73 |
+
plt.show()
|
| 74 |
+
|
| 75 |
+
import numpy as np
|
| 76 |
+
|
| 77 |
+
def split_train_test(data,test_ratio):
|
| 78 |
+
shuffled_indices = np.random.permutation(len(data)) #shuffle indices.
|
| 79 |
+
test_set_size = int(len(data)*test_ratio)
|
| 80 |
+
test_indices = shuffled_indices[:test_set_size]
|
| 81 |
+
train_indices = shuffled_indices[test_set_size:]
|
| 82 |
+
return data.iloc[train_indices], data.iloc[test_indices]
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
train_set, test_set = split_train_test(housing_data,0.2)
|
| 86 |
+
print(len(train_set))
|
| 87 |
+
print(len(test_set))
|
| 88 |
+
|
| 89 |
+
from sklearn.model_selection import train_test_split
|
| 90 |
+
|
| 91 |
+
train_set, test_set = train_test_split(housing_data,test_size=0.33,random_state=42)
|
| 92 |
+
|
| 93 |
+
housing_data["income_cat"] = pd.cut(housing_data["median_income"], bins=[0.,1.5,3.0,4.5,6.,np.inf],labels=[1,2,3,4,5])
|
| 94 |
+
housing_data["income_cat"].hist()
|
| 95 |
+
plt.show()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
from sklearn.model_selection import StratifiedShuffleSplit
|
| 99 |
+
split = StratifiedShuffleSplit(n_splits=1,test_size=0.33,random_state=42)
|
| 100 |
+
|
| 101 |
+
for train_index, test_index in split.split(housing_data, housing_data["income_cat"]):
|
| 102 |
+
strat_train_set = housing_data.loc[train_index]
|
| 103 |
+
strat_test_set = housing_data.loc[test_index]
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
print(strat_test_set["income_cat"].value_counts())
|
| 107 |
+
len(strat_test_set)
|
| 108 |
+
|
| 109 |
+
for set_ in (strat_train_set,strat_test_set):
|
| 110 |
+
set_.drop("income_cat",axis=1,inplace=True)
|
| 111 |
+
|
| 112 |
+
housing = strat_train_set.copy()
|
| 113 |
+
|
| 114 |
+
housing.plot(kind="scatter",x="longitude",y="latitude",alpha=0.1)
|
| 115 |
+
print(housing)
|
| 116 |
+
plt.show()
|
| 117 |
+
housing.plot(kind="scatter",x="longitude",y="latitude",alpha=0.4,s=housing["population"]/100,label="population",
|
| 118 |
+
figsize=(10,7),c="median_house_value",cmap=plt.get_cmap("jet"),colorbar=True,)
|
| 119 |
+
plt.legend()
|
| 120 |
+
plt.show()
|
| 121 |
+
|
| 122 |
+
"""Since the dataset is not too large, you can easily compute the standard correlation
|
| 123 |
+
cooefficient (also called Pearson’s r) between every pair of attributes using the corr()
|
| 124 |
+
method:"""
|
| 125 |
+
housing = housing.drop(["ocean_proximity"],axis=1) #to drop multiple columns housing.drop(["A","B"...],axis=1)
|
| 126 |
+
corr_matrix = housing.corr()
|
| 127 |
+
mhv_corr_m = corr_matrix["median_house_value"].sort_values(ascending=False)
|
| 128 |
+
|
| 129 |
+
print(mhv_corr_m)
|
| 130 |
+
"""
|
| 131 |
+
it ranges between from -1 to 1.
|
| 132 |
+
if correlation coefficient is close to 1, it means that there is a strong positive correlation.
|
| 133 |
+
coefficients close to zero mean that there is no linear correlation.
|
| 134 |
+
The correlation coefficient only measures linear correlations.
|
| 135 |
+
|
| 136 |
+
"""
|
| 137 |
+
from pandas.plotting import scatter_matrix
|
| 138 |
+
|
| 139 |
+
attributes = ["median_house_value","median_income","total_rooms","housing_median_age"]
|
| 140 |
+
scatter_matrix(housing[attributes],figsize=(12,8))
|
| 141 |
+
#plt.show()
|
| 142 |
+
|
| 143 |
+
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
|
| 144 |
+
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
|
| 145 |
+
housing["population_per_household"] = housing["population"]/housing["households"]
|
| 146 |
+
|
| 147 |
+
corr_matrix = housing.corr()
|
| 148 |
+
print(corr_matrix["median_house_value"].sort_values(ascending=False))
|
| 149 |
+
|
| 150 |
+
#DATA CLEANING
|
| 151 |
+
housing = strat_train_set.drop("median_house_value",axis=1)
|
| 152 |
+
housing_labels = strat_train_set["median_house_value"].copy()
|
| 153 |
+
|
| 154 |
+
"""
|
| 155 |
+
#option 1
|
| 156 |
+
housing.dropna(subset=["total_bedrooms"])
|
| 157 |
+
|
| 158 |
+
#option 2
|
| 159 |
+
housing.drop("total_bedrooms",axis=1)
|
| 160 |
+
|
| 161 |
+
#option 3
|
| 162 |
+
median = housing["total_bedrooms"].median()
|
| 163 |
+
housing["total_bedrooms"].fillna(median,inplace=True)
|
| 164 |
+
"""
|
| 165 |
+
|
| 166 |
+
from sklearn.impute import SimpleImputer
|
| 167 |
+
imputer = SimpleImputer(strategy="median") # only can be applied to numerical attributes.
|
| 168 |
+
|
| 169 |
+
housing_num = housing.drop("ocean_proximity",axis=1)
|
| 170 |
+
|
| 171 |
+
imputer.fit(housing_num)
|
| 172 |
+
|
| 173 |
+
print(imputer.statistics_) #medians of each numerical attribute.
|
| 174 |
+
|
| 175 |
+
X = imputer.transform(housing_num)
|
| 176 |
+
housing_transform = pd.DataFrame(X,columns=housing_num.columns)
|
| 177 |
+
|
| 178 |
+
#Handling Text and Categorical Attributes
|
| 179 |
+
|
| 180 |
+
housing_cat = housing[["ocean_proximity"]]
|
| 181 |
+
print(housing_cat.head(10))
|
| 182 |
+
|
| 183 |
+
from sklearn.preprocessing import OrdinalEncoder
|
| 184 |
+
ordinal_encoder = OrdinalEncoder()
|
| 185 |
+
|
| 186 |
+
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
|
| 187 |
+
print(housing_cat_encoded[:10])
|
| 188 |
+
print(ordinal_encoder.categories_)
|
| 189 |
+
|
| 190 |
+
"""
|
| 191 |
+
One issue with this representation is that ML algorithms will assume that two nearby
|
| 192 |
+
values are more similar than two distant values. This may be fine in some cases (e.g.,
|
| 193 |
+
for ordered categories such as “bad”, “average”, “good”, “excellent”), but it is obviously
|
| 194 |
+
not the case for the ocean_proximity column (for example, categories 0 and 4 are
|
| 195 |
+
clearly more similar than categories 0 and 1). To fix this issue, a common solution is
|
| 196 |
+
to create one binary attribute per category: one attribute equal to 1 when the category
|
| 197 |
+
is “<1H OCEAN” (and 0 otherwise), another attribute equal to 1 when the category is
|
| 198 |
+
“INLAND” (and 0 otherwise), and so on. This is called one-hot encoding, because
|
| 199 |
+
only one attribute will be equal to 1 (hot), while the others will be 0 (cold). The new
|
| 200 |
+
attributes are sometimes called dummy attributes. Scikit-Learn provides a OneHotEn
|
| 201 |
+
coder class to convert categorical values into one-hot vectors.
|
| 202 |
+
"""
|
| 203 |
+
from sklearn.preprocessing import OneHotEncoder
|
| 204 |
+
cat_encoder = OneHotEncoder()
|
| 205 |
+
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
|
| 206 |
+
print(housing_cat_1hot)
|
| 207 |
+
|
| 208 |
+
"""
|
| 209 |
+
If a categorical attribute has a large number of possible categories
|
| 210 |
+
(e.g., country code, profession, species, etc.), then one-hot encod‐
|
| 211 |
+
ing will result in a large number of input features. This may slow
|
| 212 |
+
down training and degrade performance. If this happens, you may
|
| 213 |
+
want to replace the categorical input with useful numerical features
|
| 214 |
+
related to the categories: for example, you could replace the
|
| 215 |
+
ocean_proximity feature with the distance to the ocean (similarly,
|
| 216 |
+
a country code could be replaced with the country’s population and
|
| 217 |
+
GDP per capita). Alternatively, you could replace each category
|
| 218 |
+
with a learnable low dimensional vector called an embedding.
|
| 219 |
+
"""
|
| 220 |
+
#Check page 98 l8r.
|
| 221 |
+
|
| 222 |
+
from sklearn.pipeline import Pipeline
|
| 223 |
+
from sklearn.preprocessing import StandardScaler
|
| 224 |
+
|
| 225 |
+
num_pipeline = Pipeline([
|
| 226 |
+
('imputer', SimpleImputer(strategy='median')),
|
| 227 |
+
('std_scaler',StandardScaler())
|
| 228 |
+
])
|
| 229 |
+
|
| 230 |
+
housing_num_tr = num_pipeline.fit_transform(housing_num)
|
| 231 |
+
|
| 232 |
+
from sklearn.compose import ColumnTransformer
|
| 233 |
+
|
| 234 |
+
num_attribs = list(housing_num) #Numerical Attributes
|
| 235 |
+
cat_attribs = ["ocean_proximity"] #Categorical Attributes
|
| 236 |
+
|
| 237 |
+
full_pipeline = ColumnTransformer([
|
| 238 |
+
("num",num_pipeline,num_attribs),
|
| 239 |
+
("cat",OneHotEncoder(),cat_attribs)
|
| 240 |
+
])
|
| 241 |
+
"""The Pipeline constructor takes a list of name/estimator pairs defining a sequence of
|
| 242 |
+
steps. All but the last estimator must be transformers (i.e., they must have a
|
| 243 |
+
fit_transform() method)."""
|
| 244 |
+
housing_prepared = full_pipeline.fit_transform(housing)
|
| 245 |
+
|
| 246 |
+
"""
|
| 247 |
+
OneHotEncoder returns a sparse matrix, while the num_pipeline returns
|
| 248 |
+
a dense matrix. When there is such a mix of sparse and dense matrices, the Colum
|
| 249 |
+
nTransformer estimates the density of the final matrix (i.e., the ratio of non-zero
|
| 250 |
+
cells), and it returns a sparse matrix if the density is lower than a given threshold (by
|
| 251 |
+
default, sparse_threshold=0.3). In this example, it returns a dense matrix.
|
| 252 |
+
"""
|
| 253 |
+
|
| 254 |
+
from sklearn.linear_model import LinearRegression
|
| 255 |
+
|
| 256 |
+
lin_reg = LinearRegression()
|
| 257 |
+
lin_reg.fit(housing_prepared,housing_labels)
|
| 258 |
+
|
| 259 |
+
some_data = housing.iloc[:5]
|
| 260 |
+
some_labels = housing_labels.iloc[:5]
|
| 261 |
+
some_data_prepared = full_pipeline.transform(some_data)
|
| 262 |
+
print("Predictions:", lin_reg.predict(some_data_prepared))
|
| 263 |
+
print("Labels:", list(some_labels))
|
| 264 |
+
|
| 265 |
+
from sklearn.metrics import mean_squared_error
|
| 266 |
+
housing_predictions = lin_reg.predict(housing_prepared)
|
| 267 |
+
lin_mse = mean_squared_error(housing_labels, housing_predictions)
|
| 268 |
+
lin_rmse = np.sqrt(lin_mse)
|
| 269 |
+
print(lin_rmse)
|
| 270 |
+
|
| 271 |
+
#DecisionTreeRegressor
|
| 272 |
+
|
| 273 |
+
from sklearn.tree import DecisionTreeRegressor
|
| 274 |
+
|
| 275 |
+
tree_reg = DecisionTreeRegressor()
|
| 276 |
+
tree_reg.fit(housing_prepared,housing_labels)
|
| 277 |
+
|
| 278 |
+
housing_predictions = tree_reg.predict(housing_prepared)
|
| 279 |
+
tree_mse = mean_squared_error(housing_labels, housing_predictions)
|
| 280 |
+
tree_rmse = np.sqrt(tree_mse)
|
| 281 |
+
print(tree_rmse)
|
| 282 |
+
"""
|
| 283 |
+
use Scikit-Learn’s K-fold cross-validation feature. The follow‐
|
| 284 |
+
ing code randomly splits the training set into 10 distinct subsets called `folds`, then it
|
| 285 |
+
trains and evaluates the Decision Tree model 10 times, picking a different fold for
|
| 286 |
+
evaluation every time and training on the other 9 folds. The result is an array con‐
|
| 287 |
+
taining the 10 evaluation scores:
|
| 288 |
+
"""
|
| 289 |
+
from sklearn.model_selection import cross_val_score
|
| 290 |
+
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
|
| 291 |
+
tree_rmse_scores = np.sqrt(-scores)
|
| 292 |
+
print("10 evalution scores:", tree_rmse_scores)
|
| 293 |
+
|
| 294 |
+
"""
|
| 295 |
+
Cross-validation features expect a utility function
|
| 296 |
+
(greater is better) rather than a cost function (lower is better), so
|
| 297 |
+
the scoring function is actually the opposite of the MSE (i.e., a neg‐
|
| 298 |
+
ative value), which is why the preceding code computes -scores
|
| 299 |
+
before calculating the square root.
|
| 300 |
+
"""
|
| 301 |
+
def display_scores(scores):
    """Print a cross-validation score array together with its mean and spread."""
    for label, value in (("Scores:", scores),
                         ("Mean:", scores.mean()),
                         ("Standard deviation:", scores.std())):
        print(label, value)
|
| 305 |
+
display_scores(tree_rmse_scores)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
|
| 309 |
+
lin_rmse_scores = np.sqrt(-lin_scores)
|
| 310 |
+
display_scores(lin_rmse_scores)
|
| 311 |
+
"""
|
| 312 |
+
Building a model on top of many
|
| 313 |
+
other models is called Ensemble Learning, and it is often a great way to push ML algo‐
|
| 314 |
+
rithms even further
|
| 315 |
+
"""
|
| 316 |
+
from sklearn.ensemble import RandomForestRegressor #RFR takes long time to compute in this computer.
|
| 317 |
+
forest_reg = RandomForestRegressor()
|
| 318 |
+
forest_reg.fit(housing_prepared, housing_labels)
|
| 319 |
+
housing_predictions = forest_reg.predict(housing_prepared)
|
| 320 |
+
forest_mse = mean_squared_error(housing_labels, housing_predictions)
|
| 321 |
+
forest_rmse = np.sqrt(forest_mse)
|
| 322 |
+
print(forest_rmse)
|
| 323 |
+
forest_scores = cross_val_score(forest_reg,housing_prepared,housing_labels,scoring="neg_mean_squared_error",cv=10)
|
| 324 |
+
forest_rmse_scores = np.sqrt(-forest_scores)
|
| 325 |
+
display_scores(forest_rmse_scores)
|
| 326 |
+
|
| 327 |
+
#Grid Search
|
| 328 |
+
|
| 329 |
+
from sklearn.model_selection import GridSearchCV
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
param_grid = [
|
| 333 |
+
|
| 334 |
+
{'n_estimators':[3,10,30,50],'max_features':[2,4,6,8]},
|
| 335 |
+
{'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]},
|
| 336 |
+
{'n_estimators':[3,10,30,50,100],'max_features':[2,4,6,8],'min_samples_split':[2,3,5]}
|
| 337 |
+
]
|
| 338 |
+
forest_reg = RandomForestRegressor()
|
| 339 |
+
grid_search = GridSearchCV(forest_reg, param_grid,cv=5,scoring='neg_mean_squared_error',return_train_score=True,refit=True)
|
| 340 |
+
grid_search.fit(housing_prepared,housing_labels)
|
| 341 |
+
|
| 342 |
+
print(grid_search.best_params_)
|
| 343 |
+
print(grid_search.best_estimator_)
|
| 344 |
+
|
| 345 |
+
cv_res = grid_search.cv_results_
|
| 346 |
+
|
| 347 |
+
for mean_score, params in zip(cv_res['mean_test_score'],cv_res['params']):
|
| 348 |
+
print(np.sqrt(-mean_score),params)
|
| 349 |
+
|
| 350 |
+
"""
|
| 351 |
+
The grid search approach is fine when you are exploring relatively few combinations,
|
| 352 |
+
like in the previous example, but when the hyperparameter search space is large, it is
|
| 353 |
+
often preferable to use RandomizedSearchCV instead.
|
| 354 |
+
This class can be used in much the same way as the GridSearchCV class, but instead of trying out all possible combi‐
|
| 355 |
+
nations, it evaluates a given number of random combinations by selecting a random
|
| 356 |
+
value for each hyperparameter at every iteration. This approach has two main bene‐
|
| 357 |
+
fits:
|
| 358 |
+
•If you let the randomized search run for, say, 1,000 iterations, this approach will
|
| 359 |
+
explore 1,000 different values for each hyperparameter (instead of just a few val‐
|
| 360 |
+
ues per hyperparameter with the grid search approach).
|
| 361 |
+
• You have more control over the computing budget you want to allocate to hyper‐
|
| 362 |
+
parameter search, simply by setting the number of iterations.
|
| 363 |
+
"""
|
| 364 |
+
|
| 365 |
+
from sklearn.model_selection import RandomizedSearchCV
|
| 366 |
+
|
| 367 |
+
rand_grid_search = RandomizedSearchCV(forest_reg,param_grid,n_iter=100,scoring='neg_mean_squared_error',return_train_score=True,refit=True)
|
| 368 |
+
rand_grid_search.fit(housing_prepared,housing_labels)
|
| 369 |
+
print(rand_grid_search.best_params_)
|
| 370 |
+
print(rand_grid_search.best_estimator_)
|
| 371 |
+
|
| 372 |
+
feature_importances = grid_search.best_estimator_.feature_importances_
|
| 373 |
+
print(feature_importances)
|
| 374 |
+
|
| 375 |
+
feature_importances_rand = rand_grid_search.best_estimator_.feature_importances_
|
| 376 |
+
print(feature_importances_rand)
|
Oreilly_Example2.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --- MNIST "is it a 5?" binary classifier demo (flat tutorial script) ---
#CLASSIFICATION
from sklearn.datasets import fetch_openml
# Downloads MNIST (70k grayscale 28x28 digit images) from OpenML; cached after first run.
mnist = fetch_openml('mnist_784',version=1,parser='auto')
print(mnist.keys())

# X: pixel features (70000 x 784), y: digit labels (arrive as strings).
X,y = mnist['data'],mnist['target']

print(X.shape,y.shape)

"""
There are 70,000 images, and each image has 784 features. This is because each image
is 28×28 pixels, and each feature simply represents one pixel’s intensity, from 0
(white) to 255 (black). Let’s take a peek at one digit from the dataset. All you need to
do is grab an instance’s feature vector, reshape it to a 28×28 array, and display it using
Matplotlib’s imshow() function:
"""

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# Take the first sample and fold its 784 pixels back into a 28x28 grid for display.
some_digit = np.array(X.iloc[0])
some_digit_image = some_digit.reshape(28,28)
print(some_digit_image)
plt.imshow(some_digit_image,cmap=mpl.cm.binary,interpolation='nearest')
plt.axis('off')
plt.show()
print(y.iloc[0])

# Labels are strings ('0'..'9'); cast to small ints so numeric comparisons below work.
y = y.astype(np.uint8)
print(y)
print(y[0])

# MNIST is already shuffled: first 60k rows = train split, last 10k = test split.
X_train,X_test,y_train,y_test = X[:60000], X[60000:], y[:60000], y[60000:]

"""
Shuffling may be a bad idea in some contexts—for example, if you are working on time series data (such as
stock market prices or weather conditions). We will explore this in the next chapters.
"""

y_train_5 = (y_train == 5) #True for all 5s, False for all other digits.
y_test_5 = (y_test == 5)

from sklearn.linear_model import SGDClassifier

# NOTE(review): no random_state set, so results vary between runs.
sgd_clf = SGDClassifier()
sgd_clf.fit(X_train,y_train_5)


print(sgd_clf.predict([some_digit]))

#Measuring Accuracy Using Cross-Validation

from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone

skfolds = StratifiedKFold(n_splits=3)

# Hand-rolled equivalent of cross_val_score: for each fold, clone the model,
# fit on the other two folds, and report accuracy on the held-out fold.
for train_index, test_index in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf)
    X_train_folds = X_train.iloc[train_index]
    y_train_folds = y_train_5.iloc[train_index]
    X_test_fold = X_train.iloc[test_index]
    y_test_fold = y_train_5.iloc[test_index]
    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_fold)
    n_correct = sum(y_pred == y_test_fold)
    print(n_correct / len(y_pred))  # per-fold accuracy

from sklearn.model_selection import cross_val_score
cvs_sgd = cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
print("cvs_sgd: ", cvs_sgd)
from sklearn.base import BaseEstimator
|
| 73 |
+
|
| 74 |
+
class Never5Classifier(BaseEstimator):
    """Trivial baseline that answers "not a 5" for every sample.

    Useful to show that plain accuracy is misleading on skewed data:
    since only ~10% of MNIST digits are 5s, this classifier already
    scores ~90% accuracy without learning anything.
    """

    def fit(self, X, y=None):
        # Nothing to learn — the prediction is constant.
        pass

    def predict(self, X):
        # One boolean per sample, all False (shape (n_samples, 1)).
        return np.full((len(X), 1), False, dtype=bool)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# Baseline sanity check: even a "never 5" dummy gets ~90% accuracy because
# only about 10% of the digits are 5s — accuracy alone is a poor metric here.
never_5_clf = Never5Classifier()
cvs = cross_val_score(never_5_clf,X_train,y_train_5,cv=3,scoring='accuracy')
print("cvs: ",cvs)

#Confusion Matrix

from sklearn.metrics import confusion_matrix
# NOTE(review): these are predictions on the training set itself, so every
# metric below is optimistic; the usual approach is cross_val_predict here.
y_pred = sgd_clf.predict(X_train)
print(confusion_matrix(y_train_5, y_pred))

from sklearn.metrics import precision_score,recall_score,accuracy_score
#TP:True-Positive; FP:False-Positive; FN:False-Negative
print(precision_score(y_train_5, y_pred)) #precision = TP/(TP+FP)
print(recall_score(y_train_5, y_pred)) #recall = TP / (TP+FN)
print(accuracy_score(y_train_5, y_pred))

"""
It is often convenient to combine precision and recall into a single metric called the F1
score, in particular if you need a simple way to compare two classifiers. The F1 score is
the harmonic mean of precision and recall. Whereas the regular mean treats all values equally,
the harmonic mean gives much more weight to low values. As a result, the classifier will only
get a high F1 score if both recall and precision are high.
"""

from sklearn.metrics import f1_score

print(f"F1 Score: {f1_score(y_train_5, y_pred)}")
###Precision/Recall Tradeoff.
"""!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
The F1 score favors classifiers that have similar precision and recall. This is not always
what you want: in some contexts you mostly care about precision, and in other con‐
texts you really care about recall. For example, if you trained a classifier to detect vid‐
eos that are safe for kids, you would probably prefer a classifier that rejects many
good videos (low recall) but keeps only safe ones (high precision), rather than a clas‐
sifier that has a much higher recall but lets a few really bad videos show up in your
product (in such cases, you may even want to add a human pipeline to check the clas‐
sifier’s video selection). On the other hand, suppose you train a classifier to detect
shoplifters on surveillance images: it is probably fine if your classifier has only 30%
precision as long as it has 99% recall (sure, the security guards will get a few false
alerts, but almost all shoplifters will get caught).
Unfortunately, you can’t have it both ways: increasing precision reduces recall, and
vice versa. This is called the `precision/recall tradeoff`.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"""

#adjust threshold by using decision_function()

# decision_function returns the raw margin score; predict() thresholds it at 0.
y_scores = sgd_clf.decision_function([some_digit])
print(y_scores)

threshold = 0
y_some_digit_pred = (y_scores>threshold)
print(y_some_digit_pred)

# Raising the threshold trades recall for precision.
threshold = 5000
y_some_digit_pred = (y_scores>threshold)
print(y_some_digit_pred)
import gradio as gr

#Gradio
# Disabled interactive demo kept for reference:
"""def ml_func(threshold):
    y_some_digit_pred = (y_scores>int(threshold))
    return "Result is " + str(y_some_digit_pred[0])

demo = gr.Interface(fn=ml_func,inputs="text", outputs="text")

demo.launch(share=True)
"""
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import cross_val_predict

# Out-of-fold decision scores for every training sample (honest estimates).
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,method="decision_function")

# precision_recall_curve returns len(thresholds) + 1 precision/recall values.
precisions, recalls, thresholds = precision_recall_curve(y_train_5,y_scores)
|
| 155 |
+
|
| 156 |
+
def plot_precision_recall_vs_threshold(precisions,recalls,thresholds):
    """Plot precision and recall as functions of the decision threshold.

    precision_recall_curve returns one more precision/recall entry than
    thresholds, so the last value of each curve is dropped to align lengths.
    """
    curves = (("Precision", precisions[:-1], "b--"),
              ("Recall", recalls[:-1], "g-"))
    for label, values, fmt in curves:
        plt.plot(thresholds, values, fmt, label=label)
    plt.legend()
|
| 160 |
+
|
| 161 |
+
# Visualize the precision/recall tradeoff over all thresholds.
plot_precision_recall_vs_threshold(precisions,recalls,thresholds)
plt.show()

#receiver operating characteristic (ROC)

from sklearn.metrics import roc_curve,roc_auc_score

# fpr = false-positive rate, tpr = true-positive rate (recall), one pair per threshold.
fpr, tpr, thresholds = roc_curve(y_train_5,y_scores)
|
| 169 |
+
|
| 170 |
+
def plot_roc_curve(fpr,tpr,label=None):
    """Plot a ROC curve plus the random-classifier diagonal for reference."""
    plt.plot(fpr, tpr, linewidth=2.5, label=label)
    # Dashed diagonal = ROC of a purely random classifier (AUC 0.5).
    diagonal = ([0, 1], [0, 1])
    plt.plot(*diagonal, "k--")
|
| 173 |
+
|
| 174 |
+
plot_roc_curve(fpr,tpr)
plt.show()
"""
One way to compare classifiers is to measure the area under the curve (AUC). A per‐
fect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will
have a ROC AUC equal to 0.5. Scikit-Learn provides a function to compute the ROC
AUC:
"""
print(roc_auc_score(y_train_5,y_scores))

#Comparing of RFC's and SGD's ROC Curves
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state=42)
# RandomForest has no decision_function; use class probabilities as scores instead.
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,
                                    method="predict_proba")

y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest)

# Overlay both ROC curves for a visual comparison.
plt.plot(fpr, tpr, "b:", label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
plt.legend(loc="lower right")
plt.show()

#Multiclass Classification
from sklearn.multiclass import OneVsOneClassifier

# One-versus-one: trains one binary classifier per pair of classes (45 for 10 digits).
ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))
ovo_clf.fit(X_train,y_train)
print(f"OvO -> {ovo_clf.predict([some_digit])}")

print(len(ovo_clf.estimators_))

# Random forests handle multiclass natively — no OvO/OvR wrapper needed.
forest_clf.fit(X_train,y_train)
print(f"Random Forest Classifier -> {forest_clf.predict([some_digit])}")

print(forest_clf.predict_proba([some_digit]))

print(cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy"))

from sklearn.preprocessing import StandardScaler
# Standardizing the pixel values noticeably improves SGD's multiclass accuracy.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
print(cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy"))
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
# Bright off-diagonal cells reveal which digit pairs get confused.
plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()


#Multilabel Classification

"""
Say the classifier has been trained to recognize three faces, Alice, Bob, and Charlie; then
when it is shown a picture of Alice and Charlie, it should output [1, 0, 1] (meaning
“Alice yes, Bob no, Charlie yes”). Such a classification system that outputs multiple
binary tags is called a multilabel classification system.
"""

from sklearn.neighbors import KNeighborsClassifier
#supports multilabel classification, but not all classifiers do
# Two independent binary labels per digit: "is large (>= 7)" and "is odd".
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large,y_train_odd]

"""
np.c_[np.array([1,2,3]), np.array([4,5,6])]
array([[1, 4],
       [2, 5],
       [3, 6]])
np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
array([[1, 2, 3, ..., 4, 5, 6]])
"""

knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train,y_multilabel)

prediksiyon = knn_clf.predict([some_digit]) #predicts both labels: [is >= 7, is odd]
print(prediksiyon)

# Macro-averaged F1 across both labels; NOTE(review): result is computed but not printed.
y_train_knn_pred = cross_val_predict(knn_clf,X_train,y_multilabel,cv=3)
f1_score(y_multilabel,y_train_knn_pred,average='macro')
|
Oreilly_Example3.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Linear regression three ways: sklearn LinearRegression, the closed-form
# Normal Equation, and stochastic gradient descent (SGDRegressor).
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Synthetic data: y = 4 + 3x + Gaussian noise, x uniform in [0, 2).
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

lr = LinearRegression()
lr.fit(X,y)
print(lr.predict([[2]]))
plt.scatter(X,y)
# Draw the fitted line through its predictions at x=0 and x=2.
plt.plot([0,2],[lr.predict([[0]])[0][0],lr.predict([[2]])[0][0]],color="red")
plt.show()

# Normal Equation: theta = (X_b^T X_b)^-1 X_b^T y — should match lr's coefficients.
X_b = np.c_[np.ones((100,1)),X] #added x0 = 1 to each instance
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
print(theta_best)
print(lr.intercept_, lr.coef_)

"""
Note about Gradient Descent:
When using Gradient Descent, you should ensure that all features
have a similar scale (e.g., using Scikit-Learn’s StandardScaler
class), or else it will take much longer to converge.
To find a good learning rate eta, can use grid search. However, you
may want to limit the number of iterations so that grid search can eliminate models
that take too long to converge.

"""

from sklearn.linear_model import SGDRegressor
# penalty=None disables regularization; eta0 is the initial learning rate.
sgd_reg = SGDRegressor(max_iter=1000,tol=1e-3,penalty=None,eta0=0.1)
sgd_reg.fit(X,y.ravel())

print(sgd_reg.intercept_, sgd_reg.coef_)
|
Oreilly_LogisticReg.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Logistic regression on the iris dataset: probability of Iris-Virginica
# as a function of petal width (Hands-On ML, ch. 4).
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
import numpy as np
from matplotlib import pyplot as plt

iris = datasets.load_iris()
print(list(iris.keys()))
# Use only the last feature column: petal width (cm).
X = iris["data"][:,3:]
# BUG FIX: the plot below is labelled "Iris-Virginica", but the original code
# built the target from class 0 (setosa). Virginica is class 2 in load_iris.
y = (iris["target"] == 2).astype(int)
print(X)
print(y)

log_reg = LogisticRegression()
log_reg.fit(X, y)
# Evaluate predicted probabilities over petal widths from 0 to 3 cm.
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
print(X_new)
y_proba = log_reg.predict_proba(X_new)
plt.figure(figsize=(10,6))
# Column 1 = P(class 1) = virginica; column 0 = P(class 0) = not virginica.
plt.plot(X_new, y_proba[:, 1], "g-", label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", label="Not Iris-Virginica")
plt.xlabel("Petal width (cm)")
plt.ylabel("Probability")
plt.legend()
plt.show()
|
Oreilly_PolynomialReg.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Polynomial regression: fit a cubic to noisy cubic data by expanding the
# single feature x into [x, x**2, x**3] and running ordinary linear regression.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

m = 100
# x uniform in [-3, 3); y is a noisy cubic polynomial of x.
X = 6 * np.random.rand(m,1)-3
y = 0.3 * X**3 + 0.5 * X**2 + X + 8 - np.random.rand(m,1)

plt.scatter(X,y)
plt.xlabel("X1")
plt.ylabel("y")
plt.show()

from sklearn.preprocessing import PolynomialFeatures

# include_bias=False: LinearRegression supplies its own intercept term.
poly_features = PolynomialFeatures(degree=3,include_bias=False)
X_poly = poly_features.fit_transform(X)
print(X[0])
print(X_poly[0])

lin_reg = LinearRegression()
lin_reg.fit(X_poly,y)
print(X_poly)
print(y)
print(lin_reg.intercept_, lin_reg.coef_)

# BUG FIX: the original called lin_reg.predict([[0, 1, 2]]), but [0, 1, 2] is not
# [x, x**2, x**3] for any single x, so the prediction was meaningless. Transform
# a real input through the same PolynomialFeatures so the columns are consistent.
x_new = np.array([[2.0]])
print(lin_reg.predict(poly_features.transform(x_new)))
|
Oreilly_SVM.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
|
| 3 |
+
A Support Vector Machine (SVM) is a very powerful and versatile Machine Learning
|
| 4 |
+
model, capable of performing linear or nonlinear classification, regression, and even
|
| 5 |
+
outlier detection. It is one of the most popular models in Machine Learning, and any‐
|
| 6 |
+
one interested in Machine Learning should have it in their toolbox. SVMs are partic‐
|
| 7 |
+
ularly well suited for classification of complex but small- or medium-sized datasets.
|
| 8 |
+
|
| 9 |
+
SVMs are sensitive to the feature scales. After feature scaling
|
| 10 |
+
(e.g., using Scikit-Learn’s StandardScaler),
|
| 11 |
+
the decision boundary looks much better.
|
| 12 |
+
|
| 13 |
+
"""
|
| 14 |
+
import gradio as gr
|
| 15 |
+
from sklearn import datasets
|
| 16 |
+
from sklearn.svm import SVC
|
| 17 |
+
import numpy as np
|
| 18 |
+
import matplotlib.pyplot as plt
|
| 19 |
+
from sklearn.datasets import make_moons
|
| 20 |
+
from sklearn.pipeline import Pipeline
|
| 21 |
+
from sklearn.preprocessing import StandardScaler
|
| 22 |
+
|
| 23 |
+
iris = datasets.load_iris()
|
| 24 |
+
|
| 25 |
+
import pandas as pd
|
| 26 |
+
#Gradio Function
|
| 27 |
+
def polynomial_svm(file, n_sample, noise, degree, C, coef0, head_value):
    """Train a polynomial-kernel SVM on a synthetic moons dataset, plot its
    decision boundary, and preview the head of an uploaded CSV file.

    Parameters
    ----------
    file : Gradio file wrapper exposing ``.name`` (path of the uploaded CSV).
    n_sample : int
        Number of ``make_moons`` samples to generate.
    noise : float
        Gaussian noise level passed to ``make_moons``.
    degree, C, coef0 :
        Polynomial-kernel hyper-parameters forwarded to ``SVC``.
    head_value : int
        Number of CSV rows to show in the preview textbox.

    Returns
    -------
    (matplotlib.figure.Figure, pandas.DataFrame)
        The decision-boundary plot and ``df.head(head_value)``.

    Raises
    ------
    gr.Error
        If no file was uploaded (the original code crashed with an
        AttributeError on ``None.name``).
    """
    if file is None:
        raise gr.Error("Please upload a CSV file first.")
    df = pd.read_csv(file.name)

    X, y = make_moons(n_samples=n_sample, noise=noise, random_state=44)
    polynomial_svm_clf = Pipeline([
        # SVMs are sensitive to feature scales, so scale before fitting.
        ("scaler", StandardScaler()),
        ("svm_clf", SVC(kernel="poly", degree=degree, C=C, coef0=coef0,
                        probability=True)),
    ])
    polynomial_svm_clf.fit(X, y)

    # Use the object-oriented Matplotlib API: relying on the implicit global
    # pyplot "current figure" is not safe when several Gradio requests render
    # figures concurrently.
    fig, ax = plt.subplots()

    # Evaluate the classifier on a 200x200 grid covering the data (+1 margin).
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    Z = polynomial_svm_clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    cmap = plt.cm.Pastel1
    ax.contourf(xx, yy, Z, alpha=0.8, cmap=cmap)
    ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=cmap)
    ax.set_xlabel('Feature 1')
    ax.set_ylabel('Feature 2')
    ax.set_title('Polynomial SVM Classifier Decision Boundary')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())

    return fig, df.head(head_value)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# --- Gradio wiring -----------------------------------------------------------
# One slider per make_moons / SVC hyper-parameter, plus a CSV upload whose
# first rows are echoed back next to the decision-boundary plot.
inputs = [
    gr.File(label='Import a CSV File', file_count='single', file_types=['csv']),
    gr.Slider(minimum=100, maximum=3000, value=1500, label='Number of Samples'),
    gr.Slider(minimum=0, maximum=1, value=0.15, label='Noise'),
    gr.Slider(minimum=1, maximum=5, value=3, step=1, label='Degree'),
    gr.Slider(minimum=1, maximum=10, value=5, step=1, label='C'),
    gr.Slider(minimum=1, maximum=10, value=3, step=1, label='coefficient 0'),
    gr.Slider(minimum=1, maximum=20, value=10, step=1, label='Head'),
]

outputs = [
    gr.Plot(label='Polynomial SVM Classifier'),
    gr.Textbox(label='Head'),
]

demo = gr.Interface(fn=polynomial_svm, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.launch(share=True)
|
Panda.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from torch.optim import Adam
|
| 5 |
+
from gym import spaces
|
| 6 |
+
import gym
|
| 7 |
+
from torch.distributions import Normal
|
| 8 |
+
import random
|
| 9 |
+
|
| 10 |
+
# Actor Network
|
| 11 |
+
class Actor(torch.nn.Module):
    """Deterministic policy network: maps a state vector to an action vector
    whose components are bounded to [-1, 1]."""

    def __init__(self, n_states, n_actions):
        super(Actor, self).__init__()
        # Hidden-layer widths (400/300) follow the common DDPG architecture.
        self.fc1 = torch.nn.Linear(n_states, 400)
        self.fc2 = torch.nn.Linear(400, 300)
        self.fc3 = torch.nn.Linear(300, n_actions)

    def forward(self, state):
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        # tanh bounds every action component to the range [-1, 1].
        return torch.tanh(self.fc3(hidden))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Critic Network
|
| 25 |
+
class Critic(torch.nn.Module):
    """Q-value network: scores a (state, action) pair with a single scalar."""

    def __init__(self, n_states, n_actions):
        super(Critic, self).__init__()
        # State and action are concatenated before the first layer.
        self.fc1 = torch.nn.Linear(n_states + n_actions, 400)
        self.fc2 = torch.nn.Linear(400, 300)
        self.fc3 = torch.nn.Linear(300, 1)

    def forward(self, state, action):
        joined = torch.cat([state, action], dim=1)
        hidden = F.relu(self.fc1(joined))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class DDPGAgent:
    """Minimal DDPG agent: actor/critic networks, Polyak-averaged target
    copies, and a bounded FIFO replay memory.

    Fixes over the naive version:
      * target networks start as exact copies of the online networks
        (previously they kept independent random initial weights),
      * all four networks live on the same device (previously only the actor
        was moved),
      * batches are built with ``torch.stack`` — the old ``torch.cat`` on the
        stored 1-D transition tensors flattened the batch dimension and made
        ``train`` crash inside the critic,
      * target Q-values are computed under ``no_grad`` so no gradient flows
        into the target networks,
      * rewards are stacked to shape (batch, 1), matching the critic output
        inside ``MSELoss`` (the old (batch,) shape silently broadcast).
    """

    def __init__(self, env, n_states, n_actions, actor_lr=1e-4, critic_lr=1e-3,
                 gamma=0.99, tau=1e-2, max_memory_size=50000):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Online and target networks, all on the same device.
        self.actor = Actor(n_states, n_actions).to(self.device)
        self.critic = Critic(n_states, n_actions).to(self.device)
        self.target_actor = Actor(n_states, n_actions).to(self.device)
        self.target_critic = Critic(n_states, n_actions).to(self.device)
        # Hard-copy the initial weights so the targets start synchronized.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

        # Replay memory: simple bounded FIFO list.
        self.memory = []
        self.max_memory_size = max_memory_size

        # Training hyper-parameters.
        self.gamma = gamma
        self.tau = tau
        self.loss_func = torch.nn.MSELoss()
        self.actor_optim = Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr)

        # Exploration-noise distribution (zero-mean Gaussian, std 0.1).
        self.noise = Normal(0, 0.1)

        # Action-space bounds used to clip noisy actions.
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high

        self.env = env

    def remember(self, state, action, reward, next_state):
        """Store one transition; drop the oldest when the buffer is full."""
        self.memory.append((state, action, reward, next_state))
        if len(self.memory) > self.max_memory_size:
            self.memory.pop(0)

    def train(self, batch_size=64):
        """Run one DDPG update on a random mini-batch.

        No-op until the buffer holds at least ``batch_size`` transitions.
        Stored tensors are expected to be 1-D (state, action, next_state) and
        shape-(1,) rewards, as produced by ``run`` below.
        """
        if len(self.memory) < batch_size:
            return

        mini_batch = random.sample(self.memory, batch_size)
        # torch.stack preserves the per-transition dimension; torch.cat on
        # 1-D tensors would flatten everything into a single long vector.
        state_batch = torch.stack([data[0] for data in mini_batch]).to(self.device)
        action_batch = torch.stack([data[1] for data in mini_batch]).to(self.device)
        reward_batch = torch.stack([data[2] for data in mini_batch]).to(self.device)
        next_state_batch = torch.stack([data[3] for data in mini_batch]).to(self.device)

        # Bootstrapped targets; detached so no gradient reaches target nets.
        with torch.no_grad():
            target_action_batch = self.target_actor(next_state_batch)
            target_critic_value = self.target_critic(next_state_batch, target_action_batch)
            expected_critic_value = reward_batch + self.gamma * target_critic_value

        # Update critic on the TD error.
        self.critic_optim.zero_grad()
        critic_value = self.critic(state_batch, action_batch)
        critic_loss = self.loss_func(critic_value, expected_critic_value)
        critic_loss.backward()
        self.critic_optim.step()

        # Update actor: gradient ascent on the critic's value of its actions.
        self.actor_optim.zero_grad()
        new_action_batch = self.actor(state_batch)
        actor_loss = -self.critic(state_batch, new_action_batch).mean()
        actor_loss.backward()
        self.actor_optim.step()

        # Polyak-average the target networks toward the online networks.
        self.soft_update(self.target_actor, self.actor, self.tau)
        self.soft_update(self.target_critic, self.critic, self.tau)

    @staticmethod
    def soft_update(target, source, tau):
        """In-place Polyak update: target <- (1 - tau)*target + tau*source."""
        for target_param, source_param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) + source_param.data * tau)

    def get_action(self, state):
        """Return the policy action for ``state`` plus Gaussian exploration
        noise, clipped to the environment's action bounds."""
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        self.actor.eval()  # disable any train-time layers during inference
        with torch.no_grad():
            action = self.actor(state).cpu().numpy()[0]
        self.actor.train()
        noise_sample = self.noise.sample(torch.Size(action.shape)).numpy()
        return np.clip(action + noise_sample, self.action_low, self.action_high)

    def save_model(self, output):
        """Persist actor and critic weights under the ``output`` directory."""
        torch.save(self.actor.state_dict(), '{}/actor.pkl'.format(output))
        torch.save(self.critic.state_dict(), '{}/critic.pkl'.format(output))

    def load_model(self, output):
        """Restore actor and critic weights from the ``output`` directory."""
        self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(output)))
        self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(output)))
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
# Define your environment below
|
| 130 |
+
class PandaRobotEnv(gym.Env):
    """Toy 6-DoF reach task.

    The 6-dimensional state drifts by the action at every step; the reward is
    the negative Euclidean distance to a random goal, and an episode ends once
    the state comes within 0.1 of the goal.
    """

    def __init__(self):
        super(PandaRobotEnv, self).__init__()
        # Unbounded 6-dimensional continuous action and observation vectors.
        self.action_space = spaces.Box(low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32)
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32)
        self.state = None
        self.goal = None

    def step(self, action):
        # The action acts as a direct displacement of the state.
        self.state = self.state + action
        gap = np.linalg.norm(self.goal - self.state)
        # Terminal once within 0.1 of the goal; reward is -distance.
        return self.state, -gap, gap < 0.1, {}

    def reset(self):
        # Fresh random start state and goal, both uniform in [-1, 1]^6.
        self.state = np.random.uniform(-1, 1, size=(6,))
        self.goal = np.random.uniform(-1, 1, size=(6,))
        return self.state
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def run():
    """Train a DDPG agent on PandaRobotEnv for 1000 episodes, printing the
    total reward every 100th episode."""
    env = PandaRobotEnv()
    agent = DDPGAgent(env=env,
                      n_states=env.observation_space.shape[0],
                      n_actions=env.action_space.shape[0])

    for episode in range(1, 1001):
        total_reward = _run_episode(env, agent)
        if episode % 100 == 0:
            print('Episode: {}, Total reward: {}'.format(episode, total_reward))


def _run_episode(env, agent):
    """Play one episode to completion, storing each transition and training
    after every step.  Returns the episode's cumulative reward."""
    state = env.reset()
    done = False
    total_reward = 0
    while not done:
        action = agent.get_action(state)
        next_state, reward, done, _ = env.step(action)
        # Transitions are stored as tensors so training can batch them later.
        agent.remember(torch.tensor(state, dtype=torch.float),
                       torch.tensor(action, dtype=torch.float),
                       torch.tensor([reward], dtype=torch.float),
                       torch.tensor(next_state, dtype=torch.float))
        agent.train()
        state = next_state
        total_reward += reward
    return total_reward


if __name__ == "__main__":
    run()
|
Q-Learning Example.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.models import Sequential, Model
|
| 2 |
+
from keras.layers import Dropout, Dense, Input, Activation
|
| 3 |
+
from keras.optimizers import Adam
|
| 4 |
+
import numpy as np
|
| 5 |
+
from collections import deque
|
| 6 |
+
import random
|
| 7 |
+
|
| 8 |
+
EPISODES=1000
|
| 9 |
+
|
| 10 |
+
class Walker:
    """DQN-style agent for BipedalWalker.

    An MLP maps an observation (shape ``(1, nx)``) to an ``ny``-dimensional
    action vector; transitions are replayed from a bounded deque with an
    epsilon-random exploration schedule.
    """

    def __init__(self, nx, ny, lr, gamma):
        self.nx = nx          # observation dimension
        self.ny = ny          # action dimension
        self.lr = lr          # optimizer learning rate
        self.los = []         # per-sample training losses of the last batch
        self.gamma = gamma    # discount factor
        self.memory_deck = deque(maxlen=2000)  # bounded replay buffer
        self.epsilon = 0.7    # current exploration probability
        self.epsilon_ = 0.01  # exploration floor
        self.decay = 0.995    # multiplicative epsilon decay per training call
        self.model = self.get_model()
        self.episode_observation, self.episode_rewards, self.episode_action, self.new_episode_observation, self.episode_flag = [], [], [], [], []

    def get_action(self, observation):
        """Return a uniform random action with probability epsilon, otherwise
        the model's prediction for ``observation``."""
        if np.random.rand() <= self.epsilon:
            return np.random.uniform(-1, 1, 4)
        p = self.model.predict(observation)
        return p[0]

    def memory_recall(self, observation, action, reward, new_observation, flags):
        """Store one transition and accumulate the episode reward."""
        self.memory_deck.append((observation, action, reward, new_observation, flags))
        self.episode_rewards.append(reward)

    def get_model(self):
        """Build the 400-300 MLP regressor used as the value/policy network."""
        model = Sequential()
        model.add(Dense(400, input_dim=self.nx, activation='relu'))
        model.add(Dense(300, activation='relu'))
        model.add(Dense(self.ny, activation='linear'))
        # ``learning_rate`` is the supported keyword; the legacy ``lr`` alias
        # was removed in recent Keras releases and raises a TypeError there.
        model.compile(loss='mse', optimizer=Adam(learning_rate=self.lr))
        return model

    def training(self, batch):
        """Fit the model on ``batch`` random transitions, decay epsilon, and
        return the last fit history plus the mean loss over the batch."""
        sampled = random.sample(self.memory_deck, batch)
        self.los = []
        for obs, act, rew, new_obs, done in sampled:
            target = rew
            if not done:
                # Smoothed bootstrap target: 90% immediate reward plus 10% of
                # the discounted max predicted value of the next state.
                target = ((1.0 - 0.1) * rew + 0.1 * (self.gamma * np.amax(self.model.predict(new_obs)[0])))

            old_target = self.model.predict(obs)
            # NOTE(review): this broadcasts the scalar target across all
            # ``ny`` outputs instead of updating only the taken action —
            # presumably unintended for a multi-output head; confirm before
            # relying on the learned policy.
            old_target[0] = target
            history = self.model.fit(x=obs, y=old_target, verbose=0, epochs=1)
            self.los.append(history.history['loss'])
        self.episode_observation, self.episode_rewards, self.episode_action, self.new_episode_observation, self.episode_flag = [], [], [], [], []

        mm = np.mean(self.los)
        if self.epsilon >= self.epsilon_:
            self.epsilon *= self.decay
        return history, mm
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
import gym
import numpy as np
import random
import time

# Fixed seed for reproducible exploration.
seed = np.random.seed(666)

episodes = EPISODES
render = False

# BipedalWalker-v3: continuous observations and a 4-dim continuous action.
env = gym.make('BipedalWalker-v3')
env = env.unwrapped

lr = 0.001
gamma = 0.98
nx = env.observation_space.shape[0]
ny = env.action_space.shape[0]
agent = Walker(nx, ny, lr, gamma)

win = 0                   # episodes that reached the 300-reward goal
rewards_over_time = []    # total reward of each finished episode
avg_reward_mat = []       # 10-episode moving averages
net_max = []              # running best total reward so far

for i in range(episodes):
    # reset() returns (observation, info); keep the observation and reshape
    # it to the (1, nx) batch the Keras model expects.
    observation = env.reset()[0]
    observation = observation.reshape(1, -1)
    start = time.time()
    while True:
        if render:
            env.render()

        action = agent.get_action(observation)
        new_observation, reward, flag, inf, fff = env.step(action)
        new_observation = new_observation.reshape(1, -1)
        agent.memory_recall(observation, action, reward, new_observation, flag)
        observation = new_observation

        # Cut an episode off after 20 wall-clock seconds...
        t = time.time() - start
        if t > 20:
            flag = True

        # ...or once the accumulated reward drops below -300.
        total_episode_rewards = sum(agent.episode_rewards)
        if total_episode_rewards < -300:
            flag = True

        if flag:
            rewards_over_time.append(total_episode_rewards)
            max_reward = np.max(rewards_over_time)
            if i % 10 == 0:
                avg_reward_mat.append(sum(rewards_over_time[-10:]) / 10)
            # Track the best-so-far reward as a monotone sequence.
            if not net_max:
                net_max.append(total_episode_rewards)
            elif net_max[-1] < total_episode_rewards:
                net_max.append(total_episode_rewards)
            else:
                net_max.append(net_max[-1])
            # Start rendering once the agent performs well late in training.
            if int(total_episode_rewards) > 270 and i > 2000:
                render = True
            episode_max = np.argmax(rewards_over_time)
            if total_episode_rewards >= 300:
                win += 1
            print('##################################################################')
            print('Walk# : ', i)
            print('Reward : ', int(total_episode_rewards))
            print('Time : ', np.round(t, 2), 'sec')
            print('Maximum Reward : ' + str(int(max_reward)) + ' (in episode#:' + str(episode_max) + ')')
            print('Wins : ' + str(win))

            hist, mm = agent.training(16)
            # if max_reward > 100: render = True
            break
|
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: red
|
| 5 |
-
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 3.
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Regressions_Classifications_Test
|
| 3 |
+
app_file: data_visualization.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
+
sdk_version: 3.35.2
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
RoboEnv_View.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import pybullet as p
|
| 3 |
+
import pybullet_data
|
| 4 |
+
import numpy as np
|
| 5 |
+
import gymnasium as gym
|
| 6 |
+
from stable_baselines3 import PPO
|
| 7 |
+
from stable_baselines3.common.env_checker import check_env
|
| 8 |
+
from sklearn.metrics import r2_score
|
| 9 |
+
# Create a custom PyBullet environment
|
| 10 |
+
class PyBulletEnv(gym.Env):
    """PyBullet 6-joint reach environment (Gymnasium API).

    The agent commands six joint target positions; an episode terminates when
    the end effector (link 6) comes within 0.1 m of a fixed target body.

    Fixes over the naive version:
      * ``reset``/``step`` follow the Gymnasium (obs, info) / 5-tuple API so
        ``check_env`` and Stable-Baselines3 accept the environment,
      * observations are float32, matching the declared observation space,
      * ``render`` no longer disconnects and reconnects the physics server
        (which destroyed every loaded body mid-episode and stored the
        constant ``p.GUI`` instead of a client id).
    """

    def __init__(self, render_mode='human'):
        # Connect once; DIRECT (headless) keeps stepping fast.
        self.physics_client = p.connect(p.DIRECT)

        # Make pybullet's bundled assets (plane.urdf, ...) findable.
        p.setAdditionalSearchPath(pybullet_data.getDataPath())

        # Static ground plane and fixed-base robot at the world origin.
        self.plane = p.loadURDF("plane.urdf", [0, 0, 0], [0, 0, 0, 1], useFixedBase=True)
        self.robot = p.loadURDF("robot.urdf", [0, 0, 0], [0, 0, 0, 1], useFixedBase=True)

        # Random initial joint configuration.
        initial_joint_angles = np.random.uniform(low=-np.pi, high=np.pi, size=(6,))
        for i in range(6):
            p.resetJointState(self.robot, i, initial_joint_angles[i])

        # Pin the robot base to the plane origin with a fixed constraint.
        base_link_pos, _ = p.getBasePositionAndOrientation(self.robot)
        plane_pos, _ = p.getBasePositionAndOrientation(self.plane)
        constraint_pos = np.array(plane_pos) - np.array(base_link_pos)
        p.createConstraint(self.plane, -1, self.robot, -1, p.JOINT_FIXED,
                           constraint_pos, [0, 0, 0], [0, 0, 0])

        # Target body at a reachable position.
        self.target = p.loadURDF("target.urdf", [0.3, 0.3, 0], [0, 0, 0, 1], useFixedBase=True)

        self.action_space = gym.spaces.Box(low=-1, high=1, shape=(6,), dtype=np.float32)
        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32)

        self.observation = np.zeros((6,), dtype=np.float32)
        self.render_mode = render_mode

    def step(self, action):
        """Apply joint position targets, advance the simulation one tick and
        return ``(observation, reward, terminated, truncated, info)``."""
        p.setJointMotorControlArray(self.robot, range(6), p.POSITION_CONTROL, targetPositions=action)
        p.stepSimulation()

        # Observation: the six joint angles (float32 to match the space).
        joint_angles = [p.getJointState(self.robot, i)[0] for i in range(6)]
        self.observation = np.array(joint_angles, dtype=np.float32)

        # Reward: negative end-effector distance to the target, +1 on contact.
        end_effector_pos = p.getLinkState(self.robot, 6)[0]
        target_pos, _ = p.getBasePositionAndOrientation(self.target)
        dist = np.linalg.norm(np.array(end_effector_pos) - np.array(target_pos))
        reward = -dist if dist > 0.1 else 1.0
        terminated = bool(dist <= 0.1)

        # This environment never truncates on its own.
        return self.observation, float(reward), terminated, False, {}

    def reset(self, seed=None, options=None):
        """Re-randomize the joint angles and return ``(observation, info)``."""
        super().reset(seed=seed)
        initial_joint_angles = np.random.uniform(low=-np.pi, high=np.pi, size=(6,))
        for i in range(6):
            p.resetJointState(self.robot, i, initial_joint_angles[i])
        self.observation = np.array(initial_joint_angles, dtype=np.float32)
        return self.observation, {}

    def render(self, mode='human'):
        """Render the current scene without tearing down the simulation.

        Returns an RGB numpy array for ``mode='rgb_array'``; for
        ``mode='human'`` it only throttles to the simulation rate.

        Raises
        ------
        ValueError
            If ``mode`` is neither ``'human'`` nor ``'rgb_array'``.
        """
        if mode not in ('human', 'rgb_array'):
            raise ValueError("Invalid rendering mode. Supported modes are 'human' and 'rgb_array'.")

        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        p.configureDebugVisualizer(p.COV_ENABLE_KEYBOARD_SHORTCUTS, 0)
        p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0)

        # Camera parameters (adjust as needed).
        p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=45, cameraPitch=-45,
                                     cameraTargetPosition=[0, 0, 0])

        _, _, rgb, _, _ = p.getCameraImage(width=800, height=600,
                                           renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(rgb)

        if mode == 'rgb_array':
            return rgb_array
        time.sleep(1 / 240)  # throttle the human-mode render loop
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# --- Training / evaluation script -------------------------------------------
env = PyBulletEnv()
check_env(env)  # validate the Gym API before handing the env to SB3

model = PPO("MlpPolicy", env, verbose=1)

# Train the agent.
model.learn(total_timesteps=10000)

# Periodically roll out the trained policy for visual inspection.
for episode in range(0, 10001, 100):
    vec_env = model.get_env()
    obs = vec_env.reset()

    for _ in range(100):
        # Render before acting, then query the policy deterministically
        # (mean action, no exploration noise).
        vec_env.render()
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, info = vec_env.step(action)
        if done:
            break

# Close the environment.
env.close()
|
SimplifiedOffsetCorrectionDS.xlsx
ADDED
|
Binary file (11.8 kB). View file
|
|
|
data_X.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:384f8a2c378f24d24834ce6be39278224da0f203b570935d56c130462f980bff
|
| 3 |
+
size 196164389
|
data_Y.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data_visualization.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import matplotlib.pyplot as plt
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import gradio as gr
|
| 5 |
+
from sklearn.linear_model import LinearRegression,LogisticRegression
|
| 6 |
+
from sklearn.ensemble import RandomForestRegressor
|
| 7 |
+
from sklearn.model_selection import train_test_split
|
| 8 |
+
"""x,y = [1,2,3],[4,5,6]
|
| 9 |
+
plt.title("Linear Graph",fontdict={'fontname':'Times New Roman','fontsize':26,'color':'#000000'})
|
| 10 |
+
plt.xlabel('X Axis',fontdict={'fontname':'Times New Roman','fontsize':16,'color':'blue'})
|
| 11 |
+
plt.ylabel('Y Axis',fontdict={'fontname':'Times New Roman','fontsize':16,'color':'green'})
|
| 12 |
+
plt.plot(x,y,color='purple',linewidth=2,label='first')
|
| 13 |
+
plt.plot(y,x,color='red',linewidth=2,label='second')
|
| 14 |
+
plt.xticks([0.5,1,1.5,2,2.5,3])
|
| 15 |
+
plt.xticks([3.5,4,4.5,5,5.5,6])
|
| 16 |
+
plt.legend()
|
| 17 |
+
plt.show()
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def draw_graph(x: str, y: str, title: str, x_label: str, y_label: str):
    """Fit regression and classification models to comma-separated X/Y values
    and return two annotated plots plus textual scores.

    All parameters are strings coming from Gradio textboxes; ``x`` and ``y``
    are comma-separated integer lists of equal length.  (The original
    signature used ``x=str`` — the ``str`` type as a *default value* — which
    was surely meant to be an annotation.)

    Returns
    -------
    (regression_figure, classification_figure, regression_scores, classification_scores)
        Exactly the four Gradio output components declared below.  The
        original version returned a single figure, so the app failed at
        runtime with a missing-outputs error.

    Raises
    ------
    gr.Error
        If the X and Y lists have different lengths.
    """
    xs = list(map(int, x.split(',')))
    ys = list(map(int, y.split(',')))
    if len(xs) != len(ys):
        raise gr.Error("X and Y Arrays' Lengths are not Equal!")

    df = pd.DataFrame({'X': xs, 'Y': ys})
    x_train, x_test, y_train, y_test = train_test_split(
        df[['X']], df[['Y']], test_size=0.33, random_state=42)

    title_font = {'fontname': 'Times New Roman', 'fontsize': 26, 'color': '#000000'}
    x_font = {'fontname': 'Times New Roman', 'fontsize': 16, 'color': 'blue'}
    y_font = {'fontname': 'Times New Roman', 'fontsize': 16, 'color': 'green'}

    # --- Regressors ----------------------------------------------------------
    lr = LinearRegression()
    lr.fit(x_train, y_train)
    y_pred_lr = lr.predict(x_test)

    rnd_fr = RandomForestRegressor(n_jobs=100)
    rnd_fr.fit(x_train.to_numpy().tolist(), y_train.to_numpy().tolist())
    y_pred_rnd_fr = rnd_fr.predict(x_test)

    reg_fig = plt.figure()
    plt.title(title, fontdict=title_font)
    plt.xlabel(x_label, fontdict=x_font)
    plt.ylabel(y_label, fontdict=y_font)
    plt.grid('on')
    plt.plot(xs, ys, "o", label='Values')
    plt.plot(x_test, y_pred_lr, label='Linear Regression')
    plt.plot(x_test, y_pred_rnd_fr, label='Random Forest Regression')
    plt.legend()

    # --- Classifier ----------------------------------------------------------
    log_reg = LogisticRegression(max_iter=1000)
    log_reg.fit(x_train.to_numpy().tolist(), y_train.to_numpy().tolist())
    y_pred_log_reg = log_reg.predict(x_test.to_numpy().tolist())

    clf_fig = plt.figure()
    plt.title(title, fontdict=title_font)
    plt.xlabel(x_label, fontdict=x_font)
    plt.ylabel(y_label, fontdict=y_font)
    plt.grid('on')
    plt.plot(xs, ys, "o", label='Values')
    plt.plot(x_test, y_pred_log_reg, label='Logistic Regression')
    plt.legend()

    # Scores feeding the two Gradio textboxes.
    reg_scores = "Linear R2: {:.3f} | Random Forest R2: {:.3f}".format(
        lr.score(x_test, y_test),
        rnd_fr.score(x_test.to_numpy().tolist(), y_test.to_numpy().tolist()))
    clf_scores = "Logistic Regression accuracy: {:.3f}".format(
        log_reg.score(x_test.to_numpy().tolist(), y_test.to_numpy().tolist()))

    return reg_fig, clf_fig, reg_scores, clf_scores
|
| 53 |
+
|
| 54 |
+
# Gradio wiring: five free-text inputs; two plots and two score boxes out.
inputs = [
    gr.Textbox(label='X values'),
    gr.Textbox(label='Y values'),
    gr.Textbox(label='Title'),
    gr.Textbox(label='X Label'),
    gr.Textbox(label='Y Label'),
]

outputs = [
    gr.Plot(label='Regressions'),
    gr.Plot(label='Classifications'),
    gr.Textbox(label='Regression Scores'),
    gr.Textbox(label='Classification Scores')

]

demo = gr.Interface(fn=draw_graph, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.launch(share=True)
|
datasets/housing/housing.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
datasets/housing/housing.tgz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d4cd501af90475f09b814c7447c7701f59bf28e8cf1180205ae5ace9737a0109
|
| 3 |
+
size 409488
|
dqn_lunar.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c61789586b8fa2cda6d43d22995313ff8787792663cc8517dc9372d7eb5c30ad
|
| 3 |
+
size 104897
|
flagged/Polynomial SVM Classifier/tmp1qkjy2sm.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"type": "matplotlib", "plot": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAYAAAA10dzkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAADCvklEQVR4nOzddVRUaxfA4d+AMKRYiAEmmNgdiIHd3a2frVev3d157e7u7rqi127FQq9dGCgdc74/gLkgg4KAA7KftVxLznvmzJ6Bmdnzxn5ViqIoCCGEEEKIJMNA3wEIIYQQQohfSxJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAIYQQQogkRhJAkSCUL1+e8uXL6zuMOLFq1SpUKhX//vtvjG/brl07smTJEucxJQb6/hsYPXo0KpUqwrGgoCAGDhyInZ0dBgYG1KtXDwCVSsXo0aN/fZB68DO/l9i8BpI6XX+HQsQHSQDFTwl7gw/7Z2JiQo4cOejZsydv377Vd3i/PY1Gw5o1ayhRogSpUqXC0tKSHDly0KZNG86fPw9A7969UalUPHr0KMrrDBs2DJVKxc2bNwHIkiULKpUKFxcXnecvXbpU+zu/fPlytGJ9+/Yt/fv3J1euXJiZmWFubk6RIkUYP348nz9/jtkD/8VWrFjBtGnTaNSoEatXr6Zv376/PIbwr7NkyZKRKlUqihQpQp8+fbh79+4vjyehC/sbDv/e5ODgwIABA/j48aO+wxMiwUim7wBE4jZ27FiyZs2Kn58frq6uLFy4kAMHDnD79m3MzMz0HZ5etG7dmmbNmqFWq+PtPnr37s38+fOpW7cuLVu2JFmyZNy/f5+DBw+SLVs2SpYsScuWLZk7dy4bNmxg5MiROq+zceNG8uXLR/78+bXHTExMOHnyJG/evCFdunQRzl+/fj0mJib4+flFK85Lly5Ro0YNvLy8aNWqFUWKFAHg8uXLTJ48mb///psjR4785LMQt4YPH87gwYMjHDtx4gQZM2Zk1qxZEY77+vqSLNmve/usXLkybdq0QVEUPD09uXHjBqtXr2bBggVMmTKFfv36xdt9/8zv51e8Br6nYMGC/PnnnwD4+flx5coVZs+ezenTp7l48aJeYhIiwVGE+AkrV65UAOXSpUsRjvfr108BlA0bNsToes7Ozoqzs3McRpg4tW3bVsmcOfN3z3nz5o2iUqmUzp07R2rTaDTK27dvtT/b29sruXLl0nmdc+fOKYAyefJk7bHMmTMrlSpVUpInT67Mnj07wvnPnz9XDAwMlIYNG+r83X/r06dPSsaMGRUbGxvFzc1N5+MYN26c9ueE+DdQoUIFJW/evPF6H76+vkpwcHCU7YDSo0ePSMc9PDyUUqVKKYCyf//++AwxUcmcObNSs2bNSMf79++vAMqDBw/0EFX0jRo1Somrj2aNRqP4+PjEybXE70eGgEWcqlixIgBPnjwBQuZQjRs3juzZs6NWq8mSJQtDhw7F398/ymt4eXlhbm5Onz59IrW9ePECQ0NDJk2aBPw3FH327Fn69euHtbU15ubm1K9fn/fv30e6/YIFC8ibNy9qtZoMGTLQo0ePSMOQ5c
uXx9HRkZs3b+Ls7IyZmRn29vZs27YNgNOnT1OiRAlMTU3JmTMnx44di3B7XfOfdu/eTc2aNcmQIQNqtZrs2bMzbtw4goODf/ykfuPJkycoikKZMmUitalUKtKmTav9uWXLlty7d4+rV69GOnfDhg2oVCqaN28e4biJiQkNGjRgw4YNEY5v3LiRlClTUrVq1WjFuXjxYl6+fMnMmTPJlStXpHYbGxuGDx8e5e0DAgIYOXIkRYoUwcrKCnNzc5ycnDh58mSkczdt2kSRIkWwtLQkefLk5MuXjzlz5mjbAwMDGTNmDA4ODpiYmJA6dWrKli3L0aNHteeEn3v177//olKpOHnyJHfu3NEOJ546dQrQPQfw5cuXdOjQARsbG9RqNXnz5mXFihURzjl16hQqlYpNmzYxfPhwMmbMiJmZGV++fPnh8/mt1KlTs2nTJpIlS8aECRMitPn7+zNq1Cjs7e1Rq9XY2dkxcOBAna+7devWUbx4cczMzEiZMiXlypWL0Ounaw7g3LlzyZs3r/Y2RYsWjfD3EtUcwJi8/u7evUuFChUwMzMjY8aMTJ06NcbPUXhhvdnf9tyeOHECJycnzM3NSZEiBXXr1sXNzS3COVHNzdU1X0+lUtGzZ0927dqFo6Oj9m/h0KFDkW7v6upKsWLFMDExIXv27CxevFhn7CtXrqRixYqkTZsWtVpNnjx5WLhwYaTzsmTJQq1atTh8+DBFixbF1NSUxYsX4+zsTIECBXReO2fOnNF+TYvfiySAIk65u7sDIR9OAJ06dWLkyJEULlyYWbNm4ezszKRJk2jWrFmU17CwsKB+/fps3rw5UoK0ceNGFEWhZcuWEY736tWLGzduMGrUKLp168bevXvp2bNnhHNGjx5Njx49yJAhAzNmzKBhw4YsXryYKlWqEBgYGOHcT58+UatWLUqUKMHUqVNRq9U0a9aMzZs306xZM2rUqMHkyZPx9vamUaNGfP369bvPy6pVq7CwsKBfv37MmTOHIkWKMHLkyEhDjtGROXNmALZu3YqPj893zw17nr5N5oKDg9myZQtOTk5kypQp0u1atGjBxYsXtb/PsGs0atQIIyOjaMW5Z88eTE1NadSoUbTO/9aXL19YtmwZ5cuXZ8qUKYwePZr3799TtWpVrl+/rj3v6NGjNG/enJQpUzJlyhQmT55M+fLlOXv2rPac0aNHM2bMGCpUqMC8efMYNmwYmTJl0pkYA1hbW7N27Vpy5cqFra0ta9euZe3ateTOnVvn+W/fvqVkyZIcO3aMnj17MmfOHOzt7enYsSOzZ8+OdP64cePYv38//fv3Z+LEiRgbG//Uc5QpUyacnZ05f/68NonUaDTUqVOH6dOnU7t2bebOnUu9evWYNWsWTZs2jXD7MWPG0Lp1a4yMjBg7dixjxozBzs6OEydORHmfS5cupXfv3uTJk4fZs2czZswYChYsyIULF74ba0xff9WqVaNAgQLMmDGDXLlyMWjQIA4ePBit5yUwMBAPDw88PDx48eIFe/fuZebMmZQrV46sWbNqzzt27BhVq1bl3bt3jB49mn79+nHu3DnKlCkTqwUsrq6udO/enWbNmjF16lT8/Pxo2LAhHz580J5z69YtqlSpor3v9u3bM2rUKHbu3BnpegsXLiRz5swMHTqUGTNmYGdnR/fu3Zk/f36kc+/fv0/z5s2pXLkyc+bMoWDBgrRu3ZqbN29y+/btCOdeunSJBw8e0KpVq59+rCIR03cXpEicwoaAjx07prx//155/vy5smnTJiV16tSKqamp8uLFC+X69esKoHTq1CnCbcOGYk6cOKE99u3w3+HDhxVAOXjwYITb5s+fP8J5YXG4uLgoGo1Ge7xv376KoaGh8vnzZ0VRFOXdu3eKsbGxUqVKlQjDbfPmzVMAZcWKFRFi4Zth7Hv37imAYmBgoJw/fz5SnCtXrowU05MnT7THdA3DdOnSRTEzM1P8/Py0x6IzBKwoitKmTRsFUFKmTKnUr1
9fmT59us5hVkVRlGLFiim2trYRHvehQ4cUQFm8eHGEc8OGz4KCgpR06dJph2jv3r2rAMrp06ejHP7/VsqUKZUCBQr88LGE+fZvICgoSPH3949wzqdPnxQbGxulQ4cO2mN9+vRRkidPrgQFBUV57QIFCugcFgxP19Cbs7OzziFgQBk1apT2544dOyrp06dXPDw8IpzXrFkzxcrKSvv7P3nypAIo2bJli/bQHFEMAYfp06ePAig3btxQFEVR1q5dqxgYGChnzpyJcN6iRYsUQDl79qyiKIry8OFDxcDAQKlfv36kIejwr6Vvfy9169b94bD4t6+Bn3n9rVmzRnvM399fSZcundKwYcPv3q+ihPwNA5H+lSlTJtLvp2DBgkratGmVDx8+aI/duHFDMTAwUNq0aaM9FtXrUtffDKAYGxsrjx49inBNQJk7d672WL169RQTExPl6dOn2mN3795VDA0NI11T199K1apVlWzZsul87IcOHYpw/PPnz4qJiYkyaNCgCMd79+6tmJubK15eXpGuL35/0gMoYsXFxQVra2vs7Oxo1qwZFhYW7Ny5k4wZM3LgwAGASBPUwyZn79+//7vXzZAhA+vXr9ceu337Njdv3tT5bfV///tfhKEYJycngoODefr0KRDyTT8gIIA//vgDA4P//uw7d+5M8uTJI8ViYWERoZcyZ86cpEiRgty5c1OiRAnt8bD/P378OMrHAmBqaqr9/9evX/Hw8MDJyQkfHx/u3bv33dvqsnLlSubNm0fWrFnZuXMn/fv3J3fu3FSqVImXL19GOLdVq1a8ePGCv//+W3tsw4YNGBsb07hxY53XNzQ0pEmTJmzcuBEIWfxhZ2eHk5NTtGP88uULlpaWMX5s4WMI6xnTaDR8/PiRoKAgihYtGqHnLkWKFHh7e0cYzv1WihQpuHPnDg8fPvzpeKKiKArbt2+ndu3aKIqi7Xny8PCgatWqeHp6RuppbNu2bYS/idiwsLAA0PZCb926ldy5c5MrV64IsYRNzwgbQt+1axcajYaRI0dGeE0A3y1DkiJFCl68eMGlS5eiHePPvP7Cv86NjY0pXrz4D19nYUqUKMHRo0c5evQo+/btY8KECdy5c4c6derg6+sLwOvXr7l+/Trt2rUjVapU2tvmz5+fypUra9+/foaLiwvZs2ePcM3kyZNr4w8ODubw4cPUq1cvQg987ty5dQ7Hhv9b8fT0xMPDA2dnZx4/foynp2eEc7NmzRrpGlZWVtStW1c7ghIWw+bNm6lXrx7m5uY//VhF4iUJoIiV+fPnc/ToUU6ePMndu3d5/Pix9s3n6dOnGBgYYG9vH+E26dKlI0WKFNrkTBcDAwNatmzJrl27tMOcYStQdSUt3w5jpkyZEggZSgqLBUISufCMjY3Jli1bpFhsbW0jfQhaWVlhZ2cX6Vj4+4nKnTt3qF+/PlZWViRPnhxra2vtB9y3b+DRYWBgQI8ePbhy5QoeHh7s3r2b6tWrc+LEiUjD682aNcPQ0FA7DOzn58fOnTupXr269nnSpUWLFty9e5cbN26wYcMGmjVrFqP6ZMmTJ//h0PiPrF69mvz582vn7VlbW7N///4Iz1n37t3JkSMH1atXx9bWlg4dOkSabzV27Fg+f/5Mjhw5yJcvHwMGDNCWvomt9+/f8/nzZ5YsWYK1tXWEf+3btwfg3bt3EW4Tfhgytry8vAC0yfbDhw+5c+dOpFhy5MgRIRZ3d3cMDAzIkydPjO5v0KBBWFhYULx4cRwcHOjRo0eE4XZd4uL1lzJlyh++zsKkSZMGFxcXXFxcqFmzJkOHDmXZsmWcO3eOZcuWfTcmCEnEPDw88Pb2jtb9fUvXtIrw8b9//x5fX18cHBwinacrnrNnz+Li4qKdp2htbc3QoUOByO8fUf1ttWnThmfPnnHmzBkgJCl/+/YtrVu3jtmDE78NSQBFrBQvXhwXFxfKly9P7ty5I/UkwPd7E76nTZs2eHl5sWvXLhRFYcOGDd
SqVUubdIVnaGio8xph33ZjKqrr/cz9fP78GWdnZ27cuMHYsWPZu3cvR48eZcqUKUBI71ZspE6dmjp16nDgwAGcnZ1xdXWN8IGaNm1aKleuzPbt2wkMDGTv3r18/fo10jzKb5UoUYLs2bPzxx9/8OTJE1q0aBGjuHLlysWDBw8ICAj4qce1bt062rVrR/bs2Vm+fDmHDh3i6NGjVKxYMcJzljZtWq5fv86ePXuoU6cOJ0+epHr16rRt21Z7Trly5XB3d2fFihU4OjqybNkyChcurE0GYiMsllatWml7nb799+2Cnbjq/YOQnnFDQ0PtB79GoyFfvnxRxtK9e/dY3V/u3Lm5f/8+mzZtomzZsmzfvp2yZcsyatSouHg4QNy/ngEqVaoEEKEnPLqieg+LahFXXMbv7u5OpUqV8PDwYObMmezfv5+jR49qa1J++/4R1d9W1apVsbGxYd26dUDI6ytdunRR1vwUvz+pAyjiTebMmdFoNDx8+DDC5Pm3b9/y+fNn7WKGqDg6OlKoUCHWr1+Pra0tz549Y+7cuT8dC4RMkM6WLZv2eEBAAE+ePInXN8FTp07x4cMHduzYQbly5bTHw1ZKx6WiRYty+vRpXr9+HeH5bdmyJYcOHeLgwYNs2LCB5MmTU7t27R9er3nz5owfP57cuXNTsGDBGMVSu3Zt/vnnH7Zv3x5ppXF0bNu2jWzZsrFjx44IH8C6Eg1jY2Nq165N7dq10Wg0dO/encWLFzNixAhtD3SqVKlo37497du3x8vLi3LlyjF69Gg6deoU49jCs7a2xtLSkuDg4F/+Yfrs2TNOnz5NqVKltD2A2bNn58aNG1SqVOm7X76yZ8+ORqPh7t27Mf7dmpub07RpU5o2bUpAQAANGjRgwoQJDBkyBBMTk0jn6/P1FyYoKAj4r8c0fEzfunfvHmnSpNEOjaZMmVJn0fLvjWJ8j7W1NaampjqnJHwbz969e/H392fPnj0RehZ1rYb/HkNDQ1q0aMGqVauYMmUKu3btonPnzlEmq+L3Jz2AIt7UqFEDINIqyJkzZwJQs2bNH16jdevWHDlyhNmzZ5M6dWqqV6/+U7G4uLhgbGzMX3/9FeFb+PLly/H09IxWLD8r7A02/P0GBASwYMGCn7remzdvdO4AERAQwPHjx3UOu9erVw8zMzMWLFjAwYMHadCggc4P6m916tSJUaNGMWPGjBjH2bVrV9KnT8+ff/7JgwcPIrW/e/eO8ePHR3l7Xc/bhQsX+OeffyKcF35lJYQMj4cVtg4re/LtORYWFtjb23+3HFF0GRoa0rBhQ7Zv3x5plSWgsxxRXPj48SPNmzcnODiYYcOGaY83adKEly9fsnTp0ki38fX11Q5r1qtXDwMDA8aOHRupF+l7PVXfPpfGxsbkyZMHRVEireYNo8/XX5i9e/cCaMuhpE+fnoIFC7J69eoIyd3t27c5cuSI9v0LQpJlT0/PCNMGXr9+rXPFbnQYGhpStWpVdu3axbNnz7TH3dzcOHz4cKRzIeLvxNPTk5UrV8b4flu3bs2nT5/o0qWLtji7SLqkB1DEmwIFCtC2bVuWLFmiHQa9ePEiq1evpl69elSoUOGH12jRogUDBw5k586ddOvWLdolSL5lbW3NkCFDGDNmDNWqVaNOnTrcv3+fBQsWUKxYsXh9IyxdujQpU6akbdu22u3Z1q5d+9PDWS9evKB48eJUrFiRSpUqkS5dOt69e8fGjRu5ceMGf/zxB2nSpIlwGwsLC+rVq6edB/ij4d8wmTNn/uk9b1OmTMnOnTupUaMGBQsWjLATyNWrV9m4cSOlSpWK8va1atVix44d1K9fn5o1a/LkyRMWLVpEnjx5tL04EJKkfvz4kYoVK2Jra8vTp0+ZO3cuBQsW1PY858mTh/Lly1OkSBFSpUrF5cuX2bZtW6RSQT9r8uTJnDx5khIlStC5c2fy5MnDx48fuXr1KseOHYv1FmQPHjxg3bp1KIrCly9fuHHjBlu3bs
XLy4uZM2dSrVo17bmtW7dmy5YtdO3alZMnT1KmTBmCg4O5d+8eW7Zs0daIs7e3Z9iwYYwbNw4nJycaNGiAWq3m0qVLZMiQQVtr81tVqlQhXbp0lClTBhsbG9zc3Jg3bx41a9aMctHPr379vXz5UjvUGRAQwI0bN1i8eDFp0qShV69e2vOmTZtG9erVKVWqFB07dsTX15e5c+diZWUV4e++WbNmDBo0iPr169O7d298fHxYuHAhOXLkiLKU0I+MGTOGQ4cO4eTkRPfu3QkKCtLWVwyfaFapUkXbwx2WuC1dupS0adPy+vXrGN1noUKFcHR01C4UKly48E/FLn4Tv3zdsfgtRLcUSGBgoDJmzBgla9asipGRkWJnZ6cMGTIkQukTRfn+LhA1atRQAOXcuXPRjiOs3MbJkycjHJ83b56SK1cuxcjISLGxsVG6deumfPr0KVIsuspcRLXDAN+U6dBVBubs2bNKyZIlFVNTUyVDhgzKwIEDtSVkwscYnTIwX758UebMmaNUrVpVsbW1VYyMjBRLS0ulVKlSytKlSyOU8Ahv//79CqCkT58+yp0nonqM4UX3dx/m1atXSt++fZUcOXIoJiYmipmZmVKkSBFlwoQJiqenp/a8b/8GNBqNMnHiRCVz5syKWq1WChUqpOzbty/Sc7Rt2zalSpUqStq0aRVjY2MlU6ZMSpcuXZTXr19rzxk/frxSvHhxJUWKFIqpqamSK1cuZcKECUpAQID2nNiUgVEURXn79q3So0cPxc7OTjEyMlLSpUunVKpUSVmyZIn2nLC/y61bt0bruQu7r7B/BgYGSooUKZRChQopffr0Ue7cuaPzNgEBAcqUKVOUvHnzKmq1WkmZMqVSpEgRZcyYMRGec0VRlBUrViiFChXSnufs7KwcPXo0wnMQ/veyePFipVy5ckrq1KkVtVqtZM+eXRkwYECE6+p6DShK7F5/0S2R9G0ZGAMDAyVt2rRK8+bNI5RmCXPs2DGlTJkyiqmpqZI8eXKldu3ayt27dyOdd+TIEcXR0VExNjZWcubMqaxbty7KMjC6yvZkzpxZadu2bYRjp0+fVooUKaIYGxsr2bJlUxYtWqTzmnv27FHy58+vmJiYKFmyZFGmTJmirFixItJzHJ3X79SpUxVAmThx4nfPE78/laLEYlatEL9A/fr1uXXrFo8ePdJ3KEIIkajNmTOHvn378u+//+pcrSySDpkDKBK0169fs3//filVIIQQsaQoCsuXL8fZ2VmSPyFzAEXC9OTJE86ePcuyZcswMjKiS5cu+g5JCCESJW9vb/bs2cPJkye5desWu3fv1ndIIgGQBFAkSKdPn6Z9+/ZkypSJ1atXazdyF0IIETPv37+nRYsWpEiRgqFDh1KnTh19hyQSAJkDKIQQQgiRxMgcQCGEEEKIJEYSQCGEEEKIJEYSQCGEEEKIJEYWgcSCRqPh1atXWFpafnfPTSGEEEIkHIqi8PXrVzJkyICBQdLsC5MEMBZevXqFnZ2dvsMQQgghxE94/vw5tra2+g5DLyQBjIWwfS+fbN9McnMzPUcjhBAiIbtgnJFSeT7g65cDMABjQ32HlGR9/fqVnDlzRrl/dVIgCWAshA37Jjc3I7m5uZ6jEUIIkZCZqy1JntwPI+PkYGyk73AEJOnpW0lz4FsIIYQQIgmTBFAIIYQQIomRBFAIIYSIZ+fUdpR1fI+vX259hyIEIHMAhRBCiHjlrranrLVBaPIniz9EwiA9gEIIIUQ8ymR+HVQgyZ9ISCQBFEIIIYRIYiQBFEIIIYRIYiQBFEIIIeLJOXXIblG+yaVWrEhYZBGIEEIIEQ/c1faUdXyBr59TyAGZ/ycSEOkBFEIIIeLYObUdDo4vwq38lZ0/RMIiCaAQQggRn6TnTyRAkgAKIYQQImkJCNR3BHonCaAQQgghkg5J/gBJAIUQQog4Jdu+JWABwQAYvP1Hz4HonySAQgghRBwJWfkbLvmTxR8JjqmnNy+88+k7DL
2TBFAIIYSII5nMr8PbTMjK3wQoIBjQ6DuKBEMSQCGEECIuqfQdgIgkIBDQYGrixsP3GrIGPNZ3RHonCaAQQgghfl+hiz4MXp/B9bY12f0f6TmghEF2AhFCCCHE7yd0yNfU05uH7zVk97egNM/1HVWCIT2AQgghRCydU9thUMQETXqnkH1/pfizfn0z5Cu9fpFJAiiEEELEQqSyL7L4Q79kyDdaJAEUQggh4ookfwmCqYkbl7xzU9pfhnyjIgmgEEIIEQs2qENLvwi9Cy30LH5MEkAhhBDiJ5xT2xGYygtbe0t8rcyl90/fwub9eXrz8Lat9P79gCSAQgghxE8oZu6G2qxwyA+S/OlX6Lw/UxM3XN/7yry/aJAyMEIIIcTPUoH0pehRuFIv/j5X0XyUUi/RJX+1QgghhEik/iv1YvTRQt/BJCqSAAohhBAiUXuLv75DSHQkARRCCCFiKDCVF2qzwiFFn4V+hM77Ez9H5gAKIYQQ0RRW9FnjVwhfkMUf+hA67w9Cij3LvL+fIz2AQgghxM+Q5O/XC1fq5cWjrzLvLxYkARRCCCFEwielXuKUJIBCCCGESBRMTdxwvW0tRZ7jgCSAQgghRDTJtm/idyGLQIQQQohoCEzlhYOZQci2b9J/8uuEK/b88JEtpWXoN07IX7AQQgjxA9qyL2F7/hob6jukpCFs0UdosWeZ9xd3JAEUQgghokO2ffu1Qhd9GLw+g+tta0n+4pgMAQshRCIUEBjIHtezXH/wCLWxEdVLlaRorpz6DkuI2As/5PteQ3Z/qfMXHyQBFEKIRMb1xk1ajBrL6w8fyWRtjZefH2NXrKZC4UJsHDuS1FZW+g5RiJ8TvtTLI1ntG5+kL1sIIRIRt3+fUqv/YHJkyMC1ebNwX7mYV+tWsHXoQG49cqfuoKFoNBp9h/lbcVfby7Zvv0JAMPDfkK8kf/FLegCFECIRmbFxM6ktLdkzahhmJmoADA0NqVe6JKksLak0ZARHLl6iWskSeo709xCYygvb9JYh275hIIs/4pmppzcPvQvKSt9fQHoAhRAikVAUhW0nTtGuckVt8heek2Me8mbOxJbjp359cL+hc2o71FnThPwgK3/jV7j9fcWvIQmgEEIkEhqNBm8/PzKmSa2zXaVSYZsmNV99vH9xZELEgpR60QtJAIUQIpEwNDQkW4b0uN5x09nu6+/PpQePsLe1/cWR/X7c1faUdXyPr19ufYfyewu/6ENKvfxSkgAKIUQi0qlOLbb87crlh5E/KKdv38XHr1/pUKuGHiL7fbir7XGwNghN/gxChn9F3AoIhoBATD29MXh9Bs0VP1n08YvJIhAhhEhEejSsz+4zrlQeOoou1atSs3gRPL19WHX0OLvPX2R0x/Y42EkPYGxkMr8OqsLIoo/4Fb7On/j1JAEUQohExMzEhEMzpzN25SqW7zvIjB27AMiTJTMrhg2mdbUq+g1QiGiRBR/6JgmgEEIkMhZmpkzt0Y2xnTry75s3qI2MyJI+HSqVSt+hCfFjofP+sHnG2/fWZNdvNEmWJIBCCJFImaiNyZU5k77D+K2cU9tRNut7fP2k6HOcC1fqxeD1GTQfZYs3fZIEUAghhCB05a928Qcy/y8uha32lXl/CYasAhZCCJHknVPb4eD4Al8rc2TlbxwL3eLN1MQN1/e+UuolgZAeQCGEECI86fmLc6ae3jx8ZCtbvCUg0gMohBBCiHgkK34TIukBFEIIIUTcC1308d+8P+n9S0ikB1AIIUSSdk5tJ9u+xbVw+/vKvL+ESXoAhRBCJFmRV/7K4o9YC13xa/D6DK7euWWLtwRKEkAhhBBCtn2LGwHBmH7x5uG7kFIvUucv4ZIhYCGEEElWJvPr+g7h96PoOwARHZIACiGESHLOqe0wKGKCJr1TSO0/6f2LvdB5f9g803ckIhokARRCCJGkRFr0IfP+Yi9spw8TN1xvW8uij0RA5gAKIYRIuiT5i51wpV78fa7K/r6JiPQACi
GEECLmwpI/Ezcevtdg9FH2901MpAdQCCGEED/F9Is3/q89yO7vp+9QRAxJD6AQQogkpZi5G7zNpO8wfgMaWfGbiEkCKIQQIkmIvPJX5v/9lIBg7aIPf5+rMvSbSEkCKIQQIkmwQf1fz58kfz8nbIs3T29ePPoqyV8iJgmgEEKIJEY++n5K+FIvsr9voieLQIQQQvz23NX2OFgbhAz9ipgLCAZC9veVUi+/B/kaJIQQ4rd1Tm1HYCovbO0t/5v3J7t+/BTTL976DkHEIUkAhRBC/NbUWdOE/Efm/f2c0Hp/KPDMu6C+oxFxRBJAIYQQQugWtugjtNizzPv7fUgCKIQQ4rfkrranrLXpf3v+ip8i+/v+nmQRiBBCiN9OxEUfBjLvT4hvSAIohBDiNybJ308J2+fXM2Srt9Ky1dtvRxJAIYRIYLx8fDlx5SpffXzIkcmOorlyolKp9B2WSCrC6v15eofO+5Niz78jSQCFECKB0Gg0TFqznpmbNvPF20d7vKCDPYsG9adIzhx6jE4kCeGLPT+yprS/1Pv7XckiECGESCCGLlrK6OUr6VC5EveWLuDL9o3sHT0clUZD5d79uPPkib5DTBTOqe1wcHwhRZ9jKiAQU0/vkGLPV/wk+fvNSQIohBAJwLO3b5m1eSvj27RkWqf2ZE+fDlO1mmpFC3N80jhsUlgxbsVqfYeZ4EVc+Svz/2LM5hmXvGXVdFIgCaAQQiQAm44ex9TYmO61akRqszQzpXutGuz62xVPLy89RJc4ROj5kx0/YiZ06FckHZIACiFEAvDm40cyp7XG0sxUZ3veTHYEazR8+PLlF0cmfnvh5/3dlnl/SYUsAhFCiAQgQ+o0/Pv2HV98fEhuZhap/ea/TzFKlow0VlZ6iC7hO6e2o6zjeyn6HFMBwQAh8/4+WlAaSf6SCukBFEKIBKB5lUr4BwYyZ9feSG2e3t4s2HuABs5OJDeXhQ3firTjh+z5GyOmX7z1HYLQA+kBFEKIBCCjtTWDWrdg7Op1vP38mW41q5M+VUpO37rD2A2b+eTtzYgObfUdZgIniz5iJLTYMwo88y5IdmSrt6REEkAhhEggRndsj5W5OVPXbWTxgcPa46Uc83Ji9AhyZsqkx+jEbyXcvL+Hj2xln98kSBJAIYRIIFQqFf2aN6V7g/r8ff0GX318yJk5E47Zsuo7tATpnNqOYuZu2Ka3xBek9y+6QpM/g9dncPXOTWlJ/pIkSQCFECKBMVEbU6VEMX2HkaD9t+jDKeSAzPv7sXD7+4Zt8SaLPpIuSQCFEEIkbpL8RZvpF2/8fa7K/r5CVgELIYRIfGxQw1uZExkzIQs+hABJAIUQQiQygam8sLW3DN3rVz7GoiVspw/Z6k2EkiFgIYQQiYa72h4HM4PQRR8y9BstAYGYeoYM/UqxZxFGvjoJIYRIhOTjK0ak5098Q15BQgghEgV3tT0Oji9Ch35FtIRu9SbEt2QIWAghRIIXmMorZOjXLzey40c0hSv27HrbmtL+MvQr/iM9gEIIIRK0c2o71FnThPT8GRtJ8hcdoT1/Bq/PoLniJ8mfiEQSQCGEEOI3ZPrFW98hiARMEkAhhBDidxK644fU/BPfIwmgEEII8bsICAQ0mJq48fC9BqOPsuOH0E0SQCGEEAmWu9qestamoYs/xHeFLvoweH0G19vWZPd/pOeAREImq4CFEEIkSO5qexysDf7b8eObxR+fPn3i6NGjeHl5kSNHDsqUKYNKpdJPsPoUOuQbVuzZSIo9i2iQBFAIIUQCFzH5Cw4OZsyYMSxcuBBfX19UKhWKopAzZ04WLlxI8eLF9Rirfph++S/5EyI6ZAhYCCFEgnNObUcm8+s6iz4PHDiQ2bNn06tXLx4+fMinT5/Yv38/VlZW1K5dm9u3b+shYn2SBR8i5iQBFEIIkaC4q+0p6/geTXqnkAPhev+ePHnCkiVLGD9+PMOHD8fGxgYDAwOcnJzYvXs36dOnZ+LEiXqKXA9C5/
3JVm8ipiQBFEIIkWCcU9uFbPem3fHDKEL75s2bsbS0pEOHDpFua25uzv/+9z/279/Ply9fflHEehRu0YcUexYxJQmgEEKIhEnHjh8eHh5kzJgRMzMznTdxcHAgODiYT58+xXd0CYKpiZv0/ImfIgmgEEKIBKOYudt3S75kzJiRp0+f4unpqbP95s2bqNVq0qRJE18hJgyhW70J8bMkARRCCKF359R2GBQxCTfvz0jnec2bNycgIIA5c+ZEavvw4QNLliyhQYMGmJtHXjzy2whX7Nn1trUM/YqfImVghBBC6J0NanibCawi1/sLL126dAwePJjx48fz5s0bOnbsSPr06Tl9+jTTpk3D39+foUOH/sLI9cPg9RlcvXNL8id+miSAQgghEpVBgwaRIkUKpk+fzrp167THK1asyNatW8maNaseo4tnAcEhNf9Akj8RK5IACiGE0KvAVF44mBnorPmni0qlomvXrnTs2JELFy7g7e2Ng4MD2bJli+dI9Sx0xw+p+SfigiSAQggh9OKc2o5i5m5o0jvhC1HO+4uKkZERZcuWjZfYEpzQki+mJm48fGRLdn/Z8UPEjiSAQohfxs8/gO2nTnP88lWCNcGUdMxLyyouJP+dJ+yL71JnTYOvHzFO/pIiU1M3XG9ZU9r/kb5DEb8BWQUshPglbrk/Jk+L1rQbP4k7jx7x5Plz+s6eS/ZGzTlx+aq+wxN6YINa3yEkLjL0K+KQ9AAKIeLd569e1Og3EJsUVhwYM4JcdrYAvPT4wP/+mk/9IcO4vGIpDqHHxe/PXW2Pg7XBfzt+CN3C5v0B/k88KO3vp994xG9DXnVCiHi35tBhPDw92TVyiDb5A8iYJjVbhw7CwsSE+dt36jFC8Stpkz8r85Ch3++UfREaTD29efHoK0YfZd6fiDuSAAoh4t0+13NUK1IIWx27M5iZqGnm7MRe17N6iEzol3wERYe/z1Wyy7w/Ecfk1SeEiHe+/v6ktLSMsj2VhQV+AQG/MCKhL+fUdjg4voh2yZckLXTlrxDxQeYACiHiXX777OxzPUtgUBBGySK/7Ry+eo382bPrITLxq7z2+MD0Q+e5d/cyqI0oV9aJVu3a/v579v6s0OTP4PUZLnnnpjRS9FnELekBFELEu//Vq8OrDx+ZsGkrihJxKePa4yf5x+0+XerX0VN0Ir7tdT1LjmatWbZ6HiaBviTz/My4iRMokD8/Z86c0Xd4CU9AYIR5f7Ljh4gP0gMohIh3BeyzM/5/nRi+ZBmnb92heXkn1EZG7Dp3nn0XL9O+ZnXqOiWRgr5JzL2nz2g+cizVixVhae8epLAIGfp97+lJq2mzaNK4MVevXSN9+vR6jjSBSfcM3suqeBF/pAdQCPFLDGrdgh2TxqFKlowe8xfTafY8nn/8xJLBA1g8qD8qlUrfIYp4sGDHTqwszFnb/w9t8gdgbWXF5sED0AQHsXLlSj1GKETSJD2AQohfpnbZMtQuWwb/gAA0ioKpWgoB/+72X7hGk7JlMDE2jtSWwsKc2sWLcezoEYYOHaqH6IRIuqQHUAjxy6mNjSX5SyKCg4Mw+87v2lRtTKCsdv1P2HOhwFv89RuL+K1JAiiEECLeOOYpyN6LlyIt/gEIDAri0JVrFC5aVA+RJUDhVv5qrvjJ4g8RryQBFEIIEefOqe0ITOVFnz+64fbsOVO2bo/QrigKQ1au5c3HT3Tu3FlPUSY8Bq/PyI4f4peQOYBCCCHi1Dm1HcXM3dCkd6Jcehg6dCgjJk5k1z8XaVimJJ7ePuz45wIPX7xk+vTpODo66jtk/QsI1ncEIomRBFAIIeKJoigcOn+BhTt2c/nePZIZGlKlRHF6NW5IAfvfvPB12KJuYyOGDh1KsWLFGDtmDKPXbyYgMGSo08rKilevXvH161csv7NTzG8vdOjX1MSNh94FyY5s+ybinwwBCyFEPFAUhSELl1Bn4FDeffhAj5rVaV2hPCcvXaFkp65sO3lK3yFqBQcHEx
gUFGfXs0GN2rRwhGMvXrzg2vXrFCtenCVLlrB9+3ZatWrF4sWLqVWrFl5eXnF2/4lKuHl/rretZc9f8cuoFF0zc0W0fPnyBSsrKz4c2ktyc9nXUgjxn72uZ2kwZAQzOnegd91a2uOBQUF0mDWXHefO82DzOjJaW+stxkPnLzB78zZOXLmKoigUypmDHg3q0aZ61Z+uyxiYygtNeqfQnwzA2JC3b9+SO3duWrduzcyZMyNc+8aNG1StWpXevXszfPjwOHhUiUxAIKambjy8ZSvJ3y/0xdub1NVq4+npSfLkyfUdjl5ID6AQQsSD+dt3UjJXzgjJH4BRsmTM79EFI0NDlu/dr6foYPbmrdQeMARPBaZOncrcuXNJmyUrnSZNpevUGTpX7f6Iu9oetVloz5+xERgbArB+/XoMDQ0ZOXJkpMSyQIECNG/enJUrV6LRaGL9uBKVsHl/0g0j9EASQCGEiAcX7rhRr1QJnW3JzcyoVDA/52/f1R577fEBt3+f8sXbO95ju/vkXwbOX0SfPn04fuIEXbp0oW3btmzdupWFCxeyYt8Bdp6OzR69ET9aHjx4gKOjIylTptR5drly5Xj79i2enp6xuM9EJiAQ0GDq6c3D29L7J349SQCFECIeGBoY4B8YdYFj/8BAkhkacvLKVSr2/INM9RuTv3V7MtRuQIcJk3nx7n28xbZ0zz7SpE7NiBEjIvXItWzZklIlS7Jo154YXdNdbY+D4wt8rSJPhzE3N+f9+/dR9iq+ffsWAwMDTE1NY3SfiZ2piRuu730l+RN6IQmgEELEA5diRdh46m+dw5qvP37k+PWbpE2Vgur9BhLg58fqP/twcvJ4RrZoyrGLl3Dq2jPeksAbj9wpX6ECxjq2ZwOoUrUqNx5FPykJTOWFg7UBvn65CZv3F17dunX5999/OXbsWOTbBgayevVqqlWrhomJSYweR2Ln/8RDij0LvZEEUAiRYCmKwt0n/3L62nUePn+h73BipHeTRtx/+YpeC5fiFxCgPf7u82eaT55BcjMz9pw5R92SxTk9dQItKjhT1jEPAxs34PysaSgaDUMWLomX2NRGRt8dbv38+bPOvXt1Oae2Awjp+Qs37y88JycnypQpQ+fOndm/fz/BwSFz354/f0779u25f/8+f/755088EiHEz5I6gEKIBOn45SsMWbiEaw8eao+Vye/IlO7dKJE3tx4ji57S+RxZOKAf3afPYvvZc1QuVBAff38OXbmKhakpPRs3ZOyKVUxo1xpDw4hJU4bUqehTrzbD16znr6+9SRnHNfIy2aRl9cHDvHjxAltb2whtfn5+bNm8mVqlS0X/gj9YMKxSqdi4cSOtW7emefPm2NjYkCpVKu7fv4+FhQVr166lRAnd8yWFEPFDegCFEAnOwX/OU/PPQZgbGbFzxBDuLp7HxsH98fXxxaV3X87duq3vEKOlY+2a3F63itbVq/Hi02e+BgQytlMH7m5Yg0ajIUPqVNhnSK/ztk6OeQgIDOTpm7dxGtNXHx+2njiJ2siIZk2b8vDhfwn227dvad26NR4eHvRq1OCH1zqntqOs43s06Zx+eG6qVKnYt28fJ06coGXLlpQvX545c+bw4MEDateuHavHlKgEBIeUfvGM/8U+QnyP9AAKIRKU4OBges2cg0uhAuwaOZRkob1jDhkzUKt4USoOHkHfOfO4sGyRniONHgc7W6b17BbpeHJzcz55eeHt54e5jrlvLz0+hJ5nFqfxbD52Am8/f/aNHs7/5i6gSJEiFC1SBBMTEy5cvIgKMDEyInvGDN+9jrvanrLaeX+EDP/+gEqlonjx4hQvXjwOHkkiFLbjh6c3D99ryO4ve/4K/ZEeQCFEgnLy6jWevnnLiBZNtclfGBNjY4Y2bcTV+w+4+chdTxHGjQblnfALCGTV0ROR2hRFYeH+gxTOmYOs6XX3EP6s24+fkNvOlsqFC+K2eB7L+/bCwdKcDAYwqV0rVvfrw1dfX95+/BTNKx
pEK/kTIUxN3UKTP1n5K/RLegCFEAlK2JBnMQd7ne1Fc9iHnveG/Il4P93M6dLRrkY1Bq5YhaGhAW0rVcBUreaFhwej123i+PWb7Jg07qd35IiKqdqYN58+MWfXXoySJcOlUAHaVKqgbV934lToeeo4vV8hRMIiCaAQIkFJbRWyLZP76zc46BiGdH/9JvQ8q18aV3yY9+cfaBSFXguWMGTlGtJaWfH03XtM1WqWDRlI7bJl4vT+Hj5/wb5z5/nw5SvD1m5Ao9EQGBhIrZLFWfFHT1KYm7P88DHK5HckTYrE//wKIaImCaAQIkGpWqI4KS0tmbVzDwt6do3QpigKs3fuIWv6dJTMm0dPEcYdYyMjlg0ZyJA2Ldl+8jSfvbzIliEDTV0qYmkWt3P/3n78SOU+f2JmZcW2bdtwcXEhICCAHTt2MGTwYKqPGEvBrFlwvXOXPVMnfvda59R2FDO/jq/Vjxd/CCESJkkAhRAJiqlazYgObek3Zx5GyQz5s0E9MqW15uHLV0zYtJVd/1xg7ahhGBj8PlOYs2fMyMBWLWJ0Gz//AII1wZiZmERrmHjhjt188fXl5NmzpEuXDgATExNatGhB9uzZqVy5MjceP2HhgH5UL1UyyuuErfz19QtN/nTU/RNCJHySAAohEpyeDesTHBzM2BWrWbDvIOYmJnj7+ZEquSXLhgykmUslfYeoN3tdzzJr01bO3LgJQN6sWejWoB6d69T6blK84dhxGjdpok3+witRogRFixQhrXEyOtWpFeU1Qnr+3EKTv8g7fojvCAgpfo3u3fCE+OUkARRCJDgqlYo/mjamU+1a7HE9y9uPn7BNa02tMqWS9OKEGRs2MXjhEsrmzcPCnt0wVRuz558L9Jo5h7M3b7Fq+JAok8APnp5ky5Ytymtny56dNw/v/ziIsM5GSf6iL3z5l0e2sgJYJAiSAAohEiwLM1NaVHHRdxgJwt0n/zJ44RIGNW7AuDYttcO+LSs4s/XMWVpMmUGN0iWj7B3NnC4dV69c0dmmKArXrl6lhH3UCaKIHVMTN1wfWcvevyLB+H0m0QghxG9s6Z592KRMwcgWTSPN+WvsVIYK+fOxeOeeKG/fvmZ19uzdy/Xr1yO1bdmyhYePHtG+RvW4DluE8n/iIcmfSFAkARRR+uDpifvLl3j7+uo7FCGSvFvu7pTP54ixke6iy1UKF+SW++Mob9+xVg0KOthTu1Ytpk2bxr1797h27RqDBw+ma9euNK/sglPB/PEVvhAigZEhYBHJ2Zu3mbBqDUcvXQZCVmU2c6nIqI7tyGhtrefohEiaTI3VfPzqFWX7h69fvzs/0szEhEMzpzJowWKmT5vKuHHjAEiTIgVD27RkaJvWcV50WgiRcEkCKCI4+M95Gg4dSd7MmVjcuztZ0qbl/L37LNh/kGOXLvP3wnnYppUkUIhfrbZTGXrNnMPjN2/I9s1KXl9/fzac+ps6TmUIDApir+s59riexc/fnwIO9rSrUZ30aVJjZWHBooF/MrlbF265u2NoaEjhHDkwURvr6VEJIfRFhoCFVkBgIP+bPJ3KhQpwbsZkOlRxoWLB/Axt1pgLs6ahCdYweMEifYcpRJLUorILGdOkpv7YSdz696n2+AsPD5pOmsYnL28aVnCmSLtONB0xmjuP3Pn06TOT16wne+PmrD5wSHubFJYWOBUsQOl8jpL8CZFESQ+g0Nrreo43Hz8ycdxIjJJF/NPImCY1f9Svw9BVa/H47CnbRInflqIoePv6YWhokKBKzliYmXJg5jTqDhxC4Z59yZ81C6ZqYy4/eISFqSlbxo2i7+y5+Pn5c2H2NAqH7pP82cubQStW03nyNLKkT4dzoYL6fSBCiAQhSfYA+vr64urqyt27dyO1+fn5sWbNGj1EpX8Pnj/H2sqKvJkz6Wwvn9+RwKAgnrx+/YsjEyL+aTQaFu3cTcE2HUhZtSbJXapToUcf9pw5q+/QtHJlzsTt9avZMGYkhfPkxiFLFm
b/0YsnOzbjHxjE3X+fsmHwn9rkDyCFhTkLe3alQLaszNiwWY/RJ1FhBaCFSGCSXAL44MEDcufOTbly5ciXLx/Ozs68DpfQeHp60r59ez1GqD8WZmZ88fHBK4pVv68/fASI8z1KhdA3jUZDu/GT6D3rL3JnzMDqP/uwuHd3DDQaGg4dwbT1G/UdopZRsmQ0rliepYMHsHLYYLrWr4ulmRn7zv2DY5bMFHWwj3QbAwMD2rpU4NCFiwQGBekh6iQqIBDQYGrixjPvgvqORogIklwCOGjQIBwdHXn37h3379/H0tKSMmXK8OzZM32Hpnd1ncoQGBzMyqMnIrUpisLiA4fJmzULOTPZ6SE6IeLPlhMn2Xj0OOsH9mPTkAG0qOBMhyouHJs0lqFNGzF00VJuP36i7zC/yz8gkBTm5lG2pzA3R1GU2CeAYVuZSc/W94Xt/mHihutta9n9QyQ4SS4BPHfuHJMmTSJNmjTY29uzd+9eqlatipOTE48fR11DKynIZGNDuxrVGLxyNUsPHcEvIACAd58/03vRUvZfusywdm2kVIT47SzeuYfy+R1p7FQmwnGVSsWwZo2xSZmCpbv36im66CnokJ1LDx7i4flFZ/vhK9fIYWcbq3mNpf2f88y7IKYmboBGm+QI3UxN3Xh421YKQIsEKcklgL6+viQLt8BBpVKxcOFCateujbOzMw8ePNBjdPr3V9/eNKlYge7zFmHXpiP5u/Uma/v/seroCf7q14fGFcvrO0QhonTo/AVq/jmI5C7Vsapcndr9B3P04qUf3u724ydUKVxQZ5uxkREV8uf7ZT2AD54956+t25m+fhPHLl1Go9FE63ZtqldDpVLRb+kKgoMj9s4duXKNba7n6FK/bqy/wGX3f4TrbWsMXp8JOSA9gUIkSkluFXCuXLm4fPkyuXPnjnB83rx5ANSpU0cfYSUYamNjVg4fwpA2rdhy/CSfvn7lfxnS06KKC6mSJ9d3eEJEacKqtYxevpKiOewZ3bIpigJbzpylxp+DmNi1MwNaNo/ytqZqYz58+U6R5S9fMY3ncilfvL3pNHEqO/8+g4mxMWojIzy9vclhZ8uaUcMpkjPHd2+fJoUVy4cOos3YCdx88oS2lSqS0tKCI1evsePseaqWKEa3+nXjJNbS/s9xpyAOnt74WlnGyTWFEL+WSlEU5cen/T4mTZrEmTNnOHDggM727t27s2jRomh96/7y5QtWVlZ8OLSX5N+ZeyOEiF/nbt3GuXtvRrdsxtBmjbW9XIqiMGb9JiZs2sq5JQsoljuXztv3mD6LPX+78mDZgkhDpI/fvCH3/3ryV9/edKkXP18QNRoN1foO4Mq9+8zo3J5mzk6ojYw4e/ceA5at5NHrN5xftpDsGTP+8Fr/3L7DrI1b2Hv2HEHBweTOnIn/1atDl3p1IpV3ig13tT0O1gb4WpkDBmBsGGfX/i18MwdQhoETli/e3qSuVhtPT0+SJ9HOjSSXAMYlSQDF7+bf129YunsvV+8/wNgoGdVKlaRV1coJYuX3nSdPWLhjN2dv3sLAwIAKhQvRtX5d7G0z0nrMBK7cvcvtRXMxMIg4syU4OJhc/+tBucKFWD50kM5r33/2jOIdu+CUNw8Le3bDzjoNADef/EvrabP46ufHjbUr4+15OH75CtX6DmDfmOFULVI4QpuntzeOXXtTp1xZ5vfvG+1rajQagjWaOE36vhUxCQSMde9TnGSFJYGe3jx8r5GFIAmIJIBJcA6gEEK3lfsPkqtZKxbv2oOlsREBfn70nT2XPM3bcOORu15jW7HvAIXbdWbP32conTMHxbJnY92hwxRo04Gdp//myr171ChWNFLyB2BoaEiNYkW4cu9+lNfPmSkTOyaN58L9h9h37EqZPwdTuFc/ivTqh19QEAdmTovXJHjTsRPktrOlSuFCkdqszM1pX6USm44dj9E1DQwM4jX5g9D5gO99QxeFIItCvmVsBIQkyJnMr+s7GiEiSHJzAIUQkb
nevEWXKdPpWNWF6Z3aY25iAsCzd+9pNGEKtfsPwm3jWsxNTX95bFfvP6DbtJl0rOLCnK6dtEnNrP91oOPsubQaM4HMNmnx9veL8hpfff1+mAxVKlqEx9s3seHIMS7ccSOZoSFD27ehrlPZeE+kPn35Qua0aaNcoJHFJi1fvH0IDg7G0DBhDbWW9n+O5goYpDqDJr1TaBIoQ8JaxoYQEL2FPEL8StIDKIRgzqat5Mlky/zuXbTJH0CmtNZsHjKANx8/sfFozHqg4sr87TuxTZ2aud06R0jETNVqlvXphZnaGCsLC7a7/oO3X+Qk8IuPD7vOnad6qRI/vC9LMzO61KvDimGDWDK4P40qlI+U/Lmr7QlM5aX9566OXHg5prKkT8+1x48JCNTdg3bx/kPs0lpjaGiIoigkxJk7Rh8tePHoK6ae3kiJGCESPkkAhRAcvniJlhWcdQ6hZk1ng1PePByJRjmV+HDm+g0ali2ls+fLzERNreLFCAgMxD8okGaTpkeog/fu82eaTpqGAnSuUzvWsQSm8sLW3hJNeiftP1t7SwJTRb2CODra16rO20+fmb/vYKS2O0+fseHUaZwLFaJW/8FYVKqGWcUqVOrdl52n/45RMvjv6zeMX7mGbtNmMnrZSh48i9uFCb9ySPjcuXO0atUKO9uMZMyQgYYNGnDs2LF4uz8hfjcyBCyEICg4+LsFgs1N1AQG6mcLMUXhu7XrDAxUGCVLxvaJ42g8bBRZ2nWmQv58aBQNp27dxsTYmJ2Tx2Nnk1Z7G3e1fYznZKmzpkHjFzpHL/xih4BANOmdMMjqhv8Tjyhv/8y7YJSLAPJmzUrfZo0ZuHwVt58+pZ1LRZKbmXHw8hVm7dyLlbkF6w4foVChQoweMwZDQ0P27N5Nk+Gj+bN5UyZ37/Ld2BVFYdjipUzfsBlLU1McMmbgyZu3TFi9ls51ajG3X584G1r+FUPCS5YsoV+/fuTOnIk+NauTzNCQ7efOU69ePYYNG8aQIUPi7L6E+F0l6QRw7dq1LFq0iCdPnvDPP/+QOXNmZs+eTdasWalbN27qZQmRGBTNlZN9Fy7Rs3bNSG2fvbw5fesOA1u10ENkUKZAPnac/YeJbVtFSlJ8/f3Zd+EybWpUw6VYUR5u2cDqAwf5+/oNDFUqxv+vE21rVNPWsDyntqOYuRu26S3R4BSjOHzDRpe/XelqbAQBgfj65Yb0Ud/eFgh87cUl79w6S4JM6d4VOxsbZm3czJpjJwEwMTamaoli7D5zlj///JORI0dqk+Fu3boxf/58hgwZgkuxIrgUKxrlfc/cuJlp6zcxvk1LetapibmJCf6BgSw/fIx+S5aTwtKCiV3/F5On44eMPlrg7v31v1XCARrtc/f161dOnz6Nj48PuXPnJl++fNG+7t27d/nzzz/pVacmMzp30D4fAxs3YPKW7YycMIGyZcvi5BSz368QSU2SHQJeuHAh/fr1o0aNGnz+/FlbOT9FihTMnj1bv8GJX0ZRFN5+/MgrD49o77jwO+rWoB7Hr99k1Tfz/IKCg+m1cAnBGg0datXQS2w9Gtbn6bv3/LlsZYQdLgICA+k6bxFffX3pWj+kPl+aFFb82aIZM+dsZc+xmfw5rQ1pKqTFoIgJBkVMKOv4PqRXCkKSkZ/5p0s0b6dJ70RZx/c65w2qVCp6NWrAg80buLh8MWcWzeP5rm1Yp0xJhvTpGTZsWKSe0O7du+OYNy8Ldu6O8vnz8w9g+obNdKlRlUFNGmrneKqNjOheqzqDmzRk/vadfP4au2FsXb4dEtb4+TNu3Dhy5MhBs2bN6NChA6VKlaJ8+fLcuXMnWtdcunQpNilTMqVD2wjPh0qlYnCThuTKZMfixYvj/LEI8btJsj2Ac+fOZenSpdSrV4/JkydrjxctWpT+/fvrMTLxKyiKwuoDh5i9eSt3nvwLQJZ06ejesB69GjUkWbKktYKxmUtF/r52nc5z5rPm+ElqFy/GV19f1p88zbP3Hq
wZOYz0aVIDcMv9Mcv37uf+02dYmJrSoIIzDZydUBvHz04ZxXLnYm6/PvSaOYe95y9Rt1RxgoI17Dx3Ho8vX1g9Yihvs5Xkbej5NqhDep38ckdxRT2tUDU2goBgfP1y42DtzcP39jqHhJMlM6RQDgftz9cePsKlcuUIW1iGUalUVKtenU1r1kR5t+du3cbD05P/Va+qs71ztSpM2LSVIxcv0aRShZ94YN8Xfkh40JQdLFy3ht69e9OhQwesra05ffo048aNo1q1apw6dYrs2bN/93pXLl2iWpFCOldmq1Qq6pYoxoZzF+L8ccSWOmsaXL2lILRIOJJsAvjkyRMKFYpcc0utVuPt7a2HiMSvNHjBYmZu2kKdksUZ3rQRxsmSsfPceYYsXMLFu26sHz1C54KI35VKpWLBgH5ULFqEhTt2MWbDZoyNjKheqgQbmjTSbkM2Zvkqxq9aQ7qUKSmTJxevPN7TZuwEJmfJzIGZU8lobR0v8XWpV4fieXKzcMcuDl+7gYFKRb3y5ejeoB7GOSvh4PhCe66vX258IWEWJTY2BAzxtTLH1ur7Q8LamyRLhtfXr1G2f/36FePvPFZvP18AbFKk0NlukzLkuI+OFdRx6d8bn5m/ZhWTJk2iR48e2uM1atSgdOnSlC5dmilTprBkyZLvXieZUTL8AgKibPfx99eZLOtXyBeSst9J/IX41RLaq+SXyZo1K9evXydz5swRjh86dCjSPsHi93L+zl1mbtrCtE7t+aPefytD65QsTp2SxWkycSr1yjnR1KWiHqP89VQqFY0rlqdxxfI629cdOsL4VWsY27oF/RvW0/bA3Hj8hPrjJtFwyAj+Wbrwuws2YqNQDgeWDB7AObWd9pi9uRua9Jbf9PYlghp0ofMG1WaFKcZV8LeI8tQaJUswYe06Pnz4QOrUqSO0+fr6sn3bNppVcI7y9rmzZAHg+PUbtNBx3vHrNyKcF1/WHz5KyhQp6NixY6S2FKHHJ0+ezF9//YVJuFJE36rkUpm/Zs/is5c3KSwi7sDkHxjIVtd/qN2wYZzHHyvhEn8HGzce3pYkUOhf0uni+Ea/fv3o0aMHmzdvRlEULl68yIQJExgyZAgDBw7Ud3giHi3dvZesNjb0rhN5wUP90iUp55iXpXv26iGyhEtRFGZs3EztEsUY0rRRhOG3AtmysqxPT67cf8Dpa9fjNY7AVF6UtTbV/tM9ny+BJ39aBhCNXLlD7RqYGqtp0bw5L1++BODBgwfMnDkTZ2dnvn79QvcG9aK8vb1tRioWKcz4jVt47+kZoe2Ljw8j1myggH12iufRvU9yXHnz8SNZsmSJMrnLkycP/v7+eH4T47c6duyIgWEymk2OWPLH09ubNtNn89HLiy5dvr8qWq/i5/uREDGWZHsAO3XqhKmpKcOHD8fHx4cWLVqQIUMG5syZQ7NmzfQdnohH954+pVy+vFEO8VYskI+FBw7/4qh+zM8/AEPD6G3v5fbvU3ac+puvPj442NnSpFKFWG1l9vrDB24/fsKIpo10tlcokA/bNKk5fOES5XVsZ/azwi+WyGR+HU16p5Dh3fDfXRNNwvdz0qZMyZ6pE6k3aBh58+YlVcqUeHz4QLJkyVCpVAQGBtJuwmTWjhxKtgwZdF5j3p9/UL5Hb4r06kfXGtXInzUL91+8ZNGBQ3zy8uboXzPjrec2TPrUqXnytyu+vr6Y6thR5s6dO5iYmGBlZfXd66RLl47NW7bQrGlTsrT/Hy4F82NkaMjR6zcI1iisXr2aXLniN5mNDV/f3Dg4uuF6207mAwq9SpI9gEFBQaxZswYXFxcePnyIl5cXb9684cWLFzqHJ8TvxcLUjHefP0fZ/ubzZyz0sOWZLsHBwSzetYeCbTpg6VIN84pVqd53AEcu6C7K7OvvT6vR48jfuj2zNm1h9+m/6T59FpnrN4nxXrLhBQaF1AA0N9FdK1ClUmGmVmvPiwuBqbxwsDbQ/ovY22f4378koGTePNxetx
K7tGkJ1mhYvHgxr1+/5t27d2zevJkPPr5U7vMnHp9195452NlydvECqpcuxeStO6g/bhIj122kdIH8nF0yP8Kik/jSqlplPL98YdmyZZHaPn78yPLly2nUqNF3h3/DlCtXjpu3bjF8xAj8LJLjqTalT99+3Lp9m9q1Y1/wO96EztUMmQ9oGie7yAjxs5JkD2CyZMno2rUrbm4hpQnMzMwwi8eN3kXCUs/Zib6z5/L4zRuypUsXoe2zlzebTp+hUxzsGhFbGo2GduMnseXEKeqWLE6/erXx9vNj3YnT1Ow/iNl/9KJHw/oRbtN50lT2uJ5lSe8etKhQDrWREc/fezB01VrajptE6uTJqVy8WIxjyZjGmvSpU7H3wiWqFikcqd3t+QsevHzFiNw5f/rxhjmntgsp1+JXKOEu5oitgGBAAzHY0e3Y5Ss8ffOGEydOULTofzX/qlevTr58+ShSuDBLdu9haNvWOm+fJX06Fg/qz5w/evPp61dSWFp8t/h3XMueMSO9GjVg+PDhvHr1SrsK+NSpU0ycOBE/Pz8GDRoU7eulSZOGvn370rdv33iMOjIvLy/WrFnD+vXrefPmDTY2NrRs2ZI2bdpgaWn54wuErQa3MieTz5nvzv8UIj4lyR5AgOLFi3Pt2jV9hyH0oFXVytilTUvtUeM5f+++diutO0+fUXv0eFQqA7o10H8h8PVHjrHp2Ak2DvqTLUMH0qZSBbrVrI7r9En0qVubfn/Nxz10ThiEDPtuPn6SOV06075KJdRGIYmTnXUaVv/Zh5K5cjBh9dqfiiVZMkP+V7cOK48e5+SNWxHavHx96Tl/MelSpaJ+udgV33VX21PW2vS/RR2/ZfIXCGgw9fTm4XsNRh+jlwBsPHqc0qVKRUj+wtja2tKwUSM2HD3xw+uYqI1Jnyb1L03+wkzr2Y1RHdqxfs0aihQpQqZMmWjTpg2pUqbk0KFDZM2a9ZfHFBMeHh64uLgwdOhQsmbNSseOHcmePTvDhg2jUqVKvH//PnoXSiI91yJhS5I9gBBSQPXPP//kxYsXFClSBHPziKvJ8ufPr6fIRHyzNDPj4KxpNBg8DKf+Q8iWzgZjIyPuPX9BhjSp2Td9MplsbPQdJot37aZK4YI0KFMqwnGVSsW4Ni1Yc/wkS3fv024DtuPU31iZm9OiQrlI1zIwMKBL9aq0nTGH1x4ftDX9YmJAy2b8c/sO1UaMoVbxopTLl5c3Hz+x7sRpvP392TNtUqxqAQam8sI2veV/c/xCPyQ/f/7Mpk2buHnzJsbGxlSvXh0XF5c427pMH0xN3HB9FLOacB88v+BQKHLva5hs2bJxaN++uAgv3hgYGDCsXWv6NmvM6WvXuRJoTm1HQ3I5tdR3aNHSp08f3rx5g6urK3ny5NEev3fvHrVq1aJXr15s2rQp2tfTpHcikDM/LAUkRHxIsglg2EKP3r17a4+pVCoURUGlUkXYcUD8fuxtM3Jt9XIOX7jE8ctXCNZoGN6hHfWdnTA2Shi9TjcfPWZcG93br5mq1ZTP78jNR+7aY199fLC2Sq7t+fuWbZo02vPSE/MEUG1szK4pE1i5/yBLd+9lxJoNWJqZ0qC8M70bN8TBzjbG14SIQ75AhF6/ffv20alTJ/z8/ChQoABfQueQOTo6sm3bNmxtf+4+E6NMNjZcvXJF+x71ratXrpD5mykNCZWZiQnVS5WkeujPga/PoDYrHLJlXAIt4/PixQv27t3LjBkzIiR/ALly5WL48OH06dOHp0+fRiovplNoKSBNeifKmsiiEPHrJdkE8MmTJ/oOQeiZoaEhNUqXpEbpkvoORSdTtTGfvaIuSv7xqxfJQ/e4hZCJ/o/fvOX5ew/srNNEOv/v23cwM1GTUUdbdBklS8b/6tbmf3WjniN585E7p65dR1EUyhbIry0irUvIkG/Yrh0RP/ivXr1K69atqVGjBlOnTiV9+vTakk0dOnSgfv36nDt3DqMEkrDHtw61qlO930B27t
xJgwYNIrRdvnyZAwcPMuePXnqKLnaMPlrw0FuDA974WkVjHp0eXLp0CY1GE+U+8XXr1qV3795cvHgxegkgaJNAVCE72AjxKyXZBDDaL1Ah9KRm6VKsO3GKIU0aRuqVfPjyFX/fvsPiQf9tW9ikUgUGzFvI0FVrWf1nnwhlbp68ecv8vQdoXtkF83ha4fzmw0fajJvIyStXUavVqFTg5+dPqXyOrBs1LNKwelRDvmHmzJlD5syZWbFihTbJU6lUlChRgnXr1uHs7MyBAwei/EBODN5/+syiXbvZcPgoHp5fyJTOhvY1q9OhVg3MvlkNW6loERpVLE+nTp24fPkyTZo0Qa1Ws2fPHv6aM4fieXLTrkb1KO5JxFbYlIPAwECd7WHHf2pqwptMQNLdi1zoh0oJmwGfxKz5zt6ZAG3atPnhNb58+YKVlRUfDu0l+TdzCIWIrZuP3Cn1v+7UKFqYed27aLfsuv3vU1pMmYFPYCA3166MkChsOnacNmMnUip3TrpUr4ptmjT8ffsO8/cewMrSgtML/sImVao4j9XHz49SXXrwyceXKVOnUrNmTQwMDDh8+DCDBg7EUBPMhSULSWFpoR3y/d5CD0VRSJMmDUOHDo1ylWe5cuXIlSuXzrIiCVLYyl/A4PUZnt3ypHLvfnz88pVmzk7Yp0/H5YeP2H3+IgUd7Dk0axpWFhEXiAQGBTF+1RoW7tzDpy8hRZBNTdS0qlqFKd27xKrWo765q+1D9nC2skyQQ8Dv378nZ86cjBw5MsLUoTDz5s1j1KhR3Lt3D5uYzCH+5u8iuouCROx88fYmdbXaeHp6RhhJSUqSbA9gnz59IvwcGBiIj48PxsbGmJmZRSsBFCI+5bfPzuZxo2g1ejxZ2/+PEjlz4O3nxzX3x2TLkJ79M6ZE6iVq5lKJ1MmTM2H1WtrOmAOAmYma5pVdGNOpfbwkfwAbjhzD7cm/nD9/PsJWijVq1CB37twULVqUlfsPUqftkCiHfMPTaDT4+/tH2vosvFSpUuHj4xPXDyV+hH7Ih638ze5vQdtxgzBJlow7i+aSMdyinKuP3KkybDQD5y+K0MMLIUPwYzp1YHDrllx78JDg4GDyZc9OCstfnzQoikJQcHC0CpP/DqytrWnWrBmTJk0if/78lC9fXtt25swZJk6cSJMmTWKW/IF2m7iw+YCBSBIofo0k2wOoy8OHD+nWrRsDBgygatWqPzxfegDFr/Dp61fWHDzM5Xv3MTJMRvVSJajrVOaHi1Vee3zgq48PGa3TxNuwbxiX3v0wTmPN9u3bdba3bduWJ/dv4eoaVsD6xxP9ixQpQt68eVm1alWktq9fv5IzZ0569erFsGHDYhn9LxAQjOmXr/h7X8XoowWX792nVOdu7Bo5lJrFI5d1mbJlO+M3beXZrq2kjE5tuV/o/rNnTF+/ia0nTuHt54ddWms61q5F7yYNY9UDmdB7AAG8vb1p0qQJp0+fplixYuTNm5e7d+9y8eJFypYty7Zt27CwiEXyFhAovYC/iPQAJuE6gLo4ODgwefLkSL2DQuhTSktL+jRpxNqRw1gxbBCNK5aP1krl9GlSkyOTXbwnfwAfvnwlS5YsUbZnz56dj1/9Q36I5n69nTp1Yvfu3Zw6dSrCcUVRGDNmDL6+vrRt2zYWUf9KEYs+X7hzF6NkyahWRPe2eXVLlcAvICDCKu/o+PzVi0tu97j9+AkaTdzPKfvn9h1Kde7OsYuX+LNBXZb26UHlggWYsm49FXr04fNXrzi/z4TE3Nyc3bt3s2HDBqytrbl58yapU6dm3bp17Nu3L3bJXyi1WWHZIUT8Ekmj7z4GkiVLxqtXr/QdhhCJSuZ0ISVKonL50iUyZcoco8LOHTt25MiRIzRs2JBGjRpRtWpVvnz5wrp167h48SKzZs1K+GVgws3v8ve5qu3ZMTQwQKPREBAUhKmORQN+MVxQ8P7TZ4YsWsLm4yfx8w9JtLPZZmRQy+Z0qFUjDh5IyLaErc
eMJ3+WzOwbM1y7XWK7ypXoWacmFQcPZ8TS5czt93t/gU6WLBl16tShTp068XB1A3ytzHHAG38fL+kJFPEqySaAe/bsifCzoii8fv2aefPmUaZMGT1FJUTi1KFmdRoOHcHBgwepXj3iStQzZ85w6vRpli5dGqNrGhsbs3nzZhYsWMDSpUvZuHEjAM7OzuzcuZPKlSvHWfzxJ+K8vzCVihYhWKNhy5mztHWpGOlWG06eJqWl5XdL6IT5+OULFXr15YOXF0OGDqVixYp8+vSJVatW0WXKdF57fGBYO93bw8XEwfMXePrmLVsG94+0V3a+LJnpVbsms3btZWLXzol6MYpehc4H9LUyBysnDLK64Xo7ZgXDhYiuJJsA1qtXL8LPKpUKa2trKlasyIwZM/QTlBCJVK0ypahVtjStW7emW7duNGzYEENDQ3bu3Mn8+fMpX748jRo1ivF1jY2N+eOPP+jTpw+fP3/G2Ng40q49CZ7NM96+tyZ7uEMOdrbUdSrDwOWrcciYgdK5cwEhX0S3/H2WuXv2M7BV82ht1zZ9wyZef/zIqdOnsbf/b+iwfPnyODg4MGbqVFpWrUyW9LErEn3joTtpU1hR2D67zvZqRYswbuMWHr14SaEcDrG6ryQvtD6gr19uylp78/C9Pdn9H+k7KvGbSbIJYHzMjxEiqTIwMGDz2FGMWb6KJStWMGdOyApkSwtLOrRtx+hxY2NVsFmlUpEyZcq4CvfXCNBdLy7M0iEDqT1gMM4DhlIyV06yp0/HNffH3H32nCYVyzOi3Y/nN2o0GlbuP0TLVq0iJH9h/vjjDxYuWMCag4cY2aHdzz4SANRGRvj4+xMQGKhzDuoXn5Ci5Sax2A5QhGNsBAHB+FqZk8nnDPjLcLCIW0l2EcjYsWN1lpDw9fVl7NixeohIiMTN2MiICV07c3L/ZVy37+bYwUM8vHuXKdOnYfoLFqIkKKHJn8HrM1EO4aW0tOTkvDlsHjcaG+s0/PvhAwVy5uDonBmsGz2CZMl+PP/vq48PHp8/U7x4cZ3t5ubm5M2bl8evXsfu8QDVS5fAy9eP7Wf/0dm+6ugJ7DNmJGcmu1jflwiVQFdDi99Dku0BHDNmDF27dsXsm7kqPj4+jBkzhpEjR+opMiESN1MTU/IXLoKXhRmnzp3h4cOHWFpaUq1ate/W9fsRT09P1q5dy5YtW/jw4QOZM2emTZs2NGzYMOFsBxeu3p+/z1UeXvHg8PExbPD8QpZ06WhVrQrpUv9Xi9EoWTIalC9Hg/LlfuruzExMMDYy4tmzZzrbNRoNL168oIit7gQxJvJmzUqt0qXovWgpqS0tqVy4ICqVCr+AAGbt3MPmv11ZOKBfhB1oRNwIqw94yTu3zAcUcSbJJoBRbah+48YNUsVTsVwhkoJM5tc5+Y8vnQYP4N9nzzA2MiIgMBC12phuXbsxZuzYGG+X9eLFC2rWrMmzZ8+oXbs2FSpU4MqVK3Tq1Il169axdetW/fcyhvb6mZq4ccctLVPGzmX94aOkSpmSjBkzsObQEUYuW8G4zh34s0WzOLnLsARy1cqVdOnSJdIX2n379vH8xQuaugyOk/tbNWIIDYaMoOaoceSyzYidtTVX3d358OUrQ9u2omPtmnFyPyKc0PmAmvROlDVxw/W2nSSBIk4kuQQwZcqUqFQqVCoVOXLkiJAEBgcH4+XlRdeuXfUYoRCJ0zm1HcXM3bj4yoya7RpRKldO1vWZTPEcDnh8+cKi/YcYP3cufv7+TJ8+PUbXbt++PYGBgVy6dIls2bJpj//99980btyY0aNHM2XKlLh+SDFmahKyanPtlD/YevI0c+fOpXnz5hgbG/P582dmzJjB4DlzSJMiBW1rVIuT+xzUqgVluvSgYYMGTJw0iUKFCuHr68uWLVsYMngwNUqXpESe3D++kA7XHz7ilvtjTNXGuBQtSgpLC47OmcHJq9fYfOwknl5etK9Vk/Y1q5NDhn7jT2gSSOQ+CyF+WpLbCW
T16tUoikKHDh2YPXs2VlZW2jZjY2OyZMlCqVKlonUt2QlEiBDh9/dt0KQxr90f8c/MKai/GZqdtWM3g1et5c6dO9jZRS9huH79OmXLlmXDhg3UqlUrUvvYsWNZvHgxDx8+jJNCvD8tIBBTEzd2ngimad3yjBs3jp49e0Y6rW2bNty4fAm39avjbLjU9eYt2o6byLM3b0mVMiW+vr74+vnRuFIFlg7qH+Ni4HeePOF/k6dx8e497TFTtZqu9eowsev/ojU/MaYCU3mhSe9EdHaJSZICAsOVFJIVwbElO4EkwR7AsJ0DsmbNSunSpRPO3CEhfgMeHz5w9PhxFvXqFin5A+hcvQpjN25h27Zt9O3bN1rXPHv2LGq1mmrVdPeY1a9fn+nTp3Pr1q1of3mLU+GLPT/x4PWJ46hUqij3E+/YqRO1du3ipvtjCjrEzY4PZfPn48GmdRw8f4Hb7k8wURtTq0xp7G0zxvhaT169plLPvqRLmYIdIwZTuVBBPnz5yvLDR5m0ZTsfv35l2ZCBcRI3/PflQeMXuiuKJH9RCCkSbWsFga+lSLSIvSSXAIZxdnbW/t/Pz4+AgIAI7Un1G4EQsfHx40cURcEhQ3qd7RampqRLlQoPD49oXzNsmkZUgxVhJZ10zemNd2Hz/sIVe/bx88PUxCTK9xAbGxsAvHx94zQUQ0NDapUpTa0ypWN1nSnrNmCczJATk8eRKnQf4oxpUjOyZTMypE5Nt3kL6dOkEfmyZ/vBlX4sfM8xEKOdYpKc0CLRYfMBpUi0iK0ku1zLx8eHnj17kjZtWszNzUmZMmWEf0KImLOxSYuRkRGXH+rew/a9pyfP3r2L0RZu5cqVw9/fn3379uls3759O1ZWVuTPn/+nYv5p4RZ9uL731Q7L5cqcma9eXly9elXnzU6ePImhoSEOP9E7F9+CgoLZePQYnatV0SZ/4bV1qYBNyhSsP3w07u9ckr/oCX2eQopEm8q+weKnJdkEcMCAAZw4cYKFCxeiVqtZtmwZY8aMIUOGDKxZs0bf4QmRqBQzd4O3mbBKbkW9evWYt/cAH758jXTelC07UBmoaNKkSbSv7ejoiLOzM4MHD8bNzS1C26FDh1iwYAEdOnSItAI2XoWr86e54hehF6ZqiWJkSmfDyBEj8PPzi3CzV69eMWf2bOo6lcEmAVYb8PL1xcfPn9xRLOgwSpYM+/TpefPx4y+OTERgbIR232DHF5xTywIcEXNJNgHcu3cvCxYsoGHDhiRLlgwnJyeGDx/OxIkTWb9+vb7DEyJROKe2w6CICZr0TiH7lxobMWLECHyCgig7YAirj53g8Zs3uN5xo8WUGczZvZfRo8fEuB7gihUrSJEiBaVKlaJRo0YMGDCASpUq0aRJEypWrMiIESPi6RFGzdTEjUvekVfXGhoasmLoIC5evEjZMmVYvHgxhw8fZuLEiZQtUwaD4GCm9+weZ3H4+QfgevMWJ69cxeOzZ6yuZWlmiqWZGTef/Kuz3T8wkPsvXpIxTZpY3Y+IAzJXUsRSkp0D+PHjR205ieTJk/Mx9Btt2bJl6datmz5DEyLRsEENbzOBFdqhqWzZsnH0+HEG9O9Pp9nztOdmzpSJhQsX0rp165jfj40Np0+fZvPmzWzevJm///6bzJkzs3HjRmrUqBHjuoKxEhD8w1OcCxXk9Pw5TFi9jkGDBqHRaFAbG5M2RQpy2Nmy+4wrratVwSoWq5aDg4OZsHot87fv5GNob6uxkRFNKpZnRu8epPqJecyGhoa0rFqZ5YeP0qNWDWxSpojQvvTQETy+fKF19ao/HXd4xczd8PVzipNrJVW+frkp6yj1AUXMJbkyMGHy58/P3LlzcXZ2xsXFhYIFCzJ9+nT++usvpk6dyosXL354DSkDI5I6d7U9DtYG+FpZ6uyRePbsGe7u7lhaWlKoUKFfm6jFh/Dz/qI5AX/DkaP8b8oMDAwMcC5fHn9/f/7++29SWlqye8oEiv9EjT5FUeg4cQ
objhyjR+0atKpYHnMTE/ZduMjUbTvJYG3N6QV/YfkTw+Iv3r2n9P+6Ya5WM7x5E6oU/m8V8Jzd++harw5/9esT4+uGJ4s/4lCkhUhSIiY6pAxMEk4AZ82ahaGhIb179+bYsWPUrl0bRVEIDAxk5syZ9Onz4zc4SQBFUvejBPC3Ea7Ui8Hr6G/Jdcv9MSU6daV27drMnjOHFClSACFzAdu0bo37gwfcWb+K1OHqkUbHP7fvUK5bL5b90ZO2LhUjtN15+owSfwxgTKf2P73jiPvLl/SYPovjl/9byJLCwoI+TRoxtG2rWNcv/O/vxlySv7gQEIyp51f8fa5KeZhokgQwCSeA33r69ClXrlzB3t4+2qsJJQEUSV2SSADD7e8b0x6W/02eztHrN7h+4wbGxsYR2t69e0eePHkY16k9/Zo3jVFInSdN49A//1AwezbuPHuOqVpN3RLF6FqjGpnSWtN+5l9cfPiIOxtit6Dt4fMX3HnyBFO1GqcC+TEzMYnV9cIkib+bXykgGNMvX/H3lgQwuiQBTMKLQMLz8/Mjc+bMNGjQ4NeXkhBCJFwBgYAmUqmX6Dp88SKNmzSJlPwBpE2bFhcXFw5duBSjayqKwtGLl3jz6TPvNdC0TVvKVq7CkiPHKdy7H/+43aewfXaevnkbo+vq4mBnS71yTlQtUTzOkj8RD4wN8U1ujia9E4GpvGRVsIiWJJsABgcHM27cODJmzIiFhQWPHz8GYMSIESxfvlzP0Qkh9O47pV6if4mg75anMTc3JyAwMEbXXL53Py89PFi4cCEnT51i5MiRzJ49m9t37pA3X34aTZjCvefPsQ4dbhZJROhQuia9E2Ud30sSKH4oySaAEyZMYNWqVUydOjXCt3NHR0eWLVumx8hEQuDj58f5O3e5cMcNn29quYlv2DwDNNqE6Xdi8PpMrIbUiubKycGDB3W2+fn5cfz4MYrmyhnt6ymKwtztO6lVqyYtW7aM0GZlZcX8BQt49/kza4+fokUVl5+OO76cU9sRmMoLW3vL0Pl/Mvwbp2Q+pYiBJJsArlmzhiVLltCyZcsIKxMLFCjAvXv3vnNL8TvzDwhg0PxFZKrXGKeuPSnbtQeZ6zdh6KIlMe6pSQqy+z/C9bY1piahBZp/lyQwIBhTT+9YX6Zb/bpcvXqVxYsXRziu0WgYNmwYnz978r+6taN9vS/e3tx9/IR69errbM+ePTt58+YFlYqejRrEKva4dk5tRzFzNzTpQ8u+RDNZefbsGWvXrmXVqlXcvXs3HiP8jbzNFFKiSYjvSLJ1AF++fIm9feQtdDQaDYHyQZ8kBQUF02joSE5du06vOjVpVLY0CrD177PM2bKNu0/+ZfvEcYm/lMlPCg4O5tjlKzx8/gIrCwtqli5JquTJKe3/HNfbdpR1dPuvrEdiFpbE2jzj2e2CZOfny2pUL1WCP5o2ZsCAAezYvp3aderg7+/Pls2buXf/PvP+/IMcUey6oUvYfsfBwVHXIgwMDCR3lsykTxOzYtu/gjprGnz9iFby5+npSe/evdm5c6d2v2cI2RpwyZIlMdpOMGkJWV1tawWBr71kUYiIUpJNAPPkycOZM2fInDlzhOPbtm2jUKFCeopK6NOO039z6MJFDo4biUuhgtrjReyz45wvL3XGTGCP61nqO5fTX5B6cvzyFbpMmc7TN29RGxnhHxiIibExfZo0Ykyn9voOL+6Em/fn6p2b0rGsqaZSqZjaoytl8jsyf/suxo4ZQzJDQyoVLcKCnl0pWyBmi84sTE0xNDRky5YtNG/ePFK7m5sbDx48oFTePLGKOz5EKBr+A0FBQTRs2BA3NzdmzpxJk9CFNPv27WPkyJFUr16d06dPkyoBbqend8aGgCEEBKI2K4w/sjJY6JZkE8CRI0fStm1bXr58iUajYceOHdy/f581a9ZEuem8+L2t2Lefco55IyR/YaoXK0Kp3DlZsfdAkksA/7l9hzoDh+KUNzcbB/ajWA4H3n76zM
L9B5m0YRP+AQHU7z9J32HGTrhSL2G11EoTN7sqqFQq6pVzol652O94oSgKwcHBHD9+nL/++ouePXtqa/K9fv2azp06YWZiQgbrhLVVW2AqL2zTW+ILRGfm0b59+zh//jyHDh2idOnS2uMNGjSgcOHCFC9enBUrVtC/f/94iznxMwCVvmMQCVmSnQNYt25d9u7dy7FjxzA3N2fkyJG4ubmxd+9eKleurO/whB48ff2GYjkcomwvlsOBf1+//oURJQxjlq8kbyY79owapn1+bFKmYHSr5oxr3YK523Zw49ol+g6YTXkXFypWqMi4ceN4+fJlnMdy9+5djhw5wtWrV4mzEqbhSr08fK9J0L0lhoaGlMiTm8xprRk+fDiFChTgjz/+oFXLluTLl483z5/jHxBAmRj2LMYnd7U9arPCIT8YG0Vr4cfGjRspVqxYhOQvTJYsWahfvz4bN26M61B/O77JzVFnTYO7OvJ0JyGSXAL4+PFj7QeHk5MTR48e5d27d/j4+ODq6kqVKlX0HKHQl5TJk/Pv26hrpz1585ZUVt8vGPrk1WtGLVtBy1Hj6DZtJievxGGiogevPT5w/PJVetWpibFR5HlbXWtWwyhZMvp0bcmGzSfImTsXmbNkZu7cueTJk4fcuXMzceJE3n7neY2O8+fPU758eYoXL06DBg0oV64cRYoUYe/evbG6boQh39vWiWIbrV6NG/L03Xv61q9DmexZufb3aT48dmdEs8bkzWSLuakJrasmxPex6H/cvH//HgeHqL+MOTg48P79+7gI6vdlbAgY4OuXGwdrAwJTeek7IpHAJLkE8Ns3jqZNm8b6w0n8HppVrsTu8xdxf/0mUtv9Fy85cOkKzSpHXVpj8pr15GzWivnbdvLO4z2nLl+hyh/9qdynH55eifPN98OXLwDYZ8igsz25mRmpLC1wdHTEzc2NhQsXsnz5ch48eEDFihV5++YNs2bOoFTJkj+9gvP8+fPUqlULRVHYuHEjd+/eZe/evWTOnJnmzZuzbdu2mF80IBgCAjH19ObFo68hQ74/UedPH5pUqkCvRg2YtXMPt58+o1qRQhTImpnZu/Zw4cFDto4fSwrLhNGL6a62x8HxRUjJlxjImDEjt27divLL082bN2URSHQYG4KxEb5WIT2BUhtQhJfkEsBv31AOHDiAt3fsyz2IxK9djWpkTZ+OqsNGsffCRYKDgwkKDmbXufNUHzEGe9uMUfasrDt0hBFLlzO4SUOerl7K0Yljubt4HntHD+fGw0e0GTvhFz+auJEhTWoMDQy48kh3z9ibT594++kz7dq1Q63+r+xE8uTJWbFiBUZGRnSrXhUbC3NaNGsWYTVndA0ePJi8efNy8OBBatasia2tLc7OzmzdupX69eszaNCgmK3c/2bINzH0+oWnUqmY0bsH+6ZPxtbGhrUnTnHk2g1aVq3C1VXLqFi0sL5DBELm/TlYG4SuDDeIUc2/Nm3acOvWLQ4cOBCp7datW+zbt49WrVrFYbRCJD1Jbi9gAwMD3rx5Q9q0aQGwtLTkxo0bZMuWLcbXkr2Afz/P376j1ZhxnLt1B1N1SIFwX/8AnArkZ+2oYWS0to50G0VRKNC6PTnSp2Pb8MGR2jefdqXVtJlcXbWMfNlj/nemb81GjOaq230uzJlGSov/epYURaHPomUsO3IMd3d3UujYeaJdu3a8e3CPSe1a49R/CDt27IjRNIs7d+5QokQJFi9erHPV6927dylZsiRbtmyhRo0aui+iozahweszXPLOnWh6/RKTn633F55Go6FZs2YcP36cPn360LRpU4yNjdm7dy/Tpk0jc+bMHD58GHN5342e0NeAqYkbrret5e8e2QsYkuAqYJVKpa2lFf6YEAB2Nmk5vWAul+/d58z1m6hU4FyoIIW+szjE/eUr3J4+Y3I73T0SDcqUxGqBOfvP/pMoE8CxnTvi1K0nTv2HMKhxA8rmzcMLDw8W7DvINtdzVKpUSWfyB5AsWT
I0GoUSOXOQMU0azp07F60EMDAwkLlz5zJv7lwAunTpwqKFC+nzxx80aPBfgeM8efJgZGTE8+dRfKCF++Dzf+IBwDPvgmT3j7tVvuI/7mp7yjq+wNfv55M/CPmivnbtWkaPHs2CBQuYOnUqAEZGRjRq1IipU6f+0uTvypUrbNu2jc+fP5M1a1ZatmxJxowZf9n9x5qxEQQE4+uXm7LW3jx8b5/oer5F3EtyCaCiKBGGq/z8/OjatWukN5MdO3boIzyRQBTNlTPaW3T5+vsDkMJC97wro2TJsDQ1wS8gIM7i+5VyZLLj1Py/+POv+XSYNVd7PFuG9BTJmZOXL16gKEqkL1I+Pj4cOXyYTi4VAAjWaLQlS74nKCiIli1acOTIEVqWL0ftksXx8fdn9bETtGvXjgcPHjB4cEhP69OnTwkMDCRNmm/KnoSWdYHQvXw/WmBEyO8nNoWdRTS8zQRWMRvy1UWtVjNp0iSGDBnClStXCAoKokCBAtrRm1/Bx8eHdu3aceDAATJkyEDGjBnZuXMnEyZMYNSoUfTr1++XxRJrxoYQAKggk/l18E8Y80SF/iS5BLBt27YRfpZ5JCK2smVIj6WZGUeuXKN07lyR2m/9+5QXHh/Ib59dD9HFjdxZMnNg5lSevHrN41evsDQzo2iunJy9eYuKvfoyfvx4hg8frk0CAwMD+eOPP/D29qZz9SqcvnWbNx8/4lzmx7Xw1q1bx8FDh9gzahjVws1na16+HGPWb2L8xInUrFmTfPnyMWfOHKysrKhevfp/Fwjr9fP0Dp3jJx90iVny5MmpUKGCXu67R48enD59mlWrVlG3bl0MDQ35+vUrM2bMYOTIkdjY2ETak1mIxCLJzQGMSzIHUITpM+sv1h8+yskp48mX5b/dZXz9/ak7ZiL3X73i0daNGCX7/b5zTVu/kaGLlpI9e3bq1atHQEAAO7Zv5+3bt6zs15uiDvbUHDWe5ClScsbVNWJPoY4hwhLFi2NnZsLe0cMjtQUEBmLXpiPZc+Umf/78LF++nCkTJ9Oja5cI58lcp18vMJXXf/P+YrjoIyFyd3enQIEC/PXXX7Rr1y5Se+vWrblz5w7Xrl1LPNOIAoIx/fIVf2/ZHUTmACbBHkAh4sPYzh04e/MWZf8cTMsKzpTJm5sX7z1YfvgYbz0/s3fq5N8u+bvkdo9V+w/y7O1bKhcviq+/P0sWLcLX3x8LtZp6pUqw6dQZ2s2YQ+Z0NgyeOI+X7v+Vw3GwNggtDxJxWPjhw4e0j2I+pbGRERXy52PnufM8fPCAqZMm0+1//wtZ0Xv7v7Ig2f39ZI7fL3JObUdZx/do/EK30IzmvL+nT5+ybds2Pn78SKZMmWjcuHGC2trtwIEDmJqa0rRpU53t7dq1o379+ty/f59cuSL3/CdUvsnNMfAO+b3JF6Sk7ff6RNITg4ImGCQ34eFtW5lYm0RZWVhwYt5sZm/eyvK9+1l66AhqIyMaVnCmf4tmiXLxR1Q0Gg09Zsxm2Z592NnaUqBgQZ65u3PXzY3yhQvxR5OGbDhyjAfPX5Dc3JzZf/SiZdXKWJoB4V4fru/tKGvzLGTOWDiGBga8++QZ5f2/+fQZBXj+z2XMTE3xf+MaJ/v2iphzV9tTNoalXoKCghgwYADLli3D3NwcGxsbnj17xrBhwxg3bhzdunWL/8CjwcfHB3Nzc0xNTXW2h8079fX1/ZVhxY6xIQRo0KR3oqyJGw9vy2KQpEwSwDjg65cDI+Pk2NpD4GsvLnnn1nmefNv6vSU3N2dkh3aMaN8Wv4AA1EZG0Vr0kNhMXbeR5Xv3M2fOHNq0aYOhoSGKonD8+HHatW3L+qPH2TBm5A+vU9r/OZor4K6OWBswdeq0rDp6nGHNGmNmoo7QdufpM87edcPExISXXmrw0siKXj1xV9v/14sbg9W+w4YNY8WKFUyYMIH27dtjbm7O+/fvmTJlCgMGDCBlypQ0a9YsHiOPnjx58uDh4cHNmzfJnz/y1n
onTpzAxMSErFmz6iG6WAi3ItjB0Q3X29ITmFTJHMBYCJsD+OrVq5A5BOFKTugic5LiRmBQEPvOnuP24yeYqdXUKluanJky/fiGItb8AwLI2qg59Ro1YsaMGZHaV61aRZ8+fbi/aR1ZM6T/qfuYuXELQxYtoXw+Rxb07Er29OlQFIVzbvdoP2MOrz5+onHF8qwcPiS2D+e3EVa0XG1s/Mvu878E0DLa8/3evXtHrly5GDRoEAMGDIjQpihKhHl1+v7yFBQURN68ebG3t2fr1q2YmJho2x4/fkzlypWpUqUKixcv1mOUsRAQmKTnysocQEkAYyVSAggRyk98K2xV4lv8I7UlxRfgzzh55Srtxk/ilccH0qdKyVdfP7x8fWlYvhzLhgzCwkz3cI2IG+du3ca5e29OnTpF4cKRd5zw8fEhY8aMzPmjF13q1fmp+/APCCB38za8+fCBwOBgHLNkwsfPn8dv3mJlboZfYBAXli0k73d6XoKCgtl68hTL9+7n0cuXWFlY0LRieTrXqY11yhQ/FVd8ePfpE09evcHKwpycmexivJjA9cZNpm3YzKHzF9BoNOTMnJku9WrjUrQIy/fu59TVayhAmfyOdKtfj9zhFijFRti8v5ju8rFq1Sp69+6Nu7s7qVOnjtR++vRpateuzdmzZylQoECcxBobp0+fplGjRmTIkIGOHTuSKVMmLl68yJo1a0iTJg1Hjhz5pWVp4pQkgEk+AZQh4LhmbAjofjP0tTLH1gocPCN/s/X38ZKdCX7g+sNH1Bk4lDJ5crFv9HDyZcmMf2Agm06foe/i5TQfOYY90yYlnhV5iVBgUBAAFlHUPDQxMcHIKBkBoef9DLWxMacWzKF2/8Hc/fcpz995YGQU9lalYuekcd9N/gICA2k0bCQH/7mAc7lytGxXhRcvXjB53UYW797LkdnTf1mP8e3HT5i3bQdHLlwkIDCIorlz0q1BPXJmysTgBYvY9bcrwaHb4zlmy8rIDm2p71wuWtded+gIHSdNJW+ePEyYMIEUKVJw9OhRBsxbCICVmSl1S5XA0MCAHSdPs2TXXpYOGUjratHfiUWXiPP+iHby5+vry6dPnzA1NdWZ/AFkCN1z+kvoHtT65uzszPHjx5k+fTojR44kKCiIlClT0qZNG/r16xe5/qQQiYj0AMaCzh7AHwkI1nEw5APg25WMuiTlCbvNR47hxv0HXJ03C5Nvhrp2njtPk4lTObNoHiXz5tFThInb/WfPePr6LamsklM4h4POIbgPnp5kqt+EkaNG0bt370jtx48fp379+pxe8Bel8znGKh6NRsORi5c4+M8FAoKCKJIzB81cKv2wl3fUshVM37iFTZs24eLioj3+5s0b6tSujWFgANdWLYv3Lwq7/3alxehxWCdPTlPnsliYmLDnwiWuuz/G0syMlObm9GtQl7J5c/PywwcW7DvI4SvXWDyoPx1qRbGtXai3Hz+SrVFzmjRtyty5czE0/C8JO3r0KI0bN2ZK+zb8UT+kFzYgMJCeC5ew5thJLi5f/NM1KWNa6kVRFDZt2sSCBQu4du0aAIaGhgwbNow///wz0u8gbArBvXv3EtxOG35+fnh7e2NlZUWy32FFf+iUpaS6LaL0AEoCGCs/lQBGJXTo2NTT+/vn2TxLkl32AYGBWFWuwcS2rejboG6kdo1Gg33HrtQrX46ZvXvqIcLE6/K9+/z513zO3bqtPZbDzpYxnTvQqEL5SOe3nzCZAxcvcejQ4QjlLzw8PKhVsyZGQUFcWr5ILz2x/gEBZGnYjEbNmmm3DwvP1dWVGjVqcGT2dLJmyMDZm7dQFIUy+fP99JxFXd58+IhDkxbULFaENf3/wNgoZJGEoiiU7DuAVx8+cvmvmdiEG45WFIUucxew5e+zPNu19bu1Raes3cCENeu4d/8+KVOmjNTetk0b3K5e4daCOdpjQcHB2HfoStVSJVg8qH+MHk/EIV+itehDURQGDhzIwoULqVy5MvXr10
dRFLZs2cLp06fp06cP48aN057v4eFBxYoVyZUrF9u2bYtRfOInhZu3ntSqWEgCKEPACUfo0LGvleV3TtJAuL0cdfldX8Defn4EBQeTKa21znYDAwNs06Th89cfJNAigqv3H+DSqy8OGTOwaXB/iuVw4Mmbt8zetYfmI8fiM9SfNtWrRrjNjF7dud67H+WcnGjUuDGFChXi8ePHbNiwASMVHJszU2/D8A9fvMTj82fq1auns71MmTKkTpWKPrP+4t6z54R9/1WpVNQuU4rFgwaQJoVVrONYsW8/BioVi3p11yZ/AF98fLj97zPGtWkRIfkLi2FUi2asOXaSzcdP0rlOrSivf/ORO8WKFdOZ/AFUqVqVnbt2ERAYqL3/ZIaGNChTkoNXrsXosUQe8o3eit+TJ0+ycOFCZsyYQefOnbXHW7duzdy5cxk2bBgGBgZUqVKFK1eusGjRIvz9/ZkyZUqM4hOxELoimLeZyGR+VbaHS2IkAUxovjukYhiyfD90LuG3TD298ffx+i0rvCc3MyONlRVn77rRsGzpSO1ffHy49e9TqpYupYfoEq9B8xeRPX06Tk+ZoC25kimtNeXy5aXdzL8YMG8BjSuWx1T9XzmWVMmTc3r+HBZs38XKg4fYtGkjqZNb0baKC32aNMI2iiT9VzAITTyDg3VNtQjplQoMCuLfN29Y0KMrTcqVQYWKra5nGb56PVX79ufMwrmYhVvx+TP+uXWHCvnzkcIiYi/eyw8fCQgKokQU+0xnTJOaTGmtefLq1XevrzY25vN7jyjbP336hKGhAckMI76fGKgMiMmYT2AqL2zTWxJS6S5mu3ssXboUR0dHOnXqFKmtR48eLF26lFmzZjFr1iyMjY2pV68eI0eOJEuWLNEPUMSNcN/X3n36xPrDR3n86jUpLS1o6lLxu3NuReL1+xUp+90ZG4Z8a9Pxz9fKHE16JwyKmBCYyivSv3NqO31H/9MMDQ1pV7M6q46e4P6Ll5HaJ2/Zjq+/P21rVNNDdL/Oi3fvGbpoCTmatMCmZl2cuvZk9YFD2sUZMfHk1WtOXbtO/4b1ItXbU6lUDG/WmI9fvrLX9Vyk2yY3N2dwm5bc37gW35NHebF7G9N6dtNr8geQw86ODNZp2Lp1q87248eP8+XLF/7q2plO1SqT3MwMSzNTOlRx4dD4Udx+/IT1R47FOg5DQwOdC2FShiaE/759p/N23n5+vPvsSSrL7w9J1SpTipu3bnH9+vVIbcHBwaxft44axYpGmMcZHBzMznPnKVMg3w/jP6e2w6CIyX/z/YyNYry12+3bt6lUqZLO3uCwnr9cuXJx9epV/v33X1asWCHJn579tXU7WRs2Y+TSFZy9foMlu/ZQsE1HWo8Zj59/gL7DE3FMEsDfSejQjK9fbjTpnSL9K2ttirta99BxYjCwVXNs01rjPHAoEzdt5eL9hxy6fJVG4yczbdtOxv+vk94TkPh07cFDirTvxNLde6lWuBD96tXBykRNp0lTaTB4OAGBgTG63sv37wEoFMUuJQ4ZM5DczIznb9/GOvZfJVkyQ3o2rM+aNWvYvHkz4ac4P3z4kN69epHK0pK2LhUj3TZ/1ixUL1KYdYcOxzqOikWLcPLmLV59+BjhePpUqSjnmId5e/YTpKOXcvnhY/j4+9OoYvnvXr922dLkypKZNq1baxdXAHz48IFu3bpx182NP+r+N4QcFBxM3yUreO7hQY+G9b977ZAhX9MYD/l+y8TEhE+fPkXZ/vHjRywsLMiRI0eSnYOVkGzed5o//5pPl+pVeLp6KVfnzuTZ6mUs7dODXX+70nPmbH2HKOKYDAH/bqJ6sw4dOnYgZJj4exLqirCUlpacmDeHYYuXMnnrDkat2whA7syZWDV8CC2rVtZzhPEnKCiYpsNHkdXGhgNjR5DKMmSu6CAacvTqdeqNm8iUdRsY0b5ttK+ZJkUKAB69ek0uu8irz199+MhXX1/teYlFv2ZNuPPkXzp37szsWb
MoWaoUz5494/jx46iNjGju7BTlHMVcdrbsOn8x1jG0qVaVCavW0GLKDLYOG4i1VcicDf/AQFJZWnLm9gWaTJzK+LatyJPJDk9vb5YfPsqw1evpVLsmWdKn++71jZIlY9+0SdQaMARnZ2fyOTqSIkUKLl26pB3+bjVtFg3KlMJApWLXPxd44fGB+f37UjhnjgjXclfbk8n8uvbnnx3y/VatWrVYvHgx48ePJ8U3f0MfPnxg//79DBw48KevL+KOoihMXrSZOiWLM6NzB+3rw9jIiHaVK+HjH0DfJcsZ0b4NmdN9/29TJB6yCjgW4nQV8K8S8ONeorCC1Ql5QckXb28ev3qNqdqYHHYxL6Ab3r+v3+Dp7U0mm7SktPzeIhz92XPmLA2HjuDC7GkU1lHCo9fCJew8d4EnOzZjFM0SFYqiULJzNyyMjDgyYXSEUiIAA5evYvHBwzzbuRWrKOr+JVSKonD88hWW7d2P+8tXpLCwoEmlCmw5dhxVsIYjE8fovF2NEWPxVxSOz50V6xj+uX2HugOH4uPnR/WihTE3MeHI1et4fPlCp9o12X3GlXefPpMmeXK++PoQHKyhc51azOzTM9q/w7Bdcfad/Qf/gAAKONjTtkY13n36zIIduzh99Zp2lXP3hvUplMMh4u1TeaE2KxyynVt4P9nrF97Lly8pXrw4efPmZdGiRdrhXXd3dzp37szjx4+5fPmyzkLKwcHBHDlyhL///htFUShVqhQ1a9b8PcqvJDQBwfx79SqOLhXYN2Y4VYtELvDu7eeHTfO2TOz2P3o3bqiHIOOerAKWHsCkJxpv7GGLTAx0TIT3f+KRIHoIk5ubU9AhdsPZB/85z7iVa7jkdg8I+bbbsHw5JnTpjJ1Nwqruf+HOXeys0+hM/gDqlyrJov2H+Pf1Gxx09ObpolKpGP+/jtQeMIRGE6YwsmUzCmbLyvP3HszZvZe/du9j/P86JbrkD0Iem0uxorgUKxrhuLmJCW3HTcT1jhtl80bcs/v8vfscvXad5UMHxUkMpRzzcnfDGlbuP8Ch8xd58+UrTStXoku9OuTKnIlZfXqy9+w5Hj1/iaW5GfXKlSWjdcymMBglS0Z953KRikenTZmSBf37Rnm7c2o7ipm7oUnvFNLbFwcJ37cyZszIjh07aNq0KQUKFKBw4cJoNBquXbuGjY0Nu3bt0pn8PXjwgCZNmvDo0SOyZMmCgYEB8+bNI3PmzGzatIl8+X48h1HEjJd3SPUEmyhWlZubmGBpaoq3r++vDEvEM0kARWTGRhAQ+N8coPDSQ1mTkA3EddF3YhhdG44co934SZRzzMumwf2xtU7DuTtuzNm9j3LdenFm0bwENZ8wbFGBoig6ezv9AkMmaH+76vNHKhcvxpbxY+gz6y+K9+lPMkNDgoKDsTQzY2LXzvRv0SxO4k8oGlVwZunuvdQZPZ6hTRvRuFwZDFQGbDtzlvGbtlLKMS9NK1WIs/tLk8KKAS2bM6Bl80htamNjnXUW40vYIjAb1JR1fIGvX7gFHvGkRIkS3L17l61bt3L27FlUKhVdunShYcOGmJpGLuj9+fNnateujaWlJSdOnKBo0ZAE/ubNm/Ts2ZPatWtz4cIFbGxs4i3mpCh93twYGSXj5I1bFMwWecXvzSf/4vHlC7kyy57rvxMZAo6FRDkEHFvhC1bbPIvUnBiKiXr5+JK5QWNqFivKqn69I6yUfPXhI6X6DaRC0SKsGj5Ej1FG9Pe1G1Tq3ZdD40dRqWDkPVJbT5vF5UePubNhtc4dPH4kKCiYo5cu8eT1G1InT07N0qV+232VvXx86ffXPNYfOaZdOGNsZEQzl4rM/qMXlmZmeo4wboUVcdZ6myl0yDd2c/ziw9y5cxk5ciQ3btzA1jZiT7aHhwf58uWjT58+DB06VE8R/qYCAunwv86cOXEC1+mTItRb9Q8MpMG4Sdx+9pxHWzdGe3pCQidDwJIAxkqSTADDfGcuYdjWQmESWq/gqv0H6T
J1Bg+WLSCzjiGoGdt3MXLdRl7t2Z5ghj8VRaFU5254fPrEntHDyZMppDdHo9Gw6MBh+ixayl/9+tCtfuRdUoRuHp89uXDnLgoKxfPkJm0Uw1+JmbvaHgdrg3iZ4xcfKlWqhI2NDevWrdPZ3q1bN65du8bFi7FfqCMiev30BS7VKuP9xZMu1atSMldO/n33jkX7D/Hw1Wt2TR5P5eLF9B1mnJEEUIaAxc+KcrVxYEjJmXBb2iW04tSPXrzELk1qnckfQKk8uQgIDOTF+/cJJgFUqVRsnTCW6v0GULDHH1TIn4+MaVJz9s5dHr95S/cG9ehar06c3d+Ld+85fvkKAUFBFM2VM9Ligd9BmhRW1Czz+xYOj00RZ33x9vbWOS8wTLp06fD2lt1+4kP6zLacOHKUqTOGMGv9fnw2bUWlUlGzVEmWDB1E8Tw6pgSJRE0SQBG3QrcW+m9LOw1YOWGQNWSvyW+9xf+X9xCmsLDA48tXfPz8IxVABnj+LmSHBSvzhJH8hbGzSculFUvYcvwk206c4tHbdzgVLsSq2jUp5Zg3Tu7D29eXHjNms+nocYI1GlQqVUjvo2NeVg4fTPaMGePkfkTcC1/j08HxBRq/QiE/JNDePl1y5MiBq6trlHNdT58+Tc6cundREbFnY2PDgr8G0LDtOBze3SG5udl396QWiZsUghZxz9gw3L//ilM7WBtE+qeP4tQNKpTDx9+fVcdORGoLDg5m4f6DlM7nmKAWgYQxVatpW6Mae6dP5u+Fc1k2ZGCcJX8ajYaGQ0ew++8zzOjcgQ9b1uGzawvbhg3C4+NHKvXsy9uPH398IfFLhe3aEf51FbKAyyBRJX8AHTt25N69e6xevTpS2/bt27l8+TIdOnTQQ2Rxy9XVlRYtWmBnZ4ednR1NmjTh1KlT+g5LS61WY5vWWpK/35z0AIr4F5YEWumqsafRFqd+5l0wUmt8LCjJliED7WpUY8DylWgUDe0rV8LcxITHb94wYvV6/rl3n33TJsX5/SZ0Ry9d5vjlq5FqgdUtVYJiORzI160387btZNz/OuoxyqQrqi9KISt6c4MVaL/Tx9Nwb1Q9c3GlXLlydOzYkd69e3Pq1CkaNWqEgYEBu3btYsuWLTRt2pQaNWrE2/3/CvPnz2fQoEHkyZOH7t27ax9frVq1GDt2LP369dN3iCKJkEUgsZCkF4HEpe8sKDE1ccP1tnWcDxMHBAbSa+YcVu4/iKmxMaksLXj54SNW5ubM79+XJnFYCiSxaD12ArcfPuLq3Jk6P+R7LVzC/ktXeLx9c7zcf1BQMIaGBvGaYCRWURZs1oq/OX5v3rxh3rx5bNywgXfv35MhfXpatW5N9+7dSZ06dZzfn6IoLF++nPn/b+++w5o8vz6Af8PeCIg4AFFBxWLdteKkVgFHHXUPUJy46ni12uGotljr3nXWwXLjtmqdVX9ulIoWERdFQLGMEBIgz/sHEEGCZYeQ7+e6cl3luZOHA0U4ucc569YhIiICAODg4ABfX1+MHz8+X8FydRIaGoq2bdtiypQpWLhwoeJnXRAE/Pjjj1iyZAnOnj2L1q1bqyZAWXqZ/c6taHgIhAlgiTABLGPZiaFhohjS1NuKy+8Xos75ES5O4vA05hUOXriIxBQx6tnWwpedOsBISQFsdZealoY9Z8/h1qO/oaejA49PP0Hnli3ylIzxnDYT5vp6CJozU+k91hw+hjnbdyLlj5L3ys2RnpGBzYeP4teQI3jwJAp6urro2c4V0wcN0LhN5znFmd+nX6dqifvyfkhcXBzOnTsHqVSKJk2aoEmTd2WGnjx5Ag93d6QmJ2P4Zx3R0M4WoU+eIuD8RdhUr45Tp0+jehm1BhMEATExMRAEATVq1ChWeaOKZuLEiThz5gzu37+fr6uJXC5Hs2bN8Mknn2Dr1q2qCZAJoEbhEjBVXIoDJcaAeXvF5ZxC1Ab3zmJZYDCO/nkVEqkULvXqYlyvnhjVozt0dAo3S+BQozqmDR
pQVl+BSgiCgKthf+HR8xcwMTSEga4uxvy8FAlJSWjk7AyxOAWr9+5HEycnHPRbqOh6YmtTDZfv3IVcLlf6x/bukyewK8V9kekZGej37Vyc+t8NfNGzJ3ynTcfbt2+xe9cudJz4FfznfYe+nTr8940qgUh9x7zFmXORpGX/Ryknf2lpaZg1axZ27dqF9PR3s/CtWrXCr7/+ivr162PcuLEw0hLh2vqVqGFpqXjOjC97we3r7zF16lQEBQWValw5RCIRatasWSb3VpVbt27Bw8NDaUs7LS0tdO/eHSdOnFBBZKSJmABSxaanDSBXMifLhCTNGa/v7MegSZNgZ2eHGTNnwtLSEn+cPYspK1bj5P+uY8/C+ZWmYGlR3Ah/iDF+S/BX1FPFNW0tLdja2eH0uXNwdHTMShCvXsXYMWPQ7f++xq1tm6Cnqwvvbh747dgJBF64hKFuHfPc9+GLl9hz8U98N8Kr1GJdu+8gfr9+E/v27UPnzp0V1ydNmoTRo0dj5I+L0al5U1iq+bvzgmb2cnMyyn1wo2yXOAVBwIULFzBu3DjEx8fDzs4Onp6eePPmDS5fvoy//voLbdu2xdKlS3H16jXs+WZWnuQPAOpWr47vBvfHpPWb8PLly3xFm0k5HR0dpKWlFTgukUjUeomb1Iv6z6mTZtHTxltxCrymf4Vunt1w48ZNzJw5E6NGjYJ/QACCg4Nx4ur/sP7AIVVHWu7uRz5B169mwEhXFycXzYMkZC+iftuE//uyN168eIFNmzYByJpZcXV1RWBQEB4+fYYDFy4CANo2dsGgzztj9Mq1mLN9J/569hxPY+Ow5vAxdJ7zPerVqonxfUqn1qAgCPg15Aj69u2bJ/kDsv5ILlmyBOmZmdh18vdS+XyqEqnviHbWhpDXaP/Bh8TcOGuGr4yTP7lcjgkTJqBHjx4wNDTE0KFDUbduXaxfvx4HDx5Ehw4dMGTIEBgaGmLSpEkAgG6tWii9V49PWkIul+PevXtlGnNl0rlzZxw5cgQpKSn5xiQSCQ4dOoQuXbqoIDLSRJo3RUJqLyAgADKZDEuXLYOubt5lMXd3d/Tp0we/njiK7sO+hqMsUkVRlr+F23aghkUV/P7jfJhk91m1rVoVi7yHwdLUBLN//RUTJkyAg4MDAKBx48b4pFUrHLpwCYM+7wyRSITt386GQ43q2HDgEJbuPwQgq7/wl506YOXUKaVWGDtFIkHky5eY07Wr0vFq1aqhebNmuBtR8doKplumQL9O1UI91zatYhViXrduHXbv3o3169dj6NChin2zERER6NOnD8LDw3H+/Hn4+fmhe/fuuH79OlIkadDXzb/8nJSa9ZXp6emV69egzkaPHo0NGzZgxIgR2Lx5Myyyu88kJibC19cXKSkpGDNmjIqjJE3BBJDUTmhoKJo1a1ZgQ3gPDw/s27cPNYwkiBQ5IhZSxVhl3dicJBbj8OU/8cvokYrkL7dx3TywKGgf9u7di5kz3x3yqGZjg5T4OMXHOjraWDh2FGYPH4LrD8Ihy8hAE0dHVLeyzHfPktDVzvrVk5SUVOBzkpKSoG+jmlqMV/TtlF5v5xIPeVqzd/vyCqOC1OLLzMzE+vXrMWTIEAwbNizPmJOTE1avXo3evXvjypUraNu2LbZu3YomH3+MHWfOYnrf3vnut/PMOZiZmuLTTz8tp69A/dnZ2SEwMBBDhgxBw4YN8dlnn0FLSwtnz56FIAjYtWsXHB3Lty4qaS4mgKR29PT0kJycXOB4zliGlTlsDQzgZPBSMRYR5lgmtQVVLSEpGZlyOZztlHfqMDYwgF01a8TFvUv20tLScPXKFQzv0jn/8w0N4ZarFmBpM9DXw+etWsJ/9274+PjkO3Ry8+ZNhD98iB+9h5ZZDAXJOZCRT6x9ue3TKwtPnjzBixcv0L9/f6XjnTp1QtWqVXHhwgW0bdsWtWvXRt169TB3VyDqVK+O3m1aQyQSITMzE/7nL2L5wRBMnTYNJhWkXaK66N
y5M+7du4edO3fi4sWLEAQB06dPx4gRI8rsRDWRMkwASe24u7vjt99+w61bt9CiRd79SYIgwN/fH25ubjAwM1UcGslh6wikx6TghvjdtcowK1jV3Bx6uroIffIUnzdrmm88USzG01exGJTdyk0QBCxZsgRvEhIw+ovu5Rxtlv8bMhAe02ZixowZWLBggaIUQ2hoKHxGjkSjunXQrU3Bs0sJSUmIinkFU0NDONnZFrt+YO7ZvlbG4Vn9c9OUlKDJKbSshskfkLX/D4DSE6hA1t5QHR0dxfMAoEWLFnjz+jUG/LQETra10KBmDYQ9f5H1szRoEL777rtyib2ysbGxwcyZM/PMxhOVNyaApHY8PT3RoEEDjBw5EgEBAXBxcQEApKSkYN68ebh58yZCQkKynpzvFHE65DXao13iu4byEfHqPytoYmSILzt1wPqjJzCya2dYmubturLy0BGkSaWwsrJCcHAwAgICcO7cOfw4bjQa2NurJObOLVtgw8zpmLxiFYKDgvBJ69Z4m5CAO3fvoqFDbRz++UelJyJfxsVjzoZN2H/+AtIzMgAAH9VxwHcjvdDPrVORYki3TEE7o3dL5pKcckMVZNm2NNWpUwfVqlVDSEgIOnTIX17n+vXrePXqlWJJNzU1FadOnYKXlxc8PT0RGBiIuNhYfObyMYYPH46WLVuyaDeRGmMh6BJgIWjVefbsGXr37o2IiAi0aNECVlZWuHr1KlJTU7Fs2TKMHj264BfLMnN9kDXboRVzKU8rOnVMCCOjo9F27ERYm5ni28ED0OljF8QkvMXGYyex7fczMDM2RpI4K/Ft1cgZMwYNwJfvlXtRhej4eGw7ehz3I5/AQE8fvdq3xRft2yot4/PP69doP34SMjIyMK1PL3Rs/BFiEhKw8dhJnLh5G2tnTMW43gWfVM7dTs0pp4UagDwFEdR0hq8wfvrpJ/zyyy8ICAiAu7u74npcXBx69eoFmUyGGzduQCaTwdfXF0eOHMGtW7dQp04dFUZN5YaFoDUKE8ASYAKoWjKZDCEhITh27BjS0tLQqFEjjBw5EnZ2yjfwF3yjdx1HckhTb0M3Qf32NoU/fYYpy1fh/J27ims2FhaYNXwIJvTphfh/E6GnqwMrc3PVBVkC435eimN/XsG1FUtgW/XdSVxBEDB5wybsOHMOzw7uUVo7MKedWg5FW7VKONtXEJlMhqFDh+LEiRPo0KED2rRpg+joaOzduxdyuRxffPEFzMzMcPjwYSQlJWH79u3o3bu3qsOm8sIEUKMwASwBJoCVjGJmMGtWUJ1/Ef79/AUePX8OE0NDtP24MfSUlPEoTzGv3yD47B+Ie/sWNatWxcDOn8HaokqR7iGWSFCjZ1/M7t8X3wzKf5Ah7t9/4TBiLH6Z5IuJX/ZRXL+ib4d2LvH5Z/sq8Uzfh2RkZODAgQPYvn07IiMjYWpqivbt2yM1NRV3796FSCRCx44dMXbsWJ5I1TRMADUK9wAS5VAkBNqALB2SNGe0sxar5R7B+vZ2qG9fxJnQMiAIAuZu3oalgcHQ0dZGjerVER0Tg6/X/4q5I70xa9jgQu8je5WQAIlUik8bNlA6Xq1KFTjWqI6of2IU17IKMZdflw11oKOjgwEDBmDAgKK3QHz06BGePXsGCwsLtGjRolL05yXSVEwAiZTJ1YfY1jzr5LA6Lgmr2uJd/li8yx9z5syBr68vqlSpgjdv3mDVqlX4buVKmBgZ5pmt+5Aq2eVGnsXFKx2Xpqcj5u2/MLXWQ7plVqcF2xoVqxCzMgX1Xi6ssLAw3LlzB/r6+ujUqROqVatWitFluXv3LmbOnImrV68qrtWtWxffffddsRJJIlI9vn0jKoietmJ/mLxGe2i1MCiwQDDll5IqwdKAYEycOBFz5sxBlSpVAABWVlb44YcfMHz4cCzeFQBZenqh7mdlbo4urVpiw7ETitO/ue06ex7/pqSgr9dURYs1AOXSYq2onj59imnTpsHW1hZmZmZwdn
bG4sWLP1gY+32RkZHo0qULPv30U/j6+sLHxwcNGjTA5MmTP9hvtqju3bsHDw8PiMVi7NixAw8ePMDJkyfx0UcfwcfHBzt27Ci1z5VDEATcvHkTBw4cwPnz55Gh5P83EZUME0Ci/5KdBGYtCRsi3TJF8WBCWLAzN28iSSzGuHHjlI6PGzcOr968weXQ+4W+57cjvHD/6TP0/3EJHjzP2qOUIpFg7ZFjmLppKwYPGID6HzXKTvp0K+QBj9DQULRv3x6HDx/G6NGjsWbNGnTu3BnLly9Hly5dkJCQ8J/3ePXqFTw8PBAfHw9/f3+8fv0aUVFRmDdvHgIDA+Hl5YXS2t793Xffwd7eHqdOnUKfPn1ga2sLV1dX7N69G8OHD8c333wDsVj83zcqpEuXLuGTTz5Bp06d4OXlhR49eqBRo0bYvXt3oV7/4MED7N69G0FBQYiJifnvFxBpKC4BExVGriVh5NSKA9DOILzSdhcpqcSUrKSgVi3l3UlyrielFj55aPuxC/b/tBBjF/+CJhO+gnWVKkhOTYUsIwPDBg/GitWrSx54GRIEASNHjoSDgwNCQkIUs6Le3t6YMGECPDw88M0332Djxo0fvM+aNWsgkUhw8eJFRUtEKysrTJkyBbVr18bw4cMVLd1K4uXLl/jjjz+wceNGGBsb5xkTiUSYNWsWdu/ejSNHjmDQoEEl+lxyuRzffvst1q1bh5YtW+LQoUNo3rw5oqKisHbtWowfPx5SqRSjRo1S+vrnz59j3LhxuHTpkuJazn7HFStW5IufSNMxASQqrHxFpbO6jDhZiyFN5R7B9znZ2QIArly5go4d89cb/PPPP/M8r7C6uX6KJ/uDcPjynzgdnQiXeoCnxyjY1a1b8qDL2IULF/D333/jxIkTiuQvR8OGDTFlyhQsXrwYfn5+sLCwKPA+gYGBGDp0qNJ+2F988QXq1auHwMDAEieA0dHRAIAmTZooHa9duzYsLCzw8qWS1nnZUlNTsWPHDuzauRMvX7yAlZUVBgwahDFjxqBqdikfuVyOMWPGYN++fWjSpAmOHz8OPT09AECzZs2wZcsWGBkZYd68eRg8eDCMjIzyfI74+Hh4eHhAS0sLO3bsQLdu3SCRSBAYGIgffvgBMTExCAkJ4aGVgsgykVP9QBr1Gq7S0ttCQBUX/zUQFVf2HkGJuTH3CCrRxuUjNKpbBwt/+AESiSTPWFJSEn5evBhtGrvgo2IUGdbT1UU/t06YNWoGvuo/E3a26vF9Dw0NhYmJCVxdXZWOe3h4IC0tDY8ePSrwHoIgIC4uDg0aKD8NLRKJ4OTkhNjY2BLHm5OgPX6sfIY7Li4OiYmJiue9LzExEZ4eHpgzezaczE3xf717oF09B6xYtgzt27XD06dPAQAHDhxAcHAwMjMzMW3aNEXyl/trmjFjBv79918cPXo03+f59ddfkZCQgOPHj6NPnz7Q19dHlSpV4Ovri507d+LcuXM4c+ZMCb4TlVh28meYKMbLx8l8I6tBmAASlVTuPYIu8Xm6TWgykUiEjTOn4/79e+jUsSN+++03XL16FZs3b0bHjh3wLCoKa6Z/Vax7X9G3Q7plCmwdTbOW5SvgXj9l9PT0IJPJCjykkXMIRF9fv8B7iEQi1KxZE/fu3VM6npmZiQcPHqBmzZoljrdevXpo2bIl1q5dq/Qgxrp166Cnp4devXopff2c2bMR+fffuLL8ZwTN/j/M7NcXm6ZMxF8bV0NPnonRPj4AgC1btqBx48YAgI8++kjpvRwcHGBiYoJXr17lGwsKCkL//v1ha5t/Nvnzzz+Hi4sLAgMDC/11axrDJDGkqbe5lUXDMAEkKg16ugC0speEtRRlSDRdG5ePcH7tKtSxrIKvvvoK7u7umDVrFhrb1sLFDavRxLFeke8Zqe+Idi7xeU/5qgl3d3fIZDLs27dP6fju3btRs2ZNRTJUkGHDhiE4OFgxg5ZbUFAQnj9/Di8vr9IIGfPnz8fNmzcxYMAA3L
p1C4Ig4MWLF5gzZw5WrFiBGTNmKF2uTkhIQPCeYMzq1xvN6uVdnretWhU/j/TCtevXcffuXfz111+K/sQPHz5UGsfz58+RkpKC6tWr5xuLi4tD/fr1lb5OJBKhfv36iIuLK+qXTlSpcQ8gUWnJ3iOYc1BEq45mVNT/L80b1EfIzz8h/u2/iP/3X9hYWhS7FV2kviOc1Liwc926ddG7d2/MmTMHNjY26NKlC0QiEdLT07F582bs3LkTixcvho6SPsi5TZw4Efv27YOHhwdmz54NDw8PJCcnY/fu3Vi9ejUGDx6MFi1alErMnTp1wp49ezB16lS4ublBW1sbmZmZMDU1xfz58zFjxgylr7t37x6kUhm++LS10vHun7SEjrY2rl+/DgMDA2hpaaFFixZYtWoVPDw8oPte95oVK1bAzMwM3bt3z3evmjVr4v595afJBUHA/fv30apVqyJ+5USVGxNAotKmp5vVXzjWHjbZG6sJsLaoUuT2b7lltXR7mZX8qdGs3/s2bNiAwYMHo1+/fqhfvz4cHBwQGhqK2NhYTJgwARMnTvzPe1hZWeHUqVOYOnUqpk6dCrk86+fMzMwM06ZNw3fffVeqMbu7uyMsLAznzp1DVFQULCws4O7uDlNT0wJfo62dlZxLC6jzmJ6RAbkgQEdHBz169EBwcDA2btyIgQMHom/fvpg9e7biFPCaNWvg7++PX375Relp3uHDh8PPzw8zZsyAk5NTnrGDBw8iIiICq1atKsF3gKjyYS/gEmAvYCqQLBOGicmIiJdzX00pydPTV40TQCBrVur8+fPYs2cPEhISULt2bXh5ecHFxaXI93rx4gXu3bsHfX19tGnTpsKUO0lNTUV9JyeM6uyGxT75l6N/O30WY1atw71795CZmYm2bdvi008/xZAhQ7B48eI8B090dXWxaNGiApPjxMREdO7cGW/evMHXX3+N7t27IzU1FYGBgVi1ahW6d++OXbt2FbrtoEaRZcIwKRlS8W2NOgDCXsBMAEuECSAVKNfJOiaBJZcn+QPUPgHUFPPnz8fKFSuwa+Y0fNm2jSIBuxL+EH0W+qFth44IDAoCAJw7dw5eXl5ITExE06ZN8fbtW0RFRcHGxgYhISH/mRzHx8dj+vTpOHz4MDIzMwEAJiYmGDVqFObPn59vSZmyMQHU2L/fTABLgAkg/SdZ1vKXVswl3BA7a/x+wOJQ7Pszz57ZYvKnNtLT0+Hj44ODBw+iUW17NK9XF49jXuFa+EN80qoVDhw8mKceYkpKCoKDg3Hjxg3o6OjAzc0NX3zxRZGSt3/++QehoaHQ1dVF69atP7hMTWACyASQioMJIBVKdhJoaBCOiDBbzgYWUbplCvSNm0NiZqp2hz7o3XL3zp078eLZM1S1tsbAQYPQo0cPzsqpWq6VCmkqE0BNw0MgRGUtp41cdteQiHi2jiPNIRKJ4ObmBjc3N1WHQrnlfmP62Bb1pJqT/FEW1gEkKg962uA/NyKqSAwNskpV8Q2pZuJfJKJyJDE3hr3xXVWHoTZyWutJzCrGyVYiosqCCSBRecnev8a+wYWTc/L3XccP7v8jIiot3ANIVJ6yi0Rn9Q0Ox+UwO54MViJv2Zey7/iRkpKCPXv24O7du9DT04OHhwc+++wzaGnxPTIRVU5MAInKW06nECqcMk7+fv/9d4wcORLJyclo3LgxkpKSsHHjRjRp0gR79uxBrVq1yvTzq6vU1FTs3bsXp0+fRkZGBlq0aAEvLy/Y2NioOjQiKgS+vSVSlVh7tDIOV3UUhZKZmaloN1aZ3L9/H4MHD4arqyvCwsJw6dIl3L17F6dOnUJCQgJ69+6N9AJamWmysLAwNG3aFJMmTUJcXBwkEgmWLFmCRo0a4eDBg6oOj/6LLFPVEVAFwASQSBX0dCExN67Q+wEFQcCuk7+jzRhfGHTqAiO3rujxf1/j7M1bqg6t1KxatQo1atTAzp07YWtrCyCrbEmbNm3g7++P8PBwHD16VM
VRVizJycno06cPqlatijt37uDkyZM4ePAgHj58iB49emDkyJG4e/euqsOkgsjSoehSFGbLLSgajAkgkarkdLSItYcN9FUby3sEQcD4Jcvg8+NiVDU2xvpJ47Fk1Ai8TngLj2kzsW5/5ZjlOXLkCIYOHQp9/fzf/6ZNm6JZs2Y4fPiwCiKruIKCghAbG4uAgADUrVtXcd3CwgKbNm2Cra0t1q1bp8IIqUDZM3+GBuG4HC9h+RcNxz2ARCpVMd+DHTh/EduOHsfWaZPh1fldAd/JX3THzC2/Ydqqtfi8VQs0sLdXYZQlIwgCUlNTYW1tXeBzrK2tIZFIyjGqiu/kyZPo2LEj7JX8v9fV1cXAgQOxadMmFUSmfmQyGQ4dOoTTp09DJpOhWbNmGDZsGKpWrVpmn9MwSQxpzGu4StPK7HOQeqiYf32ISKU2HDyE9i6N8iR/QNby6CLvobAyNcXmEPVeGhWJRGjYsCEuXLigdFwsFuN///sfGjZsWM6RVWzp6ekf7K9rZmYGmUxWjhGpp4iICLRo0QI+Pj54+PAh4uLi8MMPP8DZ2RmHDh1SdXikAZgAElE+oRGR8GjRXOmYgZ4eOjf9GHcjIso5qtLn4+ODw4cP4/z583muC4KAn376CcnJyRgxYoRKYquomjZtiosXLyI1NVXp+MmTJ9GkSZNyjkq9SCQS9O7dG7q6urh27RouXLiAY8eO4dGjR/D09MSIESNw584dVYdJlRwTQCLKR19XF4li5X/gAeBfsRgGenrlGFHZGDVqFNzc3NCvXz9MnDgRR44cgb+/P7p37441a9bAz88PDg4Oqg6zQvHx8UFycjK+/fbbfCfD/f39cenSJfTv319F0amH/fv349mzZwgMDESjRo0U162srLB582bY29tjzZo1KoyQNAETQCLKp5vrpwg4fwFSJSVQnsfF48ydUHRzbaOCyEqXnp4e9uzZg2+//Rbnzp3D0KFD4evri4yMDAQHB2PixImqDrHCcXBwwNKlS7F161a0bt0aq1atwubNm9G7d2/4+vrC2NgYa9asQWJioqpDrbCOHz+ONm3awMnJKd+Yrq4uhgwZghMnTqggMtIkTACJKJ8pA/ohLjEJw5YsR3yuP+QR0f+g76LFqGZRBcPcu6gwwtKjp6eHGTNm4MGDB3jy5AlevnyJM2fOoHv37qoOrcIyNs7qzWxtbY1FixZh5syZSEpKwqZNm3D58mW8fPkSO3fuVHGUFZdUKkWVKlUKHDc3N0daWikf0sgu/wIBeC5uWrr3JrXEU8BElI9L3ToIXjgPw+YvgsOIMWjr7AyJTIZrDx+hVtWqOLr0Z5hlJwGVhZaWFqpVq6bqMNRCUFAQ3NzcEBISAkEQAGQdqsnRs2dPBAUFYfLkyaoKsUJr3LgxNm3aBLFYrEimczt9+jQ+/vjj0vuE2Z2HDA3CcfmxNWv/EQDOABJRAXq0dUXkvkAsGjsalpYWqF2rJrbMmYXwoF342LFemX5uV+kLSKNewzBRnPXHi50LKpTXr18ragCKRKI8yR8A1KtXD/Hx8aoITS34+PhALBbj+++/z7eP8vDhwzh9+jRGjx5dqp/T0CAcl8OY/NE7nAEkogJZmZtj2qABKvncugkmiBDL4QQxJObGgAwl6guckJCAgIAA3Lp1C3p6eujatSt69uwJvUpwmKW82drafrDbx927d2FnV/G621QU9vb2WLFiBaZMmYKbN29i0KBBMDU1xYkTJ3Ds2DH07dsXQ4cOVXWYVMlxBpCIKqx60seQpt6GYZK4RPc5cuQIGjZsiLlz5yI6OhphYWHw9vZGy5Yt8fgxuyEUlbe3N27duoVTp07lG7t+/TpOnz4Nb29vFUSmPnx8fHD06FFUq1YN33zzDSZOnIioqCisXLkS27Ztg5YW/zxT2eIMIBGVuYyMTCRLUmFqaAQdneLP4hVHaGgovLy80L17dyxbtkzR+SMsLAwjRoxA7969cePGDRgaGpZrXOrMw8MD7u
7uGDZsGCZPnoz+/ftDR0cHhw4dwsqVK9G6dWsMHDhQ1WFWeJ06dUKnTp2QmZmJzMxMzkZTueJbDCIqM89jYzFx6QpYd/sC1br1QlXPnvD9ZTmexrwqtxjWrFmDWrVqYevWrXnavrm4uCAwMBBPnz7FgQMHyi2eykBbWxsBAQEYO3Ysfv31V7Ru3RotWrTA0qVL0b9/fxw6dEhpf2VSTltbm8kflTvOABJRmYh48RJuE78CIGBq755wqW2PB89fYPPJ3xFy8TLOrlkBZ4fahbuZAAByAEWfPTxx4gQmTpwIXV3dfGNOTk5wdXXF0aNHueeqiPT19eHn54dvvvkGd+7cgVwuR5MmTWBhYaHq0IioEJgAElGZ8P1lGcyNDHHu50Woll3z7EsAE3p0g9vX32L8kmW4sH71f95HN8EEUtyGIZpnHQaBVpEOg0gkkg/WXLOwsIBUKi30/SgvU1NTdOjQQdVhEFERcQmYSFVkmcia1ap8Hj57jgt3QjFv6CBF8pfDyswUC4YPwZX7Ybj3OLJQ99NNMEFEvByGBuEo6vescePGOHPmjNIxsViMy5cvo3HjxkW6JxGRumMCSKQK2VX5DRPFiIiXo560cp1EDXvyBADQpXlTpeNdmjUBAPwV9bTQ94xF8WbpRo8ejdOnT+PIkSN5rguCgAULFiA5ORkjR44s1r2JiNQVl4CJVKQyV+U30jcAALxOTIKFiUm+8TfJyQAAQ/1ibnyXpQN6+ff0KTNkyBCcPn0aw4cPR8+ePeHh4QGxWIzAwEDcunULy5cvh4ODQ/HiICJSU5wBJKJS16FpE5gZG2HrKeVLr1tPnYGJoSE+a9G80Pd0lb6A/FYatGIuZV0oZIcQbW1tbN++HcuWLUNERAR8fX0xc+ZMWFhY4PDhwxg7dmyhYyAiqiw4A0hEpc7EyBCT+30Jv13+sLOuitEeXaCvqwtZejq2n/4DS/YewIzBA4vVT1g3wQSR4mQ4WWtldwiR/+dsoLa2NsaMGYMxY8YgLS0N2traSk8Fk/pITU3F27dvUaVKFaX9dInow5gAElGZ+H6kF14nJmLqr1vwY9BeONWqgcf/xCDu30T49OiGH8b4FPve9aSPcTneDu1snkOS5lykJWEDA4Nif15SvaioKPj5+WH//v2QSqXQ0dFBr169MHv2bDg7O6s6PCK1IRIEQVB1EOoqKSkJ5ubm+Oeff2BmZqbqcEgdZJ/8NUwUQ5p6G7oJ+ffHVTbhT59h98nf8c+bN6hhZYlh7l3RqI5Dqdz7ir4d2rnEZyWBhUwASX09evQIXbt2hYGBAcaMGYPGjRvj0aNH2Lx5M968eYOjR4+iefPCbyvQGLL0rD3HYZVzz3FxJInFsPLoicTERI39+80EsASYAFKR5CR/BuGICLOtdCd/VaGsEsCoqChs2LABISEhSElJQf369TFq1CgMGjQIOjpcOFGVbt264dWrVzh16hSsrKwU15OTk9GjRw9kZGTgypUrEIlEKoyyAmICmA8TQC4BE5Urw0QxIh4z+SstrtIXiAhzhJO1uFhFopW5du0a+vbtCz09PQwcOBA2Nja4dOkSfH19ERISgoCAAO4fVIGIiAhcvHgR27Zty5P8AVnFqOfOnYs+ffrgxo0b+OSTT1QUpeoIgoAbN27g9OnTSE9PR/PmzdGtWze+YaEC8RQwUbmpnEWfVS1rP6DkXZFoWXqx7yWVSjFs2DC4uLggNDQUfn5+mDp1Kvbv3499+/bhzJkzWLVqVekFT4X26NEjACiw60jHjh0BAA8fPiy3mCqK2NhYdO3aFZ999hk2bdqEoKAgDBkyBC4uLrhx44aqw6MKigkgUVmTZSqSEmnqbc7+lQFX6QtcDrPOWyKmGA4fPoxXr15h1apVMDU1zTPWpUsXDB48GFu2bEFm5n+Xn6HSlXPSNy4uTul4bGxsnudpivT0dPTu3RtRUVEIDg5GZGQkHjx4gMuXL6NWrVro1asXnkRFqTpMqoCYABKVua
xDHy8fJ2vEoQ9VcZW+gG6CCV4+ToZhorhYSeCNGzfg5OSEBg0aKB3v0aMHXr58iZiYmJKGS0Xk6uqKqlWrYuvWrUrHt27dCmNjY3z++eflHJlqHTlyBPfv30dgYCA8PT2hpZX1Z/3jjz/G/v37oa+vj3UbN0CS5oxWxuG4om+n4oipomACSFQebJ4Xu5UZFU0spIDN82K9VkdHB1KpFAWdjUtLS1M8j8qXvr4+pk+fjq1bt2LJkiVIzu4mI5FIsG7dOixfvhy+vr4wNzdXcaTl68CBA2jZsiVatGiRb8zMzAxDhw7FgUOHAADyGu3RziUekfqO5RwlVUT8LUZUlkqwH41KQRHqAwLA559/jtWrV+Pq1atwdXXNNx4cHAwXFxfY2NiUZpRUSJMnT0ZCQgJ++uknrFy5EnXq1MHz58+RlJSEMWPG4Pvvv1d1iOUuKSkJtWrVKnC8Zs2aSEpKyvp3IMsEYu1hb3wbkHI1QtNxBpCorGQnf1oxl1h+oRzl7AfMOhSCIiXhnTp1gouLC8aPH684dAAAGRkZWLlyJY4dO4bJkyezzIiKiEQizJ8/Hw8ePMCMGTPQunVrTJo0CaGhoVi+fDm0tUt2AlwdOTo64vr160hPV/5zfuXKFTg65prx448uZWMdwBJgHUAqkCwThonJiIiX89CHCqVbpkDfqHmRSsQ8e/YMX3zxBZ48eYIOHTqgWrVquHLlCqKjozFjxgzMnz+fCSBVGGFhYfj000+xcOFCfPXVV3nGrl27Bk9PT/z8888YP3581sVcb0w1eU8y6wByCZiIKjHdBBNEiOVwsgnPbhn3332Da9eujWvXrmH//v04ePAg/vnnH7i7u2PUqFFo0qRJOUVeuURGRuLOnTvQ09NDu3btYGlpqeqQKg0XFxdMnz4d33//PW7fvo1BgwbBxMQEx48fx7Zt29CmTRuMHDny3Qv0dAFZOuQ12kOrDotDazLOAJYAZwBJqex32Oz4UXFc0bdDK+NwyGu0z7rAtnHlIjo6GpMmTcLp06cV1wwMDODl5QU/Pz/o6+urMLrKQxAE7NixA6tWrUJERAQAwNLSEiNHjsTs2bNhaGiY/0Ua3h2EM4CcASQqXbmSv6xfrEz+KgJX6QtAaoJ0XCrykjAVz5s3b+Dp6QmZTIaNGzfC09MTqampCAgIwJIlSxAdHY2goCAup5cCkUiEESNGwNvbG0+fPoVMJoODg8N/J9ix9rBhgXqNxQSQqJRp8rvqiq44S8JUPBs3bkRsbCyuXbuG2rVrAwAsLCwwc+ZMNGjQAMOGDcPFixcVHTyo5EQiEerUqVPIZ2tBYm4MJ4ghTU3R6P2AmoqngIlKi4zdIdRBPenjUukaQh8WEBCAgQMHKpK/3Hr27In69evD399fBZERgKzZbz1dSMyNs/YDtjBgkWgNwwSQqDTI0pHT8SMizJazfxWc0q4hTOBL1atXr+Ds7Kx0TCQSwdnZGa9evSrnqCif7BlwSZoz2lkbski0BmECSFRS2YmDoUE4LsdLeOhDjdSTPkZEvDy7ZqCcs4GlqHr16ggPD1c6JggCwsPDUb169XKOipTS00XOkrCtoynSLVNwRd9O8aDKiQkgUSngzJ/64pJw2RgyZAiCg4Px7NmzfGNHjhzB33//jaFDh6ogMlIqe0kYyG4ZZ22oeHBWsHJiAkhUYjxFp+64JFz6xo8fDxsbG3h6esLf3x9v375FdHQ0fvnlF4waNQrdu3dHhw4dVB0mvU8xG2ia/cg7K0iVB+sAlgDrAGo4WSYU+/7Y8aPSiNR3hJPLy6xTwgBPCZdAdHQ0Jk+ejN9//11xzcDAAN7e3vjpp59YB1BdVMLapqwDyASwRJgAarBK+AuR3mHh6NKlzp1AYmNjERkZCWNjYzRu3BhaWhq6cJbrDa809Taei5sCAGIhVcutL0wAmQCWCBNADZWrl+YNsbNa/vKjwonUd4STtVaRC0cnJSXh3LlzkEgkaNSoET7++OOyDZRK3c
uXLzF79mwcOXIEmZlZ2wHq1q2LWbNmYdiwYSqOToXe2yOrrisgTABZCJqoaGSZuX7hmcAVTP4qs6xTwo5wqh4OicQZwIcTwMzMTCxcuBAbNmyAWCxWXG/ZsiXWr1+PRo0alXHEVBpiYmLw+eefQxAELF68GB07dkRcXBy2bt2K8ePH4/Xr15g6daqqw1SN3LPhssysPYLmQHpMiuLyc3FTtUsINRETQCKi/1LIdZJp06Zhx44dmDZtGkaMGIGqVavi3LlzWLRoEdzd3XHhwgXUrVu3bGOlEvPz84NUKsWlS5dQs2ZNAEDDhg3RoUMH2NvbY8GCBRg8eDBsbGxUHKmK6WkD0AZk6e+2SwBwSmR3EXWgoZsZiIohu9gzaZZYSN998IESMQ8fPsS2bdvw888/Y+7cubC3t4eRkRG6d++O48ePw8jICL/88ks5REwlkZaWhuDgYIwaNUqR/OU2ffp06OjoIDAwUAXRVVB6unkeOd1F0i1TFA+Wkql4mAASFUauQx8s9qxZXKUvIL+VBq2YSx8sERMUFAQrKyt4e3vnG7OwsICPjw/27t0LmUxWHmFTMb158wZisRgtW7ZUOm5hYQFHR0c8ffq0fANTJ7nqCeY8ckrJUMXBBJDoQ2SZgCwdholiaMVcgvxWGg99aCjdBJMPdg2JjY1F3bp1Cyxt0rBhQ6SlpSExMbEcoqXiMjMzg5aWFqKiopSOS6VSREdHq9VJZpV4b1YQgKLncM6DCaFqMQEkKkhO2QODcETEy7mfhRRdQ7KSQORJAmvUqIHHjx8jLS1N6Wv/+usvGBkZoUqVKuUQKRWXqakpPD09sWXLFqX/L4OCgpCQkIABAwaoIDo1lqvncM4jZ5mYbedUgwkg0QcYJokhjXrNJV9SKGhJeMiQIXj79i22bNmS7zVv3rzBtm3bMHDgQOjqsqZgRTdnzhw8e/YM/fr1w61btwAAiYmJWLt2LWbMmIHBgwejYcOGKo5SDRUwK9jOJV7x4Kxg+eEpYKICyQEhu6QBmABSXroJJogQy+FkEw5JmjMc7WvD19cX3377LaKjo+Hj4wMrKyv88ccf8PPzQ2ZmJmbOnKnqsKkQmjZtigMHDmDcuHFwc3ODmZkZUlNTIQgCvL29sXTpUlWHWDno6WaVksnpugMANQCtOuG4HGat9CXcglN6WAi6BFgIupLKXvoFsoo9c+mXPiR31xC5XI4lK1dg7dq1+PfffxXPad++PVavXg0nJyfVBUpFlpmZiTNnzuDhw4cwNjZGt27dlJ4MplKUq+OIMqVVdJqFoJkAlggTwEoo57Svmla3J9VJt0yBvlFzSMyNkZqahsvXryI1NRXOzs5o0KCBqsMjUh9KTtlneffGPKcVXW5F+X3NBJBLwETv5C718tiaSw1UJLmXhAFndO3kxh7CRMVRYMvFd0WnnZTNENoY4HIYf3cXFhNAIkDxjlMr5hLkCWzxRsWTdUrYDq2ML+XpjEBEpSRn36C56XsDciDNGe2sxYiIV150mis6eTEBJMpmmCTO3fOBqFhcpS+QbqzqKIgqMaUzhNp5ehMrkx7D9nS5sQwMUc6hj+wTv0Sl4UNdQ4ioDOhp5y81o6QQdbplCtItlB8y0SScASTNlmvfX8RjWy4RUKlQ7AeEGBJzY0Am535AIlXKXXKmBiBPSlJ1RCrHGUDSXNnJn1bMJVwOs2byR6WqnvQxLsdLlHYNISIVeH+GUMMxASTNk6u/78vHydBNMOGpMSoTSruGEBFVAEwASbPI0pHT3/dyvISzflQudBNMIE29reowiIgUuAeQNEfuJV+xM2f9SDVkmR+oc0ZEVD44A0gaxdAgHDeY/JEK3BA7Z+8HlHMpmIhUjgkgaQaW4iAVy70fEABLxBCRSjEBpMovZ99fohgRYbac/SOV0k0wwcvHydnN7uVMAolIJZgAUuWWu78vD31QBZG3RIxc1eEQkQbiIRCqnLK7exgmiiFNvc3+vlThuEpfQBqVAtRQdS
REpIk4A0iVVFapl4h4OXs/UsXHQyFEVM6YAFKlFgupqkMgKpBugkneItHcD0hE5YQJIFU+nE0hNaKbYIKIeDkMDbkfkIjKD/cAUuWRve8PyCr2zH1/REREynEGkCqHXKVecvr7EhERkXJMAEn9sdQLVQaCqgMgIk3CBJDUlywzT39f+a00FnkmtRQLKaRRr7M+4B5WIioHTABJPeWq88clX1J3rtIXeTuEMAkkojLGBJDUlmFSVpFnLvlSZVFP+hjS1NuqDoOINAATQFJTcu6ZIiIiKiYmgKR+spfHpKm3cUPsrOJgiIiI1A/rAJL6yLXvLyJejnpS1vmjSkyWDujpqjoKIqqkOANI6iGnzh9LvVAll9MeDgDbwxFRmWECSBXbe6VeLodZs9QLVXp5TgSzPRwRlQEmgFRxKSn1wuSPNEr156qOgIgqKSaAVIFxyZeIiKgsMAEkIiIi0jBMAKliYicEIiKiMsMyMFSx5Nr3J029DXkCS70QERGVNs4AUsWRq9RLRLyc/X2JiIjKCBNAqhjeK/XCQx9ERERlh0vApFrvLfnqcsmXiIiozHEGkFSMS75ERETljTOApHLSqNeoJ01TdRhEREQagzOApDqy9OxWV0RERFSeOANYAoIgAACSk5NVHImayd73BwApcVeh+9YYEjARJMotJT0ZSUliSCRJgJ6uqsMhqlRy/m7n/B3XREwASyDnB6hBgwYqjoSIiIiKKjk5Gebm5qoOQyVEgianvyUkl8vxzz//wNTUFCKRSNXhEBERUSEIgoDk5GTUrFkTWlqauRuOCSARERGRhtHMtJeIiIhIgzEBJCIiItIwTACJiIiINAwTQCIiIiINwwSQiCqsESNGQCQS5Xs8fvy4VO7/22+/oUqVKqVyr+K6ePEievbsiZo1a0IkEuHQoUMqjYeINAMTQCKq0Dw8PBATE5PnUadOHVWHlU96enqxXicWi9GkSROsW7eulCMiIioYE0AiqtD09fVRvXr1PA9tbW0AQEhICJo3bw4DAwPUrVsXCxYsQEZGhuK1y5cvR+PGjWFsbAw7OztMmDABKSkpAIDz589j5MiRSExMVMwszp8/HwCUzsRVqVIFv/32GwDg6dOnEIlECA4ORseOHWFgYAB/f38AwJYtW+Ds7AwDAwM0bNgQ69ev/+DX5+npiUWLFqFPnz6l8N0iIiocdgIhIrV06dIleHl5YfXq1Wjfvj0iIyMxduxYAMC8efMAAFpaWli9ejXq1KmDJ0+eYMKECZg1axbWr18PV1dXrFy5EnPnzsWjR48AACYmJkWKYfbs2Vi2bBmaNWumSALnzp2LtWvXolmzZrhz5w7GjBkDY2NjeHt7l+43gIioJAQiogrK29tb0NbWFoyNjRWPfv36CYIgCJ07dxZ++umnPM/ftWuXUKNGjQLvt3fvXsHKykrx8fbt2wVzc/N8zwMgHDx4MM81c3NzYfv27YIgCEJUVJQAQFi5cmWe59SrV08ICAjIc23hwoVCmzZt/utLLfDzEhGVBc4AElGF5ubmhg0bNig+NjY2BgCEhobizz//xI8//qgYy8zMRFpaGlJTU2FkZIQzZ87Az88PDx8+RFJSEjIyMvKMl1TLli0V/y0WixEZGYlRo0ZhzJgxiusZGRka22uUiCouJoBEVKEZGxvD0dEx3/WUlBQsWLAAffv2zTdmYGCAp0+fokePHvD19cWPP/4IS0tLXL58GaNGjYJMJvtgAigSiSC81yVT2SGPnGQ0Jx4A2Lx5M1q3bp3neTl7FomIKgomgESklpo3b45Hjx4pTQ4B4NatW5DL5Vi2bJmi2fuePXvyPEdPTw+ZmZn5XmttbY2YmBjFxxEREUhNTf1gPDY2NqhZsyaePHmCoUOHFvXLISIqV0wAiUgtzZ07Fz169IC9vT369esHLS0thIaGIiwsDIsWLYKjoyPS09OxZs0a9OzZE3/++Sc2btyY5x4ODg5ISUnB2bNn0aRJExgZGcHIyAifff
YZ1q5dizZt2iAzMxNff/01dHV1/zOmBQsWYMqUKTA3N4eHhwekUilu3ryJt2/fYvr06Upfk5KSkqeuYVRUFO7evQtLS0vY29uX7JtERFQQVW9CJCIqiLe3t9CrV68Cx0+ePCm4uroKhoaGgpmZmfDJJ58ImzZtUowvX75cqFGjhmBoaCi4u7sLO3fuFAAIb9++VTxn/PjxgpWVlQBAmDdvniAIghAdHS107dpVMDY2FpycnITjx48rPQRy586dfDH5+/sLTZs2FfT09AQLCwuhQ4cOwoEDBwr8Gs6dOycAyPfw9vYuwneKiKhoRILw3kYXIiIiIqrUWAiaiIiISMMwASQiIiLSMEwAiYiIiDQME0AiIiIiDcMEkIiIiEjDMAEkIiIi0jBMAImIiIg0DBNAIiIiIg3DBJCIiIhIwzABJCIiItIwTACJiIiINAwTQCIiIiINwwSQiIiISMMwASQiIiLSMEwAiYiIiDQME0AiIiIiDcMEkIiIiEjDMAEkIiIi0jBMAImIiIg0DBNAIiIiIg3DBJCIiIhIwzABJCIiItIwTACJiIiINAwTQCIiIiINwwSQiIiISMMwASQiIiLSMEwAiYiIiDQME0AiIiIiDcMEkIiIiEjDMAEkIiIi0jBMAImIiIg0DBNAIiIiIg3DBJCIiIhIwzABJCIiItIwTACJiIiINMz/A7rZb07VGdSDAAAAAElFTkSuQmCC"}
|
flagged/log.csv
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Number of Samples,Noise,Degree,C,coefficient 0,Polynomial SVM Classifier,Probabilities,flag,username,timestamp
|
| 2 |
+
350,1,5,8,8,C:\IT\PythonProject\OffsetCorrection\flagged\Polynomial SVM Classifier\tmp1qkjy2sm.json,"[[4.84269407e-01 5.15730593e-01]
|
| 3 |
+
[4.90755053e-01 5.09244947e-01]
|
| 4 |
+
[5.63261660e-01 4.36738340e-01]
|
| 5 |
+
[4.08689474e-01 5.91310526e-01]
|
| 6 |
+
[5.00000000e-01 5.00000000e-01]
|
| 7 |
+
[4.62511426e-01 5.37488574e-01]
|
| 8 |
+
[4.83982836e-01 5.16017164e-01]
|
| 9 |
+
[5.45072318e-01 4.54927682e-01]
|
| 10 |
+
[4.67917347e-01 5.32082653e-01]
|
| 11 |
+
[4.74482405e-01 5.25517595e-01]
|
| 12 |
+
[4.12314113e-01 5.87685887e-01]
|
| 13 |
+
[4.91000216e-01 5.08999784e-01]
|
| 14 |
+
[6.57143411e-01 3.42856589e-01]
|
| 15 |
+
[4.56434251e-01 5.43565749e-01]
|
| 16 |
+
[4.69722281e-01 5.30277719e-01]
|
| 17 |
+
[4.16949760e-01 5.83050240e-01]
|
| 18 |
+
[4.55113843e-01 5.44886157e-01]
|
| 19 |
+
[4.79002628e-01 5.20997372e-01]
|
| 20 |
+
[4.33410232e-01 5.66589768e-01]
|
| 21 |
+
[5.08523248e-01 4.91476752e-01]
|
| 22 |
+
[4.01499958e-01 5.98500042e-01]
|
| 23 |
+
[5.00000000e-01 5.00000000e-01]
|
| 24 |
+
[5.00000000e-01 5.00000000e-01]
|
| 25 |
+
[4.08199595e-01 5.91800405e-01]
|
| 26 |
+
[9.69644681e-01 3.03553193e-02]
|
| 27 |
+
[4.88197228e-01 5.11802772e-01]
|
| 28 |
+
[4.84114775e-01 5.15885225e-01]
|
| 29 |
+
[4.77507941e-01 5.22492059e-01]
|
| 30 |
+
[3.74425704e-01 6.25574296e-01]
|
| 31 |
+
[4.58412126e-01 5.41587874e-01]
|
| 32 |
+
[4.89897945e-01 5.10102055e-01]
|
| 33 |
+
[4.78140045e-01 5.21859955e-01]
|
| 34 |
+
[4.03915616e-01 5.96084384e-01]
|
| 35 |
+
[4.56796091e-01 5.43203909e-01]
|
| 36 |
+
[5.00000000e-01 5.00000000e-01]
|
| 37 |
+
[7.32797836e-01 2.67202164e-01]
|
| 38 |
+
[5.00000000e-01 5.00000000e-01]
|
| 39 |
+
[5.00000000e-01 5.00000000e-01]
|
| 40 |
+
[9.93406935e-01 6.59306543e-03]
|
| 41 |
+
[3.78157020e-01 6.21842980e-01]
|
| 42 |
+
[4.57160704e-01 5.42839296e-01]
|
| 43 |
+
[9.99999733e-01 2.67343407e-07]
|
| 44 |
+
[6.22350465e-01 3.77649535e-01]
|
| 45 |
+
[4.64316427e-01 5.35683573e-01]
|
| 46 |
+
[4.81290184e-01 5.18709816e-01]
|
| 47 |
+
[7.14259399e-01 2.85740601e-01]
|
| 48 |
+
[4.77395029e-01 5.22604971e-01]
|
| 49 |
+
[5.00000000e-01 5.00000000e-01]
|
| 50 |
+
[4.16303760e-01 5.83696240e-01]
|
| 51 |
+
[4.80194576e-01 5.19805424e-01]
|
| 52 |
+
[7.15837454e-01 2.84162546e-01]
|
| 53 |
+
[4.85737793e-01 5.14262207e-01]
|
| 54 |
+
[4.59332065e-01 5.40667935e-01]
|
| 55 |
+
[5.00000000e-01 5.00000000e-01]
|
| 56 |
+
[4.88384247e-01 5.11615753e-01]
|
| 57 |
+
[4.54899516e-01 5.45100484e-01]
|
| 58 |
+
[5.60211023e-01 4.39788977e-01]
|
| 59 |
+
[4.53175429e-01 5.46824571e-01]
|
| 60 |
+
[4.85564491e-01 5.14435509e-01]
|
| 61 |
+
[4.53566621e-01 5.46433379e-01]
|
| 62 |
+
[4.92265383e-01 5.07734617e-01]
|
| 63 |
+
[4.90539349e-01 5.09460651e-01]
|
| 64 |
+
[3.95359300e-01 6.04640700e-01]
|
| 65 |
+
[6.20691301e-01 3.79308699e-01]
|
| 66 |
+
[5.00000000e-01 5.00000000e-01]
|
| 67 |
+
[9.99999287e-01 7.12873818e-07]
|
| 68 |
+
[4.85769673e-01 5.14230327e-01]
|
| 69 |
+
[4.93337010e-01 5.06662990e-01]
|
| 70 |
+
[9.81381449e-01 1.86185515e-02]
|
| 71 |
+
[4.55065432e-01 5.44934568e-01]
|
| 72 |
+
[4.57407007e-01 5.42592993e-01]
|
| 73 |
+
[4.88064354e-01 5.11935646e-01]
|
| 74 |
+
[4.07671732e-01 5.92328268e-01]
|
| 75 |
+
[5.00000000e-01 5.00000000e-01]
|
| 76 |
+
[4.57986562e-01 5.42013438e-01]
|
| 77 |
+
[3.49359951e-01 6.50640049e-01]
|
| 78 |
+
[4.87098761e-01 5.12901239e-01]
|
| 79 |
+
[4.57893841e-01 5.42106159e-01]
|
| 80 |
+
[5.00000000e-01 5.00000000e-01]
|
| 81 |
+
[5.00000000e-01 5.00000000e-01]
|
| 82 |
+
[5.00000000e-01 5.00000000e-01]
|
| 83 |
+
[3.86068942e-01 6.13931058e-01]
|
| 84 |
+
[5.11186691e-01 4.88813309e-01]
|
| 85 |
+
[5.00000000e-01 5.00000000e-01]
|
| 86 |
+
[5.00000000e-01 5.00000000e-01]
|
| 87 |
+
[5.11595504e-01 4.88404496e-01]
|
| 88 |
+
[4.56883473e-01 5.43116527e-01]
|
| 89 |
+
[4.48814896e-01 5.51185104e-01]
|
| 90 |
+
[4.49255361e-01 5.50744639e-01]
|
| 91 |
+
[4.83946813e-01 5.16053187e-01]
|
| 92 |
+
[4.91723763e-01 5.08276237e-01]
|
| 93 |
+
[4.83997832e-01 5.16002168e-01]
|
| 94 |
+
[6.46859600e-01 3.53140400e-01]
|
| 95 |
+
[4.31456429e-01 5.68543571e-01]
|
| 96 |
+
[4.57347527e-01 5.42652473e-01]
|
| 97 |
+
[6.34743472e-01 3.65256528e-01]
|
| 98 |
+
[4.55476049e-01 5.44523951e-01]
|
| 99 |
+
[4.88134941e-01 5.11865059e-01]
|
| 100 |
+
[5.25883778e-01 4.74116222e-01]
|
| 101 |
+
[4.87301266e-01 5.12698734e-01]]",,,2023-07-06 11:04:01.656113
|
| 102 |
+
,,,,,,,,2023-07-06 17:24:49.861189
|
iris_tree.dot
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
digraph Tree {
|
| 2 |
+
node [shape=box, style="filled, rounded", color="black", fontname="helvetica"] ;
|
| 3 |
+
edge [fontname="helvetica"] ;
|
| 4 |
+
0 [label="petal length (cm) <= 2.45\ngini = 0.667\nsamples = 150\nvalue = [50, 50, 50]\nclass = setosa", fillcolor="#ffffff"] ;
|
| 5 |
+
1 [label="gini = 0.0\nsamples = 50\nvalue = [50, 0, 0]\nclass = setosa", fillcolor="#e58139"] ;
|
| 6 |
+
0 -> 1 [labeldistance=2.5, labelangle=45, headlabel="True"] ;
|
| 7 |
+
2 [label="petal width (cm) <= 1.75\ngini = 0.5\nsamples = 100\nvalue = [0, 50, 50]\nclass = versicolor", fillcolor="#ffffff"] ;
|
| 8 |
+
0 -> 2 [labeldistance=2.5, labelangle=-45, headlabel="False"] ;
|
| 9 |
+
3 [label="gini = 0.168\nsamples = 54\nvalue = [0, 49, 5]\nclass = versicolor", fillcolor="#4de88e"] ;
|
| 10 |
+
2 -> 3 ;
|
| 11 |
+
4 [label="gini = 0.043\nsamples = 46\nvalue = [0, 1, 45]\nclass = virginica", fillcolor="#843de6"] ;
|
| 12 |
+
2 -> 4 ;
|
| 13 |
+
}
|
iris_tree.png
ADDED
|
kaggle_example.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
|
| 5 |
+
df_X = pd.read_csv("data_X.csv",parse_dates=["date_time"])
|
| 6 |
+
df_Y = pd.read_csv("data_Y.csv",parse_dates=["date_time"])
|
| 7 |
+
df_X_copy = df_X.copy()
|
| 8 |
+
df_Y_copy = df_Y.copy()
|
| 9 |
+
print(df_X.head(10))
|
| 10 |
+
print(df_Y.head(10))
|
| 11 |
+
print(df_X.shape)
|
| 12 |
+
print(df_Y.shape)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
print(df_X_copy.head(10))
|
| 16 |
+
|
| 17 |
+
print(type(df_X["date_time"][0]))
|
| 18 |
+
|
| 19 |
+
print(df_X_copy.info(), df_Y_copy.info())
|
| 20 |
+
|
| 21 |
+
print(df_X_copy.describe())
|
| 22 |
+
|
| 23 |
+
print(df_X_copy.shape[0]/60)
|
| 24 |
+
|
| 25 |
+
print(df_X_copy.isnull().sum()/df_X_copy.shape[0]*100)
|
| 26 |
+
print(df_X_copy.isnull().sum())
|
| 27 |
+
|
| 28 |
+
df_X_copy["date_hour"] = df_X_copy["date_time"].apply(lambda x: x.strftime("%d-%m-%Y-%H"))
|
| 29 |
+
print(df_X_copy.head())
|
| 30 |
+
|
| 31 |
+
L = list(df_X_copy.groupby("date_hour"))
|
| 32 |
+
df_X_copy.hist(bins=100,figsize=(10,12),grid=False)
|
| 33 |
+
#plt.show()
|
| 34 |
+
|
| 35 |
+
l = len(L)
|
| 36 |
+
print(l)
|
| 37 |
+
|
| 38 |
+
List = []
|
| 39 |
+
|
| 40 |
+
for i in range(l):
|
| 41 |
+
dh = L[i][1]["date_hour"].iloc[0]
|
| 42 |
+
row = L[i][1].drop(["date_time", "date_hour"], axis=1).to_numpy().flatten().tolist()
|
| 43 |
+
row.append(dh)
|
| 44 |
+
List.append(row)
|
| 45 |
+
if (i+1)%2000 == 0:
|
| 46 |
+
print("Processing: %.4f%%"%(100*(i+1)/l))
|
| 47 |
+
|
| 48 |
+
df = pd.DataFrame(List)
|
| 49 |
+
print(df[1020])
|
| 50 |
+
df["date_hour"] = df[1020]
|
| 51 |
+
|
| 52 |
+
|
main.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Author: İlter Gökberk Özel 12/06/2023 11:40
|
| 3 |
+
Summary: This program's main purpose is to determine the tool to give the offset to correct CNC process.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
#Libraries
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from matplotlib import pyplot as plt
|
| 11 |
+
import seaborn as sbn
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
#Main Class
|
| 15 |
+
class OffsetCorrection():
|
| 16 |
+
def __init__(self):
|
| 17 |
+
print("OffsetCorrection Initilized!")
|
| 18 |
+
@staticmethod
|
| 19 |
+
def x():
|
| 20 |
+
pass
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
#Child Classes
|
| 24 |
+
|
| 25 |
+
class Line(OffsetCorrection):
|
| 26 |
+
def __init__(self):
|
| 27 |
+
pass
|
| 28 |
+
|
| 29 |
+
class Circle(OffsetCorrection):
|
| 30 |
+
def __init__(self):
|
| 31 |
+
pass
|
| 32 |
+
|
| 33 |
+
def A(self):
|
| 34 |
+
pass
|
| 35 |
+
class Square(OffsetCorrection):
|
| 36 |
+
def __init__(self):
|
| 37 |
+
pass
|
| 38 |
+
def main():
|
| 39 |
+
#Initilize
|
| 40 |
+
OSC = OffsetCorrection()
|
| 41 |
+
if __name__=='__main__':
|
| 42 |
+
main()
|
| 43 |
+
|
meshes/katana/katana_base_link.stl
ADDED
|
Binary file (100 kB). View file
|
|
|
meshes/katana/katana_gripper_l_finger.stl
ADDED
|
Binary file (35.1 kB). View file
|
|
|
meshes/katana/katana_gripper_link.stl
ADDED
|
Binary file (25.1 kB). View file
|
|
|
meshes/katana/katana_gripper_r_finger.stl
ADDED
|
Binary file (35.1 kB). View file
|
|
|
meshes/katana/katana_internal_controlbox.stl
ADDED
|
Binary file (50.1 kB). View file
|
|
|
meshes/katana/katana_motor1_pan_link.stl
ADDED
|
Binary file (50.1 kB). View file
|
|
|
meshes/katana/katana_motor2_lift_link.stl
ADDED
|
Binary file (85 kB). View file
|
|
|
meshes/katana/katana_motor3_lift_link.stl
ADDED
|
Binary file (85 kB). View file
|
|
|
meshes/katana/katana_motor4_lift_link.stl
ADDED
|
Binary file (85.1 kB). View file
|
|
|
meshes/katana/katana_motor5_wrist_roll_link.stl
ADDED
|
Binary file (85.1 kB). View file
|
|
|