gautamaj commited on
Commit
d167e83
·
verified ·
1 Parent(s): bb669d6

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. .idea/.gitignore +3 -0
  3. .idea/LLM_Visual.iml +12 -0
  4. .idea/inspectionProfiles/Project_Default.xml +235 -0
  5. .idea/inspectionProfiles/profiles_settings.xml +6 -0
  6. .idea/misc.xml +4 -0
  7. .idea/modules.xml +8 -0
  8. .idea/workspace.xml +62 -0
  9. .streamlit/config.toml +3 -0
  10. BLEU_Eval.py +38 -0
  11. Data/test.jsonl +94 -0
  12. Data/train.jsonl +18 -0
  13. Files/COMPUTER VISION TOOLKIT INSTRUCTION.txt +829 -0
  14. Files/F170.txt +0 -0
  15. Files/KAT Gateway Software.txt +952 -0
  16. Files/Mockpanel.docx +0 -0
  17. Files/Mockpanel.pdf +0 -0
  18. Files/Reference.txt +35 -0
  19. Files/Stratasys-F123-Series-User-Guide.pdf +3 -0
  20. Files/Ultimaker S3-S5 - User manual ENv2.4.pdf +3 -0
  21. Files/Ultimaker S5.txt +839 -0
  22. Files/User_manual_v2.pdf +3 -0
  23. Files/environment_data.csv +20 -0
  24. Ignore/page17_img2.png +0 -0
  25. Ignore/page1_img1.png +0 -0
  26. Ignore/page1_img2.png +0 -0
  27. Ignore/page22_img4.png +0 -0
  28. Ignore/page27_img2.png +0 -0
  29. Ignore/page32_img2.png +0 -0
  30. Ignore/page32_img4.png +0 -0
  31. Ignore/page35_img2.png +0 -0
  32. Ignore/page36_img2.png +0 -0
  33. Ignore/page3_img1.png +0 -0
  34. Ignore/page40_img2.png +0 -0
  35. Ignore/page5_img2.png +0 -0
  36. Ignore/page6_img2.png +0 -0
  37. Ignore/page6_img4.png +0 -0
  38. Ignore/page7_img2.png +0 -0
  39. Image.png +0 -0
  40. Image/F170.jpg +0 -0
  41. Image/KATWalk.jpg +0 -0
  42. Image/Mockpanel.jpg +0 -0
  43. Image/Ultimaker.jpg +0 -0
  44. LLM_App.py +651 -0
  45. README.md +3 -9
  46. Reference/Mockpanel/Images/page100_img3.png +0 -0
  47. Reference/Mockpanel/Images/page100_img5.png +0 -0
  48. Reference/Mockpanel/Images/page101_img3.png +0 -0
  49. Reference/Mockpanel/Images/page101_img5.png +0 -0
  50. Reference/Mockpanel/Images/page102_img3.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Files/Stratasys-F123-Series-User-Guide.pdf filter=lfs diff=lfs merge=lfs -text
37
+ Files/Ultimaker[[:space:]]S3-S5[[:space:]]-[[:space:]]User[[:space:]]manual[[:space:]]ENv2.4.pdf filter=lfs diff=lfs merge=lfs -text
38
+ Files/User_manual_v2.pdf filter=lfs diff=lfs merge=lfs -text
39
+ volumes/etcd/member/wal/0000000000000000-0000000000000000.wal filter=lfs diff=lfs merge=lfs -text
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
.idea/LLM_Visual.iml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="jdk" jdkName="Python 3.10 (llm)" jdkType="Python SDK" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ <component name="PyDocumentationSettings">
9
+ <option name="format" value="PLAIN" />
10
+ <option name="myDocStringFormat" value="Plain" />
11
+ </component>
12
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <profile version="1.0">
3
+ <option name="myName" value="Project Default" />
4
+ <inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
5
+ <option name="ignoredErrors">
6
+ <list>
7
+ <option value="W29" />
8
+ <option value="E501" />
9
+ <option value="W29" />
10
+ <option value="E501" />
11
+ <option value="W29" />
12
+ <option value="E501" />
13
+ <option value="W29" />
14
+ <option value="E501" />
15
+ <option value="W29" />
16
+ <option value="E501" />
17
+ <option value="W29" />
18
+ <option value="E501" />
19
+ <option value="W29" />
20
+ <option value="E501" />
21
+ <option value="W29" />
22
+ <option value="E501" />
23
+ <option value="W29" />
24
+ <option value="E501" />
25
+ <option value="W29" />
26
+ <option value="E501" />
27
+ <option value="W29" />
28
+ <option value="E501" />
29
+ <option value="W29" />
30
+ <option value="E501" />
31
+ <option value="W29" />
32
+ <option value="E501" />
33
+ <option value="W29" />
34
+ <option value="E501" />
35
+ <option value="W29" />
36
+ <option value="E501" />
37
+ <option value="W29" />
38
+ <option value="E501" />
39
+ <option value="W29" />
40
+ <option value="E501" />
41
+ <option value="W29" />
42
+ <option value="E501" />
43
+ <option value="W29" />
44
+ <option value="E501" />
45
+ <option value="W29" />
46
+ <option value="E501" />
47
+ <option value="W29" />
48
+ <option value="E501" />
49
+ <option value="W29" />
50
+ <option value="E501" />
51
+ <option value="W29" />
52
+ <option value="E501" />
53
+ <option value="W29" />
54
+ <option value="E501" />
55
+ <option value="W29" />
56
+ <option value="E501" />
57
+ <option value="W29" />
58
+ <option value="E501" />
59
+ <option value="W29" />
60
+ <option value="E501" />
61
+ <option value="W29" />
62
+ <option value="E501" />
63
+ <option value="W29" />
64
+ <option value="E501" />
65
+ <option value="W29" />
66
+ <option value="E501" />
67
+ <option value="W29" />
68
+ <option value="E501" />
69
+ <option value="W29" />
70
+ <option value="E501" />
71
+ <option value="W29" />
72
+ <option value="E501" />
73
+ <option value="W29" />
74
+ <option value="E501" />
75
+ <option value="W29" />
76
+ <option value="E501" />
77
+ <option value="W29" />
78
+ <option value="E501" />
79
+ <option value="W29" />
80
+ <option value="E501" />
81
+ <option value="W29" />
82
+ <option value="E501" />
83
+ <option value="W29" />
84
+ <option value="E501" />
85
+ <option value="W29" />
86
+ <option value="E501" />
87
+ <option value="W29" />
88
+ <option value="E501" />
89
+ <option value="W29" />
90
+ <option value="E501" />
91
+ <option value="W29" />
92
+ <option value="E501" />
93
+ <option value="W29" />
94
+ <option value="E501" />
95
+ <option value="W29" />
96
+ <option value="E501" />
97
+ <option value="W29" />
98
+ <option value="E501" />
99
+ <option value="W29" />
100
+ <option value="E501" />
101
+ <option value="W29" />
102
+ <option value="E501" />
103
+ <option value="W29" />
104
+ <option value="E501" />
105
+ <option value="W29" />
106
+ <option value="E501" />
107
+ <option value="W29" />
108
+ <option value="E501" />
109
+ <option value="W29" />
110
+ <option value="E501" />
111
+ <option value="W29" />
112
+ <option value="E501" />
113
+ <option value="W29" />
114
+ <option value="E501" />
115
+ <option value="W29" />
116
+ <option value="E501" />
117
+ <option value="W29" />
118
+ <option value="E501" />
119
+ <option value="W29" />
120
+ <option value="E501" />
121
+ <option value="W29" />
122
+ <option value="E501" />
123
+ <option value="W29" />
124
+ <option value="E501" />
125
+ <option value="W29" />
126
+ <option value="E501" />
127
+ <option value="W29" />
128
+ <option value="E501" />
129
+ <option value="W29" />
130
+ <option value="E501" />
131
+ <option value="W29" />
132
+ <option value="E501" />
133
+ <option value="W29" />
134
+ <option value="E501" />
135
+ <option value="W29" />
136
+ <option value="E501" />
137
+ <option value="W29" />
138
+ <option value="E501" />
139
+ <option value="W29" />
140
+ <option value="E501" />
141
+ <option value="W29" />
142
+ <option value="E501" />
143
+ <option value="W29" />
144
+ <option value="E501" />
145
+ <option value="W29" />
146
+ <option value="E501" />
147
+ <option value="W29" />
148
+ <option value="E501" />
149
+ <option value="W29" />
150
+ <option value="E501" />
151
+ <option value="W29" />
152
+ <option value="E501" />
153
+ <option value="W29" />
154
+ <option value="E501" />
155
+ <option value="W29" />
156
+ <option value="E501" />
157
+ <option value="W29" />
158
+ <option value="E501" />
159
+ <option value="W29" />
160
+ <option value="E501" />
161
+ <option value="W29" />
162
+ <option value="E501" />
163
+ <option value="W29" />
164
+ <option value="E501" />
165
+ <option value="W29" />
166
+ <option value="E501" />
167
+ <option value="W29" />
168
+ <option value="E501" />
169
+ <option value="W29" />
170
+ <option value="E501" />
171
+ <option value="W29" />
172
+ <option value="E501" />
173
+ <option value="W29" />
174
+ <option value="E501" />
175
+ <option value="W29" />
176
+ <option value="E501" />
177
+ <option value="W29" />
178
+ <option value="E501" />
179
+ <option value="W29" />
180
+ <option value="E501" />
181
+ <option value="W29" />
182
+ <option value="E501" />
183
+ <option value="W29" />
184
+ <option value="E501" />
185
+ <option value="W29" />
186
+ <option value="E501" />
187
+ <option value="W29" />
188
+ <option value="E501" />
189
+ <option value="W29" />
190
+ <option value="E501" />
191
+ <option value="W29" />
192
+ <option value="E501" />
193
+ <option value="W29" />
194
+ <option value="E501" />
195
+ <option value="W29" />
196
+ <option value="E501" />
197
+ <option value="W29" />
198
+ <option value="E501" />
199
+ <option value="W29" />
200
+ <option value="E501" />
201
+ <option value="W29" />
202
+ <option value="E501" />
203
+ <option value="W29" />
204
+ <option value="E501" />
205
+ <option value="W29" />
206
+ <option value="E501" />
207
+ <option value="W29" />
208
+ <option value="E501" />
209
+ <option value="W29" />
210
+ <option value="E501" />
211
+ <option value="W29" />
212
+ <option value="E501" />
213
+ <option value="W29" />
214
+ <option value="E501" />
215
+ <option value="W29" />
216
+ <option value="E501" />
217
+ <option value="W29" />
218
+ <option value="E501" />
219
+ <option value="W29" />
220
+ <option value="E501" />
221
+ <option value="W29" />
222
+ <option value="E501" />
223
+ <option value="W29" />
224
+ <option value="E501" />
225
+ <option value="W29" />
226
+ <option value="E501" />
227
+ <option value="W29" />
228
+ <option value="E501" />
229
+ <option value="W29" />
230
+ <option value="E501" />
231
+ </list>
232
+ </option>
233
+ </inspection_tool>
234
+ </profile>
235
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (llm)" project-jdk-type="Python SDK" />
4
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/LLM_Visual.iml" filepath="$PROJECT_DIR$/.idea/LLM_Visual.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ChangeListManager">
4
+ <list default="true" id="a8959b7f-1323-4e61-abea-2edc17036b14" name="Changes" comment="" />
5
+ <option name="SHOW_DIALOG" value="false" />
6
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
7
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
8
+ <option name="LAST_RESOLUTION" value="IGNORE" />
9
+ </component>
10
+ <component name="MarkdownSettingsMigration">
11
+ <option name="stateVersion" value="1" />
12
+ </component>
13
+ <component name="ProjectId" id="2gWr8Kxp1bjRWHYQnDkFBstiYyf" />
14
+ <component name="ProjectViewState">
15
+ <option name="hideEmptyMiddlePackages" value="true" />
16
+ <option name="showLibraryContents" value="true" />
17
+ </component>
18
+ <component name="PropertiesComponent"><![CDATA[{
19
+ "keyToString": {
20
+ "RunOnceActivity.OpenProjectViewOnStart": "true",
21
+ "RunOnceActivity.ShowReadmeOnStart": "true",
22
+ "last_opened_file_path": "C:/Users/CGDM/Desktop/LLM_Visual",
23
+ "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
24
+ }
25
+ }]]></component>
26
+ <component name="RunManager">
27
+ <configuration name="LLM_App" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
28
+ <module name="LLM_Visual" />
29
+ <option name="INTERPRETER_OPTIONS" value="" />
30
+ <option name="PARENT_ENVS" value="true" />
31
+ <option name="SDK_HOME" value="" />
32
+ <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
33
+ <option name="IS_MODULE_SDK" value="true" />
34
+ <option name="ADD_CONTENT_ROOTS" value="true" />
35
+ <option name="ADD_SOURCE_ROOTS" value="true" />
36
+ <option name="SCRIPT_NAME" value="$PROJECT_DIR$/LLM_App.py" />
37
+ <option name="PARAMETERS" value="" />
38
+ <option name="SHOW_COMMAND_LINE" value="false" />
39
+ <option name="EMULATE_TERMINAL" value="false" />
40
+ <option name="MODULE_MODE" value="false" />
41
+ <option name="REDIRECT_INPUT" value="false" />
42
+ <option name="INPUT_FILE" value="" />
43
+ <method v="2" />
44
+ </configuration>
45
+ <recent_temporary>
46
+ <list>
47
+ <item itemvalue="Python.LLM_App" />
48
+ </list>
49
+ </recent_temporary>
50
+ </component>
51
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
52
+ <component name="TaskManager">
53
+ <task active="true" id="Default" summary="Default task">
54
+ <changelist id="a8959b7f-1323-4e61-abea-2edc17036b14" name="Changes" comment="" />
55
+ <created>1715824013297</created>
56
+ <option name="number" value="Default" />
57
+ <option name="presentableId" value="Default" />
58
+ <updated>1715824013297</updated>
59
+ </task>
60
+ <servers />
61
+ </component>
62
+ </project>
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [server]
2
+ enableXsrfProtection = false
3
+ enableCORS = false
BLEU_Eval.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
2
+
3
+ reference = """
4
+ The main components of the Ultimaker S5 are:
5
+ Glass door(s), Print head, Build plate, Build plate clamps, Touchscreen, USB port, Feeder 2, Bowden tubes, Feeder 1, Power socket and switch, Ethernet port, Double spoolholder with NFC cable, NFC socket
6
+
7
+ """
8
+
9
+ candidate_chatgpt = """
10
+ The main components of the Ultimaker S5 are:
11
+
12
+ Glass door(s)
13
+ Print head
14
+ Build plate
15
+ Build plate clamps
16
+ Touchscreen
17
+ USB port
18
+ Feeder 2
19
+ Bowden tubes
20
+ Feeder 1
21
+ Power socket and switch
22
+ Ethernet port
23
+ Double spoolholder with NFC cable
24
+ NFC socket
25
+ """
26
+
27
+ # Preprocessing text for better comparison
28
+ ref_processed = reference.strip().replace('\n', ' ')
29
+ candidate_processed = candidate_chatgpt.strip().replace('\n', ' ')
30
+
31
+ # Calculating BLEU score
32
+ bleu_score = sentence_bleu(
33
+ [ref_processed.split()],
34
+ candidate_processed.split(),
35
+ smoothing_function=SmoothingFunction().method1
36
+ )
37
+
38
+ print(bleu_score)
Data/test.jsonl ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"what is a capsid protein\" The answer is?","completion":" content"}
2
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you describe the interaction between the Cyclophilin A and the Capsid?\" The answer is?","completion":" content"}
3
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Explain the function of the Inner Membrane.\" The answer is?","completion":" content"}
4
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Tell me about the Outer Membrane.\" The answer is?","completion":" content"}
5
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Provide a more in-depth explanation of the structure and function of the Nucleocapsid Protein.\" The answer is?","completion":" content"}
6
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"What is the purpose of the human lymphocyte antigen class II?\" The answer is?","completion":" content"}
7
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I don't quite understand, can you clarify?\" The answer is?","completion":" content"}
8
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you give me an example?\" The answer is?","completion":" content"}
9
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you expand on that point?\" The answer is?","completion":" content"}
10
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Please repeat that again, I missed something.\" The answer is?","completion":" content"}
11
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you break that down further?\" The answer is?","completion":" content"}
12
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Elaborate on the topic a bit more, would you?\" The answer is?","completion":" content"}
13
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"What makes the virus unique in comparison to other viruses?\" The answer is?","completion":" content"}
14
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you provide an in-depth explanation of the viral replication cycle?\" The answer is?","completion":" content"}
15
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"What is the significance of the viral protease in the replication process?\" The answer is?","completion":" content"}
16
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Let's retreat.\" The answer is?","completion":" navigation"}
17
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you reverse?\" The answer is?","completion":" navigation"}
18
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Let's step back.\" The answer is?","completion":" navigation"}
19
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Play some music for me.\" The answer is?","completion":" error"}
20
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you perform a task outside of the dataset's scope?\" The answer is?","completion":" error"}
21
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you provide output that does not fit into any of the five predefined classes?\" The answer is?","completion":" error"}
22
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you handle requests that contain incomplete or ambiguous information?\" The answer is?","completion":" error"}
23
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"What's the capital of Kazakhstan?\" The answer is?","completion":" error"}
24
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Show me a picture of a tree growing in space.\" The answer is?","completion":" error"}
25
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Write a short story where the protagonist is a talking cat who solves a mystery, but also make sure to include a subplot about a cursed diamond and three distinct settings.\" The answer is?","completion":" error"}
26
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you show me the hemoglobin, rotate it to the right, cut it in half, and then show me the heme group?\" The answer is?","completion":" error"}
27
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Display the DNA double helix, cut it at the middle, rotate the top half, and then show me the base pairs in the major groove.\" The answer is?","completion":" error"}
28
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you zoom in on an enzyme and rotate it? How does the enzyme's shape facilitate substrate binding?\" The answer is?","completion":" error"}
29
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I want to see the structure of a myosin molecule, cut it in half, and then show me the ATP-binding site.\" The answer is?","completion":" error"}
30
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you show me the structure of a virus particle, rotate it to the back, and then zoom in on the capsid?\" The answer is?","completion":" error"}
31
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you show me the structure of a bacterium, rotate it to the side, and then zoom in on the flagellum?\" The answer is?","completion":" error"}
32
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Present me the Immunoglobulin G now\" The answer is?","completion":" navigation"}
33
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Show the Transferrin instead\" The answer is?","completion":" navigation"}
34
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Bring up the Alpha-amylase now\" The answer is?","completion":" navigation"}
35
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Reveal the Aspartate Aminotransferase for me\" The answer is?","completion":" navigation"}
36
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Showcase the Capsid Protein, could you?\" The answer is?","completion":" navigation"}
37
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Highlight the Reverse Transcriptase, please\" The answer is?","completion":" navigation"}
38
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Showcase the Viral Protein R, if you don't mind\" The answer is?","completion":" navigation"}
39
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Guide me to the Spacer Protein 2 now\" The answer is?","completion":" navigation"}
40
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Expose the Negative Regulatory Factor for me\" The answer is?","completion":" navigation"}
41
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Bring up the Surface, could you?\" The answer is?","completion":" navigation"}
42
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Demonstrate the Plasma, would you?\" The answer is?","completion":" navigation"}
43
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Show me the RNA, again\" The answer is?","completion":" navigation"}
44
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Outer Membrane\" The answer is?","completion":" navigation"}
45
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"HIV Protease\" The answer is?","completion":" navigation"}
46
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Take me to the Factor XIII instead\" The answer is?","completion":" navigation"}
47
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Highlight the Complement C3, please\" The answer is?","completion":" navigation"}
48
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Take me to the Complement C9, could you?\" The answer is?","completion":" navigation"}
49
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Navigate to the Coagulation Factor X now\" The answer is?","completion":" navigation"}
50
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Reveal the Capsid for me\" The answer is?","completion":" cutting"}
51
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Would you mind revealing the inside of the RNA?\" The answer is?","completion":" cutting"}
52
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you cut up the Viral Infectivity Factor to see what's inside?\" The answer is?","completion":" cutting"}
53
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you show me the interior of the Viral Protein R?\" The answer is?","completion":" cutting"}
54
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you show me what's inside the Human lymphocyte antigen class II?\" The answer is?","completion":" cutting"}
55
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you expose the inner workings of the Inner Membrane?\" The answer is?","completion":" cutting"}
56
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you cut up the Capsid to reveal its interior?\" The answer is?","completion":" cutting"}
57
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you cut open the Viral Envelope Protein to show its inside?\" The answer is?","completion":" cutting"}
58
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Open up the Envelope Protein for me to see\" The answer is?","completion":" cutting"}
59
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Show me the interior of the Matrix Protein.\" The answer is?","completion":" cutting"}
60
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you cut it open to show me the inside?\" The answer is?","completion":" cutting"}
61
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Unveil the inner workings.\" The answer is?","completion":" cutting"}
62
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Dissect it for me.\" The answer is?","completion":" cutting"}
63
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Cut it open for inspection.\" The answer is?","completion":" cutting"}
64
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Expose the insides for viewing.\" The answer is?","completion":" cutting"}
65
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Provide an inside view for me.\" The answer is?","completion":" cutting"}
66
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you reveal the inside of the Protease Enzyme?\" The answer is?","completion":" cutting"}
67
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you please expose the insides of the Inner Membrane?\" The answer is?","completion":" cutting"}
68
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I'd like to see the inside of the Spacer Protein 2, can you help?\" The answer is?","completion":" cutting"}
69
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you please uncover the insides of the Inner Viral Membrane?\" The answer is?","completion":" cutting"}
70
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I'd like to see the inside of the Spacer RNA, can you help?\" The answer is?","completion":" cutting"}
71
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"i want to see a close up of the bottom\" The answer is?","completion":" visual"}
72
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"turn it a little to the right\" The answer is?","completion":" visual"}
73
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"show me the capsid protein from the bottom\" The answer is?","completion":" visual"}
74
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Show me a close-up of the Matrix Protein.\" The answer is?","completion":" visual"}
75
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you zoom out on the Envelope Glycoprotein?\" The answer is?","completion":" visual"}
76
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you turn it to the right?\" The answer is?","completion":" visual"}
77
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I want to see the top.\" The answer is?","completion":" visual"}
78
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you give me a different view?\" The answer is?","completion":" visual"}
79
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I want to see it from above.\" The answer is?","completion":" visual"}
80
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you give me a better view?\" The answer is?","completion":" visual"}
81
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you rotate it 90 degrees?\" The answer is?","completion":" visual"}
82
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Could you show me a side view?\" The answer is?","completion":" visual"}
83
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"I would like to zoom in on this section, if you don't mind.\" The answer is?","completion":" visual"}
84
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Can you rotate it to the top, if you can?\" The answer is?","completion":" visual"}
85
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"What are the potential targets for antiviral drugs or vaccines and how could they be designed to disrupt the virus's lifecycle\" The answer is?","completion":" content"}
86
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"how does the virus reproduce\" The answer is?","completion":" content"}
87
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation�� means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"Describe the process of how the virus infects a host cell\" The answer is?","completion":" content"}
88
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"show me what the capsid is composed of\" The answer is?","completion":" cutting"}
89
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"go back to the capsid\" The answer is?","completion":" navigation"}
90
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"move a level up\" The answer is?","completion":" navigation"}
91
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"go back to the start\" The answer is?","completion":" navigation"}
92
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"turn a little to the right\" The answer is?","completion":" visual"}
93
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"rotate up\" The answer is?","completion":" visual"}
94
+ {"prompt":"I have a classification task with five labels: \"visual\", \"content\", \"navigation\", \"cutting plane\" and \"error\", for each input, you need to select one label as the output. Let's think step by step. \"visual\" label means the rotation, zoom in\/out, camera movement, etc. transformation actions. \"content\" label refers to questions related to biology molecular concepts. It's very important that the question is related to biology concepts. Or the input is similar to \"please explaie more\". \"navigation“ means you want to move\/go to another node, or go back, reset actions. You might say: \"show me something\". \"cutting plane\" means you want to see the inside of the object. If the input cannot match to \"visual\", \"content\", \"navigation\", \"cutting plane\", it will be defined as \"error\" label. You only need to return the label without any other words. Now the input is: \"zoom in close please\" The answer is?","completion":" visual"}
Data/train.jsonl ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Can the CV Toolkit analyze real-time data from Mockpanel?"}, {"role": "assistant", "content": "Yes, the CV Toolkit is designed to process and analyze real-time data from Mockpanel, providing immediate feedback and insights, which is crucial for dynamic training environments."}]}
2
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How does the CV Toolkit contribute to safety in training using Mockpanel?"}, {"role": "assistant", "content": "By accurately simulating and analyzing scenarios, the CV Toolkit helps trainees recognize and respond to potential hazards safely, thus enhancing safety awareness without exposing them to real risks."}]}
3
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What software platforms does the CV Toolkit support?"}, {"role": "assistant", "content": "The CV Toolkit is compatible with various software platforms, primarily those supporting Python and OpenCV, allowing for versatile integration with other industrial software tools."}]}
4
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How is data from the CV Toolkit typically used?"}, {"role": "assistant", "content": "Data from the CV Toolkit is used for training purposes, performance monitoring, and enhancing operational efficiency by providing detailed insights and analyses of the captured images."}]}
5
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Does the CV Toolkit require Internet connectivity?"}, {"role": "assistant", "content": "While Internet connectivity enhances the functionality of the CV Toolkit by enabling updates and remote access, it can operate locally with no internet connection for most processing tasks."}]}
6
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What types of cameras are compatible with the CV Toolkit?"}, {"role": "assistant", "content": "The CV Toolkit is compatible with various types of cameras, including standard USB webcams, industrial cameras, and even advanced imaging systems, provided they meet the required resolution and framerate specifications."}]}
7
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How does the CV Toolkit process images from Mockpanel?"}, {"role": "assistant", "content": "Images captured from Mockpanel are processed using advanced algorithms in OpenCV to detect, analyze, and interpret different artifacts, converting visual data into actionable insights."}]}
8
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Can the CV Toolkit differentiate between different colors in safety lights?"}, {"role": "assistant", "content": "Yes, the CV Toolkit can accurately differentiate between colors in safety lights, using color detection algorithms to interpret status signals like green, yellow, and red."}]}
9
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What is the resolution requirement for cameras used with the CV Toolkit?"}, {"role": "assistant", "content": "For optimal performance, cameras used with the CV Toolkit should have a minimum resolution of 720p, though higher resolutions like 1080p are recommended for more detailed analysis."}]}
10
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How are updates to the CV Toolkit handled?"}, {"role": "assistant", "content": "Updates to the CV Toolkit are typically handled through software patches that can be downloaded and installed. These updates may include improvements to algorithms, additional features, or security enhancements."}]}
11
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What are the main components of the CV Toolkit?"}, {"role": "assistant", "content": "The main components of the CV Toolkit include the image capture module, the image processing module, and the data output module, each integral to handling and analyzing visual data from Mockpanel."}]}
12
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Does the CV Toolkit support multi-camera setups?"}, {"role": "assistant", "content": "Yes, the CV Toolkit supports multi-camera setups, allowing for simultaneous data capture from multiple angles, enhancing the accuracy and comprehensiveness of the analysis."}]}
13
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What programming languages does the CV Toolkit support?"}, {"role": "assistant", "content": "The CV Toolkit primarily supports Python, utilizing libraries such as OpenCV for image processing, which is widely recognized for its robustness and versatility in handling computer vision tasks."}]}
14
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Can the CV Toolkit interface with other industrial software systems?"}, {"role": "assistant", "content": "Yes, the CV Toolkit can interface with other industrial software systems for enhanced data integration and management. It is designed to work seamlessly with systems that support data import/export in common formats."}]}
15
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How is the CV Toolkit used in training scenarios?"}, {"role": "assistant", "content": "In training scenarios, the CV Toolkit is used to simulate and analyze real-world industrial situations, allowing trainees to practice and refine their skills in a safe and controlled environment without the risk of damaging actual equipment."}]}
16
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "What challenges does the CV Toolkit address in industrial settings?"}, {"role": "assistant", "content": "The CV Toolkit addresses several challenges in industrial settings, including the need for precise data capture, real-time processing, and the ability to analyze complex visual data from machinery to improve operational efficiency and safety."}]}
17
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "Can the CV Toolkit be used to monitor machine health?"}, {"role": "assistant", "content": "Absolutely, the CV Toolkit can be utilized to monitor machine health by analyzing visual indicators and parameters from the Mockpanel, helping in predictive maintenance and avoiding costly downtimes."}]}
18
+ {"messages": [{"role": "system", "content": "The assistant is knowledgeable about the computer vision toolkit (CV Toolkit) used in Mockpanel. Mockpanel is a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes. It includes components like gauges, switches, and displays. A Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit."}, {"role": "user", "content": "How does the CV Toolkit handle different lighting conditions in image processing?"}, {"role": "assistant", "content": "The CV Toolkit includes advanced image processing algorithms capable of adjusting to different lighting conditions, ensuring accurate data capture and analysis regardless of variations in lighting."}]}
Files/COMPUTER VISION TOOLKIT INSTRUCTION.txt ADDED
@@ -0,0 +1,829 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ COMPUTER VISION TOOLKIT INSTRUCTION MANUAL
2
+ Version 2.0
3
+ UNIVERSITY OF CINCINNATI
4
+ i
5
+ Contents
6
+ I. CVT Software Start Page: ............................................................................................................................... 1
7
+ 1. Launch the CVT Software: .......................................................................................................................... 1
8
+ 2. Selecting the Directory: ............................................................................................................................... 4
9
+ 3. Take Snapshot: ............................................................................................................................................. 5
10
+ 4. Selection of Relevant Applications:............................................................................................................. 7
11
+ II. OCR APPLICATION: ................................................................................................................................. 7
12
+ 1. Launch the CVT Software: .......................................................................................................................... 7
13
+ 2. Select the OCR Button: ................................................................................................................................ 8
14
+ 3. Select Region(s) of Interest for the OCR. .................................................................................................... 9
15
+ 4. OCR Setup: ................................................................................................................................................ 13
16
+ III. Knob Application: ...................................................................................................................................... 18
17
+ 1. Launch the CVT Software: ........................................................................................................................ 18
18
+ 2. Select the Knob Button: ............................................................................................................................. 19
19
+ 3. Select Region(s) of Interest for the Knob(s): ............................................................................................. 20
20
+ 4. Knob - 1 Setup: .......................................................................................................................................... 23
21
+ 5. Knob - 2 Setup: .......................................................................................................................................... 36
22
+ 6. Knob - 3 Setup: .......................................................................................................................................... 52
23
+ IV. Seven Segment Application: ...................................................................................................................... 73
24
+ 1. Launch the CVT Software: ........................................................................................................................ 73
25
+ 2. Select the Seven Segment Button: ............................................................................................................. 73
26
+ 3. Select Region(s) of Interest for the Seven Segment: ................................................................................. 74
27
+ 4. Seven Segment Setup:................................................................................................................................ 78
28
+ V. Safety Lights Application: ......................................................................................................................... 91
29
+ 1. Launch the CVT Software: ........................................................................................................................ 91
30
+ 2. Select the Safety Lights Button: ................................................................................................................ 91
31
+ 3. Safety Lights Setup: ................................................................................................................................... 92
32
+ VI. Fixture Application: ................................................................................................................................. 101
33
+ 1. Launch the CVT Software: ...................................................................................................................... 101
34
+ 2. Select the Fixture Button: ........................................................................................................................ 101
35
+ 3. Select Region(s) of Interest for the Fixture and Setup:............................................................................ 105
36
+ VII. Liquid Level Application: ........................................................................................................................ 114
37
+ 1. Launch the CVT Software: ...................................................................................................................... 114
38
+ 2. Select the Liquid Level Button: ............................................................................................................... 114
39
+ 3. Select Region(s) of Interest for the Liquid Level and Setup: .................................................................. 115
40
+ ii
41
+ VIII. Toggle Switch Application: ................................................................................................................. 122
42
+ 1. Launch the CVT Software: ...................................................................................................................... 122
43
+ 2. Select the Toggle Switch Button: ............................................................................................................ 122
44
+ 3. Select Region(s) of Interest for the Toggle Switch:................................................................................. 123
45
+ 4. Toggle Switch 1 Setup: ............................................................................................................................ 125
46
+ 5. Toggle Switch 2 Setup: ............................................................................................................................ 134
47
+ IX. Gauge Application (Circular Gauge): ...................................................................................................... 143
48
+ 1. Launch the CVT Software: ...................................................................................................................... 143
49
+ 2. Select the Gauge Button: ......................................................................................................................... 143
50
+ 3. Select Region(s) of Interest for the Gauge:.............................................................................................. 144
51
+ 4. Circular Gauge Setup: .............................................................................................................................. 144
52
+ X. Gauge Application (Horizontal Linear Gauge): ...................................................................................... 156
53
+ 1. Launch the CVT Software: ...................................................................................................................... 156
54
+ 2. Select the Gauge Button: ......................................................................................................................... 156
55
+ 3. Select Region(s) of Interest for the Gauge:.............................................................................................. 157
56
+ 4. Horizontal Linear Gauge Setup: .............................................................................................................. 161
57
+ XI. Gauge Application (Arc Gauge): ............................................................................................................. 176
58
+ 1. Launch the CVT Software: ...................................................................................................................... 176
59
+ 2. Select the Gauge Button: ......................................................................................................................... 176
60
+ 3. Select Region(s) of Interest for the Gauge:.............................................................................................. 177
61
+ 4. Arc Gauge Setup: ..................................................................................................................................... 181
62
+ iii
63
+ 1
64
+ I. CVT Software Start Page:
65
+ 1. Launch the CVT Software:
66
+ Copy/Download the folder Computer_Vision_Software-Version_x from shared drive on to the desktop.
67
+ Open the folder and make sure that the folder contains CVT.py file.
68
+ 2
69
+ Right click anywhere in this directory and select “Open in Terminal”
70
+ A new window with terminal opens: (shown in image below)
71
+ Type "python3 CVT.py" and press return/enter
72
+ 3
73
+ This will launch the start page of the CVT software as shown in the figure below.
74
+ Press “Start” button to go to home page of CVT software. The Image of the User Interface (UI) is shown below:
75
+ 4
76
+ Under this window, there are multiple buttons to select Application based on the artifacts.
77
+ The Application buttons are:
78
+  Gauge
79
+  OCR
80
+  Knob
81
+  Seven Segment
82
+  Safety Lights
83
+  Fixture
84
+  Liquid Level
85
+  Toggle Switches
86
+ Directory Selection and reference image buttons:
87
+  Take Snapshot!
88
+  Select Directory
89
+ 2. Selecting the Directory:
90
+ Click on the “Select Directory” button and a window with directory selection opens.
91
+ 5
92
+ Select the appropriate directory or Create a new directory where user wants to save the reference images for the artifacts.
93
+ Note:
94
+ Do not include spaces while naming the directory or filename!
95
+ 3. Take Snapshot:
96
+ This button allows the user to take one snapshot at a time and save it to the directory selected in step 2.
97
+ Click on the button “Take Snapshot!” and a window with file name as “Current_Date.jpg” or “Current_date.png” appears.
98
+ 6
99
+ Rename the snapshot according to user’s convenience.
100
+ For example, snapshot name is mentioned as “Raytheonknobs_Moment_1.png”.
101
+ Based on the number of artifacts, the user will save corresponding images for each Application to train the CVT software.
102
+ 1. OCR Application: No need to save any reference image.
103
+ 2. Knobs Application: User will save ‘n’ number of reference images, where ‘n’ is the number of positions of the knob.
104
+ 7
105
+ 3. Seven Segment Application: No need to save any reference image.
106
+ 4. Safety Lights Application: User has to save a reference image of the safety light tower. Use of this reference image will be discussed in later sections.
107
+ 5. Fixture Application: User will save one reference image of the Fixture table with fixtures arranged in a predefined pattern. Use of this reference image will be discussed in later sections.
108
+ 6. Liquid Level Application: User will save one reference image of Liquid bath tank. Use of this reference image will be discussed in later sections.
109
+ 7. Toggle Switch Application: User will save two reference images, one of them showing toggle switch at ON position and the other image showing OFF position.
110
+ 8. Gauge Application (Circular Gauge): No need to save any reference images.
111
+ 9. Gauge Application (Arc Gauge): No need to save any reference images.
112
+ 10. Gauge Application (Horizontal Linear Gauge): No need to save any reference images.
113
+ 11. Gauge Application (Vertical Linear Gauge): No need to save any reference images.
114
+ 4. Selection of Relevant Applications:
115
+ Based on different artifacts, select the appropriate buttons and click Next button.
116
+ For example, if the camera view has two different artifacts like Toggle Switches and Knobs, User will select Toggle Switch and Knob buttons and click Next.
117
+ II. OCR APPLICATION:
118
+ 1. Launch the CVT Software:
119
+ Launch the CV Software by following the instructions in Chapter 1.
120
+ 8
121
+ 2. Select the OCR Button:
122
+ Select "OCR" (or multiple) button(s) on this page and click Next.
123
+ The CV Software will display the image, asking the user to select Regions of Interest for each artifact selected. In this case, it's only one artifact, OCR.
124
+ 9
125
+ Click “Align” if the displayed image is not parallel to camera sensor. This can be done by selecting 4 points on the image which represent corners of a rectangle or square on the image. (Note: The 4 points need to be selected in cyclic manner).
126
+ After the 4 points are selected, press “Spacebar” on your keyboard, now you’ll see a new window with a rectangle drawn between these 4 points. Press “Spacebar” again to confirm.
127
+ After this, the image is realigned and is made parallel to camera sensor. Press “Spacebar” again to accept the changes.
128
+ The displayed image in the UI gets realigned according to this information.
129
+ (Use of “Align” button is discussed in Fixture Application)
130
+ 3. Select Region(s) of Interest for the OCR.
131
+ Select “Select Focus Area” button to select a relatively smaller area on the displayed image.
132
+ 10
133
+ New window pops up asking user to “Select the Focus Area”. After selecting the focus area, press “Okay” button to close the window.
134
+ A new window “Select the region of interest” pops up with cropped image based on the Focus Area.
135
+ 11
136
+ Now draw Region(s) of interest on this Focus Area and click “Okay” to close the window.
137
+ 12
138
+ A bounding box in red color is drawn along with a number on top indicating which ROI the selection is. The ROI(s) are resized back to the original image displayed in the UI.
139
+ After selecting Regions of interest, press “Next/Skip” button.
140
+ 13
141
+ By default, the entire frame is considered as the region of interest if no regions are selected.
142
+ 4. OCR Setup:
143
+ In the “OCR Name” field enter a name for the OCR under consideration.
144
+ For example, “Pressure” as shown in figure below.
145
+ Enter the appropriate measurement units of the gauge in the field “Measurement Units” field.
146
+ Note:
147
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
148
+ 14
149
+ For example, “PSI” as shown in figure below.
150
+ If the OCR Application always detects the numbers, then check “Just Number” box.
151
+ 15
152
+ Do Preliminary image processing by varying the different sliders on this page, so that the characters in black and white image are clear.
153
+ For example, “Gaussian Blur” slider is set to “1” to reduce the noise.
154
+ Click “Apply” and new window displays the OCR image with the current detected OCR value.
155
+ 16
156
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected OCR value is shown in the following image.
157
+ 17
158
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings. If the values are correct, click “Okay” button.
159
+ Note:
160
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
161
+ Click “Next” button to complete the training process as shown in the figure below.
162
+ 18
163
+ If there is error in detected value, press “Retrain” button to train the software again.
164
+ III. Knob Application:
165
+ 1. Launch the CVT Software:
166
+ Launch the CV Software by following the instructions in Chapter 1.
167
+ 19
168
+ 2. Select the Knob Button:
169
+ Select "Knob" (or multiple) button(s) on this page and click Next.
170
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, there are three Knobs that need monitoring.
171
+ 20
172
+ 3. Select Region(s) of Interest for the Knob(s):
173
+ Now draw Region(s) of interest on the image shown in the UI.
174
+ First, draw the Region of Interest for Knob1
175
+ A bounding box in red color is drawn along with a number on top indicating which ROI the selection is, as shown in the figure below.
176
+ 21
177
+ Now draw the second Region of Interest for the second Knob as shown in the figure below.
178
+ Now draw the third Region of Interest for the third Knob as shown in the figure below.
179
+ After selecting Regions of interest, press “Next/Skip” button as shown in the figure below.
180
+ 22
181
+ By default, the entire frame is considered as the region of interest if no regions are selected.
182
+ 23
183
+ 4. Knob - 1 Setup:
184
+ After the previous step, the UI will show a black and white image of the first Knob along with some empty fields and sliders to train the software to detect positions of the Knob.
185
+ Note:
186
+ For a given application, the first image displayed is Region of Interest-1, Region of Interest-2 and Region of Interest-3 image appear sequentially once the training process is completed for first Knob.
187
+ 24
188
+ In the “Knob Name” field, enter a name for the Knob under consideration. For example, “Knob1” as shown in figure below.
189
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
190
+ Note:
191
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
192
+ 25
193
+ For example, “PERCENT” as shown in figure below.
194
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
195
+ 26
196
+ For Example, “Raytheonknobs_Moment_1.jpg” is selected as shown in the following example.
197
+ Next enter “1” in the “Position Value” field.
198
+ 27
199
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
200
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
201
+ 28
202
+ For Example, “Raytheonknobs_Moment_25.png” is selected as shown in the following example.
203
+ Next enter “25” in the “Position Value” field.
204
+ 29
205
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
206
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
207
+ 30
208
+ For Example, “Raytheonknobs_Moment_50.png” is selected as shown in the following example.
209
+ Next enter “50” in the “Position Value” field.
210
+ 31
211
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
212
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
213
+ 32
214
+ For Example, “Raytheonknobs_Moment_100.png” is selected as shown in the following example.
215
+ Next enter “100” in the “Position Value” field.
216
+ Do Preliminary image processing by moving the sliders “Median Blur” and “Morphological Open” as shown in the examples below.
217
+ 33
218
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “3”. (For most cases, this value works better)
219
+ “Morphological Open” operation is another way of making sure that the lines and various contours on the images are unique to this position. In the following example, “Morphological Open” slider is set at value “2”. (For most cases, this value works better)
220
+ 34
221
+ Click “Apply” and new window displays the Knob image with the current detected value of Knob1.
222
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Knob1 value is shown in the following image.
223
+ 35
224
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings. If the values are correct, click “Okay” button.
225
+ Note:
226
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
227
+ Click “Next” button to train the software to detect position values of second Knob.
228
+ 36
229
+ 5. Knob - 2 Setup:
230
+ In the “Knob Name” field enter a name for the Knob under consideration.
231
+ For example, “Knob2” as shown in figure below.
232
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
233
+ Note:
234
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
235
+ 37
236
+ For example, “PERCENT” as shown in figure below.
237
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
238
+ For Example, “Raytheonknobs_Moment_30.png” is selected as shown in the following example.
239
+ 38
240
+ Next, enter “30” in the “Position Value” field.
241
+ 39
242
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
243
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
244
+ 40
245
+ For Example, “Raytheonknobs_Moment_70.png” is selected as shown in the following example.
246
+ Next enter “70” in the “Position Value” field.
247
+ 41
248
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
249
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
250
+ 42
251
+ For Example, “Raytheonknobs_Moment_100.png” is selected as shown in the following example.
252
+ Next, enter “100” in the “Position Value” field.
253
+ 43
254
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
255
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
256
+ 44
257
+ For Example, “Raytheonknobs_Moment_120.png” is selected as shown in the following example.
258
+ Next enter “120” in the “Position Value” field.
259
+ 45
260
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
261
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
262
+ 46
263
+ For Example, “Raytheonknobs_Moment_140.png” is selected as shown in the following example.
264
+ Next, enter “140” in the “Position Value” field.
265
+ 47
266
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
267
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
268
+ 48
269
+ For Example, “Raytheonknobs_Moment_180.png” is selected as shown in the following example.
270
+ Next, enter “180” in the “Position Value” field.
271
+ Do Preliminary image processing by moving the sliders “Median Blur” and “Morphological Open” as shown in the examples below.
272
+ 49
273
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “3”. (For most cases, this value works better)
274
+ “Morphological Open” operation is another way of making sure that the lines and various contours on the images are unique to this position. In the following example, “Morphological Open” slider is set at value “2”. (For most cases, this value works better)
275
+ 50
276
+ Click “Apply” and new window displays the Knob image with the current detected value of Knob2.
277
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Knob2 value is shown in the following image.
278
+ 51
279
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings. If the values are correct, click “Okay” button.
280
+ Note:
281
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
282
+ Click “Next” button to train the software to detect position values of third Knob.
283
+ 52
284
+ 6. Knob - 3 Setup:
285
+ In the “Knob Name” field enter a name for the Knob under consideration.
286
+ For example, “Knob3” as shown in figure below.
287
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
288
+ Note:
289
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
290
+ 53
291
+ For example, “PERCENT” as shown in figure below.
292
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
293
+ 54
294
+ For Example, “Raytheonknobs_Moment_50.png” is selected as shown in the following example.
295
+ Next enter “50” in the “Position Value” field.
296
+ 55
297
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
298
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
299
+ 56
300
+ For Example, “Raytheonknobs_Moment_60.png” is selected as shown in the following example.
301
+ Next, enter “60” in the “Position Value” field.
302
+ 57
303
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
304
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
305
+ 58
306
+ For Example, “Raytheonknobs_Moment_70.png” is selected as shown in the following example.
307
+ Next, enter “70” in the “Position Value” field.
308
+ 59
309
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
310
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
311
+ 60
312
+ For Example, “Raytheonknobs_Moment_80.png” is selected as shown in the following example.
313
+ Next, enter “80” in the “Position Value” field.
314
+ 61
315
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
316
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
317
+ 62
318
+ For Example, “Raytheonknobs_Moment_90.png” is selected as shown in the following example.
319
+ Next, enter “90” in the “Position Value” field.
320
+ 63
321
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
322
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
323
+ 64
324
+ For Example, “Raytheonknobs_Moment_100.png” is selected as shown in the following example.
325
+ Next, enter “100” in the “Position Value” field.
326
+ 65
327
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
328
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
329
+ 66
330
+ For Example, “Raytheonknobs_Moment_110.png” is selected as shown in the following example.
331
+ Next, enter “110” in the “Position Value” field.
332
+ 67
333
+ Click “Add Position” button to add new empty fields to upload reference image for second position and enter respective value for this position.
334
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
335
+ 68
336
+ For Example, “Raytheonknobs_Moment_120.png” is selected as shown in the following example.
337
+ Next, enter “120” in the “Position Value” field.
338
+ Do Preliminary image processing by moving the sliders “Median Blur” and “Morphological Open” as shown in the examples below.
339
+ 69
340
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “3”. (For most cases, this value works better)
341
+ “Morphological Open” operation is another way of making sure that the lines and various contours on the images are unique to this position. In the following example, “Morphological Open” slider is set at value “2”. (For most cases, this value works better)
342
+ Click “Apply” and new window displays the Knob image with the current detected value of Knob3
343
+ 70
344
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Knob3 value is shown in the following image.
345
+ 71
346
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
347
+ If the values are correct, click “Okay” button.
348
+ Note:
349
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the software detects the correct values.
350
+ 72
351
+ Click “Next” button to complete the training process for Knobs.
352
+ The software is now trained to detect positions of all three Knobs as shown below.
353
+ If there is an error in detected value, press “Retrain” button to train the software again.
354
+ 73
355
+ IV. Seven Segment Application:
356
+ 1. Launch the CVT Software:
357
+ Launch the CV Software by following the instructions in Chapter 1.
358
+ 2. Select the Seven Segment Button:
359
+ Select "Seven Segment" (or multiple) button(s) on this page and click Next.
360
+ 74
361
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, it's only one artifact, Seven Segment.
362
+ 3. Select Region(s) of Interest for the Seven Segment:
363
+ Click “Select Focus Area” button to select a relatively smaller area on the displayed image.
364
+ 75
365
+ New window pops up asking user to “Select the Focus Area”. After selecting the focus area, press “Okay”.
366
+ New window pops up asking user to “Select the Region(s) of Interest”.
367
+ 76
368
+ Now draw Region(s) of interest on this Focus Area as shown below.
369
+ After selecting the regions of interest, press “Okay”.
370
+ 77
371
+ A bounding box in red color is drawn along with a number on top indicating which ROI the selection is. The ROI(s) are resized back to the original image displayed in the UI.
372
+ After selecting Regions of interest, press “Next/Skip” button.
373
+ By default, the entire frame is considered as the region of interest if no regions are selected.
374
+ 78
375
+ 4. Seven Segment Setup:
376
+ In the “Seven Segment Name” field enter a name for the Seven Segment under consideration.
377
+ For example, “SevenSegment” as shown in figure below.
378
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
379
+ Note:
380
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
381
+ 79
382
+ Enter “NONE” as shown below.
383
+ Do preliminary image processing by varying the different sliders on this page, so that the characters in black and white image are clear.
384
+ Move the “Gaussian Blur” slider to reduce the noise. For example, it is set to “3” as shown in figure below.
385
+ 80
386
+ Use of “Dilate” slider is to reduce the thickness of the vertical and horizontal segments if the segments are overly thick. For example, “Dilate” slider is set to a value of 5 and thickness of the segments reduces significantly as shown below.
387
+ To get an optimum thickness of the segments, the “Dilate” value is set to ‘2’ as shown below.
388
+ 81
389
+ “Horizontal Dilate” slider helps in thinning the vertical segments. For example, “Horizontal Dilate” is set to a value of ‘5’, the vertical segments become thinner and introduce gaps between segments as shown below.
390
+ Now, to get an optimum thickness of horizontal segments, “Horizontal Dilate” is set at a value ‘2’ as shown below.
391
+ 82
392
+ “Vertical Dilate” slider helps in thinning the horizontal segments. For example, “Vertical Dilate” is set to a value of ‘5’, the horizontal segments become thinner segments as shown below. (Any further increase in the value introduces gaps between the segments).
393
+ Now, to get an optimum thickness of horizontal segments, “Vertical Dilate” is set at a value ‘2’ as shown below.
394
+ 83
395
+ Use of “Erode” slider is to increase the thickness of the vertical and horizontal segments if the segments are disjointed or extremely thin. For example, “Erode” slider is set to a value of ‘4’ and thickness of the segments increases significantly as shown below.
396
+ To get an optimum thickness of the segments, the “Erode” value is set to ‘1’ and the “Dilate” is set to ‘1’ to compensate for thickness.
397
+ 84
398
+ “Horizontal Erode” slider helps in thickening the vertical segments. For example, “Horizontal Erode” is set to a value of ‘6’, the vertical segments become thicker and introduce connectivity between two numbers as shown below.
399
+ Now, to get an optimum thickness of vertical segments, “Horizontal Erode” is set at a value ‘1’ as shown below.
400
+ 85
401
+ “Vertical Erode” slider helps in thickening the horizontal segments. For example, “Vertical Erode” is set to a value of ‘4’, the horizontal segments become thicker as shown below. (Any further increase in the value closes the gaps inside each number).
402
+ Now, to get an optimum thickness of horizontal segments, “Vertical Erode” is set at a value ‘1’ as shown below.
403
+ 86
404
+ For, “Erode”, “Dilate”, “Horizontal Dilate”, “Vertical Dilate”, “Horizontal Erode” and “Vertical Erode”, there isn’t one fixed value combination that would work for all types of Seven Segment Display and/or lighting conditions. User can move these sliders in different combinations to get an optimum vertical and horizontal thickness for the displayed digits.
405
+ Next, check “Draw Lines” box. This will create bounding boxes around each displayed digit.
406
+ 87
407
+ Now use the “Skew” slider to change the digits’ orientation such that the vertical segments are parallel to vertical lines of the bounding box and the horizontal segments are parallel to horizontal lines as shown in the example below.
408
+ Now use “Vertical Segment Width” to specify the vertical segment thickness for each digit as shown below. (Green Vertical Lines)
409
+ 88
410
+ Similarly use “Horizontal Segment Width” to specify the horizontal segments’ thickness as shown below. (Blue Horizontal Lines)
411
+ Click “Apply” and new window displays the Seven Segment image with the current detected Seven Segment value.
412
+ 89
413
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Seven Segment value is shown in the following image.
414
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings. If the values are correct, click “Okay” button.
415
+ 90
416
+ Note:
417
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
418
+ Click “Next” button to complete the training process as shown in the figure below.
419
+ If there is error in detected value, press “Retrain” button to train the software again.
420
+ 91
421
+ V. Safety Lights Application:
422
+ 1. Launch the CVT Software:
423
+ Launch the CV Software by following the instructions in Chapter 1.
424
+ 2. Select the Safety Lights Button:
425
+ Select "Safety Lights" (or multiple) button(s) on this page and click Next.
426
+ 92
427
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, it's only one artifact, Safety Lights. Click “Next/Skip” to go Next Page.
428
+ By default, the entire frame is considered as the region of interest if no regions are selected.
429
+ 3. Safety Lights Setup:
430
+ In the “Safety Lights Name” field enter a name for the Safety Light under consideration.
431
+ For example, “SafetyLights” as shown in figure below.
432
+ 93
433
+ Select “Browse” button next to “Reference Image” field to select a reference image for the current artifact.
434
+ Click “Select Region” button next to “Light Color” field. The reference image opens in new window.
435
+ 94
436
+ Draw the region of interest of the first light color and press “Okay” as shown in example below.
437
+ Enter “Red” as the Light Color as shown in figure below.
438
+ 95
439
+ Select “Add Light Color”, an additional “Light Color” field appears along with the “Select Region” button.
440
+ Click “Select Region” button next to “Light Color” field. The reference image opens in new window.
441
+ 96
442
+ Draw the region of interest of the second light color and press “Okay” as shown in example below.
443
+ Enter “Yellow” as the Light Color as shown in figure below.
444
+ 97
445
+ Select “Add Light Color”, an additional “Light Color” field appears along with the “Select Region” button.
446
+ Click “Select Region” button next to “Light Color” field. The reference image opens in new window. Draw the region of interest of the third light color and press “Okay” as shown in example below.
447
+ 98
448
+ Enter “Green” as the Light Color as shown in figure below.
449
+ Click “Apply” and new window displays the Safety Light image with the current detected Safety Light value. In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. (If none of the lights are ON, then “off” result will be displayed)
450
+ 99
451
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
452
+ If the values are correct, click “Okay” button.
453
+ 100
454
+ Note:
455
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
456
+ Click “Next” button to complete the training process as shown in the figure below.
457
+ If there is error in detected value, press “Retrain” button to train the software again.
458
+ 101
459
+ VI. Fixture Application:
460
+ 1. Launch the CVT Software:
461
+ Launch the CV Software by following the instructions in Chapter 1.
462
+ 2. Select the Fixture Button:
463
+ Select "Fixture" (or multiple) button(s) on this page and click Next.
464
+ 102
465
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, it's only one artifact, Fixture.
466
+ If the displayed image is not parallel to camera sensor, as is the case in this example, click “Align” and a new window “preview” pops up allowing User to align the image.
467
+ 103
468
+ This can be done by selecting 4 points on the image which represent corners of a rectangle or square on the image. (Note: The 4 points need to be selected in cyclic manner).
469
+ After the 4 points are selected, press “Spacebar” on your keyboard, now you will see a new window with a rectangle drawn between these 4 points. Press “Spacebar” again to confirm.
470
+ 104
471
+ After this, the image is realigned and is made parallel to camera sensor. Press “Spacebar” again to accept the changes.
472
+ The display image in the UI gets realigned according to this information.
473
+ 105
474
+ 3. Select Region(s) of Interest for the Fixture and Setup:
475
+ Select the Region of Interest of entire fixture table in realigned image and click “Next/Skip” as shown in figure below.
476
+ In the “Fixture Name” field enter a name for the Fixture Configuration under consideration.
477
+ For example, “Fixture” as shown in figure below.
478
+ 106
479
+ Click “Select Image” button next to “Select Reference Image” field.
480
+ Select a reference image of pre-arranged fixture configuration. (As saved in chapter 1)
481
+ 107
482
+ A new window “Reference Image” opens and in this window user will select the regions of interest of each fixture in any order.
483
+ Note:
484
+ The order selected by the user remains constant throughout as well as in saved configuration.
485
+ Select regions of interest of fixtures as shown below.
486
+ 108
487
+ After selecting the regions of interest, click “Okay” button at the bottom of the window.
488
+ After clicking “Okay”, each of the regions of interest of fixture will be displayed in new window one at a time in the order in which user selected the regions of interest.
489
+ 109
490
+ In this new window, user has to specify the template of the fixture by selecting only the template on the fixture as shown below.
491
+ After selecting the fixture template, use the drop down menu to specify the direction to which the fixture is facing. For example, in the figure shown below the direction of the fixture selected is “E” which denotes East.
492
+ 110
493
+ Click “Okay”.
494
+ Repeat the process of selecting the templates for each fixture and providing their corresponding directions using the drop down menu ‘N’ times.
495
+ Where ‘N’ is the number of fixtures in the predetermined fixture configuration.
496
+ In this example, there are 6 fixtures in the Fixture Configuration, so we repeat the process 6 times. Therefore, the sequential arrangement of fixtures is East, South, North, North, East and South. The entire configuration is represented as “ESNNES”.
497
+ 111
498
+ Click “Apply” and new window displays the correct string arrangement and current detected string as shown below.
499
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Fixture arrangement value is shown in the following image.
500
+ 112
501
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
502
+ If the values are correct, click “Okay” button.
503
+ 113
504
+ User has the option to save the configuration by clicking the “Save Configuration” button.
505
+ After clicking “Save Configuration”, a new window with “Default_name.pkl” will appear. Change the name of the configuration and click “Okay” to save the configuration.
506
+ Every time there is change in fixture configuration, the User has to retrain the software to recognize the new configuration. Instead of retraining the software every time there is fixture configuration change, the User can use saved configurations to retrain the system if the saved configurations are repeated.
507
+ Note:
508
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
509
+ Click “Next” button to complete the training process as shown in the figure below.
510
+ If there is error in detected value, press “Retrain” button to train the software again.
511
+ 114
512
+ VII. Liquid Level Application:
513
+ 1. Launch the CVT Software:
514
+ Launch the CV Software by following the instructions in Chapter 1.
515
+ 2. Select the Liquid Level Button:
516
+ Select "Liquid Level" (or multiple) button(s) on this page and click Next.
517
+ 115
518
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, it's only one artifact, Liquid Level.
519
+ 3. Select Region(s) of Interest for the Liquid Level and Setup:
520
+ Select the Region of Interest of entire water tank (water bath) displayed in the UI and click “Next/Skip” as shown in figure below.
521
+ By default, the entire frame is considered as the region of interest if no regions are selected.
522
+ 116
523
+ In the “Liquid Level Name” field enter a name for the Liquid Level under consideration.
524
+ For example, “LiquidLevel” as shown in figure below. (No spaces between the name)
525
+ Click “Select Reference Image” button to select a reference image of the water tank saved in Chapter 1.
526
+ 117
527
+ Select the reference image and click “Open”
528
+ A new window “Reference Image” opens as shown below.
529
+ 118
530
+ In this window User will draw lines to indicate the maximum allowable Liquid Level (Red Line) and the current Liquid Level (Green Line).
531
+ Note:
532
+ User can right click to undo the above selection.
533
+ Draw Red Line to indicate maximum water level as shown below.
534
+ Draw Green Line to indicate current water level as shown below.
535
+ 119
536
+ Click “Okay” to accept the marked Liquid Levels.
537
+ Click “Apply” and a new window “Detected Liquid Level” displays the current detected value in RED
538
+ 120
539
+ For example, the status of Liquid Level is “OK” as shown below.
540
+ Note:
541
+ If the Liquid Level crosses the maximum water level an “ALERT” message is sent via MTConnect.
542
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
543
+ 121
544
+ Click “Next” button to complete the training process as shown in the figure below.
545
+ If there is error in detected value, press “Retrain” button to train the software again.
546
+ 122
547
+ VIII. Toggle Switch Application:
548
+ 1. Launch the CVT Software:
549
+ Launch the CV Software by following the instructions in Chapter 1.
550
+ 2. Select the Toggle Switch Button:
551
+ Select "Toggle Switch" (or multiple) button(s) on this page and click Next.
552
+ 123
553
+ The CV Software will display the image asking user to select Regions of Interest for each artifact selected. In this case, there are two Toggle Switches.
554
+ 3. Select Region(s) of Interest for the Toggle Switch:
555
+ First, draw the Region of Interest for Toggle Switch 1
556
+ A bounding box in red color is drawn along with a number on top indicating which ROI the selection is, as shown in the figure below.
557
+ 124
558
+ Now draw the second Region of Interest for the second Toggle Switch as shown in the figure below.
559
+ After selecting Regions of interest, press “Next/Skip” button as shown in the figure below.
560
+ By default, the entire frame is considered as the region of interest if no regions are selected.
561
+ 125
562
+ 4. Toggle Switch 1 Setup:
563
+ After the previous step, the UI will show a black and white image of the first Toggle Switch along with some empty fields and sliders to train the software to detect positions of the Toggle Switch.
564
+ Note:
565
+ For a given application, the first image displayed is Region of Interest-1 and Region of Interest-2 image appears sequentially once the training process is completed for first Toggle Switch.
566
+ In the “Toggle Switch Name” field enter a name for the Toggle Switch under consideration.
567
+ For example, “ToggleSwitch1” as shown in figure below.
568
+ 126
569
+ Enter the appropriate measurement units of the Toggle Switch in the “Measurement Units” field.
570
+ Note:
571
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
572
+ Enter “NONE” as shown below.
573
+ 127
574
+ Click “Browse” button next to the “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1.
575
+ For Example, “ToggleSwitch1-ON.png” is selected as shown in the following example.
576
+ 128
577
+ Next enter “ON” in the “Position Value” field.
578
+ Click “Add Position” button to add new empty fields to upload reference image for OFF position and enter respective value for this position.
579
+ 129
580
+ Click “Browse” button next to the “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1. For Example, “ToggleSwitch1-OFF.png” is selected as shown in the following example.
581
+ Next enter “OFF” in the “Position Value” field.
582
+ 130
583
+ Do Preliminary image processing by moving the sliders “Median Blur” and “Morphological Open” as shown in the examples below.
584
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “3”. (For most cases, this value works better)
585
+ 131
586
+ “Morphological Open” operation is another way of making sure that the lines and various contours on the images are unique to this position. In the following example, “Morphological Open” slider is set at value “2”. (For most cases, this value works better)
587
+ Click “Apply” and new window displays the Toggle Switch image with the current detected Toggle Switch value.
588
+ 132
589
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Toggle Switch value is shown in the following image.
590
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
591
+ 133
592
+ If the values are correct, click “Okay” button.
593
+ Note:
594
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
595
+ Click “Next” button to train the software to detect position values of second Toggle Switch.
596
+ 134
597
+ 5. Toggle Switch 2 Setup:
598
+ In the “Toggle Switch Name” field enter a name for the Toggle Switch under consideration.
599
+ For example, “ToggleSwitch2” as shown in figure below.
600
+ Enter the appropriate measurement units of the Toggle Switch in the “Measurement Units” field.
601
+ Note:
602
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field as shown in the figure below.
603
+ 135
604
+ Enter “NONE” as shown below.
605
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1. For Example, “ToggleSwitch2-ON.png” is selected as shown in the following example.
606
+ 136
607
+ Next enter “ON” in the “Position Value” field as shown in the figure below.
608
+ Click “Add Position” button to add new empty fields to upload reference image for OFF position and enter respective value for this position.
609
+ 137
610
+ Click “Browse” button next “Position Image” field and select an appropriate position image saved in the directory as shown in Chapter 1. For Example, “ToggleSwitch2-OFF.png” is selected as shown in the following example.
611
+ Next enter “OFF” in the “Position Value” field.
612
+ 138
613
+ Do Preliminary image processing by moving the sliders “Median Blur” and “Morphological Open” as shown in the examples below.
614
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “3”. (For most cases, this value works better)
615
+ 139
616
+ “Morphological Open” operation is another way of making sure that the lines and various contours on the images are unique to this position. In the following example, “Morphological Open” slider is set at value “2”. (For most cases, this value works better)
617
+ Click “Apply” and new window displays the Toggle Switch image with the current detected Toggle Switch value.
618
+ 140
619
+ In this window the detected value is displayed in RED. Make sure the value displayed is the correct value and close the window. For example, the detected Toggle Switch value is shown in the following image.
620
+ Next, a window with brief live feed from camera will be displayed (a brief video is played in this example) so that the user can vary or monitor the artifact to validate the readings.
621
+ If the values are correct, click “Okay” button.
622
+ 141
623
+ Note:
624
+ If the values are not correct the user can close the windows displaying the detected value and detected values for user validation and readjust the sliders to get an optimum result. This process needs to be done until the correct values are detected by the software.
625
+ Click “Next” button to complete the training process as shown in the figure below.
626
+ The software is now trained to detect positions of both Toggle Switches as shown below.
627
+ If there is error in detected value, press “Retrain” button to train the software again.
628
+ 142
629
+ 143
630
+ IX. Gauge Application (Circular Gauge):
631
+ 1. Launch the CVT Software:
632
+ Launch the CV Software by following the instructions in Chapter 1.
633
+ 2. Select the Gauge Button:
634
+ Select "Gauge" (or multiple) button(s) on this page and click Next.
635
+ 144
636
+ 3. Select Region(s) of Interest for the Gauge:
637
+ Now draw Region(s) of interest on the gauge image shown in the UI and select “Next/Skip” button. (In this case, we are selecting only one region of interest.)
638
+ 4. Circular Gauge Setup:
639
+ After the previous step, the UI will show a black and white image of the gauge region of interest along with some empty fields and sliders to train the software to detect the needle position and the corresponding value.
640
+ In the “Gauge Name” field enter a name for the Gauge under consideration. For example, “circular_gauge” as shown in figure below.
641
+ 145
642
+ From the drop-down menu “Gauge Type”, select the type of the gauge under consideration (By default, it is set to ‘Circular’). For example, “Circular” as shown in figure below.
643
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
644
+ Note:
645
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
646
+ Enter “PSI” as shown in the figure below.
647
+ 146
648
+ Now, user will provide information about the gauge parameters like, minimum value, maximum value, needle length etc., by selecting various other buttons that appear on this page of the UI.
649
+ First, the user will select “Circular Gauge Inputs” button to draw the circle of the gauge, input min and max values.
650
+ After selecting “Circular Gauge Inputs” button, a new window opens as shown in the figure below.
651
+ As shown in figure below, the user will now select three points that form the gauge circle (3 Green points) and click “Okay”
652
+ 147
653
+ After selecting “Okay”, a new window “Identified Circle” appears with a Green Circle and Green Center point superimposed on the gauge circle as shown in figure below.
654
+ On this new window, user will draw two lines and enter the minimum and maximum value of the gauge.
655
+ First, the user will draw a line joining the center of the identified circle and the approximate minimum point on the identified gauge circle. This operation is done by simply left clicking the mouse at the center and dragging the mouse pointer to the minimum value point on the identified circle. Right click to clear the line. This line appears in Red color as shown in the figure below.
656
+ 148
657
+ Like previous step, draw a line joining the center point and the approximate maximum point on the identified gauge circle. This line appears in Blue color as shown in figure below.
658
+ Now, enter the minimum and maximum values of the gauge in the fields “Start Position Value” and “End Position Value” respectively. For this example, enter ‘0’ and ‘350’ for minimum and maximum respectively as shown in the figure below and click “Okay”
659
+ In the next step, the user will crop out the unnecessary part of the circular gauge that appears in the region of interest. This operation is carried out primarily to eliminate possible glare induced by the gauge rim and delete unnecessary pixels.
660
+ 149
661
+ To crop out the Outer circle (Circular Gauge Rim), user need to select the button “Select Gauge Button” as shown in the figure below.
662
+ A new window, “Select Outer Circle” appears as shown in the figure below.
663
+ Now, in this window, user will draw an approximate circle that includes details of the circular gauge like the needle, the inner and outer measurements.
664
+ 150
665
+ This operation is carried out by left clicking the mouse pointer approximately on the top left corner of the imaginary square that contains the circular gradations on the gauge and dragging the mouse pointer diagonally down towards the bottom right corner, as shown in the figures below.
666
+ 151
667
+ If the circle doesn’t contain enough details of the circular gauge, right click on the window to clear the circle and repeat the above steps again until the user obtains an acceptable circle and then click “Okay” to crop the outer ring of the circular gauge as shown below.
668
+ Now, the user will provide the information about the needle length by approximately tracing the gauge needle to draw a line.
669
+ Select “Draw Needle” button as shown in figure below.
670
+ 152
671
+ A new window “Draw Needle” appears. Trace the gauge needle in this window to draw a Red Straight Line superimposed on the needle as shown in figure below and click “Okay”.
672
+ Now, perform preliminary image processing by moving the sliders “Binary Threshold”, “Erode”, “Dilate” and “Median Blur” as shown in the examples below.
673
+ “Binary Threshold” converts the pixels into either black or white depending on a threshold value. Move the “Binary Threshold” slider until the gauge needle is clearly visible as shown in the figure below. In this example it is set to 34.
674
+ 153
675
+ “Median Blur” operation reduces the noise in the black and white image, in this example, the slider is set at a position “2” as shown in the figure below. (For most cases, 2 or 3 works better)
676
+ Click “Apply” to see if the software has detected the correct value of the gauge needle in the current image as shown in the figure below.
677
+ 154
678
+ A new window “Detected Gauge Value” appears with the detected value printed in red color as shown in figure below.
679
+ Click “Okay” and a new window “Detected Values” appears displaying the current detected values of the gauge and click “Okay” to accept.
680
+ Note:
681
+ If the detected values are not correct values, user can readjust the sliders until acceptable results are obtained. Refer training videos for information.
682
+ 155
683
+ Click “Next” to accept the training parameters as shown in figure below.
684
+ A new window “Detected Values” appears displaying the current detected value from the gauge needle as shown in figure below.
685
+ 156
686
+ X. Gauge Application (Horizontal Linear Gauge):
687
+ 1. Launch the CVT Software:
688
+ Launch the CV Software by following the instructions in Chapter 1.
689
+ 2. Select the Gauge Button:
690
+ Select "Gauge" (or multiple) button(s) on this page and click Next.
691
+ 157
692
+ 3. Select Region(s) of Interest for the Gauge:
693
+ Now draw Region(s) of interest of Horizontal Linear Gauges on the Panel image shown in the UI.
694
+ Click “Select Focus Area” button to select a relatively smaller displayed area in the image.
695
+ A new window “Select the Focus Area” appears asking user to select the Focus Area as shown in figure below.
696
+ 158
697
+ Select the Focus Area as shown in figure below and click “Okay”.
698
+ A new window “Select the region of interest” appears asking user to select the Region of Interest as shown in figure below.
699
+ 159
700
+ Now, draw the Region of Interest inside the Horizontal Linear Gauge as shown in figure below.
701
+ Click “Okay” to accept the ROI of the first Horizontal Linear Gauge as shown in figure below.
702
+ 160
703
+ Now, the UI with the ROI 1 marked appears as shown in figure below.
704
+ Next, the user will repeat the above steps to select the region of interest for the second Horizontal Linear Gauge and select “Next/Skip” button as shown in image below.
705
+ 161
706
+ 4. Horizontal Linear Gauge Setup:
707
+ After the previous step, the UI will show a black and white image of the gauge region of interest along with some empty fields and sliders to train the software to detect the needle position and the corresponding value.
708
+ In the “Gauge Name” field enter a name for the Gauge under consideration. For example, “linear_gauge1” as shown in figure below.
709
+ From the drop-down menu “Gauge Type”, select the type of the gauge under consideration (By default, it is set to ‘Circular’). For example, “Horizontal” as shown in figure below.
710
+ 162
711
+ After selecting the type of gauge, the number of buttons under the empty fields is also updated as shown in the figure below.
712
+ Next, enter the appropriate measurement units of the gauge in the “Measurement Units” field.
713
+ Note:
714
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
715
+ Enter “MM” as shown in the figure below.
716
+ 163
717
+ Next, user will provide information about the gauge parameters like, minimum value, maximum value, needle length etc., by selecting “Linear Gauge Inputs” button on the UI as shown in the figure below.
718
+ A new window “Linear Gauge Inputs” appears as shown in the figure below, where the user will provide the necessary information.
719
+ 164
720
+ On this new window, user will draw two lines and enter the minimum and maximum value of the gauge.
721
+ First, the user will draw a vertical line near the minimum value of the Horizontal Linear Gauge. This line appears in Red color as shown in the figure below. (Right click to clear at any point)
722
+ Like previous step, draw a vertical line at the maximum value of the Horizontal Linear Gauge. This line appears in Blue color as shown in figure below.
723
+ 165
724
+ Now, enter the minimum and maximum values of the gauge in the fields “Start Position Value” and “End Position Value” respectively. For this example, enter ‘0’ and ‘100’ for minimum and maximum respectively as shown in the figure below and click “Okay”
725
+ Now, perform preliminary image processing by moving the sliders “Binary Threshold”, “Erode”, “Dilate” and “Median Blur” as shown in the examples below.
726
+ “Binary Threshold” converts the image pixels into either black or white depending on a threshold value. Move the “Binary Threshold” slider until the gauge needle is clearly visible over the background as shown in the figure below. In this example it is set to 108.
727
+ 166
728
+ Click “Apply” to see if the software has detected the correct value of the gauge needle in the current image as shown in the figure below.
729
+ A new window “Detected Gauge Value” appears with the detected value printed in red color as shown in figure below. Click “Okay”.
730
+ 167
731
+ A new window “Detected Values” appears displaying the current detected values of the gauge and click “Okay” to accept.
732
+ Note:
733
+ If the detected values are not correct values, user can readjust the sliders until acceptable results are obtained. Refer training videos for information.
734
+ Click “Next” to accept the training parameters for first Horizontal Linear Gauge as shown in figure below.
735
+ 168
736
+ After clicking “Next”, the UI will show a black and white image of the gauge region of interest for Gauge 2 with similar setup containing empty fields and sliders to train the software to detect the needle position and the corresponding value.
737
+ In the “Gauge Name” field enter a name for the Gauge under consideration. For example, “linear_gauge2” as shown in figure below.
738
+ From the drop-down menu “Gauge Type”, select the type of the gauge under consideration (By default, it is set to ‘Circular’). For example, “Horizontal” as shown in figure below.
739
+ 169
740
+ After selecting the type of gauge, the number of buttons under the empty fields is also updated as shown in the figure below.
741
+ Next, enter the appropriate measurement units of the gauge in the “Measurement Units” field.
742
+ Note:
743
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
744
+ Enter “MM” as shown in the figure below.
745
+ 170
746
+ Next, user will provide information about the gauge parameters like, minimum value, maximum value, needle length etc., by selecting “Linear Gauge Inputs” button on the UI as shown in the figure below.
747
+ A new window “Linear Gauge Inputs” appears as shown in the figure below, where the user will provide the necessary information.
748
+ 171
749
+ On this new window, user will draw two lines and enter the minimum and maximum value of the gauge.
750
+ First, the user will draw a vertical line near the minimum value of the Horizontal Linear Gauge. This line appears in Red color as shown in the figure below. (Right click to clear at any point)
751
+ Like previous step, draw a vertical line at the maximum value of the Horizontal Linear Gauge. This line appears in Blue color as shown in figure below.
752
+ 172
753
+ Now, enter the minimum and maximum values of the gauge in the fields “Start Position Value” and “End Position Value” respectively. For this example, enter ‘0’ and ‘100’ for minimum and maximum respectively as shown in the figure below and click “Okay”
754
+ Now, perform preliminary image processing by moving the sliders “Binary Threshold”, “Erode”, “Dilate” and “Median Blur” as shown in the examples below.
755
+ “Binary Threshold” converts the image pixels into either black or white depending on a threshold value. Move the “Binary Threshold” slider until the gauge needle is clearly visible over the background as shown in the figure below. In this example it is set to 108.
756
+ 173
757
+ Click “Apply” to see if the software has detected the correct value of the gauge needle in the current image as shown in the figure below.
758
+ A new window “Detected Gauge Value” appears with the detected value printed in red color as shown in figure below. Click “Okay”.
759
+ 174
760
+ A new window “Detected Values” appears displaying the current detected values of the gauge and click “Okay” to accept.
761
+ Note:
762
+ If the detected values are not correct values, user can readjust the sliders until acceptable results are obtained. Refer training videos for information.
763
+ Click “Next” to accept the training parameters for second Horizontal Linear Gauge as shown in figure below.
764
+ 175
765
+ A new window “Detected Values” appears displaying the current detected value from the gauge needle as shown in figure below.
766
+ In the above figure, results of Arc Gauge are also displayed along with Horizontal Linear Gauges. Training of CV software to detect values from an Arc Gauge will be discussed in the next section.
767
+ 176
768
+ XI. Gauge Application (Arc Gauge):
769
+ 1. Launch the CVT Software:
770
+ Launch the CV Software by following the instructions in Chapter 1.
771
+ 2. Select the Gauge Button:
772
+ Select "Gauge" (or multiple) button(s) on this page and click Next.
773
+ 177
774
+ 3. Select Region(s) of Interest for the Gauge:
775
+ Now draw Region(s) of interest of Arc Gauge on the Panel image shown in the UI.
776
+ Click “Select Focus Area” button to select a relatively smaller displayed area in the image. In the following figure, Arc Gauge was selected as the 3rd type of gauge and ROI 1 & 2 are Horizontal Linear Gauges discussed in previous chapter. Please ignore them for now, as they don’t affect the case study.
777
+ A new window “Select the Focus Area” appears asking user to select the Focus Area as shown in figure below.
778
+ 178
779
+ Select the Focus Area as shown in figure below and click “Okay”.
780
+ A new window “Select the region of interest” appears asking user to select the Region of Interest as shown in figure below.
781
+ 179
782
+ Now, draw the Region of Interest inside the Arc Gauge as shown in figure below.
783
+ Click “Okay” to accept the ROI of the Arc Gauge as shown in figure below.
784
+ 180
785
+ Now, the UI with the ROI 3 (It is supposed to be ROI 1 instead of ROI 3. Please refer earlier note.) marked appears as shown in figure below.
786
+ Select “Next/Skip” to setup the Arc gauge.
787
+ 181
788
+ 4. Arc Gauge Setup:
789
+ After the previous step, the UI will show a black and white image of the gauge region of interest along with some empty fields and sliders to train the software to detect the needle position and the corresponding value.
790
+ In the “Gauge Name” field enter a name for the Gauge under consideration. For example, “arc_gauge” as shown in figure below.
791
+ From the drop-down menu “Gauge Type”, select the type of the gauge under consideration (By default, it is set to ‘Circular’). For example, “Circular” as shown in figure below.
792
+ 182
793
+ Note:
794
+ Arc Gauge has many similarities when compared to a Circular Gauge, therefore we will treat Arc Gauge as a Circular Gauge for the purposes of CV Software Training.
795
+ Enter the appropriate measurement units of the gauge in the “Measurement Units” field.
796
+ Note:
797
+ All the letters in this field must be entered in UPPERCASE letters. If there are no units for the detected value, then type “NONE” in this field.
798
+ Enter “FPM” as shown in the figure below.
799
+ Now, we are going to provide information about the gauge parameters like, minimum value, maximum value, needle length etc., by selecting various other buttons that appear on this page of the UI.
800
+ 183
801
+ First, the user will select “Circular Gauge Inputs” button to draw the circle of the gauge, input min and max values.
802
+ After selecting “Circular Gauge Inputs” button, a new window opens as shown in the figure below.
803
+ As shown in figure below, the user will now select three points that form the gauge circle (3 Green points) and click “Okay”
804
+ 184
805
+ After selecting “Okay”, a new window “Identified Circle” appears with a Green Circle superimposed on the gauge circle as shown in figure below.
806
+ Note:
807
+ In an Arc Gauge, we only see an ‘Arc’ of the identified gauge circle and since the gauge circle extends beyond the gauge image, we don’t see Green Center point as in case of a Circular Gauge.
808
+ On this new window, user will draw two lines and enter the minimum and maximum value of the gauge.
809
+ First, the user will draw a line joining the center of the identified circle and the approximate minimum point on the identified gauge circle. This operation is done by simply left clicking the mouse anywhere on the image and dragging the mouse pointer to the minimum value point on the identified circle. Right click to clear the line. This line appears in Red color as shown in the figure below.
810
+ 185
811
+ Like previous step, draw a line joining the center point and the approximate maximum point on the identified gauge circle. This line appears in Blue color as shown in figure below.
812
+ Now, enter the minimum and maximum values of the gauge in the fields “Start Position Value” and “End Position Value” respectively. For this example, enter ‘0’ and ‘150’ for minimum and maximum respectively as shown in the figure below and click “Okay”
813
+ 186
814
+ Now, the user will provide the information about the needle length by approximately tracing the gauge needle to draw a line.
815
+ Select “Draw Needle” button as shown in figure below.
816
+ A new window “Draw Needle” appears. Trace the gauge needle in this window to draw a Red Straight Line superimposed on the needle as shown in figure below and click “Okay”.
817
+ 187
818
+ Now, perform preliminary image processing by moving the sliders “Binary Threshold”, “Erode”, “Dilate” and “Median Blur” as shown in the examples below.
819
+ “Binary Threshold” converts the pixels into either black or white depending on a threshold value. Move the “Binary Threshold” slider until the gauge needle is clearly visible as shown in the figure below. In this example it is set to 94.
820
+ 188
821
+ Click “Apply” to see if the software has detected the correct value of the gauge needle in the current image as shown in the figure below.
822
+ A new window “Detected Gauge Value” appears with the detected value printed in red color as shown in figure below. Click “Okay”
823
+ A new window “Detected Values” appears displaying the current detected values of the gauge and click “Okay” to accept.
824
+ 189
825
+ Note:
826
+ If the detected values are not correct values, user can readjust the sliders until acceptable results are obtained. Refer training videos for information.
827
+ Click “Next” to accept the training parameters as shown in figure below.
828
+ 190
829
+ A new window “Detected Values” appears displaying the current detected value from the gauge needle as shown in figure below.
Files/F170.txt ADDED
The diff for this file is too large to render. See raw diff
 
Files/KAT Gateway Software.txt ADDED
@@ -0,0 +1,952 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ KAT Gateway Software
2
+ USER MANUAL
3
+ This product is for personal use ONLY. Any form of commercial application is prohibited.
4
+ Thank you for purchasing our product!
5
+ Please, read this instruction manual carefully and keep it safely stored for future use.
6
+ KATVR reserves the right to interpret and modify the manual. Any amendments, updates
7
+ and interpretations to the manual will be published on the KATVR official website.
8
+ Acknowledgement
9
+ 03
10
+ ▍INDEX
11
+ KAT Gateway Software Introduction 4
12
+ KAT Gateway download 5
13
+ Device Detection 6
14
+ Sensor Pairing 7
15
+ Home Page 9
16
+ Wake Up and Calibrate Each Time 12
17
+ Configuration 15
18
+ Game 15
19
+ Profile 16
20
+ Preference 18
21
+ Game Config 23
22
+ Sitting (For Certain Models) 27
23
+ Haptics Mode (For Certain Models) 29
24
+ Community 30
25
+ User Status 32
26
+ Help forum 32
27
+ Game 33
28
+ Mods(coming soon) 33
29
+ Ranking 34
30
+ Notification 34
31
+ Feedback 35
32
+ System 36
33
+ VR-Integrated Gateway 37
34
+ Opening KAT Gateway 37
35
+ 04
36
+ ▍KAT Gateway Software Introduction
37
+ KAT Gateway is the dedicated software for running KAT VR devices. It is currently available
38
+ in PC and VR-Integrated version.
39
+ Gateway for PC operates in the Windows OS environment, allowing the user to set up and
40
+ manage the device before getting onto it.
41
+ The VR-Integrated version is a plug-in that allows you to access all the settings and other
42
+ features directly from your VR headset and without getting back to the PC.
43
+ With KAT Gateway, you can quickly connect the KAT VR devices with your computer
44
+ system to manage the device, view the current connection status, adjust the settings, and
45
+ run the available VR content. Thanks to our inSteam Turbo Mode, each compatible game
46
+ will be automatically launched with the optimal settings configuration. You can access the
47
+ KAT Gateway’s control panel either from your computer screen or directly from the VR
48
+ headset.
49
+ Both the desktop and VR-Integrated dashboard of KAT Gateway grant you access to a
50
+ variety of adjustable parameter settings allowing to make the necessary changes and turn
51
+ the KAT VR devices into a perfect VR input device for your needs.
52
+ Note: KAT Gateway supports active recognition of all the supported content whether
53
+ launched through steamVR or directly through the game exe files.
54
+ KAT Gateway VR-Integrated dashboard can be only accessed through SteamVR.
55
+ The KAT Gateway VR integrated dashboard based on the Steam VR will be provided
56
+ as a software update.
57
+ System Requirements
58
+ Operating system: Microsoft Windows 10/11
59
+ USB port: 2x USB 2.0 or higher
60
+ Hard Disk Space: At least 400 MB of space (for installing product software)
61
+ Software: Steam VR (Steam Official Website: http://store.steampowered.com/)
62
+ For additional information, please refer to the system requirements of your VR
63
+ HMD.
64
+ 05
65
+ ▍KAT Gateway download
66
+ Visit the official website: https://www.kat-vr.com/, and click "Support" to proceed to the
67
+ product list.
68
+ Select KAT Walk Coord 2 and proceed to the product resource page
69
+ Download KAT Gateway and install it on your PC, double-click the shortcut icon to open
70
+ after installation.
71
+ 06
72
+ ▍Device Detection
73
+ Connect the USB data cable of the KAT product to the PC USB port. KAT Gateway will
74
+ detect all the currently connected KAT Devices but can only run one at a time. (Here using
75
+ C2+ as an example, and the same applies to the following)
76
+ First case - No device detected: Please connect the device to your PC.
77
+ Skip the following step if you only have one product.
78
+ Second case - Multiple devices of the same model detected: Please disconnect the device
79
+ that is not in use.
80
+ Third case - Multiple devices of different type detected: Please select the device needed.
81
+ 07
82
+ Sensor Pairing
83
+ Once the below window appears, the device pairing starts, click "NEXT" when ready.
84
+ Once the below window appears, connect the direction sensor (also known as inner
85
+ sensor) to your PC. Follow the software instructions and click "NEXT" to complete the
86
+ direction sensor pairing.
87
+ 08
88
+ Once the below window appears, disconnect the direction sensor, and connect one of the
89
+ foot sensors to pair it.
90
+ After you see the "✔" symbol, disconnect the current foot sensor and connect the other
91
+ foot sensor. Click "NEXT" to complete the pairing process for both foot sensors.
92
+ 09
93
+ ▍Home Page
94
+ Now the KAT Gateway Home Page is ready!
95
+ Current status display
96
+ a. The KAT Walker account login status;
97
+ b. The body direction status (inner sensor);
98
+ c. The left foot sensor status;
99
+ d. The right foot sensor status;
100
+ e. The Vehicle hub status (Certain Models)
101
+ f. The Haptic status (Certain Models)
102
+ Device status and Data statistics
103
+ The current device information, connection status, Sensor Manager and the current
104
+ sensors status; The mileage statistics, click to check Session or Global status.
105
+ Connection Status
106
+ Connected: Device ready to go.
107
+ Disconnected:
108
+ - Sensors may enter the sleep mode: Wake up the device every time by clicking the
109
+ main button on the device and shake the shoe sensors.
110
+ - No battery: Ensure the sensors are charged.
111
+ Unpaired: Please pair the sensors in the Sensor Manager.
112
+ 10
113
+ Device switch
114
+ Switch between different KAT products that you may own. Click '》' and select the device
115
+ needed to switch to it.
116
+ Sensor Manager
117
+ Check the sensor status and manage the sensors, click ″+" to see the details under each
118
+ sensor system.
119
+ Sensors Kit Connection Status
120
+ - S/N: the serial number of the receiver
121
+ - Sensor pairing status:
122
+ Paired: Sensors ready to go
123
+ Unpaired: Click "Repeat the Initial Pairing" to pair.
124
+ If you have already paired the sensors before, click "Quick Re-Pairing" to recover.
125
+ - Version of the Receiver's Firmware: Check if your firmware version is up to date.
126
+ Click "Update Firmware" to update firmware.
127
+ - Battery Information: Shows the battery status of each sensor
128
+ Update Firmware
129
+ Finish the update by following the instructions on the pop-ups.
130
+ 11
131
+ Troubleshoot the sensors
132
+ Finish the update by following the instructions on the pop-ups.
133
+ 12
134
+ ▍Wake Up and Calibrate Each Time
135
+ - Wake up the device every time before getting on if it
136
+ entered sleep mode (No movement for more than 30 min,
137
+ lights off)
138
+ - Click the main button and shake the shoes to wake up.
139
+ - Check the connection status on Gateway.
140
+ Calibrate every time entering a game or any time if the walking direction, jump status or
141
+ vibration is incorrect.
142
+ Put on the HMD, look forward and calibrate in one of the following ways:
143
+ Quick Calibration:
144
+ Hold controller Menu button until vibration.
145
+ Meta Standalone: Hold Oculus button
146
+ instead.
147
+ Meta with SteamVR: Hold only controller
148
+ triggers instead.
149
+ 13
150
+ Dashboard Calibration:
151
+ Click controller system button, open the Gateway via KAT icon in platform dashboard.
152
+ Click the Calibration button and follow the instruction.
153
+ 14
154
+ Button Calibration:
155
+ Reach back to click the main button.
156
+ Calibrate from PC: (For situations where other calibration methods are not possible)
157
+ Access your PC desktop and Gateway through Remote Desktop or similar method, and
158
+ click the waist calibration icon. Once the calibration pop-up appears, the process is
159
+ complete.
160
+ 01
161
+ 15
162
+ ▍Configuration
163
+ Click the configuration button before running a game.
164
+ The Configuration page is essential for optimizing device performance in different games.
165
+ It offers default profiles with recommended settings for popular games.
166
+ Clicking the "default" button in each section will restore the recommended default
167
+ configuration for that particular section.
168
+ Game
169
+ You can customize configurations for each game, and the changes take effect in real-time.
170
+ The game list automatically displays your installed Steam games. You can search, sort,
171
+ view the current game, scan for new games, manually add or remove games, and launch a
172
+ game through its respective platform after configuring the settings.
173
+ 16
174
+ Profile
175
+ Profiles function similarly to documents, as they store all the configurations for each
176
+ game. Managing profiles is similar to managing documents.
177
+ When you click on each game, the game name and default profiles with recommended
178
+ settings will be applied accordingly.
179
+ You can use the default profiles or create multiple customized profiles with unique names
180
+ for each game. Just like managing a document.
181
+ Switch or Delete Profile
182
+ To switch between different profiles, click on the profile name or the "﹀" icon.
183
+ To delete the current profile, click on the trash bin icon.
184
+ Restore Default Settings
185
+ You can always restore all the default recommended settings for the current game by
186
+ clicking "Restore Default Settings". The current settings will be lost.
187
+ 17
188
+ Save to Profile
189
+ Save all your current configurations for the current game to the current profile. It will
190
+ overwrite the original profile.
191
+ Save as . . .
192
+ Save all your current configurations for the current game to a new profile stored in the
193
+ local path. You can use a memorable method to name it, such as:
194
+ Jack’s sister with jump on - Community ver.
195
+ Import Profile
196
+ You can import a new profile from your computer to create a new profile for current game.
197
+ The current settings will be lost.
198
+ 18
199
+ ▍Preference
200
+ You can customize various settings based on your preferences for each game.
201
+ Common Settings:
202
+ (1) Left-Handed Mode
203
+ Default OFF, toggle between left and right main controllers.
204
+ (2) Original Trackpad /Joystick For Walk
205
+ Define precedence in case of signal conflict: original trackpad/joystick or locomotion
206
+ device.
207
+ a. Default - Controller Secondary: (Recommended)
208
+ Locomotion device prioritizes walking control. When walking on the device, original
209
+ trackpad/joystick cannot control walking. When not walking on the device, original
210
+ trackpad/joystick can control walking.
211
+ b. Controller Priority: (Optimized for competitive gaming)
212
+ Controller prioritizes walking control. When walking on the device, original
213
+ trackpad/joystick can still control walking.
214
+ c. Disable: (Minimize Misjudgment)
215
+ Only use the locomotion device for walking control to prevent controller misjudgment.
216
+ The original trackpad/joystick is disabled for walking control.
217
+ (3) Walking Speed Multiplier:
218
+ Higher value provides larger multiplier, leading to faster walking speed in game.
219
+ 19
220
+ (4) Sprint Mode:
221
+ Turn on Sprint mode to enable sprint activation through a natural running action.
222
+ Use the Sprint Threshold to adjust the speed required for sprint activation.
223
+ Decrease for easier activation, increase for more deliberate activation.
224
+ Decrease if activation is too difficult, increase if walking is misjudged as running.
225
+ (5) Jump Mode:
226
+ Turn on Jump mode to enable Jump activation through a natural jumping action.
227
+ Calibration is needed every time entering a game or any time if the jump is incorrect.
228
+ Use the Jump Threshold to adjust the jump range required for jump activation.
229
+ Decrease for easier activation, increase for more deliberate activation.
230
+ Decrease if activation is too difficult, increase it if normal actions are misjudged as
231
+ jumping.
232
+ Settings for Walk C2 series :
233
+ (1) Walking Accuracy:
234
+ More Fluency provides smoother movement.
235
+ More Accuracy provides more realistic, step-by-step locomotion.
236
+ (2) Sensor Sensitivity:
237
+ Decrease the value to prevent misjudgments of small movements.
238
+ Increase it for more accurate micro-operations.
239
+ 20
240
+ (3) Cruise mode:
241
+ Turn on Cruise mode to enable Cruise Walking through swiping your foot across the base
242
+ in a skateboarding-like action.
243
+ Put your foot on the platform, slide it forward or backward, and keep it in a stable
244
+ position. Cruise speed will be based on your sliding speed.
245
+ 01
246
+ MOVING BACKWARD
247
+ 02
248
+ MOVING BACKWARD
249
+ 03
250
+ MOVING TO THE OTHER
251
+ SIDE
252
+ 04
253
+ MOVING TO THE
254
+ OTHER SIDE
255
+ Settings for Walk C :
256
+ (1) Walking Trigger Sensitivity:
257
+ Decrease the value to prevent misjudgments of small movements.
258
+ Increasing the value brings easier activation of walking.
259
+ For best experience, set to minimum at which the sensor can detect your steps.
260
+ (2) Sensor Sensitivity:
261
+ Decrease the value to prevent misjudgments of slow movements.
262
+ Increasing the value brings easier activation of walking.
263
+ (3) Lateral Movement
264
+ To trigger strafing left, place your right foot at the center of the platform and keep the left
265
+ one at the left edge with toes directed forward and heel up. Wrong toe direction may
266
+ cause movement deviations.
267
+ 21
268
+ To trigger strafing right, place your left foot
269
+ at the center of the platform and keep the
270
+ right one at the right edge with toes directed
271
+ forward and heel up. Wrong toe direction
272
+ may cause movement deviations.
273
+ The higher the sensitivity, the lower the
274
+ angle necessary to trigger movement but the
275
+ higher the chance of unintended trigger and vice versa.
276
+ (4) Backward Movement
277
+ To trigger movement backward, place one of your feet at the
278
+ center of the platform and keep the other one at the back edge
279
+ with heel up.
280
+ The higher the sensitivity, the lower the angle necessary to trigger movement but the
281
+ higher the chance of unintended trigger and vice versa.
282
+ (5) Cruise Movement
283
+ To trigger continuous movement forward without walking, place
284
+ one of your feet at the center of the platform and keep the other
285
+ one at its front edge with toes up.
286
+ The higher the sensitivity, the lower the angle necessary to trigger movement but the
287
+ higher the chance of unintended trigger and vice versa.
288
+ Settings for loco S:
289
+ (1) Walking Trigger Sensitivity:
290
+ Decrease the value to prevent misjudgments of small movements.
291
+ Increasing the value brings easier activation of walking.
292
+ For best experience, set to minimum at which the sensor can detect your steps.
293
+ 22
294
+ (2) Sensor Sensitivity:
295
+ Decrease the value to prevent misjudgments of slow movements.
296
+ Increasing the value brings easier activation of walking.
297
+ (3) Lateral Movement
298
+ Swiftly turn one foot on the heel at 90 degree and back to normal
299
+ position to trigger strafing in the corresponding direction. When a
300
+ stop is needed, take a step.
301
+ The higher the sensitivity, the lower the angle necessary to trigger movement but the
302
+ higher the chance of unintended trigger and vice versa.
303
+ (4) Backward Movement
304
+ To trigger movement backward, place one of your feet at the
305
+ center of the platform and keep the other one at the back edge
306
+ with heel up.
307
+ The higher the sensitivity, the lower the angle necessary to trigger movement but the
308
+ higher the chance of unintended trigger and vice versa.
309
+ (5) Cruise Movement
310
+ To trigger continuous movement forward without walking, place
311
+ one of your feet at the center of the platform and keep the other
312
+ one at its front edge with toes up.
313
+ The higher the sensitivity, the lower the angle necessary to trigger movement but the
314
+ higher the chance of unintended trigger and vice versa.
315
+ 23
316
+ ▍Game Config
317
+ The parameters on this page are not for customization, but to describe locomotion in
318
+ the original game to ensure the device works properly with each specific game.
319
+ By default, the recommended settings will be applied accordingly based on your selected
320
+ game. You can use the default or adjust the settings based on your situation, or set up an
321
+ unknown game or a game without a default profile.
322
+ Step 1: Set the Game Config
323
+ - ALWAYS SET the game to HEAD ORIENTED DIRECTION and FREE / SMOOTH LOCOMOTION
324
+ FOR WALKING to have the proper walking with independent walking direction.
325
+ KAT always walks in the direction of the body.
326
+ - "Click to learn how to set all Game Config" and you will find a detailed guide with images
327
+ on how to configure the game settings for the selected game.
328
+ You need to adjust all the parameters in the game accordingly.
329
+ Step 2: Set below locomotion method accordingly to original game and your controller.
330
+ 24
331
+ Each controller may have different configurations for different games. Gateway can detect
332
+ your Default Platform controllers and provide default configurations for Meta Touch
333
+ controllers, Index controllers, and Vive controllers in different games. You can rename
334
+ your current controllers, edit configurations under the current controllers based on your
335
+ changes, and also manually edit configurations for other types of controllers.
336
+ The default settings are based on the default settings of the game. If you have made any
337
+ changes to the game settings, controller key set, or your controller style, you will need to
338
+ adjust the settings accordingly to ensure proper functionality of the device.
339
+ The settings and options are as follows:
340
+ Walk Controller : Check the game and find out which hand controller (left or right)
341
+ controls walking and set the same here.
342
+ Walk Action : Check the game to find out what triggers the Walking action, and set the
343
+ same here.
344
+ 1. Trackpad touch: Touch the Trackpad to walk;
345
+ 2. Trackpad click: Click the Trackpad to walk;
346
+ 3. Both trackpads touch: Touch either Trackpad to walk;
347
+ 4. Joystick: Push the Joystick to walk;
348
+ 5. Joystick click: Click the Joystick to walk;
349
+ 6. Both joysticks: Push either Joystick to walk;
350
+ Sprint Action: Check the game to find out what triggers the Running action, and set the
351
+ same here.
352
+ 1. No sprint: the game does not allow to sprint.
353
+ 2. Trackpad click: first click and then push the Trackpad to run.
354
+ 3. Trackpad double click: double click and then push the Trackpad to run.
355
+ 4. Joystick click: first click and then push the Joystick to run.
356
+ 25
357
+ 5. Grip press: press grip and then push the Joystick/ Trackpad at the same time to
358
+ run.
359
+ Jump Action: Check the game to find out what triggers the Jumping action, and set the
360
+ same here.
361
+ 1. Left trigger: Press left trigger to jump;
362
+ 2. Left grip: Press left grip to jump;
363
+ 3. Left trackpad click up: Click left trackpad up to jump;
364
+ 4. Left trackpad click down: Click left trackpad down to jump;
365
+ 5. Left trackpad click left: Click left trackpad left to jump;
366
+ 6. Left trackpad click right: Click left trackpad right to jump;
367
+ 7. Left trackpad click center: Click left trackpad center to jump;
368
+ 8. Left joystick click: Click left joystick to jump;
369
+ 9. Left joystick up: Press left joystick up to jump;
370
+ 10. Left joystick down: Press left joystick down to jump;
371
+ 11. Left joystick left: Press left joystick left to jump;
372
+ 12. Left joystick right: Press left joystick right to jump;
373
+ 13. Left X: Press X to jump;
374
+ 14. Left Y/menu: Press Y/menu to jump;
375
+ 15. Right trigger: Press right trigger to jump;
376
+ 16. Right grip: Press right grip to jump;
377
+ 17. Right trackpad click up: Click right trackpad up to jump;
378
+ 18. Right trackpad click down: Click right trackpad down to jump;
379
+ 19. Right trackpad click left: Click right trackpad left to jump;
380
+ 20. Right trackpad click right: Click right trackpad right to jump;
381
+ 21. Right trackpad click center: Click right trackpad center to jump;
382
+ 22. Right joystick click: Click right joystick to jump;
383
+ 23. Right joystick up: Press right joystick up to jump;
384
+ 24. Right joystick down: Press right joystick down to jump;
385
+ 25. Right joystick left: Press right joystick left to jump;
386
+ 26. Right joystick right: Press right joystick right to jump;
387
+ 27. Right A: Press A to jump
388
+ 28. Right B/menu: Press B/menu to jump;
389
+ 26
390
+ Advanced hidden parameters
391
+ If you're unsure, please skip this part and use the recommended default setting.
392
+ Dead Zone: (In certain games, a larger dead zone is implemented to prevent misjudgment
393
+ but may make walking more difficult)
394
+ Increasing the value brings easier activation of walking.
395
+ Decreasing it brings lower misjudgment of walking.
396
+ Active Zone: (In certain games, pushing the joystick to the top may trigger special actions)
397
+ Increase the value to the top to have full use of the joystick range.
398
+ Decrease it to avoid triggering special actions when pushing the joystick to the top.
399
+ Curve: (In certain games, the walking speed may have a gradual increase to reduce
400
+ motion sickness, but may make it more difficult to start walking)
401
+ Increasing the value brings an enhanced speed curve for easier acceleration.
402
+ Decreasing it brings a more even, linear speed change.
403
+ Max Speed in Game: (The maximum speed varies across different games, resulting in
404
+ different walking experiences)
405
+ Increasing the value makes it harder to reach the top speed in the game.
406
+ Decreasing it makes it easier to reach the top speed in the game.
407
+ 27
408
+ ▍Sitting (For Certain Models)
409
+ You can customize the functions of KAT Vehicle Hub in the "Sitting" page.
410
+ Vehicle Hub mode:
411
+ Turn OFF Vehicle Hub mode to only use the seat.
412
+ Turn ON Vehicle Hub mode to enable cruise driving function in any game. (Even when
413
+ there is no in-game vehicle available)
414
+ You can also hold the module button for 1s until vibration to turn on and
415
+ off.
416
+ Once the Vehicle Hub is enabled, by default it will enter the 'AUTO'
417
+ mode in Idle state.
418
+ How To Drive/Ride:
419
+ Quick Drive Mode
420
+ Click the right controller joystick (not push) to start or stop driving.
421
+ If the right controller joystick click conflicts with any action in the game, please disable it.
422
+ Feet Ride/Drive
423
+ To start riding, place your right foot on the ground and slide it back.
424
+ 28
425
+ It will continuously move forward, without using your feet to maintain the motion.
426
+ You can control the direction by naturally turning your body.
427
+ To stop, slide your right foot forward.
428
+ You can also stand up and stop driving/riding anytime you want to switch back to the
429
+ walking mode.
430
+ Vehicle Modes Settings
431
+ You can save up to 4 customized settings for driving/riding, or use a single setting for any
432
+ mode in the selected game.
433
+ The 4 vehicle modes are Auto, Mount, Aircraft, and Heavy Vehicle, making it easy to
434
+ remember.
435
+ Vehicle Speed
436
+ You can adjust the driving/riding speed for each vehicle mode according to your
437
+ preference.
438
+ Road Roughness Simulation (For Certain Models)
439
+ It provides a haptic representation of randomized road events. You can adjust the
440
+ vibration strength according to your preference.
441
+ Quick Switch
442
+ Click the icon to quickly enable a vehicle mode.
443
+ You can also change modes by clicking the main button on the
444
+ Vehicle Hub.
445
+ 29
446
+ ▍Haptics Mode (For Certain Models)
447
+ You can toggle the haptics Mode ON or OFF and customize the settings according to your
448
+ preference.
449
+ Triggered by Controller Vibration:
450
+ The base will vibrate in sync with your controller's vibration based on different games.
451
+ You can toggle it ON or OFF and adjust the vibration strength.
452
+ Triggered by Steps:
453
+ The base will vibrate when you walk.
454
+ You can toggle it ON or OFF and adjust the vibration strength.
455
+ Click the "Test" buttons to test the vibration strength.
456
+ 30
457
+ ▍Community
458
+ Login: Click login when you first enter.
459
+ Register: Click to register an account via email.
460
+ 31
461
+ If you forget your password, you can retrieve your account and update the password.
462
+ The personal center displays user information, game data, data ranking, and self-help
463
+ forums.
464
+ You can modify and manage your personal information here, including modifying your
465
+ name, password, and email address; viewing game data and rankings; replying or finding
466
+ questions or answers.
467
+ Edit: modifying your name, password, and email address.
468
+ Data: Displays the total data, annual data, monthly data, and current data of walking
469
+ steps, game time, calorie consumption, and walking distance
470
+ 32
471
+ User Status
472
+ You can login and check your status including the Nickname, level, steps walked, walking
473
+ time, calories burned and walked distance.
474
+ Help forum
475
+ a. KAT Support: KAT team will release product related support information here. You can
476
+ request support by replying different topics or look for the information you need.
477
+ b. Community Settings (coming soon):You can share your own custom profile for
478
+ different game settings with other community members.
479
+ 33
480
+ Game
481
+ a. Native Games ( Lab) : You can download, review and rate KAT's native games;
482
+ b. Recommended Oculus Games: Click the game and proceed to the Oculus platform.
483
+ c. Recommended Steam Games: Click the game and proceed to the Steam platform.
484
+ d. Developer Games (coming soon) : If you're a developer of a VR game you would like to
485
+ share with the community, please feel free to contact us!
486
+ Mods(coming soon)
487
+ Mods (Under development): Content mods that help to increase the experience of playing
488
+ the VR games on the KAT VR equipment
489
+ a. Native Mods: You can download the native game mods added by the KAT VR team.
490
+ b. Developer Mods: You can download the mods provided by the developers. If you're a
491
+ developer and would like to add game mods, please feel free to contact us! c. Search:
492
+ Insert keywords to look for game mods.
493
+ 34
494
+ Ranking
495
+ You can check your KATer ranking information here.
496
+ Notification
497
+ Here you can see the notifications sent to you by the KAT VR team about software updates
498
+ and other important events. Click 'delete' to remove the notifications.
499
+ 35
500
+ Feedback
501
+ You can use this function to share your feedback with us.
502
+ 36
503
+ ▍System
504
+ The system settings page shows the currently selected language and the current inSteam
505
+ drivers and the current version of KAT Gateway.
506
+ From this page you can exit the software, change the language and reinstall the drivers, as
507
+ well as set up the LED lights and use the LAB FUNCTIONS.
508
+ Left handed Mode: Enabling the global left handed mode will activate the left handed
509
+ mode switch for all games in bulk, and disabling it will turn off the left handed mode for
510
+ all games.
511
+ LED: You can adjust the brightness or turn off the light.
512
+ 37
513
+ ▍VR-Integrated Gateway
514
+ Opening KAT Gateway
515
+ VR-Integrated Gateway allows you to easily access and configure parameters in the
516
+ headset.
517
+ Press the System Button on your hand controller and click the Gateway icon in the bottom
518
+ left corner.
519
+ www. kat-vr.com
520
+
521
+
522
+ KAT Nexus
523
+ OWNER'S MANUAL
524
+ This product is for personal use ONLY. Any form of commercial application is prohibited.
525
+ Acknowledgement
526
+ Thank you for purchasing our product!
527
+ Please, read this instruction manual carefully and keep it safely stored for future use.
528
+ KATVR reserves the right to interpret and modify the manual. Any amendments, updates and
529
+ interpretations to the manual will be published on the KATVR official website. Please pay
530
+ attention.
531
+ INDEX
532
+ 03
533
+ First & Foremost 05
534
+ Declaration 05
535
+ PRODUCTION INTRODUCTION
536
+ Product Description
537
+ KAT Nexus Description
538
+ Product Appearance
539
+ Product Parameters
540
+ Packing List
541
+ 07
542
+ 07
543
+ 07
544
+ 08
545
+ 08
546
+ Precautions before use 05
547
+ Placement of unit Issues 06
548
+ Power Issues 06
549
+ Cleaning Issues 06
550
+ Moisture Issues 06
551
+ About the user manual 06
552
+ PRODUCTION INSTALLATION
553
+ Before use
554
+ Starting Up
555
+ Network Connection
556
+ Software Set-UP
557
+ Get Your KAT Device Ready
558
+ Install the KAT Gateway of Nexus App onto Your Headset
559
+ Via KAT Gateway
560
+ Via Sidequest(Recommended for Side quest users)
561
+ Via ADB tool (Only for ADB users)
562
+ 09
563
+ 09
564
+ 09
565
+ 09
566
+ 10
567
+ 10
568
+ 10
569
+ 12
570
+ 13
571
+ 04
572
+ APPENDIX
573
+ Contact Information 24
574
+ Connect Your KAT Device to KAT Nexus
575
+ Entering the Platform( IMPORTANT!)
576
+ Opening KAT Gateway
577
+ 14
578
+ 15
579
+ 15
580
+ KAT GATEWAY OF NEXUS FUNCTIONS INTRODUCTION
581
+ Device Status
582
+ Sensor Manager
583
+ Controller Options
584
+ Games
585
+ Follow Recommended Settings (The following are consistent)
586
+ Controller: Options of the device main functions
587
+ Game Config
588
+ Haptics and Sitting
589
+ Direction calibration
590
+ 16
591
+ 17
592
+ 18
593
+ 18
594
+ 20
595
+ 20
596
+ 21
597
+ 22
598
+ 23
599
+ 05
600
+ Declaration
601
+ For the latest information, please feel free to check out our official
602
+ website: http://www.kat-vr.com
603
+ It is the trademark of Hangzhou Virtual and Reality Technology Co., Ltd. All
604
+ trademarks, product identification and trade names of other companies appearing in this
605
+ manual belong to their respective owners. No parts or content of this manual may be copied
606
+ or reproduced without the written permission of the company, and may not be shared in any
607
+ form or used for any commercial or profitable purpose.
608
+ The contents of this manual are subject to change due to product upgrades or other reasons.
609
+ Our company reserves the right to modify the contents of this manual without any notice or
610
+ prompt. This manual is intended as a guide only. We do our best to provide accurate information
611
+ in this manual but we do not guarantee that the contents of this manual are completely
612
+ error free. Statements, information, and recommendations provided in this manual do not
613
+ constitute any warranty, express or implied.
614
+ First & Foremost
615
+ This symbol is a hint, letting you know what might be helpful at a particular point.
616
+ This symbol is a safety warning, used to remind you to be wary of your personal safety
617
+ while performing the task. Be mindful of this symbol to avert any potential risks.
618
+ This owner’s manual contains all the information you need to install your KAT Walk Coord 2.
619
+ The following symbols are used to indicate important information and instructions for the
620
+ device set-up process:
621
+ This symbol is a precaution, it usually provides additional information to help you with
622
+ a task, as well as information that applies only to certain situations.
623
+ This symbol is important: You must complete one or more of the hardware and software
624
+ installations or settings as described in the manual.
625
+ Precautions before use
626
+ To prevent fire, electronic shock and damage of product, please do not let the player come into
627
+ contact with rain, moisture or liquid. Do not put vases or other water containers on the player.
628
+ 06
629
+ Placement of unit Issues
630
+ Please place the player on a flat area. Keep it away from sunlight and avoid heat, humidity and
631
+ frequent shaking to avoid damaging the case and other components of the player and shortening the
632
+ service life of the player. For the best performance of the player, please do not place heavy
633
+ objects on the player and keep it at least 6 inches away from the wall.
634
+ Power Issues
635
+ Do not use over-voltage, otherwise the player would be damaged and might cause fire.Please
636
+ connect the power cord correctly and make sure the cable is not damaged. In the United States,
637
+ that means supplying 110 VAC to the device. Please unplug the power cord if you do not use the
638
+ player for long time to prevent damage caused by lightning.
639
+ Moisture Issues
640
+ Please keep the player away from any humid places, like a basement. Condensation of moisture
641
+ might cause damage to the player. In this case, please turn on the player and the heat of the
642
+ player may drive the humidity away.
643
+ About the user manual
644
+ Please read the user manual carefully and keep the manual in a safe place for review if necessary.
645
+ The pictures in this user manual might be different from the pictures shown on the HMD
646
+ To ensure good ventilation, please do not put the player in bookcases, cabinet or other confined
647
+ places. Keep well-ventilated to prevent fire or electronic shock caused by overheat of the player.
648
+ Keep lit candles and other lit subjects away from the player. Please do not open the case of the
649
+ player. Only a specialist is allowed to do this.
650
+ Cleaning Issues
651
+ Please unplug the power cord before cleaning the player. Use a slightly wet rag to clean the
652
+ player instead of liquid detergent. Do not use solvents like gas, because this might damage the
653
+ case of the player.
654
+ 07 06
655
+ because of a difference in firmware.
656
+ If any of the components needs to be replaced, please make sure only authorized components
657
+ are used. Otherwise, it might cause fire, electronic shock or other damages.
658
+ Production Introduction
659
+ Product Description
660
+ KAT Nexus Description
661
+ KAT Nexus is a revolutionary adaptation solution - a bridge connecting KAT VR devices with
662
+ the games on META Quest devices! For maximum convenience, we've integrated years of work
663
+ on a universal input integration algorithm into an easy to use, small and lightweight device so
664
+ you could experience VR at its best even without a VR-ready PC!
665
+ Unleash yourself in any VR adventures, connect fun with exercise, experience realistic
666
+ social interactions, and much more with any of our solutions via the KAT Nexus!
667
+ Product Appearance
668
+ 08
669
+ Product Parameters
670
+ Product weight
671
+ Product size
672
+ Flash
673
+ SDRAM
674
+ Power Supply
675
+ Power Indicator (LED)
676
+ USB host
677
+ 100g
678
+ 100*100*17.5mm
679
+ 16G
680
+ 2G
681
+ DC 5V/2A
682
+ Power ON: blue; Standby: Red
683
+ 1 High speed USB 3.0,1 USB 2.0
684
+ Packing List
685
+ No.
686
+ 1
687
+ 2
688
+ 3
689
+ 4
690
+ Component Name & Picture
691
+ KAT Nexus
692
+ KAT Nexus Brief User Guide
693
+ USB Power Cord
694
+ Network Cable
695
+ Quantity
696
+ 1
697
+ 1
698
+ 1
699
+ 1
700
+ KAT Nexus Brief User Guide
701
+ Before use, check carefully whether the quantity is consistent with the below list. If anything is
702
+ missing, contact our customer service at service@kat-vr.com before you proceed.
703
+ Production installation
704
+ 09
705
+ Before use:
706
+ 1. Please open an Oculus developer account by navigating to the Oculus for Developers homepage
707
+ (please refer to https://developer.oculus.com/documentation/native/android/mobile-device-
708
+ setup/ for more instructions). It will be necessary to enter the developer's mode which
709
+ enables you to install third party software on the oculus device. Enter the Oculus app on your
710
+ mobile phone and enable the Oculus developer mode.
711
+ 2. Please prepare any computer (Windows Only) for KAT device pairing and software installation.
712
+ 3. Please make sure that the internet WI-FI router to which you have linked your PC and headset
713
+ provides a stable internet connection. A router with good performance and high signal
714
+ strength will exhibit better algorithmic performance.
715
+ DV
716
+ 01 Starting Up
717
+ - Connect the DC plug of the power cord into the DC port on the
718
+ Nexus.
719
+ - Then, plug the USB-A connector of the power cord into a USB
720
+ power adapter (prepare your own adapter with power supply
721
+ of at least 5V-2A).
722
+ - Finally, connect the power adapter into a power outlet. Wait
723
+ for the blue power light to turn on (approximately 1 minute).
724
+ Network Connection
725
+ - Plug one end of the network cable into the Lan port of Nexus,
726
+ and another end into the Lan port of your internet router (Use the
727
+ same router to which you have your PC and Headset connected,
728
+ it is recommended to use a router that supports a 5GHz connection
729
+ for the Quest and try to use it in the same room to avoid
730
+ signal weakening.).
731
+ - Once connected to the internet, please wait for the device to
732
+ update to the latest version of firmware, approximately 10 min.
733
+ 02
734
+ Software Set-Up
735
+ KAT Gateway is the dedicated software for running KAT VR devices. It is currently available in PC,
736
+ VR-Integrated and Nexus version.
737
+ The Nexus version allows you to access all the settings and other features directly from your VR
738
+ headset.
739
+ 10
740
+ Get Your KAT Device Ready
741
+ If you didn't pair your KAT device yet, plug its USB cable into your computer port, pair the
742
+ sensors using the PC version of software (follow the instructions in KAT Gateway) and fully charge
743
+ them.
744
+ Install the KAT Gateway of Nexus App onto Your Headset
745
+ - Connect Your Oculus Quest 2 to the PC via USB-C cable and allow USB debugging ( do not keep
746
+ any other android devices such as a smart phone connected to your PC at the same time, and do
747
+ not disconnect the device during installation).
748
+ - Choose 'always enable' in the windows popups if they show (and the same in your
749
+ headset).
750
+ - Install the Gateway for Nexus via KAT Gateway, the SideQuest, the ADB Tool as following or
751
+ any other apk installation tool you're familiar with.
752
+ Via KAT Gateway 2.1.2 or higher
753
+ Open the Gateway on your computer and click the installation icon to install KAT Gateway into your
754
+ Standalone VR headset.
755
+ Hard Disk Space: At least 50~80 MB of space(For installing product software)
756
+ Visit the official website: https://www.kat-vr.com/, and click "Support" to proceed to the product
757
+ list. Select Your KAT device and proceed to the product resource page.
758
+ Download And install KAT Gateway in your computer.
759
+ 11
760
+ If the headset is not detected, it will display in gray and it won't be possible to proceed. In such a
761
+ case, re-connect the HMD and check again if it has been detected as normal (displayed in normal
762
+ color).
763
+ Click 'confirm' to start the installation.
764
+ 12
765
+ Failed Installation Prompt: If the installation failed, please connect your standalone headset to
766
+ the PC via a USB-C cable and select to install Gateway into it, do not disconnect the device
767
+ during installation.
768
+ Via SideQuest (Recommended for SideQuest users)
769
+ Visit the official website at: https://www.kat-vr.com/,and click 'Support' . In the product list select
770
+ KAT Nexus and proceed to the product resource page. Download KAT Gateway of Nexus onto your
771
+ computer (Windows 10 x64) and extract it.
772
+ Successful Installation Prompt: Gateway installed into your headset.
773
+ 13
774
+ Via ADB tool (Only for ADB users)
775
+ Visit the official website: https://www.kat-vr.com/,and click "Support" to proceed to the product
776
+ list. Select KAT Nexus and proceed to the product resource page. Download KAT Gateway of Nexus
777
+ onto your computer (Windows 10 x64) and extract it.
778
+ Download and install ADB driver from a credible website, FYI: https://developer.android.com/studio/
779
+ releases/platform-tools, copy the complete path of the APK file you have extracted previously.
780
+ Press WIN+R to open the Windows command-line interface, input 'adb install "X:\katgateway.apk"
781
+ (right click the mouse to paste the path)' and press the 'Enter' key to start the installation of KAT
782
+ Nexus app, wait until the installation is finished.
783
+ Remember to install both the katgateway.apk and katgatewayservice.apk files.
784
+ Download and install the 'Sidequest Advanced Installer' (The easy installer version does not support
785
+ APK installation) from sidequestvr.com. Then, open the Sidequest, click the icon of a folder with
786
+ arrow pointing downwards, and locate the. APK file path. Select the katgateway. APK file and click
787
+ 'Open'.
788
+ Within a second, SideQuest will begin loading and installing the kat nexus app. Wait until it finished.
789
+ (You can double - check in the sidequest task manager to see if the installation has been finished
790
+ successfully. Remember to Install both the katgateway. APK and katgatewayservice. APK files.
791
+ 14
792
+ KAT NEXUS Wi-Fi
793
+ ROUTER
794
+ PAIRED
795
+ KAT DEVICE
796
+ HMD WITH
797
+ GATEWAY INSTALLED
798
+ INTERNET
799
+ *After completing all the steps, you can unplug the USB-C cable connecting the HMD with your PC.
800
+ Connect Your KAT Device to KAT Nexus
801
+ Plug the USB cable of your paired KAT device to any USB port
802
+ of Nexus. Place the KAT Nexus at a safe spot.
803
+ USB1 USB2
804
+ 03
805
+ 041
806
+ C 2 as an example:
807
+ Before getting on or when sensors enter sleep mode (inactive ≥ 30min), click
808
+ the main button and shake the shoes. Check the sensor light:
809
+ * Flashing: no connection
810
+ Constant: successful connection
811
+ None: sleep mode / no power.
812
+ 051
813
+ Hang the HMD and controllers on the storage hooks. Ensure the anti-slip
814
+ protection lock is enabled and the base surface is clean. Equip a pair of
815
+ kneepads and step onto the platform.
816
+ * supported weight: 130kg (286 lbs.)
817
+ Entering the Platform( IMPORTANT!)
818
+ 15
819
+ Opening KAT Gateway
820
+ Put your headset on, open the Oculus App Library>ALL>Unknown Sources, find the app "KAT
821
+ Gateway" and open it, (choose 'confirm' in the popups if needed). Make sure the sensors are
822
+ connected and select a game to open it.
823
+ 16
824
+ KAT Gateway of Nexus functions introduction
825
+ Device Status
826
+ The interface of KAT Gateway for Nexus once a KAT device is connected (C2 as an example):
827
+ Home page of KAT Gateway displays the current device connection status:
828
+ -Direction of the inner sensor, low battery reminder;
829
+ -Direction of the left foot sensor, low battery reminder;
830
+ -Direction of the right foot sensor, low battery reminder;
831
+ -Vehicle hub status (Only supports certain models)
832
+ -Haptics status (Only supports certain models)
833
+ -The Current device Icon, device name, connection status and Sensor Manager
834
+ Connected
835
+ Disconnected: Sensors may enter the sleep mode. Click the main button on the direction
836
+ sensor and shake the shoe sensors to wake them up. Ensure the sensors are charged. You
837
+ can pair the sensors in the Sensor Manager.
838
+ -Device switch:
839
+ This function was designed to help you switch between different KAT devices that you may own.
840
+ Click 'expand' and select the device to switch to it.
841
+ -KAT Walk C 2 Sensors / KAT Vehicle Hub Sensor:Click ″+" to see the details about the sensor
842
+ status.
843
+ 17
844
+ -The device Icon, device status.
845
+ Device currently in use
846
+ Device is busy
847
+ Device is ready
848
+ Device is disconnected
849
+ Sensor Manager
850
+ Check the sensor status and manage the sensors.
851
+ -Sensor Status:
852
+ Connected: Sensors are ready.
853
+ Disconnected: Sensors may enter the sleep mode. Click the main button on the direction
854
+ sensor and shake the shoe sensors to wake them up. Ensure the sensors are charged. You can
855
+ pair the sensors in the Sensor Manager.
856
+ S/N: the serial number of the receiver.
857
+ Battery Information: Shows the battery status of each sensor.
858
+ 18
859
+ Controller Options
860
+ Please configure all the settings before launching the game. To modify the configuration,
861
+ press the home button on your right controller and re-open KAT Gateway. Returning to
862
+ gateway may cause the game to close (depending on the oculus firmware version).
863
+ Games:
864
+ Game list shows the compatible games that you already installed; compatible game list will be
865
+ constantly updated. Select the game and click the“Launch Game” button. Please grant
866
+ Gateway access permission as guided to launch the game.
867
+ Toggle the switch and proceed to the next step by clicking Return “←”
868
+ 19
869
+ Click “USE THIS FOLDER” and then “ALLOW” to grant Gateway access for game data.
870
+ Click “ALLOW” to grant Gateway permissions.
871
+ c.Cruise mode: Enables you to trigger smooth, continuous movement forward without
872
+ constantly walking.
873
+ You may turn on or off the cruise mode which allows you to move continuously without walking.
874
+ When ′on ′, you can trigger strafing by putting your foot on the platform, sliding it forward or
875
+ backward and keeping in a stable position. continuous strafing to the sides can be triggered in
876
+ a similar way.
877
+ Follow Recommended Settings (The following are consistent):
878
+ You can also enable the 'Follow Recommended Settings' option to play on our pre-set default
879
+ profiles.
880
+ Controller: Options of the device main functions
881
+ a.Walking Speed: Adjustment of the walking speed for the selected game.
882
+ b.Walking Accuracy: Adjustment of the walking accuracy for the selected game. Higher accuracy
883
+ provides more realistic locomotion with a feeling of moving forward step by step. Higher fluency
884
+ makes movement smoother. Default: Click ‘Default ′ to reset the game profile.
885
+ 20
886
+ Games launched via GW will have an independent game save history, separate from your ordinary
887
+ saves created when the game is launched via the Oculus App Library.
888
+ Please click “Copy from Oculus home” to sync game save files from Oculus Library to Gateway.
889
+ Click “Run Game” to play game after syncing save files.
890
+ 21
891
+ Game Config
892
+ Locomotion system differs in different games! Your KAT Device can only work properly when the
893
+ settings are all correct based on the following instructions:
894
+ Always set the in-game locomotion settings to Head Oriented movement if possible!
895
+ 03
896
+ MOVING IN
897
+ THE OPPOSITE
898
+ DIRECTION
899
+ 01
900
+ MOVING
901
+ BACKWARD
902
+ 02
903
+ MOVING
904
+ FORWARD
905
+ 04
906
+ MOVING IN
907
+ THE OPPOSITE
908
+ SIDE
909
+ 22
910
+ Haptics and Sitting
911
+ 4.B/Y: Press B/Y to walk;
912
+ 5.Grip: Grab Grip button to walk;
913
+ 6.Trigger: Press Trigger to walk;
914
+ 7.None: Choose this option if you are playing a game without any walking function.
915
+ a.Mapping Controller: Try the game and find out which hand controller controls walking. Set it
916
+ accordingly here.
917
+ 1.Right hand: Choose this option if the Right Hand controller controls walking in game.
918
+ 2.Left hand: Choose this option if the Left Hand controller controls walking in game.
919
+ b.Direction Source:
920
+ Always set the in-game locomotion settings to Head Oriented movement if possible!
921
+ 1.Body (Recommended): Choose this option to enable Decoupled directions so you could
922
+ walk towards the direction of your body.
923
+ 2.Controller( NOT Recommended): Choose this option to enable Decoupled directions
924
+ towards the controller.
925
+ c.Walk source (as in the game): Check the game and find out what action triggers walking and
926
+ set it accordingly here.
927
+ 1.JStick/TPad: Push the Joystick/Touchpad to walk;
928
+ 2.JStick/Tpad-Pressed: Press down and push the Joystick/Touchpad to walk;
929
+ 3.A/X: Press A/X to walk;
930
+ Haptics and Sitting Customization Settings only supports certain models, please refer to KAT
931
+ Vehicle Hub User Manual.
932
+ https://download.katvr.com/product/literature/KAT%20Vehicle%20Hub%20Owner's%
933
+ 20Manual 1668589353045.pdf
934
+ 23
935
+ 01 - Calibrate every time entering a game or any time if the walking
936
+ direction, jump status or vibration is incorrect.
937
+ - Put on the HMD, look forward and calibrate in one of the following
938
+ ways:
939
+ Quick: Long press the oculus home button on your right controller.
940
+ Button: Reach back to click the button.
941
+ Direction calibration
942
+ 24
943
+ Contact Information
944
+ Web: www.kat-vr.com/
945
+ Company: Hangzhou Virtual and Reality Technology Co., Ltd
946
+ 9th floor - Building 6, No.1818-2, West Wenyi Road - Yuhang Street, Yuhang District,
947
+ Hangzhou City, Zhejiang Province, China
948
+ Add:
949
+ Postal code: 311100
950
+ Email: service@katvr.com
951
+ APPENDIX
952
+ www.kat-vr.com
Files/Mockpanel.docx ADDED
Binary file (32.8 kB). View file
 
Files/Mockpanel.pdf ADDED
Binary file (163 kB). View file
 
Files/Reference.txt ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ What is the Hardware installation process for Ultimaker?
2
+
3
+ Connect the Bowden tubes (Ultimaker S5 only)
4
+ 1. Remove the clamp clips from the print head and insert the Bowden tubes. The Bowden tubes connect to the
5
+ corresponding numbers on the print head
6
+ 2. Secure the Bowden tubes with the clamp clips
7
+ 3. Fit the print head cable clips onto Bowden tube 2
8
+ Place and connect the NFC spool holder
9
+ 1. Insert the spool holder into the back panel and push until it snaps into place
10
+ 2. Secure the cable from the spool holder behind the cable clips in the back panel
11
+ 3. Connect the cable to the NFC socket at the back of the printer
12
+ Place the glass build plate
13
+ 1. Open the glass door(s)
14
+ 2. Open the two build plate clamps at the front of the build plate
15
+ 3. Gently slide the glass plate onto the build plate and ensure that it snaps into the build plate clamps at the back
16
+ 4. Close the two build plate clamps at the front to secure the glass build plate and close the glass door(s)
17
+ Connect the power cable
18
+ 1. Connect the power cable to the power socket at the back of the Ultimaker S3 / S5
19
+ 2. Plug the other end of the cable into a wall outlet
20
+
21
+ What are the main component for Ultimaker S5?
22
+
23
+ 1. Glass door(s)
24
+ 2. Print head
25
+ 3. Build plate
26
+ 4. Build plate clamps
27
+ 5. Touchscreen
28
+ 6. USB port
29
+ 7. Feeder 2
30
+ 8. Bowden tubes
31
+ 9. Feeder 1
32
+ 10. Power socket and switch
33
+ 11. Ethernet port
34
+ 12. Double spoolholder with NFC cable
35
+ 13. NFC socket
Files/Stratasys-F123-Series-User-Guide.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d27bc0e527929c69e0abfcaa1f7e4b8e708bade3858cf8002c7f926a5cc9227
3
+ size 10226314
Files/Ultimaker S3-S5 - User manual ENv2.4.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:232020b6212727723c1d60df0dfea6413bfdbab291ff27a9e7c5e632836c4610
3
+ size 2435405
Files/Ultimaker S5.txt ADDED
@@ -0,0 +1,839 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Ultimaker S3 and
2
+ Ultimaker S5
3
+ Installation and user manual
4
+ EN 012020 v2.4
5
+ Ultimaker S3 and Ultimaker S5 user manual 1
6
+ Table of contents
7
+ 1. Safety and compliance
8
+ 1.1 Safety messages 4
9
+ 1.2 General safety information 4
10
+ 1.3 Hazards 4
11
+ 1.4 Health and safety 5
12
+ 1.5 FCC ISED regulatory notices 6
13
+ 2. Introduction
14
+ 2.1 Main components 8
15
+ 2.2 Specifications 9
16
+ 3. Installation
17
+ 3.1 Unboxing 11
18
+ 3.2 What’s in the box 11
19
+ 3.3 Hardware installation 11
20
+ 3.4 Welcome setup 12
21
+ 3.5 Firmware update 14
22
+ 3.6 Install Ultimaker Cura 14
23
+ 4. Operation
24
+ 4.1 Touchscreen 16
25
+ 4.2 Materials 16
26
+ 4.3 Preparing a print with Ultimaker Cura 17
27
+ 4.4 Remove the print 19
28
+ 4.5 Remove support material 20
29
+ 4.6 Change the printer configuration 21
30
+ 4.7 Calibration 22
31
+ 5. Maintenance
32
+ 5.1 Update the firmware 25
33
+ 5.2 Material handling and storage 25
34
+ 5.3 Maintenance schedule 26
35
+ 6. Troubleshooting
36
+ 6.1 Error messages 28
37
+ 6.2 Print core troubleshooting 28
38
+ 6.3 Print quality issues 28
39
+ 7. Warranty
40
+ 7.1 General 31
41
+ 7.2 Conditions 31
42
+ 7.3 Notification 32
43
+ 7.4 Exclusions 32
44
+ 7.5 Applicable law and competent court 32
45
+ Ultimaker S3 and Ultimaker S5 user manual 2
46
+ Disclaimer
47
+ Please read and understand the contents of this installation and user manual carefully. Failure to read the manual
48
+ may lead to personal injury, inferior results or damage to the Ultimaker printer or its peripherals. Always make sure
49
+ that anyone who uses this 3D printer knows and understands the contents of the manual to make the most out of
50
+ the Ultimaker printer.
51
+ The conditions or methods used for assembling, handling, storage, use or disposal of the device are beyond our
52
+ control and may be beyond our knowledge. For this and other reasons, we do not assume responsibility and
53
+ expressly disclaim liability for loss, injuries, damage, or expense arising out of or in any way connected with the
54
+ assembly, handling, storage, use or disposal of the product.
55
+ The information in this document was obtained from sources which we believe are reliable. However, the
56
+ information is provided without any warranty, express or implied, regarding its correctness.
57
+ Intended use
58
+ Ultimaker 3D printers are designed and built for fused filament fabrication with Ultimaker materials within a
59
+ commercial/business environment. The mixture of precision and speed makes Ultimaker 3D printers a perfect
60
+ machine for concept models, functional prototypes and small series production. Although we achieved a very
61
+ high standard in the reproduction of 3D models with the usage of Ultimaker Cura, the user remains responsible
62
+ to qualify and validate the application of the printed object for its intended use, especially critical for applications
63
+ in strictly regulated areas like medical devices and aeronautics. While being an open material platform, the
64
+ best results will be achieved with Ultimaker materials, while effort has been made to match material properties
65
+ with machine settings.
66
+ 1. Safety and compliance
67
+ Ultimaker S3 and Ultimaker S5 user manual 4
68
+ 1.1 Safety messages
69
+ This guide contains warnings and safety notices.
70
+ The information provided below is applicable to the Ultimaker S3 and Ultimaker S5.
71
+ Provides additional information that is helpful to do a task or to avoid problems.
72
+ Warns of a situation that may cause material damage or injuries if the safety instructions are not followed.
73
+ 1.2 General safety information
74
+ • Ultimaker 3D printers generate high temperatures and have hot moving parts that can cause injury. Never
75
+ reach inside Ultimaker 3D printer while they are in operation. Always control the printer with the touchscreen
76
+ at the front or the power switch at the back. Allow the Ultimaker 3D printers to cool down for 5 minutes
77
+ before reaching inside
78
+ • Do not change or adjust any parts of the products unless the change or adjustment is
79
+ authorized by the manufacturer
80
+ • Do not store items inside Ultimaker products
81
+ • Ultimaker products are not intended for use by persons with reduced physical and/or mental capabilities, or
82
+ lack of experience and knowledge, unless they are supervised or have been given instructions concerning the
83
+ use of the appliance by a person responsible for their safety
84
+ • Children should be under constant supervision when using Ultimaker products
85
+ 1.3 Hazards
86
+ Electromagnetic compatibility (EMC)
87
+ These devices may not cause harmful interference, and these devices must accept any interference received,
88
+ including interference that may cause undesired operation.
89
+ An electrostatic discharge in some metallic parts of the devices may cause the interruption of the NFC
90
+ communications, affecting the initial detection of the material spool. In these cases, a device restart
91
+ should solve the problem
92
+ Electrical safety
93
+ Ultimaker products have been tested according to the IEC 60950-1 and/or IEC 62368-1. All relevant products
94
+ have undergone and passed hi‐pot testing before shipment. This test guarantees the right level of insulation
95
+ against electrical shock. An earthed mains socket must be used. Be sure that the building installation has
96
+ dedicated means for over-current and short-circuiting. For more information, please visit our website for the
97
+ CB-certificate. The Ultimaker 3D printers are powered by mains voltage, which is hazardous when touched.
98
+ Only trained staff should remove the bottom cover.
99
+ Always unplug Ultimaker products before performing maintenance or modifications.
100
+ Ultimaker S3 and Ultimaker S5 user manual 5
101
+ Mechanical safety
102
+ Ultimaker products are compliant with the Machine Directive 2006/42/EU. The EC declaration of conformity
103
+ can be found on our website. The Ultimaker 3D printers contain moving parts. No damage to the user will be
104
+ expected from the drive belts. The force of the build plate may cause minor injury, so stay out of the reach of
105
+ the build plate during operation.
106
+ Always unplug the product before performing maintenance or modifications.
107
+ Risk of burns
108
+ There is a potential risk of burns: the print heads of the Ultimaker 3D printers can reach temperatures above
109
+ 200 °C, while the heated bed can reach temperatures above 100 °C. Do not touch either of these parts
110
+ with your bare hands.
111
+ Always allow the product to cool down for 30 minutes before performing maintenance or modifications.
112
+ 1.4 Health and safety
113
+ 3D printing thermoplastics may result in the release of ultrafine particles (UFPs) and volatile organic
114
+ compounds (VOCs) depending on the thermoplastic used and settings of the 3D printer.
115
+ Ultimaker products are designed for use with Ultimaker materials and are open for use with materials from
116
+ third-party suppliers.
117
+ Ultimaker materials
118
+ Ultimaker materials can be printed safely without any filtering using the recommended temperatures and settings
119
+ in a well-ventilated area (minimum refresh rate of 1.8 for a room size of 30.6 m3). When multiple Ultimaker 3D
120
+ printers are operated in a contained environment, emissions of UFPs andor VOCs may vary. Please consider other
121
+ safety measures, such as a filter, cabinet andor dedicated ventilation system depending on your specific situation.
122
+ Third-party materials
123
+ Third-party material manufacturers can supply print profiles that contribute to reliable results with Ultimaker
124
+ hardware (“The Ultimaker Material Alliance Program”).
125
+ The use of print profiles and materials from Ultimaker’s Material Alliances andor other suppliers may require
126
+ additional safety measures such as a filter for the safe usage of such materials. Please take the relevant information
127
+ provided by the supplier of such filamentmaterial into account at all times for safe operation. Please check the
128
+ safety data sheet of each specific material for information.
129
+ Ultimaker cannot be held responsible for any adverse effects from the use andor performance of these materials.
130
+ Ultimaker S3 and Ultimaker S5 user manual 6
131
+ 1.5 FCC ISED regulatory notices
132
+ Modification statement
133
+ Ultimaker has not approved any changes or modifications to this device by the user. Any changes or modifications
134
+ could void the user’s authority to operate the equipment.
135
+ Interference statement
136
+ This device complies with Part 15 of the FCC Rules. Operation is subject to the following two conditions: (1) this
137
+ device may not cause interference, and (2) this device must accept any interference, including interference that may
138
+ cause undesired operation of the device.
139
+ Wireless notice
140
+ This device complies with FCC/ISED radiation exposure limits set forth for an uncontrolled environment and meets
141
+ the FCC radio frequency (RF) Exposure Guidelines and RSS‐102 of the ISED radio frequency (RF) Exposure rules. This
142
+ transmitter must not be co-located or operating in conjunction with any other antenna or transmitter.
143
+ FCC Class A digital device notice
144
+ This equipment has been tested and found to be compliant with the limits for a Class A digital device, pursuant to
145
+ part 15 of the FCC Rules. These limits are designed to provide reasonable protection against harmful interference
146
+ when the equipment is operated in a commercial environment. This equipment generates, uses, and can radiate
147
+ radio frequency energy, and if not installed and used in accordance with the instruction manual may cause harmful
148
+ interference to radio communications. Operation of this equipment in a residential area is likely to cause harmful
149
+ interference, in which case the user will be required to correct the interference at his or her own expense.
150
+ 2. Introduction
151
+ 1
152
+ 1
153
+ 5 10
154
+ 10
155
+ 11
156
+ 11
157
+ 12
158
+ 12
159
+ 13
160
+ 5 13
161
+ 6
162
+ 6
163
+ 2
164
+ 2
165
+ 3
166
+ 3
167
+ 4 7 8 9
168
+ 4 7 8 9
169
+ Ultimaker S3 and Ultimaker S5 user manual 8
170
+ 2.1 Main components
171
+ Ultimaker S3
172
+ 1. Glass door(s)
173
+ 2. Print head
174
+ 3. Build plate
175
+ 4. Build plate clamps
176
+ 5. Touchscreen
177
+ 6. USB port
178
+ 7. Feeder 2
179
+ 8. Bowden tubes
180
+ 9. Feeder 1
181
+ 10. Power socket and switch
182
+ 11. Ethernet port
183
+ 12. Double spoolholder
184
+ with NFC cable
185
+ 13. NFC socket
186
+ Ultimaker S5
187
+ Ultimaker S3 and Ultimaker S5 user manual 9
188
+ Properties Technology Fused filament fabrication (FFF)
189
+ Print head Dual-extrusion print head with a unique auto-nozzle lifting system and swappable
190
+ print cores
191
+ Build volume (XYZ) Ultimaker S3
192
+ 230 x 192 x 200 mm
193
+ (9 x 7.4 x 7.9 in)
194
+ Ultimaker S5
195
+ 330 x 240 x 300 mm
196
+ (13 x 9.4 x 11.8 in)
197
+ Layer resolution 0.25 mm nozzle 150 - 60 micron
198
+ 0.4 mm nozzle 200 - 20 micron
199
+ 0.6 mm nozzle 300 - 20 micron
200
+ 0.8 mm nozzle 600 - 20 micron
201
+ XYZ resolution 6.9, 6.9, 2.5 micron
202
+ Build speed 24 mm³/s
203
+ Build plate Heated glass build plate (20 - 140 °C)
204
+ Nozzle diameter 0.4 mm (included)
205
+ 0.25 mm, 0.6 mm, 0.8 mm (sold separately)
206
+ Operating sound 50 dBA
207
+ Connectivity Wi-Fi, LAN, USB port
208
+ Physical dimensions Dimensions Ultimaker S3
209
+ 394 x 489 x 637
210
+ (15.5 x 19.3 x 25.1 in)
211
+ (inc. Bowden tubes and spool holder)
212
+ Ultimaker S5
213
+ 495 x 457 x 520 (19.5 x 18 x 20.5 in)
214
+ 495 x 585 x 780 (19.5 x 23 x 30.7 in)
215
+ (inc. Bowden tubes and spool holder)
216
+ Net weight Ultimaker S3
217
+ 14.4 kg (31.7 lbs)
218
+ Ultimaker S5
219
+ 20.6 kg (45.4 lbs)
220
+ Electrical requirements Voltage 100 - 240 VAC
221
+ Frequency 50 - 60 Hz
222
+ Power Ultimaker S3
223
+ Max. 350 W
224
+ Ultimaker S5
225
+ Max. 500 W
226
+ Software Supplied software Ultimaker Cura, our free print preparation software
227
+ Ultimaker Connect, our free printer management solution
228
+ Ultimaker Cloud, enables remote printing
229
+ Supported OS MacOS, Windows and Linux
230
+ Warranty Warranty period 12 months
231
+ 2.2 Specifications
232
+ 3. Installation
233
+ Ultimaker S3 and Ultimaker S5 user manual 11
234
+ 3.1 Unboxing
235
+ The Ultimaker S3 and Ultimaker S5 come in reusable, durable packaging, specially designed to protect your 3D
236
+ printer. Follow the steps below properly to unpack your Ultimaker printer
237
+ It is recommended to remove the packaging with the box placed on the floor for safety. Please retain all
238
+ packaging for warranty purposes.
239
+ 1. Remove the plastic locking clips from the lower section of the box
240
+ 2. Holding the handles, lift the upper section of the box to reveal the printer
241
+ 3. Remove all foam and cardboard pieces from the top of the printer, including the materials
242
+ 4. Place the printer on a flat surface
243
+ When placing the printer on a shelf or table, take proper measures to prevent the printer from falling.
244
+ According to the UL 60950-1 definition, the Ultimaker S5 is not considered portable. The printer must be lifted
245
+ by at least two people during installation.
246
+ 5. Remove the rubber door seals and carefully open the glass door(s)
247
+ 6. Take out the accessory box, cardboard section, and foam pieces from inside the printer
248
+ 7. Remove the plastic protection from the touchscreen
249
+ If the printer needs to be transported without the outer box, be aware of the weight and dimensions of the
250
+ printer. Use proper transport means to do this safely.
251
+ 3.2 What’s in the box
252
+ The Ultimaker S3 S5 is supplied with several hardware accessories. Check if all these items are
253
+ included before continuing
254
+ Accessories
255
+ 1. Glass build plate
256
+ 2. Spool holder with material guide
257
+ 3. Power cable
258
+ 4. Ethernet cable
259
+ 5. USB stick
260
+ 6. Print core AA 0.4
261
+ 7. Print core BB 0.4
262
+ 8. XY calibration sheet
263
+ 9. Calibration card
264
+ 10. Nozzle cover (3x)
265
+ Consumables
266
+ 11. Tough PLA
267
+ 12. PVA
268
+ 13. Glue stick
269
+ 14. Oil
270
+ 15. Grease
271
+ Tools
272
+ 16. Hex screwdriver 2mm
273
+ 3.3 Hardware installation
274
+ Connect the Bowden tubes (Ultimaker S5 only)
275
+ 1. Remove the clamp clips from the print head and insert the Bowden tubes. The Bowden tubes connect to the
276
+ corresponding numbers on the print head
277
+ 2. Secure the Bowden tubes with the clamp clips
278
+ 3. Fit the print head cable clips onto Bowden tube 2
279
+ Ultimaker S3 and Ultimaker S5 user manual 12
280
+ Place and connect the NFC spool holder
281
+ 1. Insert the spool holder into the back panel and push until it snaps into place
282
+ 2. Secure the cable from the spool holder behind the cable clips in the back panel
283
+ 3. Connect the cable to the NFC socket at the back of the printer
284
+ Place the glass build plate
285
+ 1. Open the glass door(s)
286
+ 2. Open the two build plate clamps at the front of the build plate
287
+ 3. Gently slide the glass plate onto the build plate and ensure that it snaps into the build plate clamps at the back
288
+ 4. Close the two build plate clamps at the front to secure the glass build plate and close the glass door(s)
289
+ Connect the power cable
290
+ 1. Connect the power cable to the power socket at the back of the Ultimaker S3 S5
291
+ 2. Plug the other end of the cable into a wall outlet
292
+ 3.4 Welcome setup
293
+ To perform the setup steps, turn on the printer with the power switch at the back. You will first be asked to select
294
+ your preferred language. After this, the welcome setup will appear on the touchscreen. Follow the steps on the
295
+ touchscreen and described on the next pages of this user manual.
296
+ The language on the printer can always be changed by navigating to Preferences → Settings → Language.
297
+ Confirm that the glass build plate is installed
298
+ For the first use, it is recommended to print with the materials delivered with the printer Tough PLA and PVA. In
299
+ this step of the welcome setup, you must confirm that you have installed the glass build plate.
300
+ A thin layer of glue should be applied to the glass build plate to ensure reliable adhesion.
301
+ Print core installation
302
+ Print cores
303
+ The Ultimaker S3 S5 uses two print cores in the print head, which are interchangeable. There are three
304
+ types of print cores
305
+ • Type AA for printing build materials and Ultimaker Breakaway material
306
+ • Type BB for printing water-soluble support material
307
+ • Type CC for printing composite materials
308
+ Each Ultimaker S3 S5 comes with two print cores AA 0.4 (one is already installed in print head slot 1) and one print
309
+ core BB 0.4. This means that it is possible to print with two build materials or with a build and support material.
310
+ The print cores contain information on a small chip, so that the printer always knows which print cores are installed
311
+ and which materials can be used with this print core.
312
+ Install the second print core
313
+ During the welcome setup, the second print core must be installed. To do this, go to Print core 2 and select Start.
314
+ Follow the steps on the Ultimaker S3 S5
315
+ 1. Carefully open the glass door(s) and the print head fan bracket and press Confirm
316
+ 2. Install the second print core (BB 0.4) in print head slot 2 by squeezing the levers of the print core and sliding it
317
+ into the print head until you hear a click
318
+ Ultimaker S3 and Ultimaker S5 user manual 13
319
+ Do not touch the contact points on the back of the print core with your fingers.
320
+ Make sure you keep the print core completely vertical while installing it so it will smoothly slide
321
+ into the print head.
322
+ 3. Carefully close the print head fan bracket and Confirm to proceed with the welcome setup
323
+ Load materials
324
+ Before you can start printing on the Ultimaker S3 S5, you need to load materials into the printer. For the first use,
325
+ it is recommended to use the spools of Tough PLA and PVA that come with the printer.
326
+ Load material 2
327
+ Material 2 will be loaded first because it must be placed closest to the back of the printer. Select Material 2 from the
328
+ list shown on the touchscreen, select Start, then take the following steps to load the material
329
+ 1. Unpack the material and cut off the end, ensuring a short, sharp tip
330
+ 2. Place the spool with material 2 (PVA) onto the spool holder and select Confirm. Make sure the end of the
331
+ material points in a clockwise direction, so that the material can enter feeder 2 from the bottom
332
+ 3. Wait until the printer detects the material and Confirm
333
+ When using a third-party material, you can select the material type manually.
334
+ 4. Insert the end of the material into feeder 2 and gently push it until the feeder grips it and the material is visible
335
+ in the Bowden tube. Select Confirm to continue
336
+ You can straighten the end of the material a bit so it can enter the feeder easier.
337
+ 5. Wait for the Ultimaker S3 S5 to heat up print core 2 and load the material into the print head
338
+ 6. Confirm when the new material extrudes consistently from print core 2
339
+ 7. Wait a moment for print core 2 to cool down
340
+ Load material 1
341
+ After completing these steps, load material 1. Material 1 will be put on the material guide first before placing
342
+ it on the spool holder to avoid tangling of the 2 materials during printing. Select Material 1 from the list on the
343
+ touchscreen, select Start, and follow the steps below
344
+ 1. Take the material guide and hold it with the outer part towards you
345
+ 2. Place the spool on the material guide with the material in a counter-clockwise direction, and guide the end of
346
+ the material through the hole in the material guide
347
+ 3. Place the material guide with material 1 on it onto the spool holder behind material 2, and select Confirm
348
+ 4. Wait until the Ultimaker S3 S5 detects the material and select Confirm
349
+ When using a third-party material, you can select the material type manually.
350
+ 5. Insert the end of the material into feeder 1 and gently push it until the feeder grips it and the material is visible
351
+ in the Bowden tube. Select Confirm to continue
352
+ You can straighten the end of the material so it can enter the feeder easier.
353
+ 6. Wait for the printer to heat up print core 1 and to load the material into the print head
354
+ 7. Confirm when the new material extrudes consistently from print core 1
355
+ 8. Wait a moment for print core 1 to cool down
356
+ Network installation
357
+ The Ultimaker S3 S5 can connect to a local area network, using either Wi-Fi or Ethernet. To connect to a network,
358
+ select Network setup from the welcome setup list. If you want to set up a wireless network connection, select Wi-Fi
359
+ setup. If you want to use Ethernet, or don’t want to set up network connectivity, select Skip.
360
+ Ultimaker S3 and Ultimaker S5 user manual 14
361
+ Set up Wi-Fi
362
+ To connect your Ultimaker S3 S5 to a wireless network, you will need a computer or a smartphone. Start the Wi-Fi
363
+ setup and follow the steps on the touchscreen
364
+ 1. Wait until your printer has created a Wi-Fi hotspot. This may take a minute
365
+ 2. Use a computer or smartphone to connect to the printer. The name of the Wi-Fi network is shown on the
366
+ touchscreen of the printer
367
+ 3. A pop-up will appear on the display of your computer or smartphone. Follow the steps to connect the printer to
368
+ your local Wi-Fi network. The pop-up will disappear when you have completed these steps
369
+ If the pop-up does not appear, open a browser and go to a website that is not yet known by your browser.
370
+ Within some network environments the Ultimaker S3 S5 may experience difficulties connecting wirelessly.
371
+ When this happens, repeat the Wi-Fi setup from another computer or smartphone.
372
+ 4. Go back to the Ultimaker printer and wait until the Wi-Fi setup is finished
373
+ If you skip the Wi-Fi setup during the welcome setup, you can start it again by going to Preferences → Network
374
+ → Start Wi-Fi setup.
375
+ Connect via Ethernet
376
+ You can set up a wired network connection after completing the welcome setup by performing the following steps
377
+ 1. Connect one end of an Ethernet cable to the Ethernet port at the back of the printer
378
+ 2. Connect the other end of the cable to a network source (router, modem, or switch)
379
+ 3. Enable Ethernet in the network menu by navigating to Preferences → Network and enabling Ethernet
380
+ 3.5 Firmware update
381
+ After completing the welcome setup, the Ultimaker S3 S5 will check if the latest firmware is installed. If an
382
+ older version is detected, it will download the latest firmware and install it. This process may take several
383
+ minutes to complete.
384
+ You will only be prompted to install the latest firmware if the printer is connected to a network and an older
385
+ firmware version is detected. If the printer is not connected to the network, go to ultimaker.com/firmware to
386
+ check if an update is available.
387
+ 3.6 Install Ultimaker Cura
388
+ After successfully setting up your printer, install Ultimaker Cura – Ultimaker’s free print preparation and
389
+ management software – on your computer. You can download Ultimaker Cura at ultimaker.com/software.
390
+ For more information about Ultimaker Cura and system requirements, please consult the Ultimaker Cura user
391
+ manual at ultimaker.com/manuals.
392
+ After downloading, run the installer to complete the installation of Ultimaker Cura. When opening Ultimaker Cura
393
+ for the first time, you will be asked to select your 3D printer. Select the Ultimaker S3 or Ultimaker S5 profile, and
394
+ you are ready to go. You can now directly connect to the printer and start using Ultimaker Cura.
395
+ For best printing results, always use the latest version of Ultimaker Cura.
396
+ 4. Operation
397
+ Ultimaker S3 and Ultimaker S5 user manual 16
398
+ 4.1 Touchscreen
399
+ You can control the Ultimaker S3 S5 by using the touchscreen at the front of the printer.
400
+ When turned on for the first time, the Ultimaker S3 S5 will run the welcome setup. After this, the main menu will
401
+ be shown when the printer is turned on.
402
+ Interface
403
+ The main menu offers three options, represented by the following icons
404
+ The status overview is automatically shown next to the main menu. From here, you can start a print from USB,
405
+ or view the progress of the print while printing
406
+ The configuration overview shows the current configuration of the printer. Here you can see which print cores
407
+ and materials are installed, as well as change the configuration
408
+ The preferences overview consists of three sub-menus settings, maintenance, and network. In the printer
409
+ settings menu, you can change general settings, such as the language. In the maintenance menu, you can
410
+ perform the most important maintenance and calibration procedures, in addition to saving logs for diagnostics.
411
+ The network menu allows you to change network settings or to perform the Wi-Fi setup
412
+ 4.2 Materials
413
+ Material compatibility
414
+ The Ultimaker S3 S5 comes with two AA print cores and one BB print core. The AA print cores can be used
415
+ for printing build materials and Breakaway support material. The BB print core can be used for water-soluble
416
+ support material (PVA).
417
+ The Ultimaker S3 S5 supports all Ultimaker materials that are currently available, of which most can be printed with
418
+ the 0.25, 0.4 and 0.8 mm print core sizes. The print core CC Red 0.6 can be used for third party composite materials.
419
+ For an overview of possible dual-extrusion material combinations, please refer to the Ultimaker website.
420
+ All Ultimaker materials have been extensively tested and have optimized profiles in Ultimaker Cura to ensure
421
+ the best print results. Therefore, it is advised to use one of the default profiles in Ultimaker Cura for the highest
422
+ reliability. Using Ultimaker materials will also allow you to benefit from the NFC detection system. Ultimaker
423
+ spools will automatically be recognized by the Ultimaker S3 S5. This information can be directly transferred
424
+ to Ultimaker Cura when connected to a network, allowing for a seamless connection between the printer and
425
+ Ultimaker Cura software.
426
+ Print recommendations
427
+ Each material requires different settings for optimal results. If you use Ultimaker Cura to prepare your model, these
428
+ settings are automatically set correctly if the correct print core(s) and material(s) are selected.
429
+ It is recommended to apply a thin layer of glue (using the glue stick from the accessory box) or an adhesion sheet
430
+ to the glass build plate before starting a print. This will ensure that your print adheres reliably to the build plate,
431
+ and also prevents the glass build plate from chipping when removing prints.
432
+ For detailed instructions on which settings and adhesion method to use per material, take a look at the material
433
+ manuals on the Ultimaker website.
434
+ 1 2 3 4
435
+ 8 7 6
436
+ 9
437
+ 5
438
+ Ultimaker S3 and Ultimaker S5 user manual 17
439
+ 4.3 Preparing a print with Ultimaker Cura
440
+ Interface
441
+ After you have added the Ultimaker S3 S5 in Ultimaker Cura, the main interface will become visible. Here is an
442
+ overview of the user interface (UI)
443
+ UI elements
444
+ 1. Open file
445
+ 2. Printer selection panel
446
+ 3. Stages
447
+ 4. Configuration panel
448
+ 5. Print settings panel
449
+ 6. Action panel
450
+ 7. 3D viewer
451
+ 8. Camera position tool
452
+ 9. Adjustment tools
453
+ The Ultimaker Cura workflow is arranged in three stages, seen at the top of the interface. These are the prepare,
454
+ preview, and monitor stage.
455
+ Ultimaker S3 and Ultimaker S5 user manual 18
456
+ Prepare stage
457
+ Load a model and prepare it for slicing in the prepare stage of Ultimaker Cura.
458
+ 1. Load the model(s) by clicking the ‘open file’ folder icon
459
+ 2. In the configuration panel select the material type you wish to print with
460
+ When printing with a networked printer, the loaded configuration is detected. You can select this configuration
461
+ to prepare your print faster.
462
+ 3. Use the adjustment tools to position, scale, and rotate the model as desired
463
+ The adjustment tools are visible when a model is loaded and selected on the 3D viewer.
464
+ 4. Select your desired settings (profile, layer height, infill, support and build plate adhesion) in the
465
+ print settings panel
466
+ When printing with support, you have the option to select the extruder that you want to use to print
467
+ the support structure. This allows you to print your model with PVA support, Breakaway support, or
468
+ build material support.
469
+ 5. When satisfied with your print settings and print strategy, press the Slice button on the action panel
470
+ 6. When slicing is complete, the action panel will now direct you to the preview stage
471
+ Preview stage
472
+ The preview stage allows you to see exactly how your model will be printed. Use the different color schemes to get
473
+ various information about your model. You can view the different line types, differentiate infill from skin, or use the
474
+ X-ray view to detect gaps within your model.
475
+ When satisfied with your model print preview, the action panel guides you to print over the network if connected to
476
+ a networked Ultimaker 3D printer.
477
+ Alternatively, you can save the file to a USB for printing with offline printers.
478
+ For more information on the preview stage, see the Ultimaker Cura user manual found on the Ultimaker website.
479
+ Monitor stage
480
+ The monitor stage utilizes the Ultimaker Connect functionality so you can easily monitor the status of your
481
+ networked Ultimaker printer.
482
+ When printing on a networked Ultimaker printer, you can send your print jobs directly from Ultimaker Cura to the
483
+ printer. The status of the printer will change to printing, and you can begin to track the print’s progress. You can
484
+ also queue multiple print jobs and view them in the print queue.
485
+ Ultimaker Cloud
486
+ The Ultimaker S3 S5 benefits from integration with Ultimaker Cloud. To make the most out of your Ultimaker
487
+ printer, you must first associate the printer with your Ultimaker account.
488
+ For more information on the Ultimaker cloud, visit mycloud.ultimaker.com.
489
+ Ultimaker S3 and Ultimaker S5 user manual 19
490
+ 4.4 Remove the print
491
+ Once your 3D print is finished it must be removed from the build plate. There are several methods to do this.
492
+ When using a brim, be aware of the danger of cutting yourself when removing the print from the build plate.
493
+ Use a deburring tool to remove the brim once the print is taken from the build plate.
494
+ Wait for cooldown
495
+ If you printed directly onto the build plate, simply allow the build plate and the print to cool down after printing.
496
+ The material will contract as it cools, allowing you to easily remove the print from the build plate.
497
+ Use a spatula
498
+ If your print is still adhered to the build plate after cooling, you can use a spatula to remove the print. Place the
499
+ spatula under the print, parallel to the build plate, and apply a small amount of force to remove the print. A
500
+ spatula can also be used to carefully remove remaining parts of the print from the build plate, such as the brim
501
+ or support structures.
502
+ Take the build plate out of the printer to avoid damaging the build plate clamps.
503
+ Use water
504
+ Remove the build plate from the printer, but be careful if it is still warm. Run cool tap water over the bottom of the
505
+ plate to cool it quickly. Due to the contraction of the material, the print will easily pop off the plate.
506
+ Alternatively, you can run lukewarm water over the print side of the plate to dissolve the glue. If PVA was used,
507
+ place the build plate and print in water to dissolve the PVA. This makes print removal easier.
508
+ Once the print has been removed and the build plate is placed back in the printer, select Confirm removal on the
509
+ display of the Ultimaker S3 S5. This will allow the next print job to start.
510
+ Ultimaker S3 and Ultimaker S5 user manual 20
511
+ 4.5 Remove support material
512
+ Dissolve PVA support
513
+ PVA support structures can be removed by dissolving the PVA in water. This takes several hours and leaves no trace
514
+ afterwards. Follow the steps below and refer to the Ultimaker website for additional tips and information.
515
+ 1. Submerge the print in water to let the PVA dissolve
516
+ 2. Rinse the print with clean water to remove any excess PVA
517
+ 3. Let the print dry completely
518
+ 4. Dispose of the waste water
519
+ PVA is a biodegradable material, and in most cases disposing of the water afterwards is easy. However, it is
520
+ recommended that you check local regulations for more comprehensive guidance. The water can be disposed
521
+ of down the drain, providing the waste water distribution network is connected to a wastewater treatment
522
+ plant. After disposal, run hot water from the tap for approximately 30 seconds to remove any excess saturated
523
+ PVA water and to avoid longer-term clogging issues.
524
+ It is possible to use the water for more than one print, however, this might extend the dissolving time.
525
+ Through repeated use, water becomes saturated by previously dissolved PVA. For the quickest result, fresh
526
+ water is recommended.
527
+ Remove Breakaway support
528
+ Prints using Ultimaker Breakaway as support material will require post-processing to remove the support
529
+ structures. This can be achieved by breaking the support structures from the build material. Follow the steps below
530
+ and refer to the Ultimaker website for additional tips and information.
531
+ It is advised to wear protective gloves when the support structure contains sharp corners or when
532
+ working with larger models.
533
+ 1. Tear away the majority of the inner support structure using pliers
534
+ 2. Use cutting pliers to grip the Breakaway support interface and pull it away from the model
535
+ 3. Remove the last traces of the Breakaway support using cutting pliers or tweezers
536
+ Ultimaker S3 and Ultimaker S5 user manual 21
537
+ 4.6 Change the printer configuration
538
+ Change materials
539
+ Materials can be changed easily on the Ultimaker S3 S5 by using the procedure from the menu. Besides changing
540
+ a material, you also have the option to only load or unload a material. Make sure compatible print cores are
541
+ installed before you insert materials.
542
+ 1. Go to the configuration menu, select the material you want to use, and select Change
543
+ 2. Wait for the print core to heat up and unload the material
544
+ 3. Remove the material from the feeder and spool holder. Confirm to continue
545
+ 4. Place the new material on the spool holder and Confirm to continue
546
+ 5. Wait until the Ultimaker S3 S5 detects the material
547
+ When using a third-party material, you can select the material type manually.
548
+ 6. Insert the end of the material into the feeder and gently push it until the feeder grips it and the material is
549
+ visible in the Bowden tube. Select Confirm to continue
550
+ 7. Wait for the Ultimaker S3 S5 to heat up the print core and to load the material into the print head
551
+ 8. Confirm when the new material extrudes consistently from the print core
552
+ 9. Wait a moment for the print core to cool down
553
+ Change print core
554
+ Print cores can be easily changed on the Ultimaker S3 S5 by using the procedure from the menu. Besides
555
+ changing print cores, you also have the option to only load or unload a print core
556
+ 1. Go to the configuration menu, select the print core you want to change and select Change
557
+ 2. Wait for the print core to heat up, unload the material, and cool down again
558
+ 3. Open the glass door(s) of the Ultimaker S3 S5 and the print head fan bracket and Confirm when completed
559
+ 4. Remove the print core carefully by squeezing the levers and sliding the print core out of the print head
560
+ Do not touch the contact points on the back side of the print core with your fingers.
561
+ Make sure you keep the print core completely vertical while removing or installing it so it will smoothly slide
562
+ into/out of the print head.
563
+ 5. Insert a print core by squeezing the levers of the print core and sliding it into the print head slot
564
+ until you hear a click
565
+ It is possible that the print core includes a ring around the nozzle. Remove this nozzle ring before using the
566
+ print core on the Ultimaker S3 S5. Learn more about this on the Ultimaker website.
567
+ 6. Carefully close the print head fan bracket and Confirm when completed
568
+ 7. Wait for the Ultimaker S3 S5 to load the material into the print core
569
+ Ultimaker S3 and Ultimaker S5 user manual 22
570
+ 4.7 Calibration
571
+ Build plate leveling
572
+ While using the Ultimaker S3 S5, build plate calibration should be performed to ensure reliable adhesion of the
573
+ print to the build plate. If the distance between the nozzles and build plate is too wide, your print won’t adhere
574
+ properly to the glass build plate. On the other hand, if the nozzles are too close to the build plate, it can prevent
575
+ material from extruding.
576
+ Make sure there is no excess material below the tip of the nozzles and the build plate is clean before the start
577
+ of a print or when you want to calibrate the build plate, or you might get inaccurate results.
578
+ Active leveling
579
+ Active leveling is automatically performed by the printer at the start of a print, to create a reliable first layer. During
580
+ active leveling, the Ultimaker S3 S5 will create a detailed heightmap of the build plate surface. This information
581
+ is used to compensate for any inaccuracies of the build surface during the first layers of the print. It will do this by
582
+ adjusting the build plate height while printing.
583
+ Do not touch the Ultimaker S3 S5 during the active leveling procedure, as this could affect the
584
+ calibration process.
585
+ Manual leveling
586
+ Perform manual leveling when the level of the build plate is too far out of range for active
587
+ leveling to compensate for.
588
+ 1. Go to Preferences → Maintenance → Build plate → Manual leveling and select Start
589
+ 2. Wait while the Ultimaker S3 S5 prepares the manual leveling procedure
590
+ 3. Use the touchscreen controls to move the build plate until there is approximately 1 mm distance between
591
+ the first nozzle and the build plate. Make sure that the nozzle is close to the build plate without touching it.
592
+ Select Confirm to continue
593
+ 4. Adjust the front-right thumb wheel to roughly level the front of the build plate. There should be a distance of
594
+ approximately 1 mm between the nozzle and build plate
595
+ 5. Repeat step 4 for the front-left thumb wheel and select Confirm to continue
596
+ 6. Place the calibration card in between the nozzle and build plate. Use the touchscreen controls to adjust the
597
+ position of the build plate until you feel some resistance when moving the card. Select Confirm to continue
598
+ Do not apply force to the build plate while fine-tuning with the calibration card, as this will lead to
599
+ leveling inaccuracies.
600
+ 7. Place the calibration card between the nozzle and build plate in the front right corner. Adjust the thumb wheel
601
+ until you feel some resistance when moving the card. Select Confirm to continue
602
+ 8. Repeat step 7 for the front left corner and select Confirm to continue
603
+ After calibrating the build plate with the first nozzle, the second nozzle needs to be aligned as well, to ensure
604
+ the heights of both nozzles are set correctly. For this it is only necessary to set the correct height by using
605
+ the calibration card.
606
+ 9. Place the calibration card in between the second nozzle and build plate. Use the controls to adjust the position
607
+ of the build plate until you feel some resistance when moving the card. Select Confirm to continue
608
+ 10. Wait for the printer to finish the manual leveling procedure
609
+ Ultimaker S3 and Ultimaker S5 user manual 23
610
+ XY offset calibration
611
+ Besides the vertical offset, the horizontal distance between the nozzles in the X and Y direction also needs to be
612
+ configured. The print cores that are supplied with the Ultimaker S3 S5 are already calibrated, but when the printer
613
+ detects a new combination, you will need to perform an XY offset calibration. This calibration must only be done
614
+ once; after this, the information is stored on the printer. A correct XY calibration will ensure that the two colors or
615
+ materials align well.
616
+ To perform the calibration you will need an XY calibration sheet, which can be found in the accessory box or
617
+ downloaded at ultimaker.com/XYcalibration.
618
+ Ensure 2 print cores and materials are installed before starting the calibration.
619
+ 1. Go to Preferences → Maintenance → Print head → Calibrate XY offset and select Start calibration
620
+ 2. The Ultimaker S3 S5 will now print a grid pattern on the build plate. Wait until it is complete
621
+ 3. Once the Ultimaker S3 S5 has cooled down, remove the build plate from the printer and align it with the XY
622
+ calibration sheet. Ensure the printed grid is precisely placed on the two rectangles on the sheet
623
+ 4. Find the best aligned lines on the printed X grid and note which number corresponds to these lines. Enter this
624
+ number as the X offset value on your Ultimaker S3 S5
625
+ 5. Find the best aligned lines on the printed Y grid and note which number corresponds to these lines. Enter this
626
+ number as the Y offset value on your Ultimaker S3 S5
627
+ It is important that the printed XY offset pattern adheres well to the build plate and shows no signs of underextrusion.
628
+ If it did not print well, it is recommended to repeat the calibration print.
629
+ Lift switch calibration
630
+ The switch bay enables the second print core to be lifted and lowered. For successful dual-extrusion prints, it is
631
+ important that print core switching functions well. The lift switch is already calibrated when the Ultimaker S3 S5 is
632
+ shipped, but calibration can also be performed manually if needed.
633
+ 1. Go to Preferences → Maintenance → Print head → Calibrate lift switch and select Start calibration
634
+ 2. Move the lift switch on the side of the print head to point towards you. Select Confirm to continue
635
+ 3. Move the print head so that the lift switch fits in the switching bay. Select Confirm when completed
636
+ 4. Wait for the print head to go to the home position and test the lift switch
637
+ 5. Did the lift switch lower and raise the print core? If so, press Yes to complete the calibration. If not, select No to
638
+ perform the calibration again
639
+ 5. Maintenance
640
+ Ultimaker S3 and Ultimaker S5 user manual 25
641
+ 5.1 Update the firmware
642
+ Periodically, a new version of the Ultimaker S3 S5 firmware is released. To ensure that your Ultimaker S3 S5 is
643
+ equipped with the latest features it is recommended to keep the firmware updated to the latest version.
644
+ Update over the network
645
+ If the Ultimaker S3 S5 is connected to a network, it automatically checks for available firmware updates. When
646
+ a new firmware is available, the printer will prompt you to download and install it via the touchscreen interface.
647
+ Alternatively, check for updates manually by navigating to System → Maintenance → Update firmware.
648
+ Do not power off the printer during the firmware installation.
649
+ Update using a USB stick
650
+ If your Ultimaker S3 S5 is not connected to a network, you can update to the latest firmware via USB. The firmware
651
+ files are found on the Ultimaker website
652
+ 1. Navigate to ultimaker.com/firmware, and select your printer
653
+ 2. Download the firmware image and store it in the root directory of your USB stick
654
+ 3. Insert the USB stick into the USB port of the printer
655
+ 4. Go to Preferences → Maintenance → Update firmware and select the new firmware in the update menu
656
+ 5.2 Material handling and storage
657
+ Opened material spools must be stored properly when not in use. If material is stored incorrectly, it may affect its
658
+ quality and usability.
659
+ The optimal storage temperature for PLA, Tough PLA, Nylon, CPE, CPE+, PC, TPU 95A, PP, and Breakaway is between
660
+ -20 to +30 °C. For ABS, the advised temperature is between 15 and 25 °C, and for PVA 0 to 30 °C. Furthermore,
661
+ a relative humidity of below 50% is recommended for PVA, TPU 95A, PP, and Breakaway. If these materials are
662
+ exposed to a higher humidity, the quality of the material can be affected.
663
+ To keep your materials in optimal condition, opened spools should be stored
664
+ • Cool and dry
665
+ • Out of direct sunlight
666
+ • In a resealable bag
667
+ To minimize moisture absorption, store the material in a resealable bag including the supplied desiccant (silica
668
+ gel) directly after printing.
669
+ Ultimaker S3 and Ultimaker S5 user manual 26
670
+ 5.3 Maintenance schedule
671
+ To keep your Ultimaker S3 S5 in optimal condition, we recommend the following maintenance schedule, based on
672
+ 1,500 printing hours per year.
673
+ If the usage frequency is higher, we recommend performing more frequent maintenance on your printer to
674
+ ensure optimal printing results.
675
+ Every month Clean the printer Keep the Ultimaker S3 S5 clean for optimal printing results. This includes
676
+ • Cleaning the glass build plate
677
+ • Removing degraded material from the outside of the nozzles
678
+ • Removing particles from the inside of the Bowden tubes
679
+ • Cleaning the inside and glass components
680
+ Lubricate the axles Apply a small drop of oil to the X, Y, and Z axles. Move the print head and build
681
+ plate to equally distribute the oil
682
+ Only use the supplied oil, as other oils or grease may affect the coating of
683
+ the axles
684
+ The X, Y and Z axles do not need lubrication for the first year of using the
685
+ printer. After this, they should be lubricated monthly
686
+ Every three months Check for play on the axles The X and Y axles in the frame should only rotate, not move back and forth.
687
+ Firmly attempt to move the axles individually. If there is play, follow the
688
+ instructions on the Ultimaker website to correct it.
689
+ Check the tension of the
690
+ short belts
691
+ The short belts attached to the X and Y motors should be tight to correctly
692
+ transfer the movement to the print head. If the belt tension is too low, follow the
693
+ instructions on the Ultimaker website to correct it.
694
+ Clean the front fan of the
695
+ print head
696
+ Thin strands of filament could end up in the fan. Check this regularly by opening
697
+ the front fan bracket. Remove any strands of filament with tweezers.
698
+ Check the quality of the
699
+ nozzle cover
700
+ The nozzle cover shields the print cores from cold airflow from the fans, helping
701
+ the print cores maintain a stable temperature while printing. The cover also
702
+ helps to prevent backflow of material into the print head when something goes
703
+ wrong during printing.
704
+ Check both sides of the cover for tears or damage from heat. If it is damaged,
705
+ replace the nozzle cover according to the instructions on the Ultimaker website.
706
+ Lubricate the lead screw
707
+ of the Z motor
708
+ Apply a small amount of grease to the lead screw of the Z motor. Move the build
709
+ plate up and down to equally distribute the grease.
710
+ Clean the feeders Small filament particles can gather on the feeder’s knurled wheel. Unload the
711
+ materials and open the feeders to clean the inside with a small brush. Follow the
712
+ instructions on the Ultimaker website.
713
+ Clean the print cores The BB print core benefits from preventive cleaning every 400 printing hours.
714
+ This removes any degraded material from the inside of the print core. Use
715
+ Ultimaker cleaning filament or PLA for applying hot and cold pulls.
716
+ Navigate to Preferences → Maintenance → Print head → Print core cleaning and
717
+ select Start to begin the cleaning procedure and follow the instructions on the
718
+ display.
719
+ Every year Lubricate the feeder gear Remove the feeder from the back panel to access the feeder gear. Clean it first,
720
+ then apply a small amount of grease to the gear. Follow the instructions on the
721
+ Ultimaker website.
722
+ Replace the Bowden tubes Materials can slightly scratch the inside of the Bowden tubes and the ends of the
723
+ tubes can get damaged by the tube coupling collets. It is advised to replace them
724
+ after one year of printing.
725
+ Clean system fans Check the fans at the back of the printer for dust and blow on the blades to clean
726
+ them.
727
+ Lubricate the door hinge(s) Apply a small drop of oil to the door hinge(s) to ensure the doors open and close
728
+ smoothly.
729
+ 6. Troubleshooting
730
+ Ultimaker S3 and Ultimaker S5 user manual 28
731
+ 6.1 Error messages
732
+ When the printer detects that something is wrong, or when it reads values outside of the allowed range,
733
+ an error will occur. The display will give a short description of the detected issue along with its unique
734
+ error code. For example
735
+ • This print job is not suitable for this printer. Go to ultimaker.com/ER42
736
+ Go to the specified page to learn more and for troubleshooting tips.
737
+ 6.2 Print core troubleshooting
738
+ Clogged print core
739
+ If material does not flow consistently, the print core could be clogged with degraded material. In this case, the
740
+ print core should be cleaned by performing the hot and cold pull method. Take a look at the Ultimaker website for
741
+ detailed instructions.
742
+ Print core not recognized
743
+ If a print core is not recognized by the Ultimaker S3 S5, the printer will inform you about this. The main reason for
744
+ this is dirty contact points on the PCB at the back side of the print core. When this happens, clean the contact points
745
+ with a cotton swab and some alcohol.
746
+ Do not touch the contact points on the back of the print core with your fingers.
747
+ 6.3 Print quality issues
748
+ Poor build plate adhesion
749
+ When you experience problems with the adhesion of a print to the build plate, the following
750
+ actions can be performed
751
+ • Ensure that the correct material settings and adhesion method were used (see chapter 4.2, ‘Materials’)
752
+ • Thoroughly clean the build plate and re-apply the glue or adhesion sheet
753
+ • Check the Ultimaker Cura settings that were used, and try printing with one of the default
754
+ Ultimaker Cura profiles
755
+ Warping
756
+ Warping occurs due to material shrinkage while printing, causing the corners of the print to lift and detach from
757
+ the build plate. When plastics are printed, they first expand slightly, then contract as they cool down. If material
758
+ contracts too much, the print will bend upwards from the build plate.
759
+ When your print is warping, follow the tips under ‘Poor build plate adhesion’. Additionally, read the design, material,
760
+ and printing guidelines of the 3D model assistant on the Ultimaker website.
761
+ Ultimaker S3 and Ultimaker S5 user manual 29
762
+ Grinding PVA
763
+ Incorrect material handling or storage can lead to grinding of the material. PVA should be printed and stored at a
764
+ low humidity to avoid problems while printing. We advise a humidity below 50% for storage and below 55% while
765
+ printing. It is also recommended to keep the ambient temperature below 28 °C while printing.
766
+ When PVA is ground down by the feeder, there are three main causes
767
+ • Incorrect storage. Ensure that the PVA is stored in the recommended conditions
768
+ • Coated Bowden tube. Clean the Bowden tube and dry it very well
769
+ • Clogged print core. Clean the print core with hot and cold pulls
770
+ For more information on how to resolve grinding issues, take a look at the Ultimaker website.
771
+ Under-extrusion
772
+ In simple terms, under-extrusion is when the printer is unable to supply a sufficient amount of material. Your
773
+ Ultimaker S3 S5 is under extruding when you see missing layers, very thin layers, or layers that have random
774
+ dots and holes in them.
775
+ Under-extrusion can have several causes
776
+ • Use of low-quality material (diameter inconsistency) or incorrect settings
777
+ • Feeder tension not correctly set
778
+ • Friction in the Bowden tube
779
+ • Small particles of material in the feeder or Bowden tube
780
+ • A partial clog in the print core
781
+ If your Ultimaker S3 S5 is affected by under-extrusion, it is advised to take a look at the Ultimaker website for
782
+ detailed troubleshooting instructions.
783
+ 7. Warranty
784
+ Ultimaker S3 and Ultimaker S5 user manual 31
785
+ 7.1 General
786
+ Ultimaker grants a standard warranty on the Ultimaker S3 / Ultimaker S5 (“Product”) in the country where the
787
+ product was purchased.
788
+ From the date the product is sold and delivered to an end-customer for the first time, as evidenced by the
789
+ original customer’s purchase invoice, Ultimaker warrants the product is free from defects in material, design and
790
+ workmanship for a period of twelve (12) months. Only the original purchaser is entitled to claim warranty and the
791
+ warranty period is limited to his/her lifetime.
792
+ For a warranty claim to be valid (i) notification must be made before the end of the warranty period, (ii) conform to
793
+ any additional stipulations of the warranty, as defined below, (iii) must be substantiated with the original customer’s
794
+ purchase invoice, (iv) the serial number sticker must still be on the product(s) and (v) the product must be returned
795
+ in the original packaging. Since customers will only be entitled to make a warranty claim on submission of the
796
+ original invoice and packaging, we advise that both the invoice and official packaging are kept in a safe place. If the
797
+ original packaging is not available anymore, the customer can purchase replacement packaging from a recognized
798
+ Ultimaker reseller.
799
+ The customer – provided that they are a natural person who is not acting in the course of their profession or
800
+ business – may claim the rights to which they are entitled under the warranty without prejudice to their rights or
801
+ claims in accordance with the law.
802
+ 7.2 Conditions
803
+ The Ultimaker warranty is granted under the explicit condition that
804
+ • The product was sold, delivered and assembled by a recognized Ultimaker reseller (see ultimaker.com for
805
+ addresses of the recognized Ultimaker resellers)
806
+ • The product was newly manufactured on the date of purchase and not sold as used, refurbished or
807
+ manufacturing seconds
808
+ • Ultimaker’s latest software was installed and used in and with the product
809
+ • The Ultimaker’s installation and maintenance instructions as described in the manual for the product have been
810
+ observed. Unless the manual contains ‘do-it-yourself’ assembly instructions for the product or part thereof and
811
+ these have been followed up meticulously, the warranty will become invalidated if the product was at any time
812
+ disassembled or reassembled by any other person than a recognized Ultimaker reseller
813
+ Customers are welcome and we encourage them to use third-party materials, accessories, etc. That in itself, does
814
+ not void the warranty. If, however, the use of third-party elements, causes damage to the product, the part(s)
815
+ affected by this damage is excluded from warranty.
816
+ If a part of the product is repaired or replaced during the warranty period, the warranty period still remaining for
817
+ the entire product will apply to this part. However, repair and/or replacement will not extend the warranty period.
818
+ Ultimaker S3 and Ultimaker S5 user manual 32
819
+ 7.3 Notification
820
+ The Ultimaker resellers deal with this warranty on behalf of Ultimaker. Therefore, any notification on the basis of
821
+ this warranty must be made to the Ultimaker reseller from whom the product was originally purchased, even if this
822
+ is not in the customer’s present country of residence.
823
+ Any warranty claim must first be recognized as justified, either by Ultimaker’s reseller or by Ultimaker. If so, the
824
+ reseller is obliged to rectify the defects free of charge according to this warranty. If the defect cannot be repaired,
825
+ the reseller will, within the warranty period, replace the product free of charge by an identical product, or, if the
826
+ product is no longer manufactured, by a similar replacement of the same value or offer an appropriate refund.
827
+ Depending on the country, the warranty may not automatically include costs incurred for shipping defective
828
+ products for scrutiny and/or repair, nor for shipping costs of replacement or repaired product(s) back to claimant.
829
+ 7.4 Exclusions
830
+ This warranty does not apply to and therefore does not cover
831
+ • Any defect or damage caused by inappropriate, incorrect or improper use, installation, maintenance, operation
832
+ and cleaning or normal wear and tear. For correct use, reference is made to the manual of the product
833
+ • Any other event, act, default or omission outside Ultimaker’s control
834
+ • Failure of the product caused by an accident
835
+ In any event, Ultimaker is not liable for indirect or consequential damages, including but not limited to loss of use,
836
+ loss of profit or revenue. Furthermore, Ultimaker’s liability is limited to the purchase value of the product.
837
+ 7.5 Applicable law and competent court
838
+ This warranty is exclusively governed by Dutch law. Any dispute arising out of or in connection with this warranty
839
+ will be exclusively submitted to the jurisdiction of the court (Rechtbank) of Midden-Nederland, location Utrecht.
Files/User_manual_v2.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a11bd00cb17c8f79e89fa2ce958d4a7816af4f696ad60837188b99c2051c86a
3
+ size 29182418
Files/environment_data.csv ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Sound_Level__dBA_,Light_Intensity
2
+ 54.8,93
3
+ 56.2,96
4
+ 56.4,96
5
+ 54.7,93
6
+ 54.7,93
7
+ 55.2,93
8
+ 55.9,94
9
+ 56.5,93
10
+ 54.8,94
11
+ 54.9,95
12
+ 56.7,94
13
+ 55.6,94
14
+ 55.2,95
15
+ 56.3,95
16
+ 56.4,96
17
+ 54.9,93
18
+ 54.7,94
19
+ 55.6,93
20
+ 54.9,93
Ignore/page17_img2.png ADDED
Ignore/page1_img1.png ADDED
Ignore/page1_img2.png ADDED
Ignore/page22_img4.png ADDED
Ignore/page27_img2.png ADDED
Ignore/page32_img2.png ADDED
Ignore/page32_img4.png ADDED
Ignore/page35_img2.png ADDED
Ignore/page36_img2.png ADDED
Ignore/page3_img1.png ADDED
Ignore/page40_img2.png ADDED
Ignore/page5_img2.png ADDED
Ignore/page6_img2.png ADDED
Ignore/page6_img4.png ADDED
Ignore/page7_img2.png ADDED
Image.png ADDED
Image/F170.jpg ADDED
Image/KATWalk.jpg ADDED
Image/Mockpanel.jpg ADDED
Image/Ultimaker.jpg ADDED
LLM_App.py ADDED
@@ -0,0 +1,651 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from lida import Manager, TextGenerationConfig, llm
3
+ from PIL import Image
4
+ from io import BytesIO
5
+ import base64
6
+ import openai
7
+ import dotenv
8
+ from langchain_community.document_loaders import TextLoader
9
+ from langchain_openai import OpenAIEmbeddings
10
+ from langchain_community.vectorstores import FAISS
11
+ from langchain_openai import ChatOpenAI
12
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
13
+ from langchain.chains import create_qa_with_sources_chain, ConversationalRetrievalChain
14
+ import codecs
15
+ import openai
16
+ import faiss
17
+ import os
18
+ import pickle
19
+ import numpy as np
20
+ from scipy import spatial
21
+ from openai import OpenAI
22
+ from typing import List
23
+ from sentence_transformers import SentenceTransformer
24
+ import fitz # PyMuPDF
25
+
26
# SECURITY: a live OpenAI API key was previously hard-coded on these lines.
# Any secret committed to version control must be treated as compromised and
# revoked. Credentials are now read from the environment instead.
api_key = os.environ.get("OPENAI_API_KEY", "")
os.environ["OPENAI_API_KEY"] = api_key  # keep libraries that read the env var working
organization = os.environ.get("OPENAI_ORG_ID", "")
29
+
30
class Document:
    """Minimal stand-in for a LangChain document: a text payload plus metadata.

    Attributes:
        page_content: the raw text of the document.
        metadata: arbitrary key/value annotations; a fresh empty dict per
            instance when none is supplied.
    """

    def __init__(self, content, metadata=None):
        self.page_content = content
        self.metadata = {} if metadata is None else metadata
34
+
35
def load_documents_from_file_like(file_like, file_type):
    """Read an uploaded file-like object into a single-element Document list.

    Supports file_type "txt" (decoded as UTF-8, falling back to cp1252) and
    "pdf" (text extracted page by page via PyMuPDF). Any other file_type
    yields an empty list.
    """
    documents = []
    if file_type == "txt":
        raw = file_like.getvalue()
        try:
            text = raw.decode("utf-8")
        except UnicodeDecodeError:
            # Streamlit uploads from Windows sources are often cp1252-encoded.
            text = raw.decode("cp1252")
        documents.append(Document(text))
    elif file_type == "pdf":
        pdf = fitz.open(stream=file_like.getvalue(), filetype="pdf")
        text = "".join(page.get_text() for page in pdf)
        documents.append(Document(text))
    return documents
50
+
51
def base64_to_image(base64_string):
    """Decode a base64-encoded image payload into a PIL Image object."""
    raw_bytes = base64.b64decode(base64_string)
    return Image.open(BytesIO(raw_bytes))
55
+
56
# --- Sidebar: logo, feature menu, and model settings ---
st.sidebar.image(Image.open("Image.png"), use_column_width=True)
st.sidebar.title("Menu")
menu = st.sidebar.selectbox(
    "Select Feature",
    ["Manual Plot", "Machine Expertise", "RAG", "ChatGPT"],
)

st.sidebar.title("Settings")
model_version = st.sidebar.selectbox(
    "Select Model",
    ["gpt-3.5-turbo-0125", "gpt-4-turbo", "gpt-4o"],
)

# LIDA drives CSV summarization and chart generation for the plotting feature.
lida_manager = Manager(text_gen=llm("openai", api_key=api_key))
textgen_config = TextGenerationConfig(n=1, temperature=0.5, model=model_version, use_cache=True)

# Centered page header crediting the supervising faculty.
st.markdown("""
<div style='text-align: center; font-size: 24px;'>
Faculty Members: Sam Anand (SM-Lab) & Ming Tang (XR-Lab)
</div>
""", unsafe_allow_html=True)
77
+
78
if menu == "Manual Plot":
    # CSV upload -> LIDA summary -> natural-language-goal visualization.
    st.subheader("Data Analysis and Visualization Tool")
    with st.expander("Create Custom Plots"):
        file_uploader = st.file_uploader("Upload your CSV file:", type="csv")
        if file_uploader:
            # LIDA reads the dataset from disk, so persist the upload first.
            with open("uploaded_data.csv", "wb") as f:
                f.write(file_uploader.getvalue())
            user_query = st.text_area("Enter your query for generating a plot:", height=100)
            if st.button("Generate Graph"):
                summary = lida_manager.summarize(
                    "uploaded_data.csv",
                    summary_method="default",
                    textgen_config=textgen_config,
                )
                charts = lida_manager.visualize(
                    summary=summary, goal=user_query, textgen_config=textgen_config
                )
                # Fix: charts can be empty when generation fails; indexing
                # charts[0] unconditionally raised IndexError in that case.
                if charts:
                    img = base64_to_image(charts[0].raster)
                    st.image(img, caption="Your Custom Plot")
                else:
                    st.warning("No chart could be generated for this query. Please rephrase and try again.")
91
+
92
+ if menu == "Machine Expertise":
93
+ st.subheader("Machine Expertise")
94
+ # Dropdown to select which machine expertise is needed
95
+ machine_choice = st.selectbox("Choose a Machine", ["Stratasys F170 3D Printer", "Ultimaker S5", "KAT Walk", "MockPanel"])
96
+
97
+ option_choices = ["Cosine similarity metric",
98
+ "City block distance metric",
99
+ "Euclidean distance metric",
100
+ "Chebyshev distance metric"]
101
+
102
+ client = openai.OpenAI(api_key=api_key)
103
+
104
+ if machine_choice == "Stratasys F170 3D Printer":
105
+ #with st.expander(f"Expertise on {machine_choice}"):
106
+
107
+ option_choices = ["Cosine similarity metric",
108
+ "City block distance metric",
109
+ "Euclidean distance metric",
110
+ "Chebyshev distance metric"]
111
+
112
+ st.image("Image/F170.jpg", caption="Stratasys F170 3D Printer")
113
+
114
+ user_query = st.text_area(f"Enter your query related to the {machine_choice}:", height=150)
115
+
116
+ # Asign the similarity metric
117
+ distance_metric = st.selectbox("Choose a metric for similarity search:", option_choices)
118
+
119
+ if distance_metric == "Cosine similarity metric":
120
+ metric = "cosine"
121
+ elif distance_metric == "City block distance metric":
122
+ metric = "L1"
123
+ elif distance_metric == "Euclidean distance metric":
124
+ metric = "L2"
125
+ elif distance_metric == "Chebyshev distance metric":
126
+ metric = "Linf"
127
+
128
+ if st.button(f"Get Expert Advice for {machine_choice}"):
129
+ if len(user_query) > 0:
130
+ st.info("Your Query: " + user_query)
131
+ # Initialize the Sentence Transformer model
132
+ model = SentenceTransformer("all-MiniLM-L6-v2")
133
+
134
+ # Load saved data
135
+ output_file3 = 'output/F170.pkl'
136
+ with open(output_file3, 'rb') as fIn:
137
+ stored_data = pickle.load(fIn)
138
+ stored_chunks = stored_data["chunks"]
139
+ stored_embeddings = stored_data["embeddings"]
140
+
141
def distances_from_embeddings(
    query_embedding: List[float],
    embeddings: List[List[float]],
    distance_metric,
) -> List[float]:
    """Calculates distances between a query embedding and a list of other embeddings.

    Args:
        query_embedding: the embedding vector of the user query.
        embeddings: the stored chunk embeddings to compare against.
        distance_metric: one of "cosine", "L1" (city block), "L2"
            (Euclidean), or "Linf" (Chebyshev).

    Returns:
        One distance per stored embedding, in input order.

    Raises:
        KeyError: if distance_metric is not one of the supported names.
    """
    distance_metrics = {
        "cosine": spatial.distance.cosine,
        "L1": spatial.distance.cityblock,
        "L2": spatial.distance.euclidean,
        "Linf": spatial.distance.chebyshev,
    }
    # Resolve the metric once instead of re-indexing the dict per embedding;
    # this also fails fast on an unknown metric before any work is done.
    measure = distance_metrics[distance_metric]
    return [measure(query_embedding, embedding) for embedding in embeddings]
157
+
158
+ # Function to find nearest chunks using the specified distance metric
159
def find_nearest_chunks(query: str, distance_metric=metric, top_k: int = 2) -> List[tuple]:
    """Embed *query* and return the top_k closest (chunk, distance) pairs."""
    embedded_query = model.encode([query])[0]
    scores = distances_from_embeddings(embedded_query, stored_embeddings, distance_metric)
    best = np.argsort(scores)[:top_k]
    return [(stored_chunks[idx], scores[idx]) for idx in best]
164
+
165
+ # Example usage
166
+ query = user_query
167
+ context_pairs = find_nearest_chunks(query, distance_metric=metric, top_k=2)
168
+
169
+ # Combine chunks into a single context variable
170
+ context = " ".join([chunk for chunk, distance in context_pairs])
171
+
172
+
173
+ from openai import OpenAI
174
+ from typing import List
175
+
176
+ # Initialize OpenAI client
177
+ client = openai.OpenAI(api_key=api_key)
178
+
179
+ system = """
180
+ You are an advanced virtual assistant specializing in providing accurate, detailed instructions and troubleshooting advice for the Stratasys F170 3D printer. Your primary objective is to use both contextual information and your broader knowledge base to offer users effective support.
181
+
182
+ **Guidelines:**
183
+ 1. **Contextual Responses (Tag: --RAG):** If a response is directly found within the provided context below, leverage that data to generate your answer. Cite multiple chunks if needed for a comprehensive response, and append "--RAG" to these answers.
184
+
185
+ 2. **General Knowledge Responses (Tag: --GPT):** If the answer isn't found within the context, produce a response using your general knowledge base. Align answers with best practices surrounding the Stratasys F170 3D printer and append "--GPT" to these responses.
186
+
187
+ **Contextual Information (for --RAG responses):**
188
+ \"\"\"
189
+ {context}
190
+ \"\"\"
191
+
192
+ **General Instructions:**
193
+ - **Step-by-Step Clarity:** Provide clear, step-by-step troubleshooting advice that is actionable and anticipates user challenges.
194
+ - **Cross-Referencing:** Ensure consistency by cross-referencing contextual data and avoiding contradictory guidance.
195
+ - **Preventive Suggestions:** When possible, recommend maintenance tips or optimizations to improve user experience.
196
+ - **Tone:** Maintain a professional, concise, and approachable tone in all responses.
197
+
198
+ **Examples of Good and Bad Responses:**
199
+
200
+ - **Bad Response:** "The Stratasys F170 supports a variety of materials such as ABS-M30, ASA, and TPU-92A, as indicated in the provided contextual information."
201
+ - **Reasoning:** Using phrases like "as indicated in the provided contextual information" feels impersonal and awkward for an AI response. Please don't use it.
202
+
203
+ - **Good Response:** "The Stratasys F170 supports a variety of materials such as ABS-M30, ASA, and TPU-92A. --RAG"
204
+ - **Reasoning:** This response conveys the information succinctly, accurately, and includes the appropriate tag for clarity.
205
+ """
206
+
207
+ response = client.chat.completions.create(
208
+ messages=[
209
+ {'role': 'system', 'content': system},
210
+ {'role': 'user', 'content': query},
211
+ ],
212
+ model=model_version,
213
+ temperature=.3,
214
+ )
215
+
216
+ response = response.choices[0].message.content
217
+ st.write(response)
218
+
219
+ elif machine_choice == "Ultimaker S5":
220
+ #with st.expander(f"Expertise on {machine_choice}"):
221
+
222
+ option_choices = ["Cosine similarity metric",
223
+ "City block distance metric",
224
+ "Euclidean distance metric",
225
+ "Chebyshev distance metric"]
226
+
227
+
228
+ st.image("Image/Ultimaker.jpg", caption="Ultimaker S5")
229
+ user_query = st.text_area(f"Enter your query related to the {machine_choice}:", height=150)
230
+
231
+ # Asign the similarity metric
232
+ distance_metric = st.selectbox("Choose a metric for similarity search:", option_choices)
233
+
234
+ if distance_metric == "Cosine similarity metric":
235
+ metric = "cosine"
236
+ elif distance_metric == "City block distance metric":
237
+ metric = "L1"
238
+ elif distance_metric == "Euclidean distance metric":
239
+ metric = "L2"
240
+ elif distance_metric == "Chebyshev distance metric":
241
+ metric = "Linf"
242
+
243
+ if st.button(f"Get Expert Advice for {machine_choice}"):
244
+ if len(user_query) > 0:
245
+ st.info("Your Query: " + user_query)
246
+ # Initialize the Sentence Transformer model
247
+ model = SentenceTransformer("all-MiniLM-L6-v2")
248
+
249
+ # Load saved data
250
+ output_file2 = 'output/Ultimaker.pkl'
251
+ with open(output_file2, 'rb') as fIn:
252
+ stored_data = pickle.load(fIn)
253
+ stored_chunks = stored_data["chunks"]
254
+ stored_embeddings = stored_data["embeddings"]
255
+
256
def distances_from_embeddings(
    query_embedding: List[float],
    embeddings: List[List[float]],
    distance_metric,
) -> List[float]:
    """Distance from *query_embedding* to every vector in *embeddings*.

    Supported metrics: "cosine", "L1" (city block), "L2" (Euclidean),
    and "Linf" (Chebyshev).
    """
    if distance_metric == "cosine":
        measure = spatial.distance.cosine
    elif distance_metric == "L1":
        measure = spatial.distance.cityblock
    elif distance_metric == "L2":
        measure = spatial.distance.euclidean
    elif distance_metric == "Linf":
        measure = spatial.distance.chebyshev
    else:
        # Preserve the original dict-lookup failure mode for unknown metrics.
        raise KeyError(distance_metric)
    return [measure(query_embedding, vec) for vec in embeddings]
272
+
273
+ # Function to find nearest chunks using the specified distance metric
274
def find_nearest_chunks(query: str, distance_metric=metric, top_k: int = 2) -> List[tuple]:
    """Return the top_k stored chunks nearest to *query*, with their distances."""
    q_vec = model.encode([query])[0]
    dists = distances_from_embeddings(q_vec, stored_embeddings, distance_metric)
    order = np.argsort(dists)
    return [(stored_chunks[i], dists[i]) for i in order[:top_k]]
279
+
280
+ # Example usage
281
+ query = user_query
282
+ context_pairs = find_nearest_chunks(query, distance_metric=metric, top_k=2)
283
+
284
+ # Combine chunks into a single context variable
285
+ context = " ".join([chunk for chunk, distance in context_pairs])
286
+
287
+
288
+ from openai import OpenAI
289
+ from typing import List
290
+
291
+
292
+ # Initialize OpenAI client
293
+ client = openai.OpenAI(api_key=api_key)
294
+
295
+ system = """
296
+ You are an advanced virtual assistant specializing in providing accurate, detailed instructions and troubleshooting advice for the Ultimaker S5 3D printer. Your primary objective is to use both contextual information and your broader knowledge base to offer users effective support.
297
+
298
+ **Guidelines:**
299
+ 1. **Contextual Responses (Tag: --RAG):** If a response is directly found within the provided context below, leverage that data to generate your answer. Cite multiple chunks if needed for a comprehensive response, and append "--RAG" to these answers.
300
+
301
+ 2. **General Knowledge Responses (Tag: --GPT):** If the answer isn't found within the context, produce a response using your general knowledge base. Align answers with best practices surrounding the Ultimaker S5 3D printer and append "--GPT" to these responses.
302
+
303
+ **Contextual Information (for --RAG responses):**
304
+ \"\"\"
305
+ {context}
306
+ \"\"\"
307
+
308
+ **General Instructions:**
309
+ - **Step-by-Step Clarity:** Provide clear, step-by-step troubleshooting advice that is actionable and anticipates user challenges.
310
+ - **Cross-Referencing:** Ensure consistency by cross-referencing contextual data and avoiding contradictory guidance.
311
+ - **Preventive Suggestions:** When possible, recommend maintenance tips or optimizations to improve user experience.
312
+ - **Tone:** Maintain a professional, concise, and approachable tone in all responses.
313
+
314
+ **Examples of Good and Bad Responses:**
315
+
316
+ - **Bad Response:** "The Ultimaker S5 supports a wide range of materials, including PLA, ABS, and CPE, as indicated in the provided contextual information."
317
+ - **Reasoning:** Using phrases like "as indicated in the provided contextual information" feels impersonal and awkward for an AI response. Please dont use it.
318
+
319
+ - **Good Response:** "The Ultimaker S5 supports a wide range of materials, including PLA, ABS, and CPE. --RAG"
320
+ - **Reasoning:** This response conveys the information succinctly, accurately, and includes the appropriate tag for clarity.
321
+ """
322
+
323
+
324
+ response = client.chat.completions.create(
325
+ messages=[
326
+ {'role': 'system', 'content': system},
327
+ {'role': 'user', 'content': query},
328
+ ],
329
+ model=model_version,
330
+ temperature=.3,
331
+ )
332
+
333
+ response = response.choices[0].message.content
334
+ st.write(response)
335
+
336
+ elif machine_choice == "KAT Walk":
337
+ #with st.expander(f"Expertise on {machine_choice}"):
338
+
339
+ option_choices = ["Cosine similarity metric",
340
+ "City block distance metric",
341
+ "Euclidean distance metric",
342
+ "Chebyshev distance metric"]
343
+
344
+
345
+ st.image("Image/KATWalk.jpg", caption="KAT Walk Virtual Reality Treadmill")
346
+ user_query = st.text_area(f"Enter your query related to the {machine_choice}:", height=150)
347
+
348
+ # Assign the similarity metric
349
+ distance_metric = st.selectbox("Choose a metric for similarity search:", option_choices)
350
+
351
+ if distance_metric == "Cosine similarity metric":
352
+ metric = "cosine"
353
+ elif distance_metric == "City block distance metric":
354
+ metric = "L1"
355
+ elif distance_metric == "Euclidean distance metric":
356
+ metric = "L2"
357
+ elif distance_metric == "Chebyshev distance metric":
358
+ metric = "Linf"
359
+
360
+ if st.button(f"Get Expert Advice for {machine_choice}"):
361
+ if len(user_query) > 0:
362
+ st.info("Your Query: " + user_query)
363
+
364
+ # Initialize the Sentence Transformer model
365
+ model = SentenceTransformer("all-MiniLM-L6-v2")
366
+
367
+ # Load saved data
368
+ output_file1 = 'output/katWalk.pkl'
369
+ with open(output_file1, 'rb') as fIn:
370
+ stored_data = pickle.load(fIn)
371
+ stored_chunks = stored_data["chunks"]
372
+ stored_embeddings = stored_data["embeddings"]
373
+
374
def distances_from_embeddings(
    query_embedding: List[float],
    embeddings: List[List[float]],
    distance_metric: str = "cosine",
) -> List[float]:
    """Calculate the distance from *query_embedding* to each embedding in *embeddings*.

    Args:
        query_embedding: The embedding vector of the user's query.
        embeddings: The stored chunk embeddings to compare against.
        distance_metric: One of "cosine", "L1" (city block), "L2"
            (Euclidean), or "Linf" (Chebyshev).

    Returns:
        A list of distances, one per stored embedding, in the same order.

    Raises:
        KeyError: If *distance_metric* is not one of the supported names.
    """
    distance_metrics = {
        "cosine": spatial.distance.cosine,
        "L1": spatial.distance.cityblock,
        "L2": spatial.distance.euclidean,
        "Linf": spatial.distance.chebyshev,
    }
    # Resolve the metric once instead of re-looking it up on every iteration.
    metric_fn = distance_metrics[distance_metric]
    return [metric_fn(query_embedding, embedding) for embedding in embeddings]
390
+
391
+ # Function to find nearest chunks using the specified distance metric
392
def find_nearest_chunks(query: str, distance_metric=metric, top_k: int = 2) -> List[tuple]:
    """Return the *top_k* stored chunks closest to *query* as (chunk, distance) pairs."""
    # Embed the query with the same sentence-transformer used for the stored chunks.
    encoded_query = model.encode([query])[0]
    scores = distances_from_embeddings(encoded_query, stored_embeddings, distance_metric)
    # argsort puts the smallest distances first; keep only the best top_k indices.
    best_indices = np.argsort(scores)[:top_k]
    return [(stored_chunks[idx], scores[idx]) for idx in best_indices]
397
+
398
+ # Example usage
399
+ query = user_query
400
+ context_pairs = find_nearest_chunks(query, distance_metric=metric, top_k=2)
401
+
402
+ # Combine chunks into a single context variable
403
+ context = " ".join([chunk for chunk, distance in context_pairs])
404
+
405
+
406
+ from openai import OpenAI
407
+ from typing import List
408
+
409
+
410
+ # Initialize OpenAI client
411
+ client = openai.OpenAI(api_key=api_key)
412
+
413
+ system = f"""
414
+ You are an advanced virtual assistant specializing in providing accurate, detailed instructions and troubleshooting advice for the KAT Walk system. Your primary objective is to use both contextual information and your broader knowledge base to offer users effective support.
415
+
416
+ **Guidelines:**
417
+ 1. **Contextual Responses (Tag: --RAG):** If a response is directly found within the provided context below, leverage that data to generate your answer. Cite multiple chunks if needed for a comprehensive response, and append "--RAG" to these answers.
418
+
419
+ 2. **General Knowledge Responses (Tag: --GPT):** If the answer isn't found within the context, produce a response using your general knowledge base. Align answers with best practices surrounding the KAT Walk system and append "--GPT" to these responses.
420
+
421
+ **Contextual Information (for --RAG responses):**
422
+ \"\"\"
423
+ {context}
424
+ \"\"\"
425
+
426
+ **General Instructions:**
427
+ - **Step-by-Step Clarity:** Provide clear, step-by-step troubleshooting advice that is actionable and anticipates user challenges.
428
+ - **Cross-Referencing:** Ensure consistency by cross-referencing contextual data and avoiding contradictory guidance.
429
+ - **Preventive Suggestions:** When possible, recommend maintenance tips or optimizations to improve user experience.
430
+ - **Tone:** Maintain a professional, concise, and approachable tone in all responses.
431
+
432
+ **Examples of Good and Bad Responses:**
433
+
434
+ - **Bad Response:** "The supported weight of a user for the KAT Walk machine is 130kg (286 lbs.), as indicated in the provided contextual information."
435
+ - **Reasoning:** Using phrases like "as indicated in the provided contextual information" feels impersonal and awkward for an AI response. Please dont use it.
436
+
437
+ - **Good Response:** "The supported weight of a user for the KAT Walk machine is 130kg (286 lbs.). --RAG"
438
+ - **Reasoning:** This response conveys the information succinctly, accurately, and includes the appropriate tag for clarity.
439
+ """
440
+
441
+ response = client.chat.completions.create(
442
+ messages=[
443
+ {'role': 'system', 'content': system},
444
+ {'role': 'user', 'content': query},
445
+ ],
446
+ model=model_version,
447
+ temperature=.3,
448
+ )
449
+
450
+ response = response.choices[0].message.content
451
+ st.write(response)
452
+
453
+ elif machine_choice == "MockPanel":
454
+ #with st.expander(f"Expertise on {machine_choice}"):
455
+
456
+ option_choices = ["Cosine similarity metric",
457
+ "City block distance metric",
458
+ "Euclidean distance metric",
459
+ "Chebyshev distance metric"]
460
+
461
+
462
+ st.image("Image/Mockpanel.jpg", caption="Mockpanel")
463
+ user_query = st.text_area(f"Enter your query related to the {machine_choice}:", height=150)
464
+
465
+ # Assign the similarity metric
466
+ distance_metric = st.selectbox("Choose a metric for similarity search:", option_choices)
467
+
468
+ if distance_metric == "Cosine similarity metric":
469
+ metric = "cosine"
470
+ elif distance_metric == "City block distance metric":
471
+ metric = "L1"
472
+ elif distance_metric == "Euclidean distance metric":
473
+ metric = "L2"
474
+ elif distance_metric == "Chebyshev distance metric":
475
+ metric = "Linf"
476
+
477
+ if st.button(f"Get Expert Advice for {machine_choice}"):
478
+ if len(user_query) > 0:
479
+ st.info("Your Query: " + user_query)
480
+
481
+ # Initialize the Sentence Transformer model
482
+ model = SentenceTransformer("all-MiniLM-L6-v2")
483
+
484
+ # Load saved data
485
+ output_file1 = 'output/computerVisionToolkit.pkl'
486
+ with open(output_file1, 'rb') as fIn:
487
+ stored_data = pickle.load(fIn)
488
+ stored_chunks = stored_data["chunks"]
489
+ stored_embeddings = stored_data["embeddings"]
490
+
491
def distances_from_embeddings(
    query_embedding: List[float],
    embeddings: List[List[float]],
    distance_metric: str = "cosine",
) -> List[float]:
    """Calculate the distance from *query_embedding* to each embedding in *embeddings*.

    Args:
        query_embedding: The embedding vector of the user's query.
        embeddings: The stored chunk embeddings to compare against.
        distance_metric: One of "cosine", "L1" (city block), "L2"
            (Euclidean), or "Linf" (Chebyshev). Defaults to "cosine" for
            consistency with the KAT Walk branch's identical helper.

    Returns:
        A list of distances, one per stored embedding, in the same order.

    Raises:
        KeyError: If *distance_metric* is not one of the supported names.
    """
    distance_metrics = {
        "cosine": spatial.distance.cosine,
        "L1": spatial.distance.cityblock,
        "L2": spatial.distance.euclidean,
        "Linf": spatial.distance.chebyshev,
    }
    # Resolve the metric once instead of re-looking it up on every iteration.
    metric_fn = distance_metrics[distance_metric]
    return [metric_fn(query_embedding, embedding) for embedding in embeddings]
507
+
508
+ # Function to find nearest chunks using the specified distance metric
509
def find_nearest_chunks(query: str, distance_metric=metric, top_k: int = 2) -> List[tuple]:
    """Return the *top_k* stored chunks closest to *query* as (chunk, distance) pairs."""
    # Embed the query with the same sentence-transformer used for the stored chunks.
    encoded_query = model.encode([query])[0]
    scores = distances_from_embeddings(encoded_query, stored_embeddings, distance_metric)
    # argsort puts the smallest distances first; keep only the best top_k indices.
    best_indices = np.argsort(scores)[:top_k]
    return [(stored_chunks[idx], scores[idx]) for idx in best_indices]
514
+
515
+ # Example usage
516
+ query = user_query
517
+ context_pairs = find_nearest_chunks(query, distance_metric=metric, top_k=2)
518
+
519
+ # Combine chunks into a single context variable
520
+ context = " ".join([chunk for chunk, distance in context_pairs])
521
+
522
+
523
+ from openai import OpenAI
524
+ from typing import List
525
+
526
+
527
+ # Initialize OpenAI client
528
+ client = openai.OpenAI(api_key=api_key)
529
+
530
+ system = f"""
531
+ You are an advanced virtual assistant specializing in providing accurate, detailed instructions and troubleshooting advice for the Mockpanel system. Your primary objective is to use both contextual information and your broader knowledge base to offer users effective support.
532
+
533
+ **Guidelines:**
534
+ 1. **Contextual Responses (Tag: --RAG):** If a response is directly found within the provided context below, leverage that data to generate your answer. Cite multiple chunks if needed for a comprehensive response, and append "--RAG" to these answers.
535
+
536
+ 2. **General Knowledge Responses (Tag: --GPT):** If the answer isn't found within the context, produce a response using your general knowledge base. Align answers with best practices surrounding the Mockpanel system and append "--GPT" to these responses.
537
+
538
+ **Contextual Information (for --RAG responses):**
539
+ \"\"\"
540
+ Mockpanel includes linear and circular gauges, seven-segment displays, OCR displays, toggle switches, knobs, and safety lights, all integrated into one unit. It serves as a replica of legacy machine used in industry to replicate different legacy sensor systems for training and development purposes.
541
+ \"\"\"
542
+
543
+ **General Instructions:**
544
+ - **Step-by-Step Clarity:** Provide clear, step-by-step troubleshooting advice that is actionable and anticipates user challenges.
545
+ - **Cross-Referencing:** Ensure consistency by cross-referencing contextual data and avoiding contradictory guidance.
546
+ - **Preventive Suggestions:** When possible, recommend maintenance tips or optimizations to improve user experience.
547
+ - **Tone:** Maintain a professional, concise, and approachable tone in all responses.
548
+
549
+ **Examples of Good and Bad Responses:**
550
+
551
+ - **Bad Response:** "Mockpanel includes various components like gauges and switches, as indicated in the provided contextual information."
552
+ - **Reasoning:** Using phrases like "as indicated in the provided contextual information" feels impersonal and awkward for an AI response. Please don't use it.
553
+
554
+ - **Good Response:** "Mockpanel includes various components like gauges and switches, which are essential for replicating legacy sensor systems in industry training and development. --RAG"
555
+ - **Reasoning:** This response conveys the information succinctly, accurately, and includes the appropriate tag for clarity.
556
+ """
557
+
558
+
559
+ response = client.chat.completions.create(
560
+ messages=[
561
+ {'role': 'system', 'content': system},
562
+ {'role': 'user', 'content': query},
563
+ ],
564
+ model=model_version,
565
+ temperature=.3,
566
+ )
567
+
568
+ response = response.choices[0].message.content
569
+ st.write(response)
570
+
571
+
572
if menu == "RAG":
    st.subheader("Retrieve and Generate (RAG)")
    uploaded_file = st.file_uploader("Upload a text or PDF file", type=["txt", "pdf"])

    if uploaded_file is not None:
        file_type = uploaded_file.name.split('.')[-1]
        # Load documents from the uploaded file (txt or pdf).
        documents = load_documents_from_file_like(uploaded_file, file_type)

        if len(documents) == 0:
            st.error("Failed to load documents. Please check the file format and content.")
        else:
            # Split documents into fixed-size chunks for embedding.
            text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
            chunked_documents = text_splitter.split_documents(documents)

            if len(chunked_documents) == 0:
                st.error("Failed to split documents into chunks. Please check the document content.")
            else:
                # Embed the chunks and build a FAISS index for retrieval.
                embeddings_model = OpenAIEmbeddings()
                retriever = None
                try:
                    db = FAISS.from_documents(chunked_documents, embeddings_model)
                    retriever = db.as_retriever()
                except Exception as e:
                    st.error(f"Failed to create embeddings and index them: {e}")

                # Only expose the query interface once indexing succeeded.
                # Previously the query path ran unconditionally, so a failed
                # index build led to a NameError on the unbound `retriever`.
                if retriever is not None:
                    user_query = st.text_area("Enter your query:", height=100)
                    if st.button("Answer Query") and user_query:
                        try:
                            llm_src = ChatOpenAI(temperature=0, model=model_version)

                            retrieval_qa = ConversationalRetrievalChain.from_llm(
                                llm_src,
                                retriever,
                                return_source_documents=True,
                            )
                            output = retrieval_qa.invoke({"question": user_query, "chat_history": []})
                            st.write(f"Answer: {output['answer']}")
                        except Exception as e:
                            st.error(f"Failed to retrieve and generate answer: {e}")
615
+
616
+
617
+
618
+
619
# Module-level name kept for backward compatibility; the authoritative
# history lives in st.session_state below, because Streamlit re-executes
# this whole script on every interaction and a plain local list would be
# reset to [] on each rerun (the original bug: history never persisted).
chat_history = []

if menu == "ChatGPT":
    client = openai.OpenAI(api_key=api_key)
    user_query = st.text_area("Enter your query:", height=100)

    # Persist the conversation across Streamlit reruns.
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []
    chat_history = st.session_state.chat_history

    if st.button("Answer Query") and user_query:
        # Fold prior turns into the system message so the model sees context.
        context = "You are an helpful agent."
        if chat_history:
            chat_history_str = "\n".join([f"User: {query}\nBot: {response}" for query, response in chat_history])
            context += "\n" + chat_history_str

        response = client.chat.completions.create(
            messages=[
                {'role': 'system', 'content': context},
                {'role': 'user', 'content': user_query},
            ],
            model=model_version,
            temperature=0.3,
        )
        response = response.choices[0].message.content
        chat_history.append((user_query, response))
        st.write(response)

    if st.button("Reset Chat History"):
        # Clear in place so the session-state list itself is emptied
        # (rebinding the local name, as before, had no effect).
        chat_history.clear()

    if chat_history:
        st.subheader("Chat History:")
        for idx, (query, response) in enumerate(chat_history):
            st.write(f"{idx+1}. User: {query}")
            st.write(f"   Bot: {response}")
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: LLM Visual
3
- emoji: 💻
4
- colorFrom: indigo
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.32.2
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: LLM_Visual
3
+ app_file: garadio.py
 
 
4
  sdk: gradio
5
+ sdk_version: 4.31.4
 
 
6
  ---
 
 
Reference/Mockpanel/Images/page100_img3.png ADDED
Reference/Mockpanel/Images/page100_img5.png ADDED
Reference/Mockpanel/Images/page101_img3.png ADDED
Reference/Mockpanel/Images/page101_img5.png ADDED
Reference/Mockpanel/Images/page102_img3.png ADDED