fazzam commited on
Commit
e6748e7
·
verified ·
1 Parent(s): f63b4a9

update app

Browse files
Files changed (50) hide show
  1. .gitattributes +1 -34
  2. .gitignore +160 -0
  3. LICENSE +201 -0
  4. README.md +75 -13
  5. __init__.py +3 -0
  6. app.py +110 -0
  7. env.yml +152 -0
  8. gitignore +163 -0
  9. grainsight.log +189 -0
  10. packages.txt +1 -0
  11. requirements.txt +16 -0
  12. setup.py +28 -0
  13. src/__init__.py +0 -0
  14. src/__pycache__/__init__.cpython-310.pyc +0 -0
  15. src/__pycache__/__init__.cpython-311.pyc +0 -0
  16. src/__pycache__/__init__.cpython-39.pyc +0 -0
  17. src/__pycache__/segment.cpython-310.pyc +0 -0
  18. src/__pycache__/ui.cpython-310.pyc +0 -0
  19. src/model/.gitattributes +1 -0
  20. src/segmentation/__init__.py +1 -0
  21. src/segmentation/__pycache__/__init__.cpython-310.pyc +0 -0
  22. src/segmentation/__pycache__/__init__.cpython-39.pyc +0 -0
  23. src/segmentation/__pycache__/segment.cpython-310.pyc +0 -0
  24. src/segmentation/__pycache__/segment.cpython-39.pyc +0 -0
  25. src/segmentation/segment.py +207 -0
  26. src/ui/__init__.py +2 -0
  27. src/ui/__pycache__/__init__.cpython-310.pyc +0 -0
  28. src/ui/__pycache__/__init__.cpython-311.pyc +0 -0
  29. src/ui/__pycache__/__init__.cpython-39.pyc +0 -0
  30. src/ui/__pycache__/drawable_canvas.cpython-310.pyc +0 -0
  31. src/ui/__pycache__/drawable_canvas.cpython-311.pyc +0 -0
  32. src/ui/__pycache__/drawable_canvas.cpython-39.pyc +0 -0
  33. src/ui/__pycache__/streamlit_ui.cpython-310.pyc +0 -0
  34. src/ui/__pycache__/streamlit_ui.cpython-311.pyc +0 -0
  35. src/ui/__pycache__/streamlit_ui.cpython-39.pyc +0 -0
  36. src/ui/drawable_canvas.py +29 -0
  37. src/ui/streamlit_ui.py +46 -0
  38. src/utils/__init__.py +3 -0
  39. src/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  40. src/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  41. src/utils/__pycache__/calculations.cpython-310.pyc +0 -0
  42. src/utils/__pycache__/calculations.cpython-39.pyc +0 -0
  43. src/utils/__pycache__/load_config.cpython-310.pyc +0 -0
  44. src/utils/__pycache__/parameters.cpython-310.pyc +0 -0
  45. src/utils/__pycache__/parameters.cpython-39.pyc +0 -0
  46. src/utils/__pycache__/visualization.cpython-310.pyc +0 -0
  47. src/utils/__pycache__/visualization.cpython-39.pyc +0 -0
  48. src/utils/calculations.py +7 -0
  49. src/utils/parameters.py +53 -0
  50. src/utils/visualization.py +28 -0
.gitattributes CHANGED
@@ -1,35 +1,2 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ FastSAM-x.pt filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  *.pt filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,13 +1,75 @@
1
- ---
2
- title: GrainSight
3
- emoji: 🏆
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.33.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GrainSight: README
2
+
3
+ GrainSight is a user-friendly application designed for petrographers and microscopists to perform real-time grain segmentation and analysis on microscopic thin section images. Built on top of the powerful FastSAM segmentation model, GrainSight allows you to extract quantitative data and insights from your images, aiding in various petrographic studies.
4
+
5
+ ## Importance in Petrographic Studies:
6
+
7
+ - **Automated Grain Segmentation**: GrainSight eliminates the need for manual grain boundary tracing, saving significant time and effort.
8
+ - **Quantitative Analysis**: Extract object-specific parameters such as area, perimeter, roundness, aspect ratio, and longest length, enabling quantitative analysis of grain characteristics.
9
+ - **Mineral Identification and Classification**: The extracted parameters can assist in mineral identification and classification based on their morphological properties.
10
+ - **Textural Analysis**: Grain size distribution and spatial arrangement of grains can be studied to understand the depositional and diagenetic history of rocks.
11
+
12
+ ## Installation and Usage:
13
+
14
+ ### 1. Create a Virtual Environment (Recommended):
15
+
16
+ It's recommended to use a virtual environment to manage project-specific dependencies. You can create one using venv or conda:
17
+
18
+ ```bash
19
+ # Using venv
20
+ python3 -m venv grainsight_env
21
+
22
+ # Using conda
23
+ conda create -n grainsight_env python=3.8 # Replace 3.8 with your desired Python version
24
+ ```
25
+
26
+ ### 2. Activate the Virtual Environment:
27
+
28
+ ```bash
29
+ # For venv
30
+ source grainsight_env/bin/activate # On Linux/macOS
31
+ grainsight_env\Scripts\activate # On Windows
32
+
33
+ # For conda
34
+ conda activate grainsight_env
35
+ ```
36
+
37
+ ### 3. Clone the GrainSight Repository:
38
+
39
+ ```bash
40
+ git clone https://github.com/fazzam12345/grainsight.git
41
+ ```
42
+
43
+ ### 4. Install Requirements:
44
+
45
+ Install the required libraries from the requirements.txt file:
46
+
47
+ ```bash
48
+ pip install -r requirements.txt
49
+ ```
50
+
51
+ ### 5. Run the Application:
52
+
53
+ Start the Streamlit application:
54
+
55
+ ```bash
56
+ streamlit run app.py
57
+ ```
58
+
59
+ This will open the GrainSight application in your web browser.
60
+
61
+ ### Usage:
62
+
63
+ - **Upload an Image**: Select a microscopic thin section image in JPG, PNG, or JPEG format.
64
+ - **Set Parameters (Optional)**: Adjust segmentation parameters like input size, IOU threshold, and confidence threshold as needed.
65
+ - **Draw a Line for Scale**: Draw a line on the image and enter its real-world length (in micrometers) to set the scale for measurements.
66
+ - **Run Segmentation**: Click the "Run Segmentation" button to segment the image and extract grain parameters.
67
+ - **Analyze Results**: View the segmented image and the table of calculated grain parameters. You can also download the data as a CSV file.
68
+ - **Visualize Distributions**: Select a parameter to plot its distribution and gain further insights into grain characteristics.
69
+
70
+ ### Additional Notes:
71
+
72
+ - **Dependencies**: Make sure you have the required versions of Python, PyTorch, Torchvision, and other libraries installed. Refer to the requirements.txt file for details.
73
+ - **GPU Acceleration**: For faster processing, you can use a CUDA-enabled GPU with the appropriate drivers and PyTorch version.
74
+ - **Customization**: The code is modular and can be easily extended or customized to suit your specific needs.
75
+
__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .src.ui import streamlit_ui, drawable_canvas
2
+ from .src.segmentation import segment_everything, fast_process
3
+ from .src.utils import calculate_parameters, plot_distribution, calculate_pixel_length
app.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import streamlit as st
3
+ import logging
4
+ from PIL import Image
5
+ from src.ui.drawable_canvas import drawable_canvas
6
+ from src.ui.streamlit_ui import streamlit_ui
7
+ from src.segmentation import segment_everything
8
+ from src.utils import calculate_parameters, plot_distribution, calculate_pixel_length, plot_cumulative_frequency
9
+ from ultralytics import YOLO
10
+ import torch
11
+ import cv2
12
+
13
+ logging.basicConfig(filename="grainsight.log", level=logging.INFO)
14
+
15
# Cache the model/device as a *resource*: st.cache_data tries to pickle the
# return value on every hit, which is slow and can fail for torch/YOLO
# objects; st.cache_resource keeps one shared, unserialized instance.
@st.cache_resource
def load_model_and_initialize():
    """Load the FastSAM weights once and select the compute device.

    Returns:
        tuple: ``(model, device)`` where ``model`` is an ultralytics ``YOLO``
        instance loaded from ``src/model/FastSAM-x.pt`` and ``device`` is
        ``torch.device("cuda")`` when a GPU is available, else CPU.
    """
    # Forward slashes are portable (the original hard-coded Windows
    # backslashes, which break on Linux hosts such as HF Spaces).
    model_path = "src/model/FastSAM-x.pt"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = YOLO(model_path)
    return model, device
+
23
def main():
    """Run the GrainSight Streamlit app.

    Workflow: collect UI parameters, let the user draw a scale line on the
    uploaded image, derive a real-world-units-per-pixel scale factor, segment
    the image with FastSAM, then show the segmented image, a parameter table
    (with CSV download), and distribution plots.
    """
    (uploaded_image, input_size, iou_threshold, conf_threshold, better_quality,
     contour_thickness, real_world_length, max_det) = streamlit_ui()

    if uploaded_image is None:
        st.write("Please upload an image.")
        return

    try:
        canvas_result = drawable_canvas(uploaded_image, input_size)

        # Pixel length of the user-drawn scale line; stays None until the
        # user has drawn one.
        pixel_length = None
        json_data = canvas_result.json_data
        if json_data is not None and json_data.get("objects"):
            line_object = json_data["objects"][0]
            start_point = [line_object['x1'], line_object['y1']]
            end_point = [line_object['x2'], line_object['y2']]
            # NOTE(review): these coordinates come from the (possibly resized)
            # drawing canvas, not the original image. If the canvas is scaled,
            # the length may need rescaling by input_size / max(original dims)
            # — confirm against drawable_canvas. (The original code computed
            # that factor and then discarded it.)
            pixel_length = calculate_pixel_length(start_point, end_point)
            st.write(f"Pixel length of the line: {pixel_length}")
        else:
            st.write("Please draw a line to set the scale or enter the real-world length.")

        # Guard a missing *or degenerate* (zero-length) scale line: the
        # original only checked `is not None`, so a zero-length line raised
        # ZeroDivisionError below.
        if pixel_length and real_world_length is not None:
            scale_factor = real_world_length / pixel_length
        else:
            st.write("Scale factor could not be calculated. Make sure to draw a line and enter the real-world length.")
            return

        input_image = Image.open(uploaded_image)

        # Model and device are cached, so Streamlit reruns are cheap.
        model, device = load_model_and_initialize()

        segmented_image, annotations = segment_everything(
            input_image,
            model=model,
            device=device,
            input_size=input_size,
            iou_threshold=iou_threshold,
            conf_threshold=conf_threshold,
            better_quality=better_quality,
            contour_thickness=contour_thickness,
            max_det=max_det,
        )

        st.image(segmented_image, caption="Segmented Image", use_column_width=True)

        # Per-grain morphological parameters, scaled to real-world units.
        df = calculate_parameters(annotations, scale_factor)

        if df.empty:
            st.write("No objects detected.")
            return

        st.write("Summary of Object Parameters:")
        st.dataframe(df)

        csv = df.to_csv(index=False)
        st.download_button(
            label="Download data as CSV",
            data=csv,
            file_name='grain_parameters.csv',
            mime='text/csv',
        )

        plot_cumulative_frequency(df)
        # 'Object' is an identifier column, not a measurable parameter.
        filtered_columns = [col for col in df.columns.tolist() if col != 'Object']
        selected_parameter = st.selectbox("Select a parameter to see its distribution:", filtered_columns)

        if selected_parameter:
            plot_distribution(df, selected_parameter)
        else:
            st.write("No parameter selected for plotting.")

    except Exception as e:
        # Log the details; surface only a generic message in the UI.
        logging.error(f"An error occurred: {e}")
        st.error("An error occurred during processing. Please check the logs for details.")


if __name__ == "__main__":
    main()
env.yml ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # environment.yml
2
+
3
+ name: c:\Users\fares\Documents\interpreterwithgemini\test_gemini
4
+ channels:
5
+ - conda-forge
6
+ - defaults
7
+ dependencies:
8
+ - asttokens=2.4.1=pyhd8ed1ab_0
9
+ - backcall=0.2.0=pyh9f0ad1d_0
10
+ - ca-certificates=2024.2.2=h56e8100_0
11
+ - colorama=0.4.6=pyhd8ed1ab_0
12
+ - comm=0.2.1=pyhd8ed1ab_0
13
+ - debugpy=1.6.7=py39hd77b12b_0
14
+ - decorator=5.1.1=pyhd8ed1ab_0
15
+ - executing=2.0.1=pyhd8ed1ab_0
16
+ - importlib-metadata=7.0.1=pyha770c72_0
17
+ - importlib_metadata=7.0.1=hd8ed1ab_0
18
+ - ipykernel=6.29.3=pyha63f2e9_0
19
+ - ipython=8.12.0=pyh08f2357_0
20
+ - jedi=0.19.1=pyhd8ed1ab_0
21
+ - jupyter_client=8.6.0=pyhd8ed1ab_0
22
+ - jupyter_core=4.12.0=py39hcbf5309_0
23
+ - libsodium=1.0.18=h8d14728_1
24
+ - matplotlib-inline=0.1.6=pyhd8ed1ab_0
25
+ - nest-asyncio=1.6.0=pyhd8ed1ab_0
26
+ - openssl=3.0.13=h2bbff1b_0
27
+ - packaging=23.2=pyhd8ed1ab_0
28
+ - parso=0.8.3=pyhd8ed1ab_0
29
+ - pickleshare=0.7.5=py_1003
30
+ - pip=23.3.1=py39haa95532_0
31
+ - prompt-toolkit=3.0.42=pyha770c72_0
32
+ - prompt_toolkit=3.0.42=hd8ed1ab_0
33
+ - psutil=5.9.0=py39h2bbff1b_0
34
+ - pure_eval=0.2.2=pyhd8ed1ab_0
35
+ - pygments=2.17.2=pyhd8ed1ab_0
36
+ - python=3.9.18=h1aa4202_0
37
+ - python-dateutil=2.8.2=pyhd8ed1ab_0
38
+ - python_abi=3.9=2_cp39
39
+ - pywin32=227=py39hb82d6ee_1
40
+ - pyzmq=25.1.2=py39hd77b12b_0
41
+ - setuptools=68.2.2=py39haa95532_0
42
+ - six=1.16.0=pyh6c4a22f_0
43
+ - sqlite=3.41.2=h2bbff1b_0
44
+ - stack_data=0.6.2=pyhd8ed1ab_0
45
+ - tornado=6.2=py39hb82d6ee_0
46
+ - traitlets=5.14.1=pyhd8ed1ab_0
47
+ - typing_extensions=4.10.0=pyha770c72_0
48
+ - vc=14.2=h21ff451_1
49
+ - vs2015_runtime=14.27.29016=h5e58377_2
50
+ - wcwidth=0.2.13=pyhd8ed1ab_0
51
+ - wheel=0.41.2=py39haa95532_0
52
+ - zeromq=4.3.5=hd77b12b_0
53
+ - zipp=3.17.0=pyhd8ed1ab_0
54
+ - pip:
55
+ - aiohttp==3.9.3
56
+ - aiosignal==1.3.1
57
+ - altair==5.3.0
58
+ - async-timeout==4.0.3
59
+ - attrs==23.2.0
60
+ - backports-tarfile==1.0.0
61
+ - blinker==1.7.0
62
+ - build==1.2.1
63
+ - cachetools==5.3.3
64
+ - certifi==2024.2.2
65
+ - charset-normalizer==3.3.2
66
+ - click==8.1.7
67
+ - clip==0.2.0
68
+ - contourpy==1.2.1
69
+ - cycler==0.12.1
70
+ - docutils==0.21.1
71
+ - filelock==3.13.4
72
+ - fonttools==4.51.0
73
+ - frozenlist==1.4.1
74
+ - fsspec==2024.3.1
75
+ - genai==2.1.0
76
+ - gitdb==4.0.11
77
+ - gitpython==3.1.43
78
+ - google-ai-generativelanguage==0.4.0
79
+ - google-api-core==2.17.1
80
+ - google-auth==2.28.1
81
+ - google-generativeai==0.3.2
82
+ - googleapis-common-protos==1.62.0
83
+ - grpcio==1.62.0
84
+ - grpcio-status==1.62.0
85
+ - idna==3.6
86
+ - importlib-resources==6.4.0
87
+ - jaraco-classes==3.4.0
88
+ - jaraco-context==5.3.0
89
+ - jaraco-functools==4.0.0
90
+ - jinja2==3.1.3
91
+ - jsonschema==4.21.1
92
+ - jsonschema-specifications==2023.12.1
93
+ - keyring==25.1.0
94
+ - kiwisolver==1.4.5
95
+ - markdown-it-py==3.0.0
96
+ - markupsafe==2.1.5
97
+ - matplotlib==3.8.4
98
+ - mdurl==0.1.2
99
+ - more-itertools==10.2.0
100
+ - mpmath==1.3.0
101
+ - multidict==6.0.5
102
+ - networkx==3.2.1
103
+ - nh3==0.2.17
104
+ - numpy==1.26.4
105
+ - openai==0.27.10
106
+ - opencv-python==4.9.0.80
107
+ - pandas==2.2.2
108
+ - pillow==10.3.0
109
+ - pkginfo==1.10.0
110
+ - proto-plus==1.23.0
111
+ - protobuf==4.25.3
112
+ - pyarrow==15.0.2
113
+ - pyasn1==0.5.1
114
+ - pyasn1-modules==0.3.0
115
+ - pydeck==0.8.1b0
116
+ - pyparsing==3.1.2
117
+ - pyproject-hooks==1.0.0
118
+ - pytz==2024.1
119
+ - pywin32-ctypes==0.2.2
120
+ - pyyaml==6.0.1
121
+ - readme-renderer==43.0
122
+ - referencing==0.34.0
123
+ - regex==2023.12.25
124
+ - requests==2.31.0
125
+ - requests-toolbelt==1.0.0
126
+ - rfc3986==2.0.0
127
+ - rich==13.7.1
128
+ - rpds-py==0.18.0
129
+ - rsa==4.9
130
+ - scipy==1.13.0
131
+ - seaborn==0.13.2
132
+ - smmap==5.0.1
133
+ - streamlit==1.33.0
134
+ - streamlit-drawable-canvas==0.9.3
135
+ - sympy==1.12
136
+ - tabulate==0.9.0
137
+ - tenacity==8.2.3
138
+ - tiktoken==0.3.3
139
+ - toml==0.10.2
140
+ - tomli==2.0.1
141
+ - toolz==0.12.1
142
+ - torch==2.2.2
143
+ - torchvision==0.17.2
144
+ - tqdm==4.66.2
145
+ - twine==5.0.0
146
+ - tzdata==2024.1
147
+ - ultralytics==8.0.120
148
+ - ultralytics-yolo==0.0.1
149
+ - urllib3==2.2.1
150
+ - watchdog==4.0.0
151
+ - yarl==1.9.4
152
+ prefix: c:\Users\fares\Documents\interpreterwithgemini\test_gemini
gitignore ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # .pt files are PyTorch model weights (large binaries; track via Git LFS or download at runtime instead of committing)
10
+ *.pt
11
+
12
+ # Distribution / packaging
13
+ .Python
14
+ build/
15
+ develop-eggs/
16
+ dist/
17
+ downloads/
18
+ eggs/
19
+ .eggs/
20
+ lib/
21
+ lib64/
22
+ parts/
23
+ sdist/
24
+ var/
25
+ wheels/
26
+ share/python-wheels/
27
+ *.egg-info/
28
+ .installed.cfg
29
+ *.egg
30
+ MANIFEST
31
+
32
+ # PyInstaller
33
+ # Usually these files are written by a python script from a template
34
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
35
+ *.manifest
36
+ *.spec
37
+
38
+ # Installer logs
39
+ pip-log.txt
40
+ pip-delete-this-directory.txt
41
+
42
+ # Unit test / coverage reports
43
+ htmlcov/
44
+ .tox/
45
+ .nox/
46
+ .coverage
47
+ .coverage.*
48
+ .cache
49
+ nosetests.xml
50
+ coverage.xml
51
+ *.cover
52
+ *.py,cover
53
+ .hypothesis/
54
+ .pytest_cache/
55
+ cover/
56
+
57
+ # Translations
58
+ *.mo
59
+ *.pot
60
+
61
+ # Django stuff:
62
+ *.log
63
+ local_settings.py
64
+ db.sqlite3
65
+ db.sqlite3-journal
66
+
67
+ # Flask stuff:
68
+ instance/
69
+ .webassets-cache
70
+
71
+ # Scrapy stuff:
72
+ .scrapy
73
+
74
+ # Sphinx documentation
75
+ docs/_build/
76
+
77
+ # PyBuilder
78
+ .pybuilder/
79
+ target/
80
+
81
+ # Jupyter Notebook
82
+ .ipynb_checkpoints
83
+
84
+ # IPython
85
+ profile_default/
86
+ ipython_config.py
87
+
88
+ # pyenv
89
+ # For a library or package, you might want to ignore these files since the code is
90
+ # intended to run in multiple environments; otherwise, check them in:
91
+ # .python-version
92
+
93
+ # pipenv
94
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
96
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
97
+ # install all needed dependencies.
98
+ #Pipfile.lock
99
+
100
+ # poetry
101
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
103
+ # commonly ignored for libraries.
104
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105
+ #poetry.lock
106
+
107
+ # pdm
108
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109
+ #pdm.lock
110
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111
+ # in version control.
112
+ # https://pdm.fming.dev/#use-with-ide
113
+ .pdm.toml
114
+
115
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116
+ __pypackages__/
117
+
118
+ # Celery stuff
119
+ celerybeat-schedule
120
+ celerybeat.pid
121
+
122
+ # SageMath parsed files
123
+ *.sage.py
124
+
125
+ # Environments
126
+ .env
127
+ .venv
128
+ env/
129
+ venv/
130
+ ENV/
131
+ env.bak/
132
+ venv.bak/
133
+
134
+ # Spyder project settings
135
+ .spyderproject
136
+ .spyproject
137
+
138
+ # Rope project settings
139
+ .ropeproject
140
+
141
+ # mkdocs documentation
142
+ /site
143
+
144
+ # mypy
145
+ .mypy_cache/
146
+ .dmypy.json
147
+ dmypy.json
148
+
149
+ # Pyre type checker
150
+ .pyre/
151
+
152
+ # pytype static type analyzer
153
+ .pytype/
154
+
155
+ # Cython debug symbols
156
+ cython_debug/
157
+
158
+ # PyCharm
159
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
162
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163
+ #.idea/
grainsight.log ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ERROR:root:An error occurred: name 'model' is not defined
2
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
3
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
4
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
5
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
6
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
7
+ ERROR:root:An error occurred: segment_everything() got an unexpected keyword argument 'model'
8
+ ERROR:root:An error occurred: Cannot hash argument 'model' (of type `ultralytics.models.yolo.model.YOLO`) in 'segment_everything'.
9
+
10
+ To address this, you can tell Streamlit not to hash this argument by adding a
11
+ leading underscore to the argument's name in the function signature:
12
+
13
+ ```
14
+ @st.cache_data
15
+ def segment_everything(_model, ...):
16
+ ...
17
+ ```
18
+
19
+ ERROR:root:An error occurred: Cannot hash argument 'model' (of type `ultralytics.models.yolo.model.YOLO`) in 'segment_everything'.
20
+
21
+ To address this, you can tell Streamlit not to hash this argument by adding a
22
+ leading underscore to the argument's name in the function signature:
23
+
24
+ ```
25
+ @st.cache_resource
26
+ def segment_everything(_model, ...):
27
+ ...
28
+ ```
29
+
30
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'fast_process'.
31
+
32
+ To address this, you can tell Streamlit not to hash this argument by adding a
33
+ leading underscore to the argument's name in the function signature:
34
+
35
+ ```
36
+ @st.cache_resource
37
+ def fast_process(_annotations, ...):
38
+ ...
39
+ ```
40
+
41
+ ERROR:root:An error occurred: tuple index out of range
42
+ ERROR:root:An error occurred: drawable_canvas() missing 2 required positional arguments: 'annotations' and 'update_segmentation_results'
43
+ ERROR:root:An error occurred: tuple index out of range
44
+ ERROR:root:An error occurred: tuple index out of range
45
+ ERROR:root:An error occurred: tuple index out of range
46
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
47
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 8791780800 bytes.
48
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 23004463104 bytes.
49
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 4685345280 bytes.
50
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 7493472768 bytes.
51
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 8282112000 bytes.
52
+ ERROR:root:An error occurred: tuple index out of range
53
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 12423168000 bytes.
54
+ ERROR:root:An error occurred: tuple index out of range
55
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 15366574080 bytes.
56
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 10655563776 bytes.
57
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 22683435008 bytes.
58
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 5753401600 bytes.
59
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 20216166400 bytes.
60
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 4364006400 bytes.
61
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 15180774400 bytes.
62
+ ERROR:root:An error occurred: [enforce fail at ..\c10\core\impl\alloc_cpu.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 25800000000 bytes.
63
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'calculate_parameters'.
64
+
65
+ To address this, you can tell Streamlit not to hash this argument by adding a
66
+ leading underscore to the argument's name in the function signature:
67
+
68
+ ```
69
+ @st.cache_data
70
+ def calculate_parameters(_annotations, ...):
71
+ ...
72
+ ```
73
+
74
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'calculate_parameters'.
75
+
76
+ To address this, you can tell Streamlit not to hash this argument by adding a
77
+ leading underscore to the argument's name in the function signature:
78
+
79
+ ```
80
+ @st.cache_resource
81
+ def calculate_parameters(_annotations, ...):
82
+ ...
83
+ ```
84
+
85
+ ERROR:root:An error occurred: OpenCV(4.9.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:196: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
86
+
87
+ ERROR:root:An error occurred: st_canvas() got an unexpected keyword argument 'background_image_aspect_ratio'
88
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'calculate_parameters'.
89
+
90
+ To address this, you can tell Streamlit not to hash this argument by adding a
91
+ leading underscore to the argument's name in the function signature:
92
+
93
+ ```
94
+ @st.cache_data
95
+ def calculate_parameters(_annotations, ...):
96
+ ...
97
+ ```
98
+
99
+ ERROR:root:An error occurred: name 'device' is not defined
100
+ ERROR:root:An error occurred: name 'device' is not defined
101
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'cpu'
102
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'cpu'
103
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'cpu'
104
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'cpu'
105
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'numpy'
106
+ ERROR:root:An error occurred: 'numpy.ndarray' object has no attribute 'cpu'
107
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'calculate_parameters'.
108
+
109
+ To address this, you can tell Streamlit not to hash this argument by adding a
110
+ leading underscore to the argument's name in the function signature:
111
+
112
+ ```
113
+ @st.cache_data
114
+ def calculate_parameters(_annotations, ...):
115
+ ...
116
+ ```
117
+
118
+ ERROR:root:An error occurred: Cannot hash argument 'annotations' (of type `torch.Tensor`) in 'calculate_parameters'.
119
+
120
+ To address this, you can tell Streamlit not to hash this argument by adding a
121
+ leading underscore to the argument's name in the function signature:
122
+
123
+ ```
124
+ @st.cache_data
125
+ def calculate_parameters(_annotations, ...):
126
+ ...
127
+ ```
128
+
129
+ ERROR:root:An error occurred: Cannot hash argument 'model' (of type `ultralytics.yolo.engine.model.YOLO`) in 'segment_everything'.
130
+
131
+ To address this, you can tell Streamlit not to hash this argument by adding a
132
+ leading underscore to the argument's name in the function signature:
133
+
134
+ ```
135
+ @st.cache_data
136
+ def segment_everything(_model, ...):
137
+ ...
138
+ ```
139
+
140
+ ERROR:root:An error occurred: Cannot hash argument 'model' (of type `ultralytics.yolo.engine.model.YOLO`) in 'segment_everything'.
141
+
142
+ To address this, you can tell Streamlit not to hash this argument by adding a
143
+ leading underscore to the argument's name in the function signature:
144
+
145
+ ```
146
+ @st.cache_data
147
+ def segment_everything(_model, ...):
148
+ ...
149
+ ```
150
+
151
+ ERROR:root:An error occurred: [enforce fail at alloc_cpu.cpp:114] data. DefaultCPUAllocator: not enough memory: you tried to allocate 27472896000 bytes.
152
+ ERROR:root:An error occurred: [enforce fail at alloc_cpu.cpp:114] data. DefaultCPUAllocator: not enough memory: you tried to allocate 27472896000 bytes.
153
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
154
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
155
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
156
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
157
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
158
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
159
+ ERROR:root:An error occurred: cannot unpack non-iterable int object
160
+ ERROR:root:An error occurred: local variable 'segmented_image' referenced before assignment
161
+ ERROR:root:An error occurred: local variable 'segmented_image' referenced before assignment
162
+ ERROR:root:An error occurred: local variable 'segmented_image' referenced before assignment
163
+ ERROR:root:An error occurred: local variable 'segmented_image' referenced before assignment
164
+ ERROR:root:An error occurred: local variable 'segmented_image' referenced before assignment
165
+ ERROR:root:An error occurred: drawable_canvas() missing 1 required positional argument: 'input_size'
166
+ ERROR:root:An error occurred: drawable_canvas() missing 1 required positional argument: 'input_size'
167
+ ERROR:root:An error occurred: local variable 'pixel_length' referenced before assignment
168
+ ERROR:root:An error occurred: Boolean value of Tensor with more than one value is ambiguous
169
+ ERROR:root:An error occurred: float division by zero
170
+ ERROR:root:An error occurred: float division by zero
171
+ ERROR:root:An error occurred: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
172
+ ERROR:root:An error occurred: local variable 'feret_diameter_micron' referenced before assignment
173
+ ERROR:root:An error occurred: name 'min_feret_diameter_micron' is not defined
174
+ ERROR:root:An error occurred: [enforce fail at alloc_cpu.cpp:114] data. DefaultCPUAllocator: not enough memory: you tried to allocate 29915208704 bytes.
175
+ ERROR:asyncio:Task exception was never retrieved
176
+ future: <Task finished name='Task-25015' coro=<WebSocketProtocol13.write_message.<locals>.wrapper() done, defined at c:\Users\fares\Documents\interpreterwithgemini\test_gemini\lib\site-packages\tornado\websocket.py:1090> exception=WebSocketClosedError()>
177
+ Traceback (most recent call last):
178
+ File "c:\Users\fares\Documents\interpreterwithgemini\test_gemini\lib\site-packages\tornado\websocket.py", line 1092, in wrapper
179
+ await fut
180
+ tornado.iostream.StreamClosedError: Stream is closed
181
+
182
+ During handling of the above exception, another exception occurred:
183
+
184
+ Traceback (most recent call last):
185
+ File "c:\Users\fares\Documents\interpreterwithgemini\test_gemini\lib\site-packages\tornado\websocket.py", line 1094, in wrapper
186
+ raise WebSocketClosedError()
187
+ tornado.websocket.WebSocketClosedError
188
+ ERROR:root:An error occurred: [enforce fail at alloc_cpu.cpp:114] data. DefaultCPUAllocator: not enough memory: you tried to allocate 16398000000 bytes.
189
+ ERROR:root:An error occurred: [enforce fail at alloc_cpu.cpp:114] data. DefaultCPUAllocator: not enough memory: you tried to allocate 39407287296 bytes.
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ libgl1
requirements.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ matplotlib==3.8.4
2
+ opencv-python==4.9.0.80
3
+ Pillow==9.5.0
4
+ PyYAML==6.0.1
5
+ requests==2.31.0
6
+ scipy==1.13.0
7
+ torch==2.2.2
8
+ torchvision==0.17.2
9
+ tqdm==4.66.2
10
+ pandas==2.2.2
11
+ seaborn==0.13.2
12
+ streamlit==1.24.0
13
+ ultralytics==8.0.120
14
+ streamlit-drawable-canvas==0.9.3
15
+ clip==0.2.0
16
+ ultralytics-yolo==0.0.1
setup.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from setuptools import setup, find_packages
2
+
3
+ setup(
4
+ name='Grainsight',
5
+ version='0.1.0',
6
+ author='Your Name',
7
+ author_email='your_email@example.com',
8
+ description='A Streamlit app for segmenting grains using FastSAM',
9
+ packages=find_packages(where="src"),
10
+ install_requires=[
11
+ 'streamlit',
12
+ 'pillow',
13
+ 'ultralytics',
14
+ 'torch',
15
+ 'numpy',
16
+ 'opencv-python',
17
+ 'matplotlib',
18
+ 'pandas',
19
+ 'seaborn',
20
+ 'streamlit-drawable-canvas',
21
+ 'pyyaml'
22
+ ],
23
+ entry_points={
24
+ 'console_scripts': [
25
+ 'grainsight = grainsight.app:main'
26
+ ]
27
+ }
28
+ )
src/__init__.py ADDED
File without changes
src/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes). View file
 
src/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (178 Bytes). View file
 
src/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (160 Bytes). View file
 
src/__pycache__/segment.cpython-310.pyc ADDED
Binary file (7.31 kB). View file
 
src/__pycache__/ui.cpython-310.pyc ADDED
Binary file (2.96 kB). View file
 
src/model/.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.pt filter=lfs diff=lfs merge=lfs -text
src/segmentation/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .segment import segment_everything, fast_process
src/segmentation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (248 Bytes). View file
 
src/segmentation/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (246 Bytes). View file
 
src/segmentation/__pycache__/segment.cpython-310.pyc ADDED
Binary file (5.26 kB). View file
 
src/segmentation/__pycache__/segment.cpython-39.pyc ADDED
Binary file (5.23 kB). View file
 
src/segmentation/segment.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import matplotlib.pyplot as plt
3
+ import numpy as np
4
+ import torch
5
+ from PIL import Image
6
+
7
+
8
+ def segment_everything(_input, model, device, input_size=1024, iou_threshold=0.7, conf_threshold=0.25, better_quality=False, contour_thickness=1, max_det = 500):
9
+ """
10
+ Performs segmentation on the input image and returns the segmented image and annotations.
11
+ """
12
+
13
+ input_image = _input
14
+ input_size = int(input_size)
15
+ w, h = input_image.size
16
+ scale = input_size / max(w, h)
17
+ new_w = int(w * scale)
18
+ new_h = int(h * scale)
19
+ input_image = input_image.resize((new_w, new_h))
20
+
21
+ results = model(input_image,
22
+ retina_masks=True,
23
+ iou=iou_threshold,
24
+ conf=conf_threshold,
25
+ imgsz=input_size,
26
+ max_det=max_det)
27
+
28
+ annotations = results[0].masks.data
29
+ segmented_image = fast_process(annotations=annotations,
30
+ device=device,
31
+ image=input_image,
32
+ scale=(1024 // input_size),
33
+ better_quality=better_quality,
34
+ contour_thickness=contour_thickness)
35
+
36
+ return segmented_image, annotations
37
+
38
+
39
+ def fast_process(annotations,
40
+ image,
41
+ device,
42
+ scale,
43
+ better_quality=False,
44
+ mask_random_color=True,
45
+ bbox=None,
46
+ use_retina=True,
47
+ withContours=True,
48
+ contour_thickness=2):
49
+ if isinstance(annotations[0], dict):
50
+ annotations = [annotation['segmentation'] for annotation in annotations]
51
+ original_h = image.height
52
+ original_w = image.width
53
+ if better_quality:
54
+ if isinstance(annotations[0], torch.Tensor):
55
+ annotations = np.array(annotations.cpu())
56
+ for i, mask in enumerate(annotations):
57
+ mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
58
+ annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
59
+ if device == 'cpu':
60
+ annotations = np.array(annotations)
61
+ inner_mask = fast_show_mask(
62
+ annotations,
63
+ plt.gca(),
64
+ random_color=mask_random_color,
65
+ bbox=bbox,
66
+ retinamask=use_retina,
67
+ target_height=original_h,
68
+ target_width=original_w,
69
+ )
70
+ else:
71
+ if isinstance(annotations[0], np.ndarray):
72
+ annotations = torch.from_numpy(annotations)
73
+ inner_mask = fast_show_mask_gpu(
74
+ annotations,
75
+ plt.gca(),
76
+ random_color=mask_random_color,
77
+ bbox=bbox,
78
+ retinamask=use_retina,
79
+ target_height=original_h,
80
+ target_width=original_w,
81
+ )
82
+ if isinstance(annotations, torch.Tensor):
83
+ annotations = annotations.cpu().numpy()
84
+ kernel = np.ones((5, 5), np.uint8)
85
+ if withContours:
86
+ contour_all = []
87
+ temp = np.zeros((original_h, original_w, 1))
88
+ for i, mask in enumerate(annotations):
89
+ if type(mask) == dict:
90
+ mask = mask['segmentation']
91
+ annotation = mask.astype(np.uint8)
92
+ # Perform morphological operations for separating connected objects and smoothing contours
93
+ kernel = np.ones((5, 5), np.uint8)
94
+ annotation = cv2.morphologyEx(annotation, cv2.MORPH_OPEN, kernel)
95
+ annotation = cv2.GaussianBlur(annotation, (5, 5), 0)
96
+ # Find contours
97
+ contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
98
+ for contour in contours:
99
+ hull = cv2.convexHull(contour)
100
+ epsilon = 0.005 * cv2.arcLength(contour, True)
101
+ approx = cv2.approxPolyDP(contour, epsilon, True)
102
+ contour_all.append(approx)
103
+ for i, contour in enumerate(contour_all):
104
+ M = cv2.moments(contour)
105
+ if M["m00"] != 0:
106
+ cX = int(M["m10"] / M["m00"])
107
+ cY = int(M["m01"] / M["m00"])
108
+ else:
109
+ cX, cY = 0, 0
110
+ cv2.putText(temp, str(i), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 125, 255), 2)
111
+ cv2.drawContours(temp, contour_all, -1, (255, 255, 255), contour_thickness)
112
+ color = np.array([255 / 255, 0 / 255, 0 / 255, 1]) # RGBA
113
+ contour_mask = temp / 255 * color.reshape(1, 1, -1)
114
+ image = image.convert('RGBA')
115
+ overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), 'RGBA')
116
+ image.paste(overlay_inner, (0, 0), overlay_inner)
117
+ if withContours:
118
+ overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
119
+ image.paste(overlay_contour, (0, 0), overlay_contour)
120
+ return image
121
+
122
+
123
+ # CPU post process
124
+ def fast_show_mask(
125
+ annotation,
126
+ ax,
127
+ random_color=False,
128
+ bbox=None,
129
+ retinamask=True,
130
+ target_height=960,
131
+ target_width=960,
132
+ ):
133
+ mask_sum = annotation.shape[0]
134
+ height = annotation.shape[1]
135
+ weight = annotation.shape[2]
136
+ # Sort annotation by area
137
+ areas = np.sum(annotation, axis=(1, 2))
138
+ sorted_indices = np.argsort(areas)[::1]
139
+ annotation = annotation[sorted_indices]
140
+ index = (annotation != 0).argmax(axis=0)
141
+ if random_color:
142
+ color = np.random.random((mask_sum, 1, 1, 3))
143
+ else:
144
+ color = np.ones((mask_sum, 1, 1, 3)) * np.array([30 / 255, 144 / 255, 255 / 255])
145
+ transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
146
+ visual = np.concatenate([color, transparency], axis=-1)
147
+ mask_image = np.expand_dims(annotation, -1) * visual
148
+ mask = np.zeros((height, weight, 4))
149
+ h_indices, w_indices = np.meshgrid(np.arange(height), np.arange(weight), indexing='ij')
150
+ indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
151
+ mask[h_indices, w_indices, :] = mask_image[indices]
152
+ if bbox is not None:
153
+ x1, y1, x2, y2 = bbox
154
+ ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1))
155
+ if not retinamask:
156
+ mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)
157
+ return mask
158
+
159
+
160
+ def fast_show_mask_gpu(
161
+ annotation,
162
+ ax,
163
+ random_color=False,
164
+ bbox=None,
165
+ retinamask=True,
166
+ target_height=960,
167
+ target_width=960,
168
+ ):
169
+ device = annotation.device
170
+ mask_sum = annotation.shape[0]
171
+ height = annotation.shape[1]
172
+ weight = annotation.shape[2]
173
+ areas = torch.sum(annotation, dim=(1, 2))
174
+ sorted_indices = torch.argsort(areas, descending=False)
175
+ annotation = annotation[sorted_indices]
176
+ # Find the first non-zero value index for each position
177
+ index = (annotation != 0).to(torch.long).argmax(dim=0)
178
+ if random_color:
179
+ color = torch.rand((mask_sum, 1, 1, 3)).to(device)
180
+ else:
181
+ color = torch.ones((mask_sum, 1, 1, 3)).to(device) * torch.tensor(
182
+ [30 / 255, 144 / 255, 255 / 255]
183
+ ).to(device)
184
+ transparency = torch.ones((mask_sum, 1, 1, 1)).to(device) * 0.6
185
+ visual = torch.cat([color, transparency], dim=-1)
186
+ mask_image = torch.unsqueeze(annotation, -1) * visual
187
+ # Use vectorization to get the value of the batch
188
+ mask = torch.zeros((height, weight, 4)).to(device)
189
+ h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight))
190
+ indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
191
+ # Use vectorized indexing to update the show values
192
+ mask[h_indices, w_indices, :] = mask_image[indices]
193
+ mask_cpu = mask.cpu().numpy()
194
+ if bbox is not None:
195
+ x1, y1, x2, y2 = bbox
196
+ ax.add_patch(
197
+ plt.Rectangle(
198
+ (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
199
+ )
200
+ )
201
+ if not retinamask:
202
+ mask_cpu = cv2.resize(
203
+ mask_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
204
+ )
205
+ return mask_cpu
206
+
207
+
src/ui/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .streamlit_ui import streamlit_ui
2
+ from .drawable_canvas import drawable_canvas
src/ui/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (238 Bytes). View file
 
src/ui/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (284 Bytes). View file
 
src/ui/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (236 Bytes). View file
 
src/ui/__pycache__/drawable_canvas.cpython-310.pyc ADDED
Binary file (773 Bytes). View file
 
src/ui/__pycache__/drawable_canvas.cpython-311.pyc ADDED
Binary file (988 Bytes). View file
 
src/ui/__pycache__/drawable_canvas.cpython-39.pyc ADDED
Binary file (982 Bytes). View file
 
src/ui/__pycache__/streamlit_ui.cpython-310.pyc ADDED
Binary file (1.79 kB). View file
 
src/ui/__pycache__/streamlit_ui.cpython-311.pyc ADDED
Binary file (2.62 kB). View file
 
src/ui/__pycache__/streamlit_ui.cpython-39.pyc ADDED
Binary file (1.79 kB). View file
 
src/ui/drawable_canvas.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # src\ui\drawable_canvas.py
2
+ import streamlit as st
3
+ from streamlit_drawable_canvas import st_canvas
4
+ from PIL import Image
5
+
6
+ def drawable_canvas(uploaded_image, input_size):
7
+ """Creates a Streamlit drawable canvas with the resized image as the background."""
8
+ # Generate a unique key for the canvas based on the input size
9
+ canvas_key = f"canvas_{input_size}"
10
+
11
+ st.write("Draw a line to set the scale:")
12
+ original_image = Image.open(uploaded_image)
13
+ image_width, image_height = original_image.size
14
+ scale = input_size / max(image_width, image_height)
15
+ new_w = int(image_width * scale)
16
+ new_h = int(image_height * scale)
17
+ resized_image = original_image.resize((new_w, new_h))
18
+ canvas_result = st_canvas(
19
+ fill_color="rgba(255, 165, 0, 0.3)",
20
+ stroke_width=10,
21
+ stroke_color="#e00",
22
+ background_image=resized_image,
23
+ height=new_h,
24
+ width=new_w,
25
+ drawing_mode="line",
26
+ key=canvas_key,
27
+ )
28
+ return canvas_result
29
+
src/ui/streamlit_ui.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+
3
+ def streamlit_ui():
4
+ """Creates the Streamlit user interface with input controls."""
5
+
6
+ st.sidebar.title("Segmentation Parameters")
7
+
8
+ uploaded_image = st.sidebar.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
9
+
10
+ input_size = st.sidebar.slider(
11
+ "Input Size", 512, 3000, 1024, 64,
12
+ help="Size of the input image. Higher values may improve detection but will be slower."
13
+ )
14
+
15
+ iou_threshold = st.sidebar.slider(
16
+ "IOU Threshold", 0.0, 0.9, 0.7, 0.1,
17
+ help="Intersection over Union threshold for object detection. Higher values reduce false positives."
18
+ )
19
+
20
+ conf_threshold = st.sidebar.slider(
21
+ "Confidence Threshold", 0.0, 0.9, 0.5, 0.01,
22
+ help="Minimum confidence level for detected objects. Lower values may detect more objects but increase false positives."
23
+ )
24
+
25
+ better_quality = st.sidebar.checkbox(
26
+ "Better Visual Quality", True,
27
+ help="Check to improve the visual quality of the segmentation. May be slower."
28
+ )
29
+
30
+ contour_thickness = st.sidebar.slider(
31
+ "Contour Thickness", 1, 50, 1,
32
+ help="Thickness of the contour lines around detected objects."
33
+ )
34
+
35
+ real_world_length = st.sidebar.number_input(
36
+ "Enter the real-world length of the line in micrometers:",
37
+ min_value=1, value=100,
38
+ help="Length of the reference line in the real world, used for scaling object parameters."
39
+ )
40
+
41
+ max_det = st.sidebar.number_input(
42
+ "Maximum Number of Detected Objects",
43
+ min_value=1, value=500,
44
+ help="Maximum number of detected objects. Higher values may have significant impact on performance."
45
+ )
46
+ return uploaded_image, input_size, iou_threshold, conf_threshold, better_quality, contour_thickness, real_world_length, max_det
src/utils/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .parameters import calculate_parameters
2
+ from .visualization import plot_distribution, plot_cumulative_frequency
3
+ from .calculations import calculate_pixel_length
src/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (337 Bytes). View file
 
src/utils/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (371 Bytes). View file
 
src/utils/__pycache__/calculations.cpython-310.pyc ADDED
Binary file (441 Bytes). View file
 
src/utils/__pycache__/calculations.cpython-39.pyc ADDED
Binary file (438 Bytes). View file
 
src/utils/__pycache__/load_config.cpython-310.pyc ADDED
Binary file (449 Bytes). View file
 
src/utils/__pycache__/parameters.cpython-310.pyc ADDED
Binary file (1.3 kB). View file
 
src/utils/__pycache__/parameters.cpython-39.pyc ADDED
Binary file (1.56 kB). View file
 
src/utils/__pycache__/visualization.cpython-310.pyc ADDED
Binary file (788 Bytes). View file
 
src/utils/__pycache__/visualization.cpython-39.pyc ADDED
Binary file (1.22 kB). View file
 
src/utils/calculations.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
def calculate_pixel_length(start_point, end_point):
    """Return the Euclidean distance in pixels between two (x, y) points.

    Args:
        start_point: (x, y) coordinates of the line's start.
        end_point: (x, y) coordinates of the line's end.

    Returns:
        float: straight-line length in pixels.

    Uses math.hypot, which is numerically robust against intermediate
    overflow/underflow compared with sqrt(dx**2 + dy**2).
    """
    x1, y1 = start_point
    x2, y2 = end_point
    return math.hypot(x2 - x1, y2 - y1)
src/utils/parameters.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import pandas as pd
4
+ import streamlit as st
5
+
6
def calculate_parameters(annotations, scale_factor):
    """Calculate shape parameters for each segmented object.

    Args:
        annotations: sequence of per-object binary segmentation masks
            (torch tensors — each is moved to CPU and cast to uint8).
        scale_factor: micrometers per pixel, used to convert pixel
            measurements to real-world units.

    Returns:
        pandas.DataFrame with one row per valid object and columns
        'Object', 'Area', 'Perimeter', 'Roundness',
        'Aspect Ratio (Elongation)', 'Longest Feret Diameter'.
        Artifacts (zero Feret diameter, roundness outside [0, 1]) are
        filtered out before returning.
    """
    columns = ['Object', 'Area', 'Perimeter', 'Roundness',
               'Aspect Ratio (Elongation)', 'Longest Feret Diameter']
    # Collect plain dicts and build the DataFrame once at the end —
    # pd.concat inside the loop is O(n^2) in the number of objects.
    rows = []

    for i, mask in enumerate(annotations):
        binary_mask = mask.cpu().numpy().astype(np.uint8)
        area_pixel = np.sum(binary_mask)
        area_micron = area_pixel * (scale_factor ** 2)

        # Find contours with all boundary points (no approximation) so
        # perimeter and Feret diameter are measured on the exact outline.
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if not contours:
            continue  # empty mask: nothing to measure

        contour = contours[0]
        perimeter_pixel = cv2.arcLength(contour, True)
        perimeter_micron = perimeter_pixel * scale_factor
        if perimeter_micron == 0:
            # Degenerate (single-point) contour: roundness would be inf/NaN
            # and the artifact filter below would drop it anyway.
            continue

        # cv2.fitEllipse requires at least 5 contour points.
        if len(contour) >= 5:
            major_axis, minor_axis = cv2.fitEllipse(contour)[1]
        else:
            major_axis = minor_axis = 0

        major_axis_micron = major_axis * scale_factor
        minor_axis_micron = minor_axis * scale_factor
        # Roundness = 4*pi*A / P^2 (1.0 for a perfect circle).
        roundness = (4 * np.pi * area_micron) / (perimeter_micron ** 2)
        aspect_ratio = (major_axis_micron / minor_axis_micron
                        if minor_axis_micron != 0 else "Undefined")

        # Longest Feret diameter: maximum pairwise distance between convex
        # hull points (the farthest point pair always lies on the hull).
        hull = cv2.convexHull(contour)
        distances = np.linalg.norm(hull - hull[:, 0, :], axis=2)
        max_feret_diameter_micron = np.max(distances) * scale_factor

        rows.append({
            'Object': f"Object {i}",
            'Area': area_micron,
            'Perimeter': perimeter_micron,
            'Roundness': roundness,
            'Aspect Ratio (Elongation)': aspect_ratio,
            'Longest Feret Diameter': max_feret_diameter_micron,
        })

    df = pd.DataFrame(rows, columns=columns)

    # Eliminate artifacts with undefined parameters.
    df = df[(df['Longest Feret Diameter'] != 0) & (df['Roundness'] >= 0) & (df['Roundness'] <= 1)]

    return df
src/utils/visualization.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import seaborn as sns
2
+ import matplotlib.pyplot as plt
3
+ import streamlit as st
4
+
5
def plot_distribution(df, selected_parameter):
    """Render a histogram (with KDE overlay) of *selected_parameter* in Streamlit.

    Plotting errors are caught and reported to the page instead of raised.
    """
    try:
        figure, axis = plt.subplots()
        sns.histplot(df[selected_parameter], kde=True, ax=axis)
        axis.set_xlabel(selected_parameter)
        axis.set_ylabel('Frequency')
        axis.set_title(f'Distribution of {selected_parameter}')
        st.pyplot(figure)
    except Exception as e:
        st.write(f"An error occurred while plotting: {e}")
17
+
18
def plot_cumulative_frequency(df):
    """Render a cumulative frequency (ECDF) plot of grain diameters in Streamlit.

    Args:
        df: DataFrame containing a 'Longest Feret Diameter' column.

    Plotting errors are caught and reported to the page instead of raised.
    """
    try:
        fig, ax = plt.subplots()
        sns.ecdfplot(df['Longest Feret Diameter'], ax=ax)
        # Plain string — the original used an f-string with no placeholders.
        ax.set_title('Cumulative Frequency Plot')
        ax.set_xlabel('Grains diameter')
        ax.set_ylabel('Cumulative Frequency')
        st.pyplot(fig)
    except Exception as e:
        st.write(f"An error occurred while plotting: {e}")
28
+