ZTWHHH commited on
Commit
bf535e8
·
verified ·
1 Parent(s): ea49892

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +20 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/__pycache__/autologger.cpython-310.pyc +0 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/__pycache__/multimodal.cpython-310.pyc +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/multimodal.py +882 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/utils.py +102 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__init__.py +11 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__pycache__/__init__.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__pycache__/keras.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__init__.py +5 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/__init__.cpython-310.pyc +0 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/metrics_logger.cpython-310.pyc +0 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/model_checkpoint.cpython-310.pyc +0 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/tables_builder.cpython-310.pyc +0 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/metrics_logger.py +129 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/model_checkpoint.py +188 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/tables_builder.py +228 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/keras.py +1091 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sacred/__init__.py +117 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sacred/__pycache__/__init__.cpython-310.pyc +0 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sagemaker/__pycache__/__init__.cpython-310.pyc +0 -0
  22. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__init__.py +3 -0
  23. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__pycache__/__init__.cpython-310.pyc +0 -0
  24. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__pycache__/sb3.cpython-310.pyc +0 -0
  25. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/sb3.py +147 -0
  26. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__init__.py +0 -0
  27. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__pycache__/__init__.cpython-310.pyc +0 -0
  28. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__pycache__/yolov8.cpython-310.pyc +0 -0
  29. evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/yolov8.py +284 -0
  30. evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/__init__.py +0 -0
  31. evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/graphql-core-1.1/setup.py +86 -0
  32. evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/graphql-core-1.1/wandb_graphql/execution/executor.py +398 -0
  33. evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/__config__.cpython-310.pyc +0 -0
  34. evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc +0 -0
  35. evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc +0 -0
  36. evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc +0 -0
  37. evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc +0 -0
  38. evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__init__.py +6 -0
  39. evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/common.cpython-310.pyc +0 -0
  40. evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/doccer.cpython-310.pyc +0 -0
  41. evalkit_eagle/lib/python3.10/site-packages/scipy/misc/common.py +6 -0
  42. evalkit_eagle/lib/python3.10/site-packages/scipy/misc/doccer.py +6 -0
  43. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc +0 -0
  44. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc +0 -0
  45. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc +0 -0
  46. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc +0 -0
  47. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc +0 -0
  48. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc +0 -0
  49. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc +0 -0
  50. evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -806,3 +806,23 @@ evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.
806
  evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
807
  evalkit_eagle/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
808
  evalkit_eagle/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
806
  evalkit_eagle/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
807
  evalkit_eagle/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
808
  evalkit_eagle/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
809
+ infer_4_47_1/lib/libgomp.so filter=lfs diff=lfs merge=lfs -text
810
+ infer_4_47_1/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
811
+ infer_4_47_1/lib/libtsan.so.0 filter=lfs diff=lfs merge=lfs -text
812
+ infer_4_47_1/lib/libatomic.so.1 filter=lfs diff=lfs merge=lfs -text
813
+ infer_4_47_1/lib/libtsan.so filter=lfs diff=lfs merge=lfs -text
814
+ infer_4_47_1/lib/libstdc++.so.6 filter=lfs diff=lfs merge=lfs -text
815
+ infer_4_47_1/lib/libtk8.6.so filter=lfs diff=lfs merge=lfs -text
816
+ infer_4_47_1/lib/libtsan.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
817
+ infer_4_47_1/lib/libgomp.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
818
+ infer_4_47_1/lib/libquadmath.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
819
+ infer_4_47_1/lib/libubsan.so filter=lfs diff=lfs merge=lfs -text
820
+ infer_4_47_1/lib/libreadline.so.8.2 filter=lfs diff=lfs merge=lfs -text
821
+ infer_4_47_1/lib/libitm.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
822
+ infer_4_47_1/lib/libncursesw.so.6.4 filter=lfs diff=lfs merge=lfs -text
823
+ infer_4_47_1/lib/libbz2.so.1.0 filter=lfs diff=lfs merge=lfs -text
824
+ infer_4_47_1/lib/libncurses.so.6.4 filter=lfs diff=lfs merge=lfs -text
825
+ infer_4_47_1/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
826
+ infer_4_47_1/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text
827
+ infer_4_47_1/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text
828
+ infer_4_47_1/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/__pycache__/autologger.cpython-310.pyc ADDED
Binary file (2.83 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (343 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/__pycache__/multimodal.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/multimodal.py ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import Any, Dict, List, Sequence
3
+
4
+ import wandb
5
+ from wandb.sdk.integration_utils.auto_logging import Response
6
+
7
+ from .utils import (
8
+ chunkify,
9
+ decode_sdxl_t2i_latents,
10
+ get_updated_kwargs,
11
+ postprocess_np_arrays_for_video,
12
+ postprocess_pils_to_np,
13
+ )
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ SUPPORTED_MULTIMODAL_PIPELINES = {
19
+ "BlipDiffusionPipeline": {
20
+ "table-schema": [
21
+ "Reference-Image",
22
+ "Prompt",
23
+ "Negative-Prompt",
24
+ "Source-Subject-Category",
25
+ "Target-Subject-Category",
26
+ "Generated-Image",
27
+ ],
28
+ "kwarg-logging": [
29
+ "reference_image",
30
+ "prompt",
31
+ "neg_prompt",
32
+ "source_subject_category",
33
+ "target_subject_category",
34
+ ],
35
+ "kwarg-actions": [wandb.Image, None, None, None, None],
36
+ },
37
+ "BlipDiffusionControlNetPipeline": {
38
+ "table-schema": [
39
+ "Reference-Image",
40
+ "Control-Image",
41
+ "Prompt",
42
+ "Negative-Prompt",
43
+ "Source-Subject-Category",
44
+ "Target-Subject-Category",
45
+ "Generated-Image",
46
+ ],
47
+ "kwarg-logging": [
48
+ "reference_image",
49
+ "condtioning_image",
50
+ "prompt",
51
+ "neg_prompt",
52
+ "source_subject_category",
53
+ "target_subject_category",
54
+ ],
55
+ "kwarg-actions": [wandb.Image, wandb.Image, None, None, None, None],
56
+ },
57
+ "StableDiffusionControlNetPipeline": {
58
+ "table-schema": [
59
+ "Control-Image",
60
+ "Prompt",
61
+ "Negative-Prompt",
62
+ "Generated-Image",
63
+ ],
64
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
65
+ "kwarg-actions": [wandb.Image, None, None],
66
+ },
67
+ "StableDiffusionControlNetImg2ImgPipeline": {
68
+ "table-schema": [
69
+ "Source-Image",
70
+ "Control-Image",
71
+ "Prompt",
72
+ "Negative-Prompt",
73
+ "Generated-Image",
74
+ ],
75
+ "kwarg-logging": ["image", "control_image", "prompt", "negative_prompt"],
76
+ "kwarg-actions": [wandb.Image, wandb.Image, None, None],
77
+ },
78
+ "StableDiffusionControlNetInpaintPipeline": {
79
+ "table-schema": [
80
+ "Source-Image",
81
+ "Mask-Image",
82
+ "Control-Image",
83
+ "Prompt",
84
+ "Negative-Prompt",
85
+ "Generated-Image",
86
+ ],
87
+ "kwarg-logging": [
88
+ "image",
89
+ "mask_image",
90
+ "control_image",
91
+ "prompt",
92
+ "negative_prompt",
93
+ ],
94
+ "kwarg-actions": [wandb.Image, wandb.Image, wandb.Image, None, None],
95
+ },
96
+ "CycleDiffusionPipeline": {
97
+ "table-schema": [
98
+ "Source-Image",
99
+ "Prompt",
100
+ "Source-Prompt",
101
+ "Generated-Image",
102
+ ],
103
+ "kwarg-logging": [
104
+ "image",
105
+ "prompt",
106
+ "source_prompt",
107
+ ],
108
+ "kwarg-actions": [wandb.Image, None, None],
109
+ },
110
+ "StableDiffusionInstructPix2PixPipeline": {
111
+ "table-schema": [
112
+ "Source-Image",
113
+ "Prompt",
114
+ "Negative-Prompt",
115
+ "Generated-Image",
116
+ ],
117
+ "kwarg-logging": [
118
+ "image",
119
+ "prompt",
120
+ "negative_prompt",
121
+ ],
122
+ "kwarg-actions": [wandb.Image, None, None],
123
+ },
124
+ "PaintByExamplePipeline": {
125
+ "table-schema": [
126
+ "Source-Image",
127
+ "Example-Image",
128
+ "Mask-Prompt",
129
+ "Generated-Image",
130
+ ],
131
+ "kwarg-logging": [
132
+ "image",
133
+ "example_image",
134
+ "mask_image",
135
+ ],
136
+ "kwarg-actions": [wandb.Image, wandb.Image, wandb.Image],
137
+ },
138
+ "RePaintPipeline": {
139
+ "table-schema": [
140
+ "Source-Image",
141
+ "Mask-Prompt",
142
+ "Generated-Image",
143
+ ],
144
+ "kwarg-logging": [
145
+ "image",
146
+ "mask_image",
147
+ ],
148
+ "kwarg-actions": [wandb.Image, wandb.Image],
149
+ },
150
+ "StableDiffusionPipeline": {
151
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
152
+ "kwarg-logging": ["prompt", "negative_prompt"],
153
+ "kwarg-actions": [None, None],
154
+ },
155
+ "KandinskyCombinedPipeline": {
156
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
157
+ "kwarg-logging": ["prompt", "negative_prompt"],
158
+ "kwarg-actions": [None, None],
159
+ },
160
+ "KandinskyV22CombinedPipeline": {
161
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
162
+ "kwarg-logging": ["prompt", "negative_prompt"],
163
+ "kwarg-actions": [None, None],
164
+ },
165
+ "LatentConsistencyModelPipeline": {
166
+ "table-schema": ["Prompt", "Generated-Image"],
167
+ "kwarg-logging": ["prompt"],
168
+ "kwarg-actions": [None],
169
+ },
170
+ "LDMTextToImagePipeline": {
171
+ "table-schema": ["Prompt", "Generated-Image"],
172
+ "kwarg-logging": ["prompt"],
173
+ "kwarg-actions": [None],
174
+ },
175
+ "StableDiffusionPanoramaPipeline": {
176
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
177
+ "kwarg-logging": ["prompt", "negative_prompt"],
178
+ "kwarg-actions": [None, None],
179
+ },
180
+ "PixArtAlphaPipeline": {
181
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
182
+ "kwarg-logging": ["prompt", "negative_prompt"],
183
+ "kwarg-actions": [None, None],
184
+ },
185
+ "StableDiffusionSAGPipeline": {
186
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
187
+ "kwarg-logging": ["prompt", "negative_prompt"],
188
+ "kwarg-actions": [None, None],
189
+ },
190
+ "SemanticStableDiffusionPipeline": {
191
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
192
+ "kwarg-logging": ["prompt", "negative_prompt"],
193
+ "kwarg-actions": [None, None],
194
+ },
195
+ "WuerstchenCombinedPipeline": {
196
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
197
+ "kwarg-logging": ["prompt", "negative_prompt"],
198
+ "kwarg-actions": [None, None],
199
+ },
200
+ "IFPipeline": {
201
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
202
+ "kwarg-logging": ["prompt", "negative_prompt"],
203
+ "kwarg-actions": [None, None],
204
+ },
205
+ "AltDiffusionPipeline": {
206
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
207
+ "kwarg-logging": ["prompt", "negative_prompt"],
208
+ "kwarg-actions": [None, None],
209
+ },
210
+ "StableDiffusionAttendAndExcitePipeline": {
211
+ "table-schema": ["Prompt", "Negative-Prompt", "Generated-Image"],
212
+ "kwarg-logging": ["prompt", "negative_prompt"],
213
+ "kwarg-actions": [None, None],
214
+ },
215
+ "KandinskyImg2ImgCombinedPipeline": {
216
+ "table-schema": [
217
+ "Source-Image",
218
+ "Prompt",
219
+ "Negative-Prompt",
220
+ "Generated-Image",
221
+ ],
222
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
223
+ "kwarg-actions": [wandb.Image, None, None],
224
+ },
225
+ "KandinskyInpaintCombinedPipeline": {
226
+ "table-schema": [
227
+ "Source-Image",
228
+ "Prompt",
229
+ "Negative-Prompt",
230
+ "Generated-Image",
231
+ ],
232
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
233
+ "kwarg-actions": [wandb.Image, None, None],
234
+ },
235
+ "KandinskyV22Img2ImgCombinedPipeline": {
236
+ "table-schema": [
237
+ "Source-Image",
238
+ "Prompt",
239
+ "Negative-Prompt",
240
+ "Generated-Image",
241
+ ],
242
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
243
+ "kwarg-actions": [wandb.Image, None, None],
244
+ },
245
+ "KandinskyV22InpaintCombinedPipeline": {
246
+ "table-schema": [
247
+ "Source-Image",
248
+ "Prompt",
249
+ "Negative-Prompt",
250
+ "Generated-Image",
251
+ ],
252
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
253
+ "kwarg-actions": [wandb.Image, None, None],
254
+ },
255
+ "AnimateDiffPipeline": {
256
+ "table-schema": [
257
+ "Prompt",
258
+ "Negative-Prompt",
259
+ "Number-of-Frames",
260
+ "Generated-Video",
261
+ ],
262
+ "kwarg-logging": ["prompt", "negative_prompt", "num_frames"],
263
+ "kwarg-actions": [None, None, None],
264
+ "output-type": "video",
265
+ },
266
+ "StableVideoDiffusionPipeline": {
267
+ "table-schema": [
268
+ "Input-Image",
269
+ "Frames-Per-Second",
270
+ "Generated-Video",
271
+ ],
272
+ "kwarg-logging": ["image", "fps"],
273
+ "kwarg-actions": [wandb.Image, None],
274
+ "output-type": "video",
275
+ },
276
+ "AudioLDMPipeline": {
277
+ "table-schema": [
278
+ "Prompt",
279
+ "Negative-Prompt",
280
+ "Audio-Length-in-Seconds",
281
+ "Generated-Audio",
282
+ ],
283
+ "kwarg-logging": ["prompt", "negative_prompt", "audio_length_in_s"],
284
+ "kwarg-actions": [None, None, None],
285
+ "output-type": "audio",
286
+ },
287
+ "AudioLDM2Pipeline": {
288
+ "table-schema": [
289
+ "Prompt",
290
+ "Negative-Prompt",
291
+ "Audio-Length-in-Seconds",
292
+ "Generated-Audio",
293
+ ],
294
+ "kwarg-logging": ["prompt", "negative_prompt", "audio_length_in_s"],
295
+ "kwarg-actions": [None, None, None],
296
+ "output-type": "audio",
297
+ },
298
+ "MusicLDMPipeline": {
299
+ "table-schema": [
300
+ "Prompt",
301
+ "Negative-Prompt",
302
+ "Audio-Length-in-Seconds",
303
+ "Generated-Audio",
304
+ ],
305
+ "kwarg-logging": ["prompt", "negative_prompt", "audio_length_in_s"],
306
+ "kwarg-actions": [None, None, None],
307
+ "output-type": "audio",
308
+ },
309
+ "StableDiffusionPix2PixZeroPipeline": {
310
+ "table-schema": [
311
+ "Prompt",
312
+ "Negative-Prompt",
313
+ "Generated-Image",
314
+ ],
315
+ "kwarg-logging": ["prompt", "negative_prompt"],
316
+ "kwarg-actions": [None, None],
317
+ },
318
+ "PNDMPipeline": {
319
+ "table-schema": [
320
+ "Batch-Size",
321
+ "Number-of-Inference-Steps",
322
+ "Generated-Image",
323
+ ],
324
+ "kwarg-logging": ["batch_size", "num_inference_steps"],
325
+ "kwarg-actions": [None, None],
326
+ },
327
+ "ShapEPipeline": {
328
+ "table-schema": [
329
+ "Prompt",
330
+ "Generated-Video",
331
+ ],
332
+ "kwarg-logging": ["prompt"],
333
+ "kwarg-actions": [None],
334
+ "output-type": "video",
335
+ },
336
+ "StableDiffusionImg2ImgPipeline": {
337
+ "table-schema": [
338
+ "Source-Image",
339
+ "Prompt",
340
+ "Negative-Prompt",
341
+ "Generated-Image",
342
+ ],
343
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
344
+ "kwarg-actions": [wandb.Image, None, None],
345
+ },
346
+ "StableDiffusionInpaintPipeline": {
347
+ "table-schema": [
348
+ "Source-Image",
349
+ "Mask-Image",
350
+ "Prompt",
351
+ "Negative-Prompt",
352
+ "Generated-Image",
353
+ ],
354
+ "kwarg-logging": ["image", "mask_image", "prompt", "negative_prompt"],
355
+ "kwarg-actions": [wandb.Image, wandb.Image, None, None],
356
+ },
357
+ "StableDiffusionDepth2ImgPipeline": {
358
+ "table-schema": [
359
+ "Source-Image",
360
+ "Prompt",
361
+ "Negative-Prompt",
362
+ "Generated-Image",
363
+ ],
364
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
365
+ "kwarg-actions": [wandb.Image, None, None],
366
+ },
367
+ "StableDiffusionImageVariationPipeline": {
368
+ "table-schema": [
369
+ "Source-Image",
370
+ "Generated-Image",
371
+ ],
372
+ "kwarg-logging": [
373
+ "image",
374
+ ],
375
+ "kwarg-actions": [wandb.Image],
376
+ },
377
+ "StableDiffusionPipelineSafe": {
378
+ "table-schema": [
379
+ "Prompt",
380
+ "Negative-Prompt",
381
+ "Generated-Image",
382
+ ],
383
+ "kwarg-logging": ["prompt", "negative_prompt"],
384
+ "kwarg-actions": [None, None],
385
+ },
386
+ "StableDiffusionUpscalePipeline": {
387
+ "table-schema": [
388
+ "Source-Image",
389
+ "Prompt",
390
+ "Negative-Prompt",
391
+ "Upscaled-Image",
392
+ ],
393
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
394
+ "kwarg-actions": [wandb.Image, None, None],
395
+ },
396
+ "StableDiffusionAdapterPipeline": {
397
+ "table-schema": [
398
+ "Source-Image",
399
+ "Prompt",
400
+ "Negative-Prompt",
401
+ "Generated-Image",
402
+ ],
403
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
404
+ "kwarg-actions": [wandb.Image, None, None],
405
+ },
406
+ "StableDiffusionGLIGENPipeline": {
407
+ "table-schema": [
408
+ "Prompt",
409
+ "GLIGEN-Phrases",
410
+ "GLIGEN-Boxes",
411
+ "GLIGEN-Inpaint-Image",
412
+ "Negative-Prompt",
413
+ "Generated-Image",
414
+ ],
415
+ "kwarg-logging": [
416
+ "prompt",
417
+ "gligen_phrases",
418
+ "gligen_boxes",
419
+ "gligen_inpaint_image",
420
+ "negative_prompt",
421
+ ],
422
+ "kwarg-actions": [None, None, None, wandb.Image, None],
423
+ },
424
+ "VersatileDiffusionTextToImagePipeline": {
425
+ "table-schema": [
426
+ "Prompt",
427
+ "Negative-Prompt",
428
+ "Generated-Image",
429
+ ],
430
+ "kwarg-logging": ["prompt", "negative_prompt"],
431
+ "kwarg-actions": [None, None],
432
+ },
433
+ "VersatileDiffusionImageVariationPipeline": {
434
+ "table-schema": [
435
+ "Source-Image",
436
+ "Negative-Prompt",
437
+ "Generated-Image",
438
+ ],
439
+ "kwarg-logging": ["image", "negative_prompt"],
440
+ "kwarg-actions": [wandb.Image, None],
441
+ },
442
+ "VersatileDiffusionDualGuidedPipeline": {
443
+ "table-schema": [
444
+ "Source-Image",
445
+ "Prompt",
446
+ "Negative-Prompt",
447
+ "Generated-Image",
448
+ ],
449
+ "kwarg-logging": ["image", "prompt", "negative_prompt"],
450
+ "kwarg-actions": [wandb.Image, None, None],
451
+ },
452
+ "LDMPipeline": {
453
+ "table-schema": [
454
+ "Batch-Size",
455
+ "Number-of-Inference-Steps",
456
+ "Generated-Image",
457
+ ],
458
+ "kwarg-logging": ["batch_size", "num_inference_steps"],
459
+ "kwarg-actions": [None, None],
460
+ },
461
+ "TextToVideoSDPipeline": {
462
+ "table-schema": [
463
+ "Prompt",
464
+ "Negative-Prompt",
465
+ "Number-of-Frames",
466
+ "Generated-Video",
467
+ ],
468
+ "kwarg-logging": ["prompt", "negative_prompt", "num_frames"],
469
+ "output-type": "video",
470
+ },
471
+ "TextToVideoZeroPipeline": {
472
+ "table-schema": [
473
+ "Prompt",
474
+ "Negative-Prompt",
475
+ "Number-of-Frames",
476
+ "Generated-Video",
477
+ ],
478
+ "kwarg-logging": ["prompt", "negative_prompt", "video_length"],
479
+ },
480
+ "AmusedPipeline": {
481
+ "table-schema": [
482
+ "Prompt",
483
+ "Guidance Scale",
484
+ "Generated-Image",
485
+ ],
486
+ "kwarg-logging": [
487
+ "prompt",
488
+ "guidance_scale",
489
+ ],
490
+ "kwarg-actions": [None, None],
491
+ },
492
+ "StableDiffusionXLControlNetPipeline": {
493
+ "table-schema": [
494
+ "Prompt-1",
495
+ "Prompt-2",
496
+ "Control-Image",
497
+ "Negative-Prompt-1",
498
+ "Negative-Prompt-2",
499
+ "Generated-Image",
500
+ ],
501
+ "kwarg-logging": [
502
+ "prompt",
503
+ "prompt_2",
504
+ "image",
505
+ "negative_prompt",
506
+ "negative_prompt_2",
507
+ ],
508
+ "kwarg-actions": [None, None, wandb.Image, None, None],
509
+ },
510
+ "StableDiffusionXLControlNetImg2ImgPipeline": {
511
+ "table-schema": [
512
+ "Prompt-1",
513
+ "Prompt-2",
514
+ "Input-Image",
515
+ "Control-Image",
516
+ "Negative-Prompt-1",
517
+ "Negative-Prompt-2",
518
+ "Generated-Image",
519
+ ],
520
+ "kwarg-logging": [
521
+ "prompt",
522
+ "prompt_2",
523
+ "image",
524
+ "control_image",
525
+ "negative_prompt",
526
+ "negative_prompt_2",
527
+ ],
528
+ "kwarg-actions": [None, None, wandb.Image, wandb.Image, None, None],
529
+ },
530
+ "Kandinsky3Pipeline": {
531
+ "table-schema": [
532
+ "Prompt",
533
+ "Negative-Prompt",
534
+ "Generated-Image",
535
+ ],
536
+ "kwarg-logging": [
537
+ "prompt",
538
+ "negative_prompt",
539
+ ],
540
+ "kwarg-actions": [None, None],
541
+ },
542
+ "Kandinsky3Img2ImgPipeline": {
543
+ "table-schema": [
544
+ "Prompt",
545
+ "Negative-Prompt",
546
+ "Input-Image",
547
+ "Generated-Image",
548
+ ],
549
+ "kwarg-logging": [
550
+ "prompt",
551
+ "negative_prompt",
552
+ "image",
553
+ ],
554
+ "kwarg-actions": [None, None, wandb.Image],
555
+ },
556
+ "StableDiffusionXLPipeline": {
557
+ "table-schema": [
558
+ "Prompt",
559
+ "Negative-Prompt",
560
+ "Prompt-2",
561
+ "Negative-Prompt-2",
562
+ "Generated-Image",
563
+ ],
564
+ "kwarg-logging": [
565
+ "prompt",
566
+ "negative_prompt",
567
+ "prompt_2",
568
+ "negative_prompt_2",
569
+ ],
570
+ "kwarg-actions": [None, None, None, None],
571
+ },
572
+ "StableDiffusionXLImg2ImgPipeline": {
573
+ "table-schema": [
574
+ "Prompt",
575
+ "Negative-Prompt",
576
+ "Prompt-2",
577
+ "Negative-Prompt-2",
578
+ "Input-Image",
579
+ "Generated-Image",
580
+ ],
581
+ "kwarg-logging": [
582
+ "prompt",
583
+ "negative_prompt",
584
+ "prompt_2",
585
+ "negative_prompt_2",
586
+ "image",
587
+ ],
588
+ "kwarg-actions": [None, None, None, None, wandb.Image],
589
+ },
590
+ }
591
+
592
+
593
+ class DiffusersMultiModalPipelineResolver:
594
+ """Resolver for request and responses from [HuggingFace Diffusers](https://huggingface.co/docs/diffusers/index) multi-modal Diffusion Pipelines, providing necessary data transformations, formatting, and logging.
595
+
596
+ This resolver is internally involved in the
597
+ `__call__` for `wandb.integration.diffusers.pipeline_resolver.DiffusersPipelineResolver`.
598
+ This is based on `wandb.sdk.integration_utils.auto_logging.RequestResponseResolver`.
599
+
600
+ Args:
601
+ pipeline_name: (str) The name of the Diffusion Pipeline.
602
+ """
603
+
604
+ def __init__(self, pipeline_name: str, pipeline_call_count: int) -> None:
605
+ self.pipeline_name = pipeline_name
606
+ self.pipeline_call_count = pipeline_call_count
607
+ columns = []
608
+ if pipeline_name in SUPPORTED_MULTIMODAL_PIPELINES:
609
+ columns += SUPPORTED_MULTIMODAL_PIPELINES[pipeline_name]["table-schema"]
610
+ else:
611
+ wandb.Error("Pipeline not supported for logging")
612
+ self.wandb_table = wandb.Table(columns=columns)
613
+
614
+ def __call__(
615
+ self,
616
+ args: Sequence[Any],
617
+ kwargs: Dict[str, Any],
618
+ response: Response,
619
+ start_time: float,
620
+ time_elapsed: float,
621
+ ) -> Any:
622
+ """Main call method for the `DiffusersPipelineResolver` class.
623
+
624
+ Args:
625
+ args: (Sequence[Any]) List of arguments.
626
+ kwargs: (Dict[str, Any]) Dictionary of keyword arguments.
627
+ response: (wandb.sdk.integration_utils.auto_logging.Response) The response from
628
+ the request.
629
+ start_time: (float) Time when request started.
630
+ time_elapsed: (float) Time elapsed for the request.
631
+
632
+ Returns:
633
+ Packed data as a dictionary for logging to wandb, None if an exception occurred.
634
+ """
635
+ try:
636
+ # Get the pipeline and the args
637
+ pipeline, args = args[0], args[1:]
638
+
639
+ # Update the Kwargs so that they can be logged easily
640
+ kwargs = get_updated_kwargs(pipeline, args, kwargs)
641
+
642
+ # Get the pipeline configs
643
+ pipeline_configs = dict(pipeline.config)
644
+ pipeline_configs["pipeline-name"] = self.pipeline_name
645
+
646
+ if "workflow" not in wandb.config:
647
+ wandb.config.update(
648
+ {
649
+ "workflow": [
650
+ {
651
+ "pipeline": pipeline_configs,
652
+ "params": kwargs,
653
+ "stage": f"Pipeline-Call-{self.pipeline_call_count}",
654
+ }
655
+ ]
656
+ }
657
+ )
658
+ else:
659
+ existing_workflow = wandb.config.workflow
660
+ updated_workflow = existing_workflow + [
661
+ {
662
+ "pipeline": pipeline_configs,
663
+ "params": kwargs,
664
+ "stage": f"Pipeline-Call-{self.pipeline_call_count}",
665
+ }
666
+ ]
667
+ wandb.config.update(
668
+ {"workflow": updated_workflow}, allow_val_change=True
669
+ )
670
+
671
+ # Return the WandB loggable dict
672
+ loggable_dict = self.prepare_loggable_dict(pipeline, response, kwargs)
673
+ return loggable_dict
674
+ except Exception as e:
675
+ logger.warning(e)
676
+ return None
677
+
678
+ def get_output_images(self, response: Response) -> List:
679
+ """Unpack the generated images, audio, video, etc. from the Diffusion Pipeline's response.
680
+
681
+ Args:
682
+ response: (wandb.sdk.integration_utils.auto_logging.Response) The response from
683
+ the request.
684
+
685
+ Returns:
686
+ List of generated images, audio, video, etc.
687
+ """
688
+ if "output-type" not in SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]:
689
+ return response.images
690
+ else:
691
+ if (
692
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
693
+ == "video"
694
+ ):
695
+ if self.pipeline_name in ["ShapEPipeline"]:
696
+ return response.images
697
+ return response.frames
698
+ elif (
699
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
700
+ == "audio"
701
+ ):
702
+ return response.audios
703
+
704
+ def log_media(self, image: Any, loggable_kwarg_chunks: List, idx: int) -> None:
705
+ """Log the generated images, audio, video, etc. from the Diffusion Pipeline's response along with an optional caption to a media panel in the run.
706
+
707
+ Args:
708
+ image: (Any) The generated images, audio, video, etc. from the Diffusion
709
+ Pipeline's response.
710
+ loggable_kwarg_chunks: (List) Loggable chunks of kwargs.
711
+ """
712
+ if "output-type" not in SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]:
713
+ try:
714
+ caption = ""
715
+ if self.pipeline_name in [
716
+ "StableDiffusionXLPipeline",
717
+ "StableDiffusionXLImg2ImgPipeline",
718
+ ]:
719
+ prompt_index = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
720
+ "kwarg-logging"
721
+ ].index("prompt")
722
+ prompt2_index = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
723
+ "kwarg-logging"
724
+ ].index("prompt_2")
725
+ caption = f"Prompt-1: {loggable_kwarg_chunks[prompt_index][idx]}\nPrompt-2: {loggable_kwarg_chunks[prompt2_index][idx]}"
726
+ else:
727
+ prompt_index = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
728
+ "kwarg-logging"
729
+ ].index("prompt")
730
+ caption = loggable_kwarg_chunks[prompt_index][idx]
731
+ except ValueError:
732
+ caption = None
733
+ wandb.log(
734
+ {
735
+ f"Generated-Image/Pipeline-Call-{self.pipeline_call_count}": wandb.Image(
736
+ image, caption=caption
737
+ )
738
+ }
739
+ )
740
+ else:
741
+ if (
742
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
743
+ == "video"
744
+ ):
745
+ try:
746
+ prompt_index = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
747
+ "kwarg-logging"
748
+ ].index("prompt")
749
+ caption = loggable_kwarg_chunks[prompt_index][idx]
750
+ except ValueError:
751
+ caption = None
752
+ wandb.log(
753
+ {
754
+ f"Generated-Video/Pipeline-Call-{self.pipeline_call_count}": wandb.Video(
755
+ postprocess_pils_to_np(image), fps=4, caption=caption
756
+ )
757
+ }
758
+ )
759
+ elif (
760
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
761
+ == "audio"
762
+ ):
763
+ try:
764
+ prompt_index = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
765
+ "kwarg-logging"
766
+ ].index("prompt")
767
+ caption = loggable_kwarg_chunks[prompt_index][idx]
768
+ except ValueError:
769
+ caption = None
770
+ wandb.log(
771
+ {
772
+ f"Generated-Audio/Pipeline-Call-{self.pipeline_call_count}": wandb.Audio(
773
+ image, sample_rate=16000, caption=caption
774
+ )
775
+ }
776
+ )
777
+
778
+ def add_data_to_table(
779
+ self, image: Any, loggable_kwarg_chunks: List, idx: int
780
+ ) -> None:
781
+ """Populate the row of the `wandb.Table`.
782
+
783
+ Args:
784
+ image: (Any) The generated images, audio, video, etc. from the Diffusion
785
+ Pipeline's response.
786
+ loggable_kwarg_chunks: (List) Loggable chunks of kwargs.
787
+ idx: (int) Chunk index.
788
+ """
789
+ table_row = []
790
+ kwarg_actions = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
791
+ "kwarg-actions"
792
+ ]
793
+ for column_idx, loggable_kwarg_chunk in enumerate(loggable_kwarg_chunks):
794
+ if kwarg_actions[column_idx] is None:
795
+ table_row.append(
796
+ loggable_kwarg_chunk[idx]
797
+ if loggable_kwarg_chunk[idx] is not None
798
+ else ""
799
+ )
800
+ else:
801
+ table_row.append(kwarg_actions[column_idx](loggable_kwarg_chunk[idx]))
802
+ if "output-type" not in SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]:
803
+ table_row.append(wandb.Image(image))
804
+ else:
805
+ if (
806
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
807
+ == "video"
808
+ ):
809
+ table_row.append(wandb.Video(postprocess_pils_to_np(image), fps=4))
810
+ elif (
811
+ SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name]["output-type"]
812
+ == "audio"
813
+ ):
814
+ table_row.append(wandb.Audio(image, sample_rate=16000))
815
+ self.wandb_table.add_data(*table_row)
816
+
817
+ def prepare_loggable_dict(
818
+ self, pipeline: Any, response: Response, kwargs: Dict[str, Any]
819
+ ) -> Dict[str, Any]:
820
+ """Prepare the loggable dictionary, which is the packed data as a dictionary for logging to wandb, None if an exception occurred.
821
+
822
+ Args:
823
+ pipeline: (Any) The Diffusion Pipeline.
824
+ response: (wandb.sdk.integration_utils.auto_logging.Response) The response from
825
+ the request.
826
+ kwargs: (Dict[str, Any]) Dictionary of keyword arguments.
827
+
828
+ Returns:
829
+ Packed data as a dictionary for logging to wandb, None if an exception occurred.
830
+ """
831
+ # Unpack the generated images, audio, video, etc. from the Diffusion Pipeline's response.
832
+ images = self.get_output_images(response)
833
+ if (
834
+ self.pipeline_name == "StableDiffusionXLPipeline"
835
+ and kwargs["output_type"] == "latent"
836
+ ):
837
+ images = decode_sdxl_t2i_latents(pipeline, response.images)
838
+
839
+ # Account for exception pipelines for text-to-video
840
+ if self.pipeline_name in ["TextToVideoSDPipeline", "TextToVideoZeroPipeline"]:
841
+ video = postprocess_np_arrays_for_video(
842
+ images, normalize=self.pipeline_name == "TextToVideoZeroPipeline"
843
+ )
844
+ wandb.log(
845
+ {
846
+ f"Generated-Video/Pipeline-Call-{self.pipeline_call_count}": wandb.Video(
847
+ video, fps=4, caption=kwargs["prompt"]
848
+ )
849
+ }
850
+ )
851
+ loggable_kwarg_ids = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
852
+ "kwarg-logging"
853
+ ]
854
+ table_row = [
855
+ kwargs[loggable_kwarg_ids[idx]]
856
+ for idx in range(len(loggable_kwarg_ids))
857
+ ]
858
+ table_row.append(wandb.Video(video, fps=4))
859
+ self.wandb_table.add_data(*table_row)
860
+ else:
861
+ loggable_kwarg_ids = SUPPORTED_MULTIMODAL_PIPELINES[self.pipeline_name][
862
+ "kwarg-logging"
863
+ ]
864
+ # chunkify loggable kwargs
865
+ loggable_kwarg_chunks = []
866
+ for loggable_kwarg_id in loggable_kwarg_ids:
867
+ loggable_kwarg_chunks.append(
868
+ kwargs[loggable_kwarg_id]
869
+ if isinstance(kwargs[loggable_kwarg_id], list)
870
+ else [kwargs[loggable_kwarg_id]]
871
+ )
872
+ # chunkify the generated media
873
+ images = chunkify(images, len(loggable_kwarg_chunks[0]))
874
+ for idx in range(len(loggable_kwarg_chunks[0])):
875
+ for image in images[idx]:
876
+ # Log media to media panel
877
+ self.log_media(image, loggable_kwarg_chunks, idx)
878
+ # Populate the row of the wandb_table
879
+ self.add_data_to_table(image, loggable_kwarg_chunks, idx)
880
+ return {
881
+ f"Result-Table/Pipeline-Call-{self.pipeline_call_count}": self.wandb_table
882
+ }
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/diffusers/resolvers/utils.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
3
+
4
+ import wandb
5
+ from wandb.util import get_module
6
+
7
+ if TYPE_CHECKING:
8
+ np_array = get_module("numpy.array")
9
+ torch_float_tensor = get_module("torch.FloatTensor")
10
+
11
+
12
+ def chunkify(input_list, chunk_size) -> List:
13
+ chunk_size = max(1, chunk_size)
14
+ return [
15
+ input_list[i : i + chunk_size] for i in range(0, len(input_list), chunk_size)
16
+ ]
17
+
18
+
19
+ def get_updated_kwargs(
20
+ pipeline: Any, args: Sequence[Any], kwargs: Dict[str, Any]
21
+ ) -> Dict[str, Any]:
22
+ pipeline_call_parameters = list(
23
+ inspect.signature(pipeline.__call__).parameters.items()
24
+ )
25
+ for idx, arg in enumerate(args):
26
+ kwargs[pipeline_call_parameters[idx][0]] = arg
27
+ for pipeline_parameter in pipeline_call_parameters:
28
+ if pipeline_parameter[0] not in kwargs:
29
+ kwargs[pipeline_parameter[0]] = pipeline_parameter[1].default
30
+ if "generator" in kwargs:
31
+ generator = kwargs["generator"]
32
+ kwargs["generator"] = (
33
+ {
34
+ "seed": generator.initial_seed(),
35
+ "device": generator.device,
36
+ "random_state": generator.get_state().cpu().numpy().tolist(),
37
+ }
38
+ if generator is not None
39
+ else None
40
+ )
41
+ if "ip_adapter_image" in kwargs:
42
+ if kwargs["ip_adapter_image"] is not None:
43
+ wandb.log({"IP-Adapter-Image": wandb.Image(kwargs["ip_adapter_image"])})
44
+ return kwargs
45
+
46
+
47
+ def postprocess_pils_to_np(image: List) -> "np_array":
48
+ np = get_module(
49
+ "numpy",
50
+ required="Please ensure NumPy is installed. You can run `pip install numpy` to install it.",
51
+ )
52
+ return np.stack(
53
+ [np.transpose(np.array(img).astype("uint8"), axes=(2, 0, 1)) for img in image],
54
+ axis=0,
55
+ )
56
+
57
+
58
+ def postprocess_np_arrays_for_video(
59
+ images: List["np_array"], normalize: Optional[bool] = False
60
+ ) -> "np_array":
61
+ np = get_module(
62
+ "numpy",
63
+ required="Please ensure NumPy is installed. You can run `pip install numpy` to install it.",
64
+ )
65
+ images = [(img * 255).astype("uint8") for img in images] if normalize else images
66
+ return np.transpose(np.stack((images), axis=0), axes=(0, 3, 1, 2))
67
+
68
+
69
+ def decode_sdxl_t2i_latents(pipeline: Any, latents: "torch_float_tensor") -> List:
70
+ """Decode latents generated by [`diffusers.StableDiffusionXLPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#stable-diffusion-xl).
71
+
72
+ Args:
73
+ pipeline: (diffusers.DiffusionPipeline) The Diffusion Pipeline from
74
+ [`diffusers`](https://huggingface.co/docs/diffusers).
75
+ latents (torch.FloatTensor): The generated latents.
76
+
77
+ Returns:
78
+ List of `PIL` images corresponding to the generated latents.
79
+ """
80
+ torch = get_module(
81
+ "torch",
82
+ required="Please ensure PyTorch is installed. You can check out https://pytorch.org/get-started/locally/#start-locally for installation instructions.",
83
+ )
84
+ with torch.no_grad():
85
+ needs_upcasting = (
86
+ pipeline.vae.dtype == torch.float16 and pipeline.vae.config.force_upcast
87
+ )
88
+ if needs_upcasting:
89
+ pipeline.upcast_vae()
90
+ latents = latents.to(
91
+ next(iter(pipeline.vae.post_quant_conv.parameters())).dtype
92
+ )
93
+ images = pipeline.vae.decode(
94
+ latents / pipeline.vae.config.scaling_factor, return_dict=False
95
+ )[0]
96
+ if needs_upcasting:
97
+ pipeline.vae.to(dtype=torch.float16)
98
+ if pipeline.watermark is not None:
99
+ images = pipeline.watermark.apply_watermark(images)
100
+ images = pipeline.image_processor.postprocess(images, output_type="pil")
101
+ pipeline.maybe_free_model_hooks()
102
+ return images
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tools for integrating `wandb` with [`Keras`](https://keras.io/)."""
2
+
3
+ __all__ = (
4
+ "WandbCallback",
5
+ "WandbMetricsLogger",
6
+ "WandbModelCheckpoint",
7
+ "WandbEvalCallback",
8
+ )
9
+
10
+ from .callbacks import WandbEvalCallback, WandbMetricsLogger, WandbModelCheckpoint
11
+ from .keras import WandbCallback # TODO: legacy callback to be deprecated
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (457 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/__pycache__/keras.cpython-310.pyc ADDED
Binary file (33.4 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
"""Keras callbacks for logging metrics, model checkpoints, and evaluation tables to W&B."""

__all__ = ("WandbMetricsLogger", "WandbModelCheckpoint", "WandbEvalCallback")

from .metrics_logger import WandbMetricsLogger
from .model_checkpoint import WandbModelCheckpoint
from .tables_builder import WandbEvalCallback
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (401 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/metrics_logger.cpython-310.pyc ADDED
Binary file (4.94 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/model_checkpoint.cpython-310.pyc ADDED
Binary file (7.48 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/__pycache__/tables_builder.cpython-310.pyc ADDED
Binary file (9.2 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/metrics_logger.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Literal, Optional, Union
2
+
3
+ import tensorflow as tf # type: ignore
4
+ from tensorflow.keras import callbacks
5
+
6
+ import wandb
7
+ from wandb.integration.keras.keras import patch_tf_keras
8
+ from wandb.sdk.lib import telemetry
9
+
10
+ LogStrategy = Literal["epoch", "batch"]
11
+
12
+
13
+ patch_tf_keras()
14
+
15
+
16
class WandbMetricsLogger(callbacks.Callback):
    """Logger that sends system metrics to W&B.

    `WandbMetricsLogger` automatically logs the `logs` dictionary that callback methods
    take as argument to wandb.

    This callback automatically logs the following to a W&B run page:
    * system (CPU/GPU/TPU) metrics,
    * train and validation metrics defined in `model.compile`,
    * learning rate (both for a fixed value or a learning rate scheduler)

    Notes:
    If you resume training by passing `initial_epoch` to `model.fit` and you are using a
    learning rate scheduler, make sure to pass `initial_global_step` to
    `WandbMetricsLogger`. The `initial_global_step` is `step_size * initial_step`, where
    `step_size` is number of training steps per epoch. `step_size` can be calculated as
    the product of the cardinality of the training dataset and the batch size.

    Args:
        log_freq: ("epoch", "batch", or int) if "epoch", logs metrics
            at the end of each epoch. If "batch", logs metrics at the end
            of each batch. If an integer, logs metrics at the end of that
            many batches. Defaults to "epoch".
        initial_global_step: (int) Use this argument to correctly log the
            learning rate when you resume training from some `initial_epoch`,
            and a learning rate scheduler is used. This can be computed as
            `step_size * initial_step`. Defaults to 0.

    Raises:
        wandb.Error: If `wandb.init()` has not been called before constructing
            this callback.
    """

    def __init__(
        self,
        log_freq: Union[LogStrategy, int] = "epoch",
        initial_global_step: int = 0,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)

        if wandb.run is None:
            raise wandb.Error(
                "You must call `wandb.init()` before WandbMetricsLogger()"
            )

        with telemetry.context(run=wandb.run) as tel:
            tel.feature.keras_metrics_logger = True

        # "batch" is shorthand for logging after every single batch.
        if log_freq == "batch":
            log_freq = 1

        self.logging_batch_wise = isinstance(log_freq, int)
        # `log_freq` is only meaningful for batch-wise logging.
        self.log_freq: Any = log_freq if self.logging_batch_wise else None
        self.global_batch = 0
        self.global_step = initial_global_step

        if self.logging_batch_wise:
            # define custom x-axis for batch logging.
            wandb.define_metric("batch/batch_step")
            # set all batch metrics to be logged against batch_step.
            wandb.define_metric("batch/*", step_metric="batch/batch_step")
        else:
            # define custom x-axis for epoch-wise logging.
            wandb.define_metric("epoch/epoch")
            # set all epoch-wise metrics to be logged against epoch.
            wandb.define_metric("epoch/*", step_metric="epoch/epoch")

    def _get_lr(self) -> Union[float, None]:
        """Return the current learning rate, or None if it cannot be determined.

        Handles both a fixed scalar learning rate and a learning-rate
        schedule (which is evaluated at the current global step).
        """
        if isinstance(
            self.model.optimizer.learning_rate,
            (tf.Variable, tf.Tensor),
        ) or (
            hasattr(self.model.optimizer.learning_rate, "shape")
            and self.model.optimizer.learning_rate.shape == ()
        ):
            # Fixed learning rate stored as a scalar variable/tensor.
            return float(self.model.optimizer.learning_rate.numpy().item())
        try:
            # Learning-rate schedule: call it with the current global step.
            return float(
                self.model.optimizer.learning_rate(step=self.global_step).numpy().item()
            )
        except Exception as e:
            wandb.termerror(f"Unable to log learning rate: {e}", repeat=False)
            return None

    def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, Any]] = None) -> None:
        """Called at the end of an epoch; logs metrics under the `epoch/` namespace."""
        logs = dict() if logs is None else {f"epoch/{k}": v for k, v in logs.items()}

        logs["epoch/epoch"] = epoch

        lr = self._get_lr()
        if lr is not None:
            logs["epoch/learning_rate"] = lr

        wandb.log(logs)

    def on_batch_end(self, batch: int, logs: Optional[Dict[str, Any]] = None) -> None:
        """An alias for `on_train_batch_end` for backwards compatibility."""
        # Fix: this docstring previously appeared *after* the first statement,
        # making it a no-op string expression rather than the method docstring.
        self.global_step += 1
        if self.logging_batch_wise and batch % self.log_freq == 0:
            logs = {f"batch/{k}": v for k, v in logs.items()} if logs else {}
            logs["batch/batch_step"] = self.global_batch

            lr = self._get_lr()
            if lr is not None:
                logs["batch/learning_rate"] = lr

            wandb.log(logs)

            # Advance the custom x-axis only when a log actually happened.
            self.global_batch += self.log_freq

    def on_train_batch_end(
        self, batch: int, logs: Optional[Dict[str, Any]] = None
    ) -> None:
        """Called at the end of a training batch in `fit` methods."""
        self.on_batch_end(batch, logs if logs else {})
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/model_checkpoint.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import string
3
+ from typing import Any, Dict, List, Literal, Optional, Union
4
+
5
+ import tensorflow as tf # type: ignore
6
+ from tensorflow.keras import callbacks # type: ignore
7
+
8
+ import wandb
9
+ from wandb.sdk.lib import telemetry
10
+ from wandb.sdk.lib.paths import StrPath
11
+
12
+ from ..keras import patch_tf_keras
13
+
14
+ Mode = Literal["auto", "min", "max"]
15
+ SaveStrategy = Literal["epoch"]
16
+
17
+ patch_tf_keras()
18
+
19
+
20
class WandbModelCheckpoint(callbacks.ModelCheckpoint):
    """A checkpoint that periodically saves a Keras model or model weights.

    Saved weights are uploaded to W&B as a `wandb.Artifact`.

    Since this callback is subclassed from `tf.keras.callbacks.ModelCheckpoint`, the
    checkpointing logic is taken care of by the parent callback. You can learn more
    here: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint

    This callback is to be used in conjunction with training using `model.fit()` to save
    a model or weights (in a checkpoint file) at some interval. The model checkpoints
    will be logged as W&B Artifacts. You can learn more here:
    https://docs.wandb.ai/guides/artifacts

    This callback provides the following features:
    - Save the model that has achieved "best performance" based on "monitor".
    - Save the model at the end of every epoch regardless of the performance.
    - Save the model at the end of epoch or after a fixed number of training batches.
    - Save only model weights, or save the whole model.
    - Save the model either in SavedModel format or in `.h5` format.

    Args:
        filepath: (Union[str, os.PathLike]) path to save the model file. `filepath`
            can contain named formatting options, which will be filled by the value
            of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example:
            if `filepath` is `model-{epoch:02d}-{val_loss:.2f}`, then the
            model checkpoints will be saved with the epoch number and the
            validation loss in the filename.
        monitor: (str) The metric name to monitor. Default to "val_loss".
        verbose: (int) Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
            displays messages when the callback takes an action.
        save_best_only: (bool) if `save_best_only=True`, it only saves when the model
            is considered the "best" and the latest best model according to the
            quantity monitored will not be overwritten. If `filepath` doesn't contain
            formatting options like `{epoch}` then `filepath` will be overwritten by
            each new better model locally. The model logged as an artifact will still be
            associated with the correct `monitor`. Artifacts will be uploaded
            continuously and versioned separately as a new best model is found.
        save_weights_only: (bool) if True, then only the model's weights will be saved.
        mode: (Mode) one of {'auto', 'min', 'max'}. For `val_acc`, this should be `max`,
            for `val_loss` this should be `min`, etc.
        save_freq: (Union[SaveStrategy, int]) `epoch` or integer. When using `'epoch'`,
            the callback saves the model after each epoch. When using an integer, the
            callback saves the model at end of this many batches.
            Note that when monitoring validation metrics such as `val_acc` or `val_loss`,
            save_freq must be set to "epoch" as those metrics are only available at the
            end of an epoch.
        initial_value_threshold: (Optional[float]) Floating point initial "best" value of the metric
            to be monitored.

    Raises:
        wandb.Error: If `wandb.init()` has not been called before constructing
            this callback.
    """

    def __init__(
        self,
        filepath: StrPath,
        monitor: str = "val_loss",
        verbose: int = 0,
        save_best_only: bool = False,
        save_weights_only: bool = False,
        mode: Mode = "auto",
        save_freq: Union[SaveStrategy, int] = "epoch",
        initial_value_threshold: Optional[float] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            filepath=filepath,
            monitor=monitor,
            verbose=verbose,
            save_best_only=save_best_only,
            save_weights_only=save_weights_only,
            mode=mode,
            save_freq=save_freq,
            initial_value_threshold=initial_value_threshold,
            **kwargs,
        )
        if wandb.run is None:
            raise wandb.Error(
                "You must call `wandb.init()` before `WandbModelCheckpoint()`"
            )
        with telemetry.context(run=wandb.run) as tel:
            tel.feature.keras_model_checkpoint = True

        self.save_weights_only = save_weights_only

        # User-friendly warning when trying to save the best model.
        if self.save_best_only:
            self._check_filepath()

        # Lazily resolved by the `is_old_tf_keras_version` property.
        self._is_old_tf_keras_version: Optional[bool] = None

    def on_train_batch_end(
        self, batch: int, logs: Optional[Dict[str, float]] = None
    ) -> None:
        """Save a checkpoint (if due this batch) and log it as a W&B Artifact.

        Relies on the parent class's `_should_save_on_batch`, `_save_model`,
        and `_get_file_path` internals, whose signatures changed across
        tf.keras versions — hence the version branch below.
        """
        if self._should_save_on_batch(batch):
            if self.is_old_tf_keras_version:
                # Save the model and get filepath
                self._save_model(epoch=self._current_epoch, logs=logs)
                filepath = self._get_file_path(epoch=self._current_epoch, logs=logs)
            else:
                # Save the model and get filepath
                self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
                filepath = self._get_file_path(
                    epoch=self._current_epoch, batch=batch, logs=logs
                )
            # Log the model as artifact
            aliases = ["latest", f"epoch_{self._current_epoch}_batch_{batch}"]
            self._log_ckpt_as_artifact(filepath, aliases=aliases)

    def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
        """Let the parent save the checkpoint, then log it as a W&B Artifact."""
        super().on_epoch_end(epoch, logs)
        # Check if model checkpoint is created at the end of epoch.
        if self.save_freq == "epoch":
            # Get filepath where the model checkpoint is saved.
            if self.is_old_tf_keras_version:
                filepath = self._get_file_path(epoch=epoch, logs=logs)
            else:
                filepath = self._get_file_path(epoch=epoch, batch=None, logs=logs)
            # Log the model as artifact
            aliases = ["latest", f"epoch_{epoch}"]
            self._log_ckpt_as_artifact(filepath, aliases=aliases)

    def _log_ckpt_as_artifact(
        self, filepath: str, aliases: Optional[List[str]] = None
    ) -> None:
        """Log model checkpoint as W&B Artifact.

        Handles both single-file (`.h5`) and directory (SavedModel)
        checkpoints; silently skips epochs where no checkpoint was written.
        """
        try:
            assert wandb.run is not None
            model_checkpoint_artifact = wandb.Artifact(
                f"run_{wandb.run.id}_model", type="model"
            )
            if os.path.isfile(filepath):
                model_checkpoint_artifact.add_file(filepath)
            elif os.path.isdir(filepath):
                model_checkpoint_artifact.add_dir(filepath)
            else:
                raise FileNotFoundError(f"No such file or directory {filepath}")
            wandb.log_artifact(model_checkpoint_artifact, aliases=aliases or [])
        except ValueError:
            # This error occurs when `save_best_only=True` and the model
            # checkpoint is not saved for that epoch/batch. Since TF/Keras
            # is giving friendly log, we can avoid clustering the stdout.
            pass

    def _check_filepath(self) -> None:
        """Warn if `filepath` has no format placeholders (e.g. `{epoch:02d}`).

        Without placeholders, `save_best_only` overwrites the same local file
        for every new best model, which can confuse artifact interpretation.
        """
        placeholders = []
        # `string.Formatter.parse` yields (literal, field_name, spec, conversion);
        # a non-None field_name means a `{...}` placeholder is present.
        for tup in string.Formatter().parse(self.filepath):
            if tup[1] is not None:
                placeholders.append(tup[1])
        if len(placeholders) == 0:
            wandb.termwarn(
                "When using `save_best_only`, ensure that the `filepath` argument "
                "contains formatting placeholders like `{epoch:02d}` or `{batch:02d}`. "
                "This ensures correct interpretation of the logged artifacts.",
                repeat=False,
            )

    @property
    def is_old_tf_keras_version(self) -> Optional[bool]:
        """Whether the installed tf.keras predates 2.6.0 (cached after first check)."""
        if self._is_old_tf_keras_version is None:
            from wandb.util import parse_version

            try:
                if parse_version(tf.keras.__version__) < parse_version("2.6.0"):
                    self._is_old_tf_keras_version = True
                else:
                    self._is_old_tf_keras_version = False
            except AttributeError:
                # `tf.keras.__version__` may be missing; assume a modern build.
                self._is_old_tf_keras_version = False

        return self._is_old_tf_keras_version
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/callbacks/tables_builder.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from typing import Any, Dict, List, Optional
3
+
4
+ from tensorflow.keras.callbacks import Callback # type: ignore
5
+
6
+ import wandb
7
+ from wandb.sdk.lib import telemetry
8
+
9
+
10
class WandbEvalCallback(Callback, abc.ABC):
    """Abstract base class to build Keras callbacks for model prediction visualization.

    You can build callbacks for visualizing model predictions `on_epoch_end`
    that can be passed to `model.fit()` for classification, object detection,
    segmentation, etc. tasks.

    To use this, inherit from this base callback class and implement the
    `add_ground_truth` and `add_model_prediction` methods.

    The base class will take care of the following:
    - Initialize `data_table` for logging the ground truth and
      `pred_table` for predictions.
    - The data uploaded to `data_table` is used as a reference for the
      `pred_table`. This is to reduce the memory footprint. The `data_table_ref`
      is a list that can be used to access the referenced data.
      Check out the example below to see how it's done.
    - Log the tables to W&B as W&B Artifacts.
    - Each new `pred_table` is logged as a new version with aliases.

    Example:
        ```python
        class WandbClfEvalCallback(WandbEvalCallback):
            def __init__(self, validation_data, data_table_columns, pred_table_columns):
                super().__init__(data_table_columns, pred_table_columns)

                self.x = validation_data[0]
                self.y = validation_data[1]

            def add_ground_truth(self):
                for idx, (image, label) in enumerate(zip(self.x, self.y)):
                    self.data_table.add_data(idx, wandb.Image(image), label)

            def add_model_predictions(self, epoch):
                preds = self.model.predict(self.x, verbose=0)
                preds = tf.argmax(preds, axis=-1)

                data_table_ref = self.data_table_ref
                table_idxs = data_table_ref.get_index()

                for idx in table_idxs:
                    pred = preds[idx]
                    self.pred_table.add_data(
                        epoch,
                        data_table_ref.data[idx][0],
                        data_table_ref.data[idx][1],
                        data_table_ref.data[idx][2],
                        pred,
                    )


        model.fit(
            x,
            y,
            epochs=2,
            validation_data=(x, y),
            callbacks=[
                WandbClfEvalCallback(
                    validation_data=(x, y),
                    data_table_columns=["idx", "image", "label"],
                    pred_table_columns=["epoch", "idx", "image", "label", "pred"],
                )
            ],
        )
        ```

    To have more fine-grained control, you can override the `on_train_begin` and
    `on_epoch_end` methods. If you want to log the samples after N batches, you
    can implement `on_train_batch_end` method.
    """

    def __init__(
        self,
        data_table_columns: List[str],
        pred_table_columns: List[str],
        *args: Any,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)

        if wandb.run is None:
            raise wandb.Error(
                "You must call `wandb.init()` first before using this callback."
            )

        with telemetry.context(run=wandb.run) as tel:
            tel.feature.keras_wandb_eval_callback = True

        self.data_table_columns = data_table_columns
        self.pred_table_columns = pred_table_columns

    def on_train_begin(self, logs: Optional[Dict[str, float]] = None) -> None:
        """Build and upload the ground-truth table once, before training starts."""
        # Initialize the data_table
        self.init_data_table(column_names=self.data_table_columns)
        # Log the ground truth data
        self.add_ground_truth(logs)
        # Log the data_table as W&B Artifacts
        self.log_data_table()

    def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
        """Build and upload a fresh prediction table at the end of each epoch."""
        # Initialize the pred_table
        self.init_pred_table(column_names=self.pred_table_columns)
        # Log the model prediction
        self.add_model_predictions(epoch, logs)
        # Log the pred_table as W&B Artifacts
        self.log_pred_table()

    @abc.abstractmethod
    def add_ground_truth(self, logs: Optional[Dict[str, float]] = None) -> None:
        """Add ground truth data to `data_table`.

        Use this method to write the logic for adding validation/training data to
        `data_table` initialized using `init_data_table` method.

        Example:
            ```python
            for idx, data in enumerate(dataloader):
                self.data_table.add_data(idx, data)
            ```
        This method is called once `on_train_begin` or equivalent hook.
        """
        raise NotImplementedError(f"{self.__class__.__name__}.add_ground_truth")

    @abc.abstractmethod
    def add_model_predictions(
        self, epoch: int, logs: Optional[Dict[str, float]] = None
    ) -> None:
        """Add a prediction from a model to `pred_table`.

        Use this method to write the logic for adding model prediction for validation/
        training data to `pred_table` initialized using `init_pred_table` method.

        Example:
            ```python
            # Assuming the dataloader is not shuffling the samples.
            for idx, data in enumerate(dataloader):
                preds = model.predict(data)
                self.pred_table.add_data(
                    self.data_table_ref.data[idx][0],
                    self.data_table_ref.data[idx][1],
                    preds,
                )
            ```
        This method is called `on_epoch_end` or equivalent hook.
        """
        raise NotImplementedError(f"{self.__class__.__name__}.add_model_predictions")

    def init_data_table(self, column_names: List[str]) -> None:
        """Initialize the W&B Tables for validation data.

        Call this method `on_train_begin` or equivalent hook. This is followed by adding
        data to the table row or column wise.

        Args:
            column_names: (list) Column names for W&B Tables.
        """
        self.data_table = wandb.Table(columns=column_names, allow_mixed_types=True)

    def init_pred_table(self, column_names: List[str]) -> None:
        """Initialize the W&B Tables for model evaluation.

        Call this method `on_epoch_end` or equivalent hook. This is followed by adding
        data to the table row or column wise.

        Args:
            column_names: (list) Column names for W&B Tables.
        """
        self.pred_table = wandb.Table(columns=column_names)

    def log_data_table(
        self, name: str = "val", type: str = "dataset", table_name: str = "val_data"
    ) -> None:
        """Log the `data_table` as W&B artifact and call `use_artifact` on it.

        This lets the evaluation table use the reference of already uploaded data
        (images, text, scalar, etc.) without re-uploading.

        Args:
            name: (str) A human-readable name for this artifact, which is how you can
                identify this artifact in the UI or reference it in use_artifact calls.
                (default is 'val')
            type: (str) The type of the artifact, which is used to organize and
                differentiate artifacts. (default is 'dataset')
            table_name: (str) The name of the table as will be displayed in the UI.
                (default is 'val_data').
        """
        data_artifact = wandb.Artifact(name, type=type)
        data_artifact.add(self.data_table, table_name)

        # Calling `use_artifact` uploads the data to W&B.
        assert wandb.run is not None
        wandb.run.use_artifact(data_artifact)
        # Block until the upload finishes so the reference below is valid.
        data_artifact.wait()

        # We get the reference table.
        self.data_table_ref = data_artifact.get(table_name)

    def log_pred_table(
        self,
        type: str = "evaluation",
        table_name: str = "eval_data",
        aliases: Optional[List[str]] = None,
    ) -> None:
        """Log the W&B Tables for model evaluation.

        The table will be logged multiple times creating new version. Use this
        to compare models at different intervals interactively.

        Args:
            type: (str) The type of the artifact, which is used to organize and
                differentiate artifacts. (default is 'evaluation')
            table_name: (str) The name of the table as will be displayed in the UI.
                (default is 'eval_data')
            aliases: (List[str]) List of aliases for the prediction table.
        """
        assert wandb.run is not None
        pred_artifact = wandb.Artifact(f"run_{wandb.run.id}_pred", type=type)
        pred_artifact.add(self.pred_table, table_name)
        wandb.run.log_artifact(pred_artifact, aliases=aliases or ["latest"])
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/keras/keras.py ADDED
@@ -0,0 +1,1091 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """keras init."""
2
+
3
+ import logging
4
+ import operator
5
+ import os
6
+ import shutil
7
+ import sys
8
+ from itertools import chain
9
+
10
+ import numpy as np
11
+ import tensorflow as tf
12
+ import tensorflow.keras.backend as K # noqa: N812
13
+
14
+ import wandb
15
+ from wandb.sdk.integration_utils.data_logging import ValidationDataLogger
16
+ from wandb.sdk.lib.deprecate import Deprecated, deprecate
17
+ from wandb.util import add_import_hook
18
+
19
+
20
+ def _check_keras_version():
21
+ from keras import __version__ as keras_version
22
+
23
+ from wandb.util import parse_version
24
+
25
+ if parse_version(keras_version) < parse_version("2.4.0"):
26
+ wandb.termwarn(
27
+ f"Keras version {keras_version} is not fully supported. Required keras >= 2.4.0"
28
+ )
29
+
30
+
31
+ def _can_compute_flops() -> bool:
32
+ """FLOPS computation is restricted to TF 2.x as it requires tf.compat.v1."""
33
+ from wandb.util import parse_version
34
+
35
+ if parse_version(tf.__version__) >= parse_version("2.0.0"):
36
+ return True
37
+
38
+ return False
39
+
40
+
41
+ if "keras" in sys.modules:
42
+ _check_keras_version()
43
+ else:
44
+ add_import_hook("keras", _check_keras_version)
45
+
46
+
47
+ logger = logging.getLogger(__name__)
48
+
49
+
50
+ def is_dataset(data):
51
+ dataset_ops = wandb.util.get_module("tensorflow.python.data.ops.dataset_ops")
52
+ if dataset_ops and hasattr(dataset_ops, "DatasetV2"):
53
+ dataset_types = (dataset_ops.DatasetV2,)
54
+ if hasattr(dataset_ops, "DatasetV1"):
55
+ dataset_types = dataset_types + (dataset_ops.DatasetV1,)
56
+ return isinstance(data, dataset_types)
57
+ else:
58
+ return False
59
+
60
+
61
+ def is_generator_like(data):
62
+ # Checks if data is a generator, Sequence, or Iterator.
63
+
64
+ types = (tf.keras.utils.Sequence,)
65
+ iterator_ops = wandb.util.get_module("tensorflow.python.data.ops.iterator_ops")
66
+ if iterator_ops:
67
+ types = types + (iterator_ops.Iterator,)
68
+ # EagerIterator was in tensorflow < 2
69
+ if hasattr(iterator_ops, "EagerIterator"):
70
+ types = types + (iterator_ops.EagerIterator,)
71
+ elif hasattr(iterator_ops, "IteratorV2"):
72
+ types = types + (iterator_ops.IteratorV2,)
73
+ return hasattr(data, "next") or hasattr(data, "__next__") or isinstance(data, types)
74
+
75
+
76
+ def patch_tf_keras(): # noqa: C901
77
+ from tensorflow.python.eager import context
78
+
79
+ from wandb.util import parse_version
80
+
81
+ if (
82
+ parse_version("2.6.0")
83
+ <= parse_version(tf.__version__)
84
+ < parse_version("2.13.0")
85
+ ):
86
+ keras_engine = "keras.engine"
87
+ try:
88
+ from keras.engine import training
89
+ from keras.engine import training_arrays_v1 as training_arrays
90
+ from keras.engine import training_generator_v1 as training_generator
91
+ except (ImportError, AttributeError):
92
+ wandb.termerror("Unable to patch Tensorflow/Keras")
93
+ logger.exception("exception while trying to patch_tf_keras")
94
+ return
95
+ else:
96
+ keras_engine = "tensorflow.python.keras.engine"
97
+
98
+ from tensorflow.python.keras.engine import training
99
+
100
+ try:
101
+ from tensorflow.python.keras.engine import (
102
+ training_arrays_v1 as training_arrays,
103
+ )
104
+ from tensorflow.python.keras.engine import (
105
+ training_generator_v1 as training_generator,
106
+ )
107
+ except (ImportError, AttributeError):
108
+ try:
109
+ from tensorflow.python.keras.engine import (
110
+ training_arrays,
111
+ training_generator,
112
+ )
113
+ except (ImportError, AttributeError):
114
+ wandb.termerror("Unable to patch Tensorflow/Keras")
115
+ logger.exception("exception while trying to patch_tf_keras")
116
+ return
117
+
118
+ # Tensorflow 2.1
119
+ training_v2_1 = wandb.util.get_module("tensorflow.python.keras.engine.training_v2")
120
+ # Tensorflow 2.2
121
+ training_v2_2 = wandb.util.get_module(f"{keras_engine}.training_v1")
122
+
123
+ if training_v2_1:
124
+ old_v2 = training_v2_1.Loop.fit
125
+ elif training_v2_2:
126
+ old_v2 = training.Model.fit
127
+
128
+ old_arrays = training_arrays.fit_loop
129
+ old_generator = training_generator.fit_generator
130
+
131
+ def set_wandb_attrs(cbk, val_data):
132
+ if isinstance(cbk, WandbCallback):
133
+ if is_generator_like(val_data):
134
+ cbk.generator = val_data
135
+ elif is_dataset(val_data):
136
+ if context.executing_eagerly():
137
+ cbk.generator = iter(val_data)
138
+ else:
139
+ wandb.termwarn(
140
+ "Found a validation dataset in graph mode, can't patch Keras."
141
+ )
142
+ elif isinstance(val_data, tuple) and isinstance(val_data[0], tf.Tensor):
143
+ # Graph mode dataset generator
144
+ def gen():
145
+ while True:
146
+ yield K.get_session().run(val_data)
147
+
148
+ cbk.generator = gen()
149
+ else:
150
+ cbk.validation_data = val_data
151
+
152
+ def new_arrays(*args, **kwargs):
153
+ cbks = kwargs.get("callbacks", [])
154
+ val_inputs = kwargs.get("val_inputs")
155
+ val_targets = kwargs.get("val_targets")
156
+ # TODO: these could be generators, why index 0?
157
+ if val_inputs and val_targets:
158
+ for cbk in cbks:
159
+ set_wandb_attrs(cbk, (val_inputs[0], val_targets[0]))
160
+ return old_arrays(*args, **kwargs)
161
+
162
+ def new_generator(*args, **kwargs):
163
+ cbks = kwargs.get("callbacks", [])
164
+ val_data = kwargs.get("validation_data")
165
+ if val_data:
166
+ for cbk in cbks:
167
+ set_wandb_attrs(cbk, val_data)
168
+ return old_generator(*args, **kwargs)
169
+
170
+ def new_v2(*args, **kwargs):
171
+ cbks = kwargs.get("callbacks", [])
172
+ val_data = kwargs.get("validation_data")
173
+ if val_data:
174
+ for cbk in cbks:
175
+ set_wandb_attrs(cbk, val_data)
176
+ return old_v2(*args, **kwargs)
177
+
178
+ training_arrays.orig_fit_loop = old_arrays
179
+ training_arrays.fit_loop = new_arrays
180
+ training_generator.orig_fit_generator = old_generator
181
+ training_generator.fit_generator = new_generator
182
+ wandb.patched["keras"].append([f"{keras_engine}.training_arrays", "fit_loop"])
183
+ wandb.patched["keras"].append(
184
+ [f"{keras_engine}.training_generator", "fit_generator"]
185
+ )
186
+
187
+ if training_v2_1:
188
+ training_v2_1.Loop.fit = new_v2
189
+ wandb.patched["keras"].append(
190
+ ["tensorflow.python.keras.engine.training_v2.Loop", "fit"]
191
+ )
192
+ elif training_v2_2:
193
+ training.Model.fit = new_v2
194
+ wandb.patched["keras"].append([f"{keras_engine}.training.Model", "fit"])
195
+
196
+
197
+ def _array_has_dtype(array):
198
+ return hasattr(array, "dtype")
199
+
200
+
201
+ def _update_if_numeric(metrics, key, values):
202
+ if not _array_has_dtype(values):
203
+ _warn_not_logging(key)
204
+ return
205
+
206
+ if not is_numeric_array(values):
207
+ _warn_not_logging_non_numeric(key)
208
+ return
209
+
210
+ metrics[key] = wandb.Histogram(values)
211
+
212
+
213
+ def is_numeric_array(array):
214
+ return np.issubdtype(array.dtype, np.number)
215
+
216
+
217
+ def _warn_not_logging_non_numeric(name):
218
+ wandb.termwarn(
219
+ f"Non-numeric values found in layer: {name}, not logging this layer",
220
+ repeat=False,
221
+ )
222
+
223
+
224
+ def _warn_not_logging(name):
225
+ wandb.termwarn(
226
+ f"Layer {name} has undetermined datatype not logging this layer",
227
+ repeat=False,
228
+ )
229
+
230
+
231
+ tf_logger = tf.get_logger()
232
+
233
+ patch_tf_keras()
234
+
235
+
236
+ ### For gradient logging ###
237
+
238
+
239
+ def _get_custom_optimizer_parent_class():
240
+ from wandb.util import parse_version
241
+
242
+ if parse_version(tf.__version__) >= parse_version("2.9.0"):
243
+ custom_optimizer_parent_class = tf.keras.optimizers.legacy.Optimizer
244
+ else:
245
+ custom_optimizer_parent_class = tf.keras.optimizers.Optimizer
246
+
247
+ return custom_optimizer_parent_class
248
+
249
+
250
+ _custom_optimizer_parent_class = _get_custom_optimizer_parent_class()
251
+
252
+
253
+ class _CustomOptimizer(_custom_optimizer_parent_class):
254
+ def __init__(self):
255
+ super().__init__(name="CustomOptimizer")
256
+ self._resource_apply_dense = tf.function(self._resource_apply_dense)
257
+ self._resource_apply_sparse = tf.function(self._resource_apply_sparse)
258
+
259
+ def _resource_apply_dense(self, grad, var):
260
+ var.assign(grad)
261
+
262
+ # this needs to be implemented to prevent a NotImplementedError when
263
+ # using Lookup layers.
264
+ def _resource_apply_sparse(self, grad, var, indices):
265
+ pass
266
+
267
+ def get_config(self):
268
+ return super().get_config()
269
+
270
+
271
+ class _GradAccumulatorCallback(tf.keras.callbacks.Callback):
272
+ """Accumulates gradients during a fit() call when used in conjunction with the CustomOptimizer above."""
273
+
274
+ def set_model(self, model):
275
+ super().set_model(model)
276
+ self.og_weights = model.get_weights()
277
+ self.grads = [np.zeros(tuple(w.shape)) for w in model.trainable_weights]
278
+
279
+ def on_batch_end(self, batch, logs=None):
280
+ for g, w in zip(self.grads, self.model.trainable_weights):
281
+ g += w.numpy()
282
+ self.model.set_weights(self.og_weights)
283
+
284
+ def get_grads(self):
285
+ return [g.copy() for g in self.grads]
286
+
287
+
288
+ ###
289
+
290
+
291
+ class WandbCallback(tf.keras.callbacks.Callback):
292
+ """`WandbCallback` automatically integrates keras with wandb.
293
+
294
+ Example:
295
+ ```python
296
+ model.fit(
297
+ X_train,
298
+ y_train,
299
+ validation_data=(X_test, y_test),
300
+ callbacks=[WandbCallback()],
301
+ )
302
+ ```
303
+
304
+ `WandbCallback` will automatically log history data from any
305
+ metrics collected by keras: loss and anything passed into `keras_model.compile()`.
306
+
307
+ `WandbCallback` will set summary metrics for the run associated with the "best" training
308
+ step, where "best" is defined by the `monitor` and `mode` attributes. This defaults
309
+ to the epoch with the minimum `val_loss`. `WandbCallback` will by default save the model
310
+ associated with the best `epoch`.
311
+
312
+ `WandbCallback` can optionally log gradient and parameter histograms.
313
+
314
+ `WandbCallback` can optionally save training and validation data for wandb to visualize.
315
+
316
+ Args:
317
+ monitor: (str) name of metric to monitor. Defaults to `val_loss`.
318
+ mode: (str) one of {`auto`, `min`, `max`}.
319
+ `min` - save model when monitor is minimized
320
+ `max` - save model when monitor is maximized
321
+ `auto` - try to guess when to save the model (default).
322
+ save_model:
323
+ True - save a model when monitor beats all previous epochs
324
+ False - don't save models
325
+ save_graph: (boolean) if True save model graph to wandb (default to True).
326
+ save_weights_only: (boolean) if True, then only the model's weights will be
327
+ saved (`model.save_weights(filepath)`), else the full model
328
+ is saved (`model.save(filepath)`).
329
+ log_weights: (boolean) if True save histograms of the model's layer's weights.
330
+ log_gradients: (boolean) if True log histograms of the training gradients
331
+ training_data: (tuple) Same format `(X,y)` as passed to `model.fit`. This is needed
332
+ for calculating gradients - this is mandatory if `log_gradients` is `True`.
333
+ validation_data: (tuple) Same format `(X,y)` as passed to `model.fit`. A set of data
334
+ for wandb to visualize. If this is set, every epoch, wandb will
335
+ make a small number of predictions and save the results for later visualization. In case
336
+ you are working with image data, please also set `input_type` and `output_type` in order
337
+ to log correctly.
338
+ generator: (generator) a generator that returns validation data for wandb to visualize. This
339
+ generator should return tuples `(X,y)`. Either `validate_data` or generator should
340
+ be set for wandb to visualize specific data examples. In case you are working with image data,
341
+ please also set `input_type` and `output_type` in order to log correctly.
342
+ validation_steps: (int) if `validation_data` is a generator, how many
343
+ steps to run the generator for the full validation set.
344
+ labels: (list) If you are visualizing your data with wandb this list of labels
345
+ will convert numeric output to understandable string if you are building a
346
+ multiclass classifier. If you are making a binary classifier you can pass in
347
+ a list of two labels ["label for false", "label for true"]. If `validate_data`
348
+ and generator are both false, this won't do anything.
349
+ predictions: (int) the number of predictions to make for visualization each epoch, max
350
+ is 100.
351
+ input_type: (string) type of the model input to help visualization. can be one of:
352
+ (`image`, `images`, `segmentation_mask`, `auto`).
353
+ output_type: (string) type of the model output to help visualization. can be one of:
354
+ (`image`, `images`, `segmentation_mask`, `label`).
355
+ log_evaluation: (boolean) if True, save a Table containing validation data and the
356
+ model's predictions at each epoch. See `validation_indexes`,
357
+ `validation_row_processor`, and `output_row_processor` for additional details.
358
+ class_colors: ([float, float, float]) if the input or output is a segmentation mask,
359
+ an array containing an rgb tuple (range 0-1) for each class.
360
+ log_batch_frequency: (integer) if None, callback will log every epoch.
361
+ If set to integer, callback will log training metrics every `log_batch_frequency`
362
+ batches.
363
+ log_best_prefix: (string) if None, no extra summary metrics will be saved.
364
+ If set to a string, the monitored metric and epoch will be prepended with this value
365
+ and stored as summary metrics.
366
+ validation_indexes: ([wandb.data_types._TableLinkMixin]) an ordered list of index keys to associate
367
+ with each validation example. If log_evaluation is True and `validation_indexes` is provided,
368
+ then a Table of validation data will not be created and instead each prediction will
369
+ be associated with the row represented by the `TableLinkMixin`. The most common way to obtain
370
+ such keys are is use `Table.get_index()` which will return a list of row keys.
371
+ validation_row_processor: (Callable) a function to apply to the validation data, commonly used to visualize the data.
372
+ The function will receive an `ndx` (int) and a `row` (dict). If your model has a single input,
373
+ then `row["input"]` will be the input data for the row. Else, it will be keyed based on the name of the
374
+ input slot. If your fit function takes a single target, then `row["target"]` will be the target data for the row. Else,
375
+ it will be keyed based on the name of the output slots. For example, if your input data is a single ndarray,
376
+ but you wish to visualize the data as an Image, then you can provide `lambda ndx, row: {"img": wandb.Image(row["input"])}`
377
+ as the processor. Ignored if log_evaluation is False or `validation_indexes` are present.
378
+ output_row_processor: (Callable) same as `validation_row_processor`, but applied to the model's output. `row["output"]` will contain
379
+ the results of the model output.
380
+ infer_missing_processors: (bool) Determines if `validation_row_processor` and `output_row_processor`
381
+ should be inferred if missing. Defaults to True. If `labels` are provided, we will attempt to infer classification-type
382
+ processors where appropriate.
383
+ log_evaluation_frequency: (int) Determines the frequency which evaluation results will be logged. Default 0 (only at the end of training).
384
+ Set to 1 to log every epoch, 2 to log every other epoch, and so on. Has no effect when log_evaluation is False.
385
+ compute_flops: (bool) Compute the FLOPs of your Keras Sequential or Functional model in GigaFLOPs unit.
386
+ """
387
+
388
+ def __init__(
389
+ self,
390
+ monitor="val_loss",
391
+ verbose=0,
392
+ mode="auto",
393
+ save_weights_only=False,
394
+ log_weights=False,
395
+ log_gradients=False,
396
+ save_model=True,
397
+ training_data=None,
398
+ validation_data=None,
399
+ labels=None,
400
+ predictions=36,
401
+ generator=None,
402
+ input_type=None,
403
+ output_type=None,
404
+ log_evaluation=False,
405
+ validation_steps=None,
406
+ class_colors=None,
407
+ log_batch_frequency=None,
408
+ log_best_prefix="best_",
409
+ save_graph=True,
410
+ validation_indexes=None,
411
+ validation_row_processor=None,
412
+ prediction_row_processor=None,
413
+ infer_missing_processors=True,
414
+ log_evaluation_frequency=0,
415
+ compute_flops=False,
416
+ **kwargs,
417
+ ):
418
+ if wandb.run is None:
419
+ raise wandb.Error("You must call wandb.init() before WandbCallback()")
420
+
421
+ deprecate(
422
+ field_name=Deprecated.keras_callback,
423
+ warning_message=(
424
+ "WandbCallback is deprecated and will be removed in a future release. "
425
+ "Please use the WandbMetricsLogger, WandbModelCheckpoint, and WandbEvalCallback "
426
+ "callbacks instead. "
427
+ "See https://docs.wandb.ai/guides/integrations/keras for more information."
428
+ ),
429
+ )
430
+
431
+ with wandb.wandb_lib.telemetry.context(run=wandb.run) as tel:
432
+ tel.feature.keras = True
433
+ self.validation_data = None
434
+ # This is kept around for legacy reasons
435
+ if validation_data is not None:
436
+ if is_generator_like(validation_data):
437
+ generator = validation_data
438
+ else:
439
+ self.validation_data = validation_data
440
+ if labels is None:
441
+ labels = []
442
+ self.labels = labels
443
+ self.predictions = min(predictions, 100)
444
+
445
+ self.monitor = monitor
446
+ self.verbose = verbose
447
+ self.save_weights_only = save_weights_only
448
+ self.save_graph = save_graph
449
+
450
+ wandb.save("model-best.h5")
451
+ self.filepath = os.path.join(wandb.run.dir, "model-best.h5")
452
+ self.save_model = save_model
453
+ if save_model:
454
+ deprecate(
455
+ field_name=Deprecated.keras_callback__save_model,
456
+ warning_message=(
457
+ "The save_model argument by default saves the model in the HDF5 format that cannot save "
458
+ "custom objects like subclassed models and custom layers. This behavior will be deprecated "
459
+ "in a future release in favor of the SavedModel format. Meanwhile, the HDF5 model is saved "
460
+ "as W&B files and the SavedModel as W&B Artifacts."
461
+ ),
462
+ )
463
+
464
+ self.save_model_as_artifact = True
465
+ self.log_weights = log_weights
466
+ self.log_gradients = log_gradients
467
+ self.training_data = training_data
468
+ self.generator = generator
469
+ self._graph_rendered = False
470
+
471
+ data_type = kwargs.get("data_type", None)
472
+ if data_type is not None:
473
+ deprecate(
474
+ field_name=Deprecated.keras_callback__data_type,
475
+ warning_message=(
476
+ "The data_type argument of wandb.keras.WandbCallback is deprecated "
477
+ "and will be removed in a future release. Please use input_type instead.\n"
478
+ "Setting input_type = data_type."
479
+ ),
480
+ )
481
+ input_type = data_type
482
+ self.input_type = input_type
483
+ self.output_type = output_type
484
+ self.log_evaluation = log_evaluation
485
+ self.validation_steps = validation_steps
486
+ self.class_colors = np.array(class_colors) if class_colors is not None else None
487
+ self.log_batch_frequency = log_batch_frequency
488
+ self.log_best_prefix = log_best_prefix
489
+ self.compute_flops = compute_flops
490
+
491
+ self._prediction_batch_size = None
492
+
493
+ if self.log_gradients:
494
+ if int(tf.__version__.split(".")[0]) < 2:
495
+ raise Exception("Gradient logging requires tensorflow 2.0 or higher.")
496
+ if self.training_data is None:
497
+ raise ValueError(
498
+ "training_data argument is required for gradient logging."
499
+ )
500
+ if isinstance(self.training_data, (list, tuple)):
501
+ if len(self.training_data) != 2:
502
+ raise ValueError("training data must be a tuple of length two")
503
+ self._training_data_x, self._training_data_y = self.training_data
504
+ else:
505
+ self._training_data_x = (
506
+ self.training_data
507
+ ) # generator, tf.data.Dataset etc
508
+ self._training_data_y = None
509
+
510
+ # From Keras
511
+ if mode not in ["auto", "min", "max"]:
512
+ wandb.termwarn(
513
+ f"WandbCallback mode {mode} is unknown, fallback to auto mode."
514
+ )
515
+ mode = "auto"
516
+
517
+ if mode == "min":
518
+ self.monitor_op = operator.lt
519
+ self.best = float("inf")
520
+ elif mode == "max":
521
+ self.monitor_op = operator.gt
522
+ self.best = float("-inf")
523
+ else:
524
+ if "acc" in self.monitor or self.monitor.startswith("fmeasure"):
525
+ self.monitor_op = operator.gt
526
+ self.best = float("-inf")
527
+ else:
528
+ self.monitor_op = operator.lt
529
+ self.best = float("inf")
530
+ # Get the previous best metric for resumed runs
531
+ previous_best = wandb.run.summary.get(f"{self.log_best_prefix}{self.monitor}")
532
+ if previous_best is not None:
533
+ self.best = previous_best
534
+
535
+ self._validation_data_logger = None
536
+ self._validation_indexes = validation_indexes
537
+ self._validation_row_processor = validation_row_processor
538
+ self._prediction_row_processor = prediction_row_processor
539
+ self._infer_missing_processors = infer_missing_processors
540
+ self._log_evaluation_frequency = log_evaluation_frequency
541
+ self._model_trained_since_last_eval = False
542
+
543
+ def _build_grad_accumulator_model(self):
544
+ inputs = self.model.inputs
545
+ outputs = self.model(inputs)
546
+ grad_acc_model = tf.keras.models.Model(inputs, outputs)
547
+ grad_acc_model.compile(loss=self.model.loss, optimizer=_CustomOptimizer())
548
+
549
+ # make sure magic doesn't think this is a user model
550
+ grad_acc_model._wandb_internal_model = True
551
+
552
+ self._grad_accumulator_model = grad_acc_model
553
+ self._grad_accumulator_callback = _GradAccumulatorCallback()
554
+
555
+ def _implements_train_batch_hooks(self):
556
+ return self.log_batch_frequency is not None
557
+
558
+ def _implements_test_batch_hooks(self):
559
+ return self.log_batch_frequency is not None
560
+
561
+ def _implements_predict_batch_hooks(self):
562
+ return self.log_batch_frequency is not None
563
+
564
+ def set_params(self, params):
565
+ self.params = params
566
+
567
+ def set_model(self, model):
568
+ super().set_model(model)
569
+ if self.input_type == "auto" and len(model.inputs) == 1:
570
+ self.input_type = wandb.util.guess_data_type(
571
+ model.inputs[0].shape, risky=True
572
+ )
573
+ if self.input_type and self.output_type is None and len(model.outputs) == 1:
574
+ self.output_type = wandb.util.guess_data_type(model.outputs[0].shape)
575
+ if self.log_gradients:
576
+ self._build_grad_accumulator_model()
577
+
578
+ def _attempt_evaluation_log(self, commit=True):
579
+ if self.log_evaluation and self._validation_data_logger:
580
+ try:
581
+ if not self.model:
582
+ wandb.termwarn("WandbCallback unable to read model from trainer")
583
+ else:
584
+ self._validation_data_logger.log_predictions(
585
+ predictions=self._validation_data_logger.make_predictions(
586
+ self.model.predict
587
+ ),
588
+ commit=commit,
589
+ )
590
+ self._model_trained_since_last_eval = False
591
+ except Exception as e:
592
+ wandb.termwarn("Error during prediction logging for epoch: " + str(e))
593
+
594
+ def on_epoch_end(self, epoch, logs=None):
595
+ if logs is None:
596
+ logs = {}
597
+ if self.log_weights:
598
+ wandb.log(self._log_weights(), commit=False)
599
+
600
+ if self.log_gradients:
601
+ wandb.log(self._log_gradients(), commit=False)
602
+
603
+ if self.input_type in (
604
+ "image",
605
+ "images",
606
+ "segmentation_mask",
607
+ ) or self.output_type in ("image", "images", "segmentation_mask"):
608
+ if self.generator:
609
+ self.validation_data = next(self.generator)
610
+ if self.validation_data is None:
611
+ wandb.termwarn(
612
+ "No validation_data set, pass a generator to the callback."
613
+ )
614
+ elif self.validation_data and len(self.validation_data) > 0:
615
+ wandb.log(
616
+ {"examples": self._log_images(num_images=self.predictions)},
617
+ commit=False,
618
+ )
619
+
620
+ if (
621
+ self._log_evaluation_frequency > 0
622
+ and epoch % self._log_evaluation_frequency == 0
623
+ ):
624
+ self._attempt_evaluation_log(commit=False)
625
+
626
+ wandb.log({"epoch": epoch}, commit=False)
627
+ wandb.log(logs, commit=True)
628
+
629
+ self.current = logs.get(self.monitor)
630
+ if self.current and self.monitor_op(self.current, self.best):
631
+ if self.log_best_prefix:
632
+ wandb.run.summary[f"{self.log_best_prefix}{self.monitor}"] = (
633
+ self.current
634
+ )
635
+ wandb.run.summary["{}{}".format(self.log_best_prefix, "epoch")] = epoch
636
+ if self.verbose and not self.save_model:
637
+ wandb.termlog(
638
+ f"Epoch {epoch:05d}: {self.monitor} improved from {self.best:.5f} to {self.current:.5f}"
639
+ )
640
+ if self.save_model:
641
+ self._save_model(epoch)
642
+
643
+ if self.save_model and self.save_model_as_artifact:
644
+ self._save_model_as_artifact(epoch)
645
+
646
+ self.best = self.current
647
+
648
+ # This is what keras used pre tensorflow.keras
649
+ def on_batch_begin(self, batch, logs=None):
650
+ pass
651
+
652
+ # This is what keras used pre tensorflow.keras
653
+ def on_batch_end(self, batch, logs=None):
654
+ if self.save_graph and not self._graph_rendered:
655
+ # Couldn't do this in train_begin because keras may still not be built
656
+ wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model)
657
+ self._graph_rendered = True
658
+
659
+ if self.log_batch_frequency and batch % self.log_batch_frequency == 0:
660
+ wandb.log(logs, commit=True)
661
+
662
+ def on_train_batch_begin(self, batch, logs=None):
663
+ self._model_trained_since_last_eval = True
664
+
665
+ def on_train_batch_end(self, batch, logs=None):
666
+ if self.save_graph and not self._graph_rendered:
667
+ # Couldn't do this in train_begin because keras may still not be built
668
+ wandb.run.summary["graph"] = wandb.Graph.from_keras(self.model)
669
+ self._graph_rendered = True
670
+
671
+ if self.log_batch_frequency and batch % self.log_batch_frequency == 0:
672
+ wandb.log(logs, commit=True)
673
+
674
+ def on_test_begin(self, logs=None):
675
+ pass
676
+
677
+ def on_test_end(self, logs=None):
678
+ pass
679
+
680
+ def on_test_batch_begin(self, batch, logs=None):
681
+ pass
682
+
683
+ def on_test_batch_end(self, batch, logs=None):
684
+ pass
685
+
686
+ def on_train_begin(self, logs=None):
687
+ if self.log_evaluation:
688
+ try:
689
+ validation_data = None
690
+ if self.validation_data:
691
+ validation_data = self.validation_data
692
+ elif self.generator:
693
+ if not self.validation_steps:
694
+ wandb.termwarn(
695
+ "WandbCallback is unable to log validation data. "
696
+ "When using a generator for validation_data, you must pass validation_steps"
697
+ )
698
+ else:
699
+ x = None
700
+ y_true = None
701
+ for _ in range(self.validation_steps):
702
+ bx, by_true = next(self.generator)
703
+ if x is None:
704
+ x, y_true = bx, by_true
705
+ else:
706
+ x, y_true = (
707
+ np.append(x, bx, axis=0),
708
+ np.append(y_true, by_true, axis=0),
709
+ )
710
+ validation_data = (x, y_true)
711
+ else:
712
+ wandb.termwarn(
713
+ "WandbCallback is unable to read validation_data from trainer "
714
+ "and therefore cannot log validation data. Ensure Keras is properly "
715
+ "patched by calling `from wandb.keras import WandbCallback` at the top of your script."
716
+ )
717
+ if validation_data:
718
+ self._validation_data_logger = ValidationDataLogger(
719
+ inputs=validation_data[0],
720
+ targets=validation_data[1],
721
+ indexes=self._validation_indexes,
722
+ validation_row_processor=self._validation_row_processor,
723
+ prediction_row_processor=self._prediction_row_processor,
724
+ class_labels=self.labels,
725
+ infer_missing_processors=self._infer_missing_processors,
726
+ )
727
+ except Exception as e:
728
+ wandb.termwarn(
729
+ "Error initializing ValidationDataLogger in WandbCallback. "
730
+ f"Skipping logging validation data. Error: {str(e)}"
731
+ )
732
+
733
+ if self.compute_flops and _can_compute_flops():
734
+ try:
735
+ wandb.summary["GFLOPs"] = self.get_flops()
736
+ except Exception as e:
737
+ wandb.termwarn("Unable to compute FLOPs for this model.")
738
+ logger.exception(e)
739
+
740
+ def on_train_end(self, logs=None):
741
+ if self._model_trained_since_last_eval:
742
+ self._attempt_evaluation_log()
743
+
744
+ def on_predict_begin(self, logs=None):
745
+ pass
746
+
747
+ def on_predict_end(self, logs=None):
748
+ pass
749
+
750
+ def on_predict_batch_begin(self, batch, logs=None):
751
+ pass
752
+
753
+ def on_predict_batch_end(self, batch, logs=None):
754
+ pass
755
+
756
+ def _logits_to_captions(self, logits):
757
+ if logits[0].shape[-1] == 1:
758
+ # Scalar output from the model
759
+ # TODO: handle validation_y
760
+ if len(self.labels) == 2:
761
+ # User has named true and false
762
+ captions = [
763
+ self.labels[1] if logits[0] > 0.5 else self.labels[0]
764
+ for logit in logits
765
+ ]
766
+ else:
767
+ if len(self.labels) != 0:
768
+ wandb.termwarn(
769
+ "keras model is producing a single output, "
770
+ 'so labels should be a length two array: ["False label", "True label"].'
771
+ )
772
+ captions = [logit[0] for logit in logits]
773
+ else:
774
+ # Vector output from the model
775
+ # TODO: handle validation_y
776
+ labels = np.argmax(np.stack(logits), axis=1)
777
+
778
+ if len(self.labels) > 0:
779
+ # User has named the categories in self.labels
780
+ captions = []
781
+ for label in labels:
782
+ try:
783
+ captions.append(self.labels[label])
784
+ except IndexError:
785
+ captions.append(label)
786
+ else:
787
+ captions = labels
788
+ return captions
789
+
790
+ def _masks_to_pixels(self, masks):
791
+ # if its a binary mask, just return it as grayscale instead of picking the argmax
792
+ if len(masks[0].shape) == 2 or masks[0].shape[-1] == 1:
793
+ return masks
794
+ class_colors = (
795
+ self.class_colors
796
+ if self.class_colors is not None
797
+ else np.array(wandb.util.class_colors(masks[0].shape[2]))
798
+ )
799
+ imgs = class_colors[np.argmax(masks, axis=-1)]
800
+ return imgs
801
+
802
    def _log_images(self, num_images=36):
        """Sample validation examples and build wandb.Image lists for logging.

        Args:
            num_images: Maximum number of validation examples to sample.

        Returns:
            A list of wandb.Image objects interleaved per example (layout
            depends on ``self.input_type`` / ``self.output_type``), or
            implicitly ``None`` when no supported type combination matches.
        """
        validation_X = self.validation_data[0]  # noqa: N806
        validation_y = self.validation_data[1]

        validation_length = len(validation_X)

        if validation_length > num_images:
            # pick some data at random
            indices = np.random.choice(validation_length, num_images, replace=False)
        else:
            indices = range(validation_length)

        test_data = []
        test_output = []
        for i in indices:
            test_example = validation_X[i]
            test_data.append(test_example)
            test_output.append(validation_y[i])

        if self.model.stateful:
            # Stateful models require batch_size=1; reset state afterwards so
            # logging does not perturb subsequent training.
            predictions = self.model.predict(np.stack(test_data), batch_size=1)
            self.model.reset_states()
        else:
            predictions = self.model.predict(
                np.stack(test_data), batch_size=self._prediction_batch_size
            )
            # If the batched predict returned a mismatched number of outputs,
            # fall back (permanently) to batch_size=1 and retry.
            if len(predictions) != len(test_data):
                self._prediction_batch_size = 1
                predictions = self.model.predict(
                    np.stack(test_data), batch_size=self._prediction_batch_size
                )

        if self.input_type == "label":
            if self.output_type in ("image", "images", "segmentation_mask"):
                # NOTE(review): captions come from test_data (the inputs),
                # not from the predictions — confirm this is intended.
                captions = self._logits_to_captions(test_data)
                output_image_data = (
                    self._masks_to_pixels(predictions)
                    if self.output_type == "segmentation_mask"
                    else predictions
                )
                reference_image_data = (
                    self._masks_to_pixels(test_output)
                    if self.output_type == "segmentation_mask"
                    else test_output
                )
                output_images = [
                    wandb.Image(data, caption=captions[i], grouping=2)
                    for i, data in enumerate(output_image_data)
                ]
                reference_images = [
                    wandb.Image(data, caption=captions[i])
                    for i, data in enumerate(reference_image_data)
                ]
                # Interleave (output, reference) pairs per example.
                return list(chain.from_iterable(zip(output_images, reference_images)))
        elif self.input_type in ("image", "images", "segmentation_mask"):
            input_image_data = (
                self._masks_to_pixels(test_data)
                if self.input_type == "segmentation_mask"
                else test_data
            )
            if self.output_type == "label":
                # we just use the predicted label as the caption for now
                # NOTE(review): images are built from test_data rather than
                # input_image_data, so segmentation-mask inputs are logged
                # un-colorized on this path — confirm intended.
                captions = self._logits_to_captions(predictions)
                return [
                    wandb.Image(data, caption=captions[i])
                    for i, data in enumerate(test_data)
                ]
            elif self.output_type in ("image", "images", "segmentation_mask"):
                output_image_data = (
                    self._masks_to_pixels(predictions)
                    if self.output_type == "segmentation_mask"
                    else predictions
                )
                reference_image_data = (
                    self._masks_to_pixels(test_output)
                    if self.output_type == "segmentation_mask"
                    else test_output
                )
                input_images = [
                    wandb.Image(data, grouping=3)
                    for i, data in enumerate(input_image_data)
                ]
                output_images = [
                    wandb.Image(data) for i, data in enumerate(output_image_data)
                ]
                reference_images = [
                    wandb.Image(data) for i, data in enumerate(reference_image_data)
                ]
                # Interleave (input, output, reference) triples per example.
                return list(
                    chain.from_iterable(
                        zip(input_images, output_images, reference_images)
                    )
                )
            else:
                # unknown output, just log the input images
                return [wandb.Image(img) for img in test_data]
        elif self.output_type in ("image", "images", "segmentation_mask"):
            # unknown input, just log the predicted and reference outputs without captions
            output_image_data = (
                self._masks_to_pixels(predictions)
                if self.output_type == "segmentation_mask"
                else predictions
            )
            reference_image_data = (
                self._masks_to_pixels(test_output)
                if self.output_type == "segmentation_mask"
                else test_output
            )
            output_images = [
                wandb.Image(data, grouping=2)
                for i, data in enumerate(output_image_data)
            ]
            reference_images = [
                wandb.Image(data) for i, data in enumerate(reference_image_data)
            ]
            return list(chain.from_iterable(zip(output_images, reference_images)))
918
+
919
+ def _log_weights(self):
920
+ metrics = {}
921
+ for layer in self.model.layers:
922
+ weights = layer.get_weights()
923
+ if len(weights) == 1:
924
+ _update_if_numeric(
925
+ metrics, "parameters/" + layer.name + ".weights", weights[0]
926
+ )
927
+ elif len(weights) == 2:
928
+ _update_if_numeric(
929
+ metrics, "parameters/" + layer.name + ".weights", weights[0]
930
+ )
931
+ _update_if_numeric(
932
+ metrics, "parameters/" + layer.name + ".bias", weights[1]
933
+ )
934
+ return metrics
935
+
936
    def _log_gradients(self):
        """Compute gradient histograms over the stored training batch.

        Runs one silent ``fit`` pass of the gradient-accumulator clone of the
        model (built elsewhere in this callback) on
        ``self._training_data_x/y``, capturing gradients through
        ``self._grad_accumulator_callback``.

        Returns:
            Dict mapping ``gradients/<weight-name>.gradient`` to
            ``wandb.Histogram`` objects.
        """
        # Suppress callback warnings grad accumulator
        og_level = tf_logger.level
        tf_logger.setLevel("ERROR")

        self._grad_accumulator_model.fit(
            self._training_data_x,
            self._training_data_y,
            verbose=0,
            callbacks=[self._grad_accumulator_callback],
        )
        # Restore the original TF log level once the silent pass is done.
        tf_logger.setLevel(og_level)
        weights = self.model.trainable_weights
        grads = self._grad_accumulator_callback.grads
        metrics = {}
        for weight, grad in zip(weights, grads):
            # Weight names look like "dense/kernel:0"; drop the ":0" suffix.
            metrics["gradients/" + weight.name.split(":")[0] + ".gradient"] = (
                wandb.Histogram(grad)
            )
        return metrics
956
+
957
+ def _log_dataframe(self):
958
+ x, y_true, y_pred = None, None, None
959
+
960
+ if self.validation_data:
961
+ x, y_true = self.validation_data[0], self.validation_data[1]
962
+ y_pred = self.model.predict(x)
963
+ elif self.generator:
964
+ if not self.validation_steps:
965
+ wandb.termwarn(
966
+ "when using a generator for validation data with dataframes, "
967
+ "you must pass validation_steps. skipping"
968
+ )
969
+ return None
970
+
971
+ for _ in range(self.validation_steps):
972
+ bx, by_true = next(self.generator)
973
+ by_pred = self.model.predict(bx)
974
+ if x is None:
975
+ x, y_true, y_pred = bx, by_true, by_pred
976
+ else:
977
+ x, y_true, y_pred = (
978
+ np.append(x, bx, axis=0),
979
+ np.append(y_true, by_true, axis=0),
980
+ np.append(y_pred, by_pred, axis=0),
981
+ )
982
+
983
+ if self.input_type in ("image", "images") and self.output_type == "label":
984
+ return wandb.image_categorizer_dataframe(
985
+ x=x, y_true=y_true, y_pred=y_pred, labels=self.labels
986
+ )
987
+ elif (
988
+ self.input_type in ("image", "images")
989
+ and self.output_type == "segmentation_mask"
990
+ ):
991
+ return wandb.image_segmentation_dataframe(
992
+ x=x,
993
+ y_true=y_true,
994
+ y_pred=y_pred,
995
+ labels=self.labels,
996
+ class_colors=self.class_colors,
997
+ )
998
+ else:
999
+ wandb.termwarn(
1000
+ f"unknown dataframe type for input_type={self.input_type} and output_type={self.output_type}"
1001
+ )
1002
+ return None
1003
+
1004
+ def _save_model(self, epoch):
1005
+ if wandb.run.disabled:
1006
+ return
1007
+ if self.verbose > 0:
1008
+ wandb.termlog(
1009
+ f"Epoch {epoch:05d}: {self.monitor} improved from {self.best:.5f} to {self.current:.5f}, "
1010
+ f"saving model to {self.filepath}"
1011
+ )
1012
+
1013
+ try:
1014
+ if self.save_weights_only:
1015
+ self.model.save_weights(self.filepath, overwrite=True)
1016
+ else:
1017
+ self.model.save(self.filepath, overwrite=True)
1018
+ # Was getting `RuntimeError: Unable to create link` in TF 1.13.1
1019
+ # also saw `TypeError: can't pickle _thread.RLock objects`
1020
+ except (ImportError, RuntimeError, TypeError, AttributeError) as e:
1021
+ wandb.termerror(
1022
+ "Can't save model in the h5py format. The model will be saved as "
1023
+ "as an W&B Artifact in the 'tf' format."
1024
+ )
1025
+ logger.exception(e)
1026
+
1027
+ def _save_model_as_artifact(self, epoch):
1028
+ if wandb.run.disabled:
1029
+ return
1030
+
1031
+ # Save the model in the SavedModel format.
1032
+ # TODO: Replace this manual artifact creation with the `log_model` method
1033
+ # after `log_model` is released from beta.
1034
+ self.model.save(self.filepath[:-3], overwrite=True, save_format="tf")
1035
+
1036
+ # Log the model as artifact.
1037
+ name = wandb.util.make_artifact_name_safe(f"model-{wandb.run.name}")
1038
+ model_artifact = wandb.Artifact(name, type="model")
1039
+ model_artifact.add_dir(self.filepath[:-3])
1040
+ wandb.run.log_artifact(model_artifact, aliases=["latest", f"epoch_{epoch}"])
1041
+
1042
+ # Remove the SavedModel from wandb dir as we don't want to log it to save memory.
1043
+ shutil.rmtree(self.filepath[:-3])
1044
+
1045
    def get_flops(self) -> float:
        """Calculate FLOPS [GFLOPs] for a tf.keras.Model or tf.keras.Sequential model in inference mode.

        It uses tf.compat.v1.profiler under the hood.

        Returns:
            Estimated GFLOPs for a single-sample forward pass.

        Raises:
            wandb.Error: If ``self.model`` has not been set yet.
            ValueError: If the model is not a Keras Model/Sequential.
        """
        if not hasattr(self, "model"):
            raise wandb.Error("self.model must be set before using this method.")

        if not isinstance(
            self.model, (tf.keras.models.Sequential, tf.keras.models.Model)
        ):
            raise ValueError(
                "Calculating FLOPS is only supported for "
                "`tf.keras.Model` and `tf.keras.Sequential` instances."
            )

        # Deferred import: private TF API, only needed here.
        from tensorflow.python.framework.convert_to_constants import (
            convert_variables_to_constants_v2_as_graph,
        )

        # Compute FLOPs for one sample
        batch_size = 1
        inputs = [
            tf.TensorSpec([batch_size] + inp.shape[1:], inp.dtype)
            for inp in self.model.inputs
        ]

        # convert tf.keras model into frozen graph to count FLOPs about operations used at inference
        real_model = tf.function(self.model).get_concrete_function(inputs)
        frozen_func, _ = convert_variables_to_constants_v2_as_graph(real_model)

        # Calculate FLOPs with tf.profiler
        run_meta = tf.compat.v1.RunMetadata()
        opts = (
            tf.compat.v1.profiler.ProfileOptionBuilder(
                tf.compat.v1.profiler.ProfileOptionBuilder().float_operation()
            )
            .with_empty_output()  # suppress profiler stdout
            .build()
        )

        flops = tf.compat.v1.profiler.profile(
            graph=frozen_func.graph, run_meta=run_meta, cmd="scope", options=opts
        )

        # convert to GFLOPs
        # NOTE(review): the trailing /2 presumably converts the profiler's
        # multiply-add-counted total into FLOPs — confirm against tf.profiler docs.
        return (flops.total_float_ops / 1e9) / 2
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sacred/__init__.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import numpy
4
+ from sacred.dependencies import get_digest
5
+ from sacred.observers import RunObserver
6
+
7
+ import wandb
8
+
9
+
10
class WandbObserver(RunObserver):
    """Log sacred experiment data to W&B.

    Args:
        Accepts all the arguments accepted by wandb.init().

        name — A display name for this run, which shows up in the UI and is editable, doesn't have to be unique
        notes — A multiline string description associated with the run
        config — a dictionary-like object to set as initial config
        project — the name of the project to which this run will belong
        tags — a list of strings to associate with this run as tags
        dir — the path to a directory where artifacts will be written (default: ./wandb)
        entity — the team posting this run (default: your username or your default team)
        job_type — the type of job you are logging, e.g. eval, worker, ps (default: training)
        save_code — save the main python or notebook file to wandb to enable diffing (default: editable from your settings page)
        group — a string by which to group other runs; see Grouping
        reinit — whether to allow multiple calls to wandb.init in the same process (default: False)
        id — A unique ID for this run primarily used for Resuming. It must be globally unique, and if you delete a run you can't reuse the ID. Use the name field for a descriptive, useful name for the run. The ID cannot contain special characters.
        resume — if set to True, the run auto resumes; can also be a unique string for manual resuming; see Resuming (default: False)
        anonymous — can be "allow", "never", or "must". This enables or explicitly disables anonymous logging. (default: never)
        force — whether to force a user to be logged into wandb when running a script (default: False)
        magic — (bool, dict, or str, optional): magic configuration as bool, dict, json string, yaml filename. If set to True will attempt to auto-instrument your script. (default: None)
        sync_tensorboard — A boolean indicating whether or not copy all TensorBoard logs wandb; see Tensorboard (default: False)
        monitor_gym — A boolean indicating whether or not to log videos generated by OpenAI Gym; see Ray Tune (default: False)
        allow_val_change — whether to allow wandb.config values to change, by default we throw an exception if config values are overwritten. (default: False)

    Examples:
        Create sacred experiment::
            from wandb.sacred import WandbObserver
            ex.observers.append(WandbObserver(project='sacred_test',
                name='test1'))
            @ex.config
            def cfg():
                C = 1.0
                gamma = 0.7
            @ex.automain
            def run(C, gamma, _run):
                iris = datasets.load_iris()
                per = permutation(iris.target.size)
                iris.data = iris.data[per]
                iris.target = iris.target[per]
                clf = svm.SVC(C, 'rbf', gamma=gamma)
                clf.fit(iris.data[:90],
                        iris.target[:90])
                return clf.score(iris.data[90:],
                                 iris.target[90:])
    """

    def __init__(self, **kwargs):
        # Start (or attach to) a wandb run; all kwargs pass through to wandb.init().
        self.run = wandb.init(**kwargs)
        # Map of resource filename -> md5 digest, populated by resource_event.
        self.resources = {}

    def started_event(
        self, ex_info, command, host_info, start_time, config, meta_info, _id
    ):
        """Sacred hook: mirror the experiment config into the wandb run config."""
        # TODO: add the source code file
        # TODO: add dependencies and metadata.
        self.__update_config(config)

    def completed_event(self, stop_time, result):
        """Sacred hook: log the experiment result(s) to wandb on completion."""
        if result:
            if not isinstance(result, tuple):
                # Wrap a single result so single & multiple results share one path.
                result = (result,)

            for i, r in enumerate(result):
                # BUGFIX: numbers, dicts, and ndarrays must be tested before
                # the generic `isinstance(r, object)` fallback; in the original
                # ordering that fallback matched everything, making the ndarray
                # branch (and the warning) unreachable.
                if isinstance(r, (int, float)):
                    wandb.log({f"result_{i}": float(r)})
                elif isinstance(r, dict):
                    wandb.log(r)
                elif isinstance(r, numpy.ndarray):
                    wandb.log({f"result_{i}": wandb.Image(r)})
                elif isinstance(r, object):
                    # NOTE(review): Artifact.add_file expects a file path;
                    # passing the result object itself looks suspect — confirm.
                    artifact = wandb.Artifact(f"result_{i}.pkl", type="result")
                    artifact.add_file(r)
                    self.run.log_artifact(artifact)
                else:
                    warnings.warn(
                        f"logging results does not support type '{type(r)}' results. Ignoring this result",
                        stacklevel=2,
                    )

    def artifact_event(self, name, filename, metadata=None, content_type=None):
        """Sacred hook: upload a sacred artifact file as a wandb Artifact."""
        if content_type is None:
            content_type = "file"
        artifact = wandb.Artifact(name, type=content_type)
        artifact.add_file(filename)
        self.run.log_artifact(artifact)

    def resource_event(self, filename):
        """TODO: Maintain resources list."""
        if filename not in self.resources:
            md5 = get_digest(filename)
            self.resources[filename] = md5

    def log_metrics(self, metrics_by_name, info):
        """Sacred hook: forward captured metrics to wandb.log."""
        for metric_name, metric_ptr in metrics_by_name.items():
            for _step, value in zip(metric_ptr["steps"], metric_ptr["values"]):
                if isinstance(value, numpy.ndarray):
                    wandb.log({metric_name: wandb.Image(value)})
                else:
                    wandb.log({metric_name: value})

    def __update_config(self, config):
        # Copy the sacred config into the run config key by key.
        for k, v in config.items():
            self.run.config[k] = v
        self.run.config["resources"] = []
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sacred/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.79 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sagemaker/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (511 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .sb3 import WandbCallback
2
+
3
+ __all__ = ["WandbCallback"]
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/__pycache__/sb3.cpython-310.pyc ADDED
Binary file (4.8 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/sb3/sb3.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """W&B callback for sb3.
2
+
3
+ Really simple callback to get logging for each tree
4
+
5
+ Example usage:
6
+
7
+ ```python
8
+ import gym
9
+ from stable_baselines3 import PPO
10
+ from stable_baselines3.common.monitor import Monitor
11
+ from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder
12
+ import wandb
13
+ from wandb.integration.sb3 import WandbCallback
14
+
15
+
16
+ config = {
17
+ "policy_type": "MlpPolicy",
18
+ "total_timesteps": 25000,
19
+ "env_name": "CartPole-v1",
20
+ }
21
+ run = wandb.init(
22
+ project="sb3",
23
+ config=config,
24
+ sync_tensorboard=True, # auto-upload sb3's tensorboard metrics
25
+ monitor_gym=True, # auto-upload the videos of agents playing the game
26
+ save_code=True, # optional
27
+ )
28
+
29
+
30
+ def make_env():
31
+ env = gym.make(config["env_name"])
32
+ env = Monitor(env) # record stats such as returns
33
+ return env
34
+
35
+
36
+ env = DummyVecEnv([make_env])
37
+ env = VecVideoRecorder(
38
+ env, "videos", record_video_trigger=lambda x: x % 2000 == 0, video_length=200
39
+ )
40
+ model = PPO(config["policy_type"], env, verbose=1, tensorboard_log=f"runs")
41
+ model.learn(
42
+ total_timesteps=config["total_timesteps"],
43
+ callback=WandbCallback(
44
+ model_save_path=f"models/{run.id}",
45
+ gradient_save_freq=100,
46
+ log="all",
47
+ ),
48
+ )
49
+ ```
50
+ """
51
+
52
+ import logging
53
+ import os
54
+ from typing import Literal, Optional
55
+
56
+ from stable_baselines3.common.callbacks import BaseCallback # type: ignore
57
+
58
+ import wandb
59
+ from wandb.sdk.lib import telemetry as wb_telemetry
60
+
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
class WandbCallback(BaseCallback):
    """Callback for logging experiments to Weights and Biases.

    Log SB3 experiments to Weights and Biases
    - Added model tracking and uploading
    - Added complete hyperparameters recording
    - Added gradient logging
    - Note that `wandb.init(...)` must be called before the WandbCallback can be used.

    Args:
        verbose: The verbosity of sb3 output
        model_save_path: Path to the folder where the model will be saved, The default value is `None` so the model is not logged
        model_save_freq: Frequency to save the model
        gradient_save_freq: Frequency to log gradient. The default value is 0 so the gradients are not logged
        log: What to log. One of "gradients", "parameters", or "all".
    """

    def __init__(
        self,
        verbose: int = 0,
        model_save_path: Optional[str] = None,
        model_save_freq: int = 0,
        gradient_save_freq: int = 0,
        log: Optional[Literal["gradients", "parameters", "all"]] = "all",
    ) -> None:
        super().__init__(verbose)
        if wandb.run is None:
            raise wandb.Error("You must call wandb.init() before WandbCallback()")
        with wb_telemetry.context() as tel:
            tel.feature.sb3 = True
        self.model_save_freq = model_save_freq
        self.model_save_path = model_save_path
        self.gradient_save_freq = gradient_save_freq
        # Validate the `log` mode; unknown values fall back to "all".
        if log not in ("gradients", "parameters", "all", None):
            wandb.termwarn(
                "`log` must be one of `None`, 'gradients', 'parameters', or 'all', "
                "falling back to 'all'"
            )
            log = "all"
        self.log = log
        if self.model_save_path is None:
            # Saving by frequency requires somewhere to save to.
            assert (
                self.model_save_freq == 0
            ), "to use the `model_save_freq` you have to set the `model_save_path` parameter"
        else:
            os.makedirs(self.model_save_path, exist_ok=True)
            self.path = os.path.join(self.model_save_path, "model.zip")

    def _init_callback(self) -> None:
        """Record the algorithm name and model hyperparameters in wandb.config."""
        hyperparams = {"algo": type(self.model).__name__}
        for key, value in self.model.__dict__.items():
            if key in wandb.config:
                continue
            # Keep primitives as-is; stringify everything else.
            hyperparams[key] = value if type(value) in (float, int, str) else str(value)
        if self.gradient_save_freq > 0:
            wandb.watch(
                self.model.policy,
                log_freq=self.gradient_save_freq,
                log=self.log,
            )
        wandb.config.setdefaults(hyperparams)

    def _on_step(self) -> bool:
        """Save the model every `model_save_freq` calls; always continue training."""
        save_due = (
            self.model_save_freq > 0
            and self.model_save_path is not None
            and self.n_calls % self.model_save_freq == 0
        )
        if save_due:
            self.save_model()
        return True

    def _on_training_end(self) -> None:
        """Save the final model when a save path was configured."""
        if self.model_save_path is not None:
            self.save_model()

    def save_model(self) -> None:
        """Serialize the model to disk and upload it to wandb."""
        self.model.save(self.path)
        wandb.save(self.path, base_path=self.model_save_path)
        if self.verbose > 1:
            logger.info(f"Saving model checkpoint to {self.path}")
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__init__.py ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/__pycache__/yolov8.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/wandb/integration/yolov8/yolov8.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Callable, Dict, List, Optional
2
+
3
+ from ultralytics.yolo.engine.model import YOLO
4
+ from ultralytics.yolo.engine.trainer import BaseTrainer
5
+
6
+ try:
7
+ from ultralytics.yolo.utils import RANK
8
+ from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
9
+ except ModuleNotFoundError:
10
+ from ultralytics.utils import RANK
11
+ from ultralytics.utils.torch_utils import get_flops, get_num_params
12
+ from ultralytics.yolo.v8.classify.train import ClassificationTrainer
13
+
14
+ import wandb
15
+ from wandb.sdk.lib import telemetry
16
+
17
+
18
class WandbCallback:
    """An internal YOLO model wrapper that tracks metrics, and logs models to Weights & Biases.

    Usage:
        ```python
        from wandb.integration.yolov8.yolov8 import WandbCallback

        model = YOLO("yolov8n.pt")
        wandb_logger = WandbCallback(
            model,
        )
        for event, callback_fn in wandb_logger.callbacks.items():
            model.add_callback(event, callback_fn)
        ```
    """

    def __init__(
        self,
        yolo: YOLO,
        run_name: Optional[str] = None,
        project: Optional[str] = None,
        tags: Optional[List[str]] = None,
        resume: Optional[str] = None,
        **kwargs: Optional[Any],
    ) -> None:
        """A utility class to manage wandb run and various callbacks for the ultralytics YOLOv8 framework.

        Args:
            yolo: A YOLOv8 model that's inherited from `:class:ultralytics.yolo.engine.model.YOLO`
            run_name, str: The name of the Weights & Biases run, defaults to an auto generated run_name if `trainer.args.name` is not defined.
            project, str: The name of the Weights & Biases project, defaults to `"YOLOv8"` if `trainer.args.project` is not defined.
            tags, List[str]: A list of tags to be added to the Weights & Biases run, defaults to `["YOLOv8"]`.
            resume, str: Whether to resume a previous run on Weights & Biases, defaults to `None`.
            **kwargs: Additional arguments to be passed to `wandb.init()`.
        """
        # Only stored here; the actual run is created lazily in
        # on_pretrain_routine_start so trainer.args can supply defaults.
        self.yolo = yolo
        self.run_name = run_name
        self.project = project
        self.tags = tags
        self.resume = resume
        self.kwargs = kwargs

    def on_pretrain_routine_start(self, trainer: BaseTrainer) -> None:
        """Starts a new wandb run to track the training process and log to Weights & Biases.

        Args:
            trainer: A task trainer that's inherited from `:class:ultralytics.yolo.engine.trainer.BaseTrainer`
                that contains the model training and optimization routine.
        """
        if wandb.run is None:
            # No active run: create one, preferring explicit ctor arguments
            # over trainer.args, with "YOLOv8" as the project fallback.
            self.run = wandb.init(
                name=self.run_name if self.run_name else trainer.args.name,
                project=self.project
                if self.project
                else trainer.args.project or "YOLOv8",
                tags=self.tags if self.tags else ["YOLOv8"],
                config=vars(trainer.args),
                resume=self.resume if self.resume else None,
                **self.kwargs,
            )
        else:
            # Reuse the caller's pre-existing run.
            self.run = wandb.run
        assert self.run is not None
        # Use "epoch" as the x-axis for all metric namespaces; keep the best
        # value (min for losses, max for metrics, last for LR) in the summary.
        self.run.define_metric("epoch", hidden=True)
        self.run.define_metric(
            "train/*", step_metric="epoch", step_sync=True, summary="min"
        )

        self.run.define_metric(
            "val/*", step_metric="epoch", step_sync=True, summary="min"
        )

        self.run.define_metric(
            "metrics/*", step_metric="epoch", step_sync=True, summary="max"
        )
        self.run.define_metric(
            "lr/*", step_metric="epoch", step_sync=True, summary="last"
        )

        with telemetry.context(run=wandb.run) as tel:
            tel.feature.ultralytics_yolov8 = True

    def on_pretrain_routine_end(self, trainer: BaseTrainer) -> None:
        """Record parameter count and GFLOPs of the model in the run summary."""
        assert self.run is not None
        self.run.summary.update(
            {
                "model/parameters": get_num_params(trainer.model),
                "model/GFLOPs": round(get_flops(trainer.model), 3),
            }
        )

    def on_train_epoch_start(self, trainer: BaseTrainer) -> None:
        """On train epoch start we only log epoch number to the Weights & Biases run."""
        # We log the epoch number here to commit the previous step,
        assert self.run is not None
        self.run.log({"epoch": trainer.epoch + 1})

    def on_train_epoch_end(self, trainer: BaseTrainer) -> None:
        """On train epoch end we log all the metrics to the Weights & Biases run."""
        assert self.run is not None
        self.run.log(
            {
                **trainer.metrics,
                **trainer.label_loss_items(trainer.tloss, prefix="train"),
                **trainer.lr,
            },
        )
        # Currently only the detection and segmentation trainers save images to the save_dir
        if not isinstance(trainer, ClassificationTrainer):
            self.run.log(
                {
                    "train_batch_images": [
                        wandb.Image(str(image_path), caption=image_path.stem)
                        for image_path in trainer.save_dir.glob("train_batch*.jpg")
                    ]
                }
            )

    def on_fit_epoch_end(self, trainer: BaseTrainer) -> None:
        """On fit epoch end we log all the best metrics and model detail to Weights & Biases run summary."""
        assert self.run is not None
        if trainer.epoch == 0:
            # NOTE(review): the speed dict key changed across ultralytics
            # versions; key 1 and "inference" are tried in that order, keeping
            # the first non-falsy value — confirm against the pinned version.
            speeds = [
                trainer.validator.speed.get(
                    key,
                )
                for key in (1, "inference")
            ]
            speed = speeds[0] if speeds[0] else speeds[1]
            if speed:
                self.run.summary.update(
                    {
                        "model/speed(ms/img)": round(speed, 3),
                    }
                )
        if trainer.best_fitness == trainer.fitness:
            # This epoch is the best so far: snapshot its metrics as "best/*".
            self.run.summary.update(
                {
                    "best/epoch": trainer.epoch + 1,
                    **{f"best/{key}": val for key, val in trainer.metrics.items()},
                }
            )

    def on_train_end(self, trainer: BaseTrainer) -> None:
        """On train end we log all the media, including plots, images and best model artifact to Weights & Biases."""
        # Currently only the detection and segmentation trainers save images to the save_dir
        assert self.run is not None
        if not isinstance(trainer, ClassificationTrainer):
            assert self.run is not None
            self.run.log(
                {
                    "plots": [
                        wandb.Image(str(image_path), caption=image_path.stem)
                        for image_path in trainer.save_dir.glob("*.png")
                    ],
                    "val_images": [
                        wandb.Image(str(image_path), caption=image_path.stem)
                        for image_path in trainer.validator.save_dir.glob("val*.jpg")
                    ],
                },
            )

        if trainer.best.exists():
            assert self.run is not None
            self.run.log_artifact(
                str(trainer.best),
                type="model",
                name=f"{self.run.name}_{trainer.args.task}.pt",
                aliases=["best", f"epoch_{trainer.epoch + 1}"],
            )

    def on_model_save(self, trainer: BaseTrainer) -> None:
        """On model save we log the model as an artifact to Weights & Biases."""
        assert self.run is not None
        self.run.log_artifact(
            str(trainer.last),
            type="model",
            name=f"{self.run.name}_{trainer.args.task}.pt",
            aliases=["last", f"epoch_{trainer.epoch + 1}"],
        )

    def teardown(self, _trainer: BaseTrainer) -> None:
        """On teardown, we finish the Weights & Biases run and set it to None."""
        assert self.run is not None
        self.run.finish()
        self.run = None

    @property
    def callbacks(
        self,
    ) -> Dict[str, Callable]:
        """Property contains all the relevant callbacks to add to the YOLO model for the Weights & Biases logging."""
        return {
            "on_pretrain_routine_start": self.on_pretrain_routine_start,
            "on_pretrain_routine_end": self.on_pretrain_routine_end,
            "on_train_epoch_start": self.on_train_epoch_start,
            "on_train_epoch_end": self.on_train_epoch_end,
            "on_fit_epoch_end": self.on_fit_epoch_end,
            "on_train_end": self.on_train_end,
            "on_model_save": self.on_model_save,
            "teardown": self.teardown,
        }
220
+
221
+
222
def add_callbacks(
    yolo: YOLO,
    run_name: Optional[str] = None,
    project: Optional[str] = None,
    tags: Optional[List[str]] = None,
    resume: Optional[str] = None,
    **kwargs: Optional[Any],
) -> YOLO:
    """A YOLO model wrapper that tracks metrics, and logs models to Weights & Biases.

    Args:
        yolo: A YOLOv8 model that's inherited from `:class:ultralytics.yolo.engine.model.YOLO`
        run_name, str: The name of the Weights & Biases run, defaults to an auto generated name if `trainer.args.name` is not defined.
        project, str: The name of the Weights & Biases project, defaults to `"YOLOv8"` if `trainer.args.project` is not defined.
        tags, List[str]: A list of tags to be added to the Weights & Biases run, defaults to `["YOLOv8"]`.
        resume, str: Whether to resume a previous run on Weights & Biases, defaults to `None`.
        **kwargs: Additional arguments to be passed to `wandb.init()`.

    Returns:
        The same `yolo` instance, with W&B callbacks attached when this
        process has RANK 0 or -1 (main/non-distributed process).

    Usage:
        ```python
        from wandb.integration.yolov8 import add_callbacks as add_wandb_callbacks

        model = YOLO("yolov8n.pt")
        add_wandb_callbacks(
            model,
        )
        model.train(
            data="coco128.yaml",
            epochs=3,
            imgsz=640,
        )
        ```
    """
    wandb.termwarn(
        """The wandb callback is currently in beta and is subject to change based on updates to `ultralytics yolov8`.
        The callback is tested and supported for ultralytics v8.0.43 and above.
        Please report any issues to https://github.com/wandb/wandb/issues with the tag `yolov8`.
        """,
        repeat=False,
    )
    # This integration is deprecated in favor of wandb.integration.ultralytics.
    wandb.termwarn(
        """This wandb callback is no longer functional and would be deprecated in the near future.
        We recommend you to use the updated callback using `from wandb.integration.ultralytics import add_wandb_callback`.
        The updated callback is tested and supported for ultralytics 8.0.167 and above.
        You can refer to https://docs.wandb.ai/guides/integrations/ultralytics for the updated documentation.
        Please report any issues to https://github.com/wandb/wandb/issues with the tag `yolov8`.
        """,
        repeat=False,
    )

    if RANK in [-1, 0]:
        wandb_logger = WandbCallback(
            yolo, run_name=run_name, project=project, tags=tags, resume=resume, **kwargs
        )
        for event, callback_fn in wandb_logger.callbacks.items():
            yolo.add_callback(event, callback_fn)
        return yolo
    else:
        # NOTE(review): these two adjacent string literals concatenate with
        # no separating space ("...or -1.""No Weights...") — likely a typo
        # in the message, but left untouched here as it is runtime text.
        wandb.termerror(
            "The RANK of the process to add the callbacks was neither 0 or -1."
            "No Weights & Biases callbacks were added to this instance of the YOLO model."
        )
        return yolo
evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/__init__.py ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/graphql-core-1.1/setup.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys

# Alias the Python 2 builtins module so the flag below can be set the same
# way on either major version.
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins

# This is a bit (!) hackish: we are setting a global variable so that the main
# graphql __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
if 'test' not in sys.argv:
    builtins.__GRAPHQL_SETUP__ = True

# Single-source the version: read it from the package itself.
version = __import__('graphql').get_version()

# Runtime dependencies.
install_requires = [
    'six>=1.10.0',
    'promise>=2.0'
]

# Pinned test-only dependencies (also exposed below via extras_require['test']).
tests_requires = [
    'pytest==3.0.2',
    'pytest-django==2.9.1',
    'pytest-cov==2.3.1',
    'coveralls',
    'gevent==1.1rc1',
    'six>=1.10.0',
    'pytest-benchmark==3.0.0',
    'pytest-mock==1.2',
]
+ ]
36
+
37
class PyTest(TestCommand):
    """Custom ``setup.py test`` command that delegates the run to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Run the whole ``graphql`` package, verbose, reporting skips/xfails.
        self.test_args = ['graphql', '-vrsx']
        self.test_suite = True

    def run_tests(self):
        # Imported here on purpose: outside the test command the test
        # dependencies (eggs) are not on sys.path yet.
        import pytest
        sys.exit(pytest.main(self.test_args))
+
49
+
50
# Package metadata and build configuration for graphql-core.
setup(
    name='graphql-core',
    version=version,
    description='GraphQL implementation for Python',
    url='https://github.com/graphql-python/graphql-core',
    download_url='https://github.com/graphql-python/graphql-core/releases',
    author='Syrus Akbary, Jake Heinz, Taeho Kim',
    author_email='Syrus Akbary <me@syrusakbary.com>, Jake Heinz <me@jh.gg>, Taeho Kim <dittos@gmail.com>',
    license='MIT',
    keywords='api graphql protocol rest',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy',
        'License :: OSI Approved :: MIT License',
        'Topic :: Database :: Front-Ends',
        'Topic :: Internet :: WWW/HTTP',
    ],
    packages=find_packages(exclude=['tests', 'tests_py35']),
    install_requires=install_requires,
    tests_require=tests_requires,
    # Wire ``setup.py test`` to the pytest-backed command above.
    cmdclass={'test': PyTest},
    extras_require={
        'gevent': [
            'gevent==1.1rc1'
        ],
        'test': tests_requires,
    },
)
evalkit_cambrian/lib/python3.10/site-packages/wandb/vendor/graphql-core-1.1/wandb_graphql/execution/executor.py ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ from collections.abc import Iterable
3
+ import functools
4
+ import logging
5
+ import sys
6
+
7
+ from wandb_promise import Promise, promise_for_dict, is_thenable
8
+
9
+ from ..error import GraphQLError, GraphQLLocatedError
10
+ from ..pyutils.default_ordered_dict import DefaultOrderedDict
11
+ from ..pyutils.ordereddict import OrderedDict
12
+ from ..type import (GraphQLEnumType, GraphQLInterfaceType, GraphQLList,
13
+ GraphQLNonNull, GraphQLObjectType, GraphQLScalarType,
14
+ GraphQLSchema, GraphQLUnionType)
15
+ from .base import (ExecutionContext, ExecutionResult, ResolveInfo, Undefined,
16
+ collect_fields, default_resolve_fn, get_field_def,
17
+ get_operation_root_type)
18
+ from .executors.sync import SyncExecutor
19
+ from .experimental.executor import execute as experimental_execute
20
+ from .middleware import MiddlewareManager
21
+
22
# Module-level logger used by resolve_or_error to report resolver failures.
logger = logging.getLogger(__name__)


# When True, execute() delegates to the experimental executor implementation
# instead of the one defined below.
use_experimental_executor = False
26
+
27
+
28
def execute(schema, document_ast, root_value=None, context_value=None,
            variable_values=None, operation_name=None, executor=None,
            return_promise=False, middleware=None):
    """Execute *document_ast* against *schema*.

    Returns an ``ExecutionResult`` (data plus any collected errors), or a
    Promise of one when ``return_promise`` is True.  Falls back to a
    ``SyncExecutor`` when no *executor* is supplied.

    Raises:
        AssertionError: if *schema* is falsy / not a GraphQLSchema, or if
            *middleware* cannot be coerced into a MiddlewareManager.
    """
    if use_experimental_executor:
        return experimental_execute(
            schema, document_ast, root_value, context_value,
            variable_values, operation_name, executor,
            return_promise, middleware
        )

    assert schema, 'Must provide schema'
    assert isinstance(schema, GraphQLSchema), (
        'Schema must be an instance of GraphQLSchema. Also ensure that there are ' +
        'not multiple versions of GraphQL installed in your node_modules directory.'
    )
    if middleware:
        # Accept either a ready-made manager or an iterable of middlewares.
        if not isinstance(middleware, MiddlewareManager):
            middleware = MiddlewareManager(*middleware)

        assert isinstance(middleware, MiddlewareManager), (
            'middlewares have to be an instance'
            ' of MiddlewareManager. Received "{}".'.format(middleware)
        )

    if executor is None:
        executor = SyncExecutor()

    context = ExecutionContext(
        schema,
        document_ast,
        root_value,
        context_value,
        variable_values,
        operation_name,
        executor,
        middleware
    )

    # FIX: this inner promise-resolver used to be named ``executor``, shadowing
    # the ``executor`` argument above.  Renamed to keep the executor instance
    # reachable for the rest of the function.
    def promise_executor(resolve, reject):
        return resolve(execute_operation(context, context.operation, root_value))

    def on_rejected(error):
        # Record the error; the result is still delivered with data=None.
        context.errors.append(error)
        return None

    def on_resolve(data):
        if not context.errors:
            return ExecutionResult(data=data)
        return ExecutionResult(data=data, errors=context.errors)

    promise = Promise(promise_executor).catch(on_rejected).then(on_resolve)
    if return_promise:
        return promise
    # Synchronous callers: drain the executor before unwrapping the promise.
    context.executor.wait_until_finished()
    return promise.get()
83
+
84
+
85
def execute_operation(exe_context, operation, root_value):
    """Evaluate the root selection set of *operation*.

    Mutations are resolved serially (field order matters); queries and
    subscriptions resolve their root fields in parallel.
    """
    root_type = get_operation_root_type(exe_context.schema, operation)
    fields = collect_fields(
        exe_context,
        root_type,
        operation.selection_set,
        DefaultOrderedDict(list),
        set()
    )

    runner = (execute_fields_serially
              if operation.operation == 'mutation'
              else execute_fields)
    return runner(exe_context, root_type, root_value, fields)
99
+
100
+
101
def execute_fields_serially(exe_context, parent_type, source_value, fields):
    """Resolve *fields* strictly one after another (mutation semantics).

    Returns a Promise of an OrderedDict mapping response names to resolved
    values; Undefined results are omitted.
    """
    def append_result(acc, response_name):
        # Resolve one field and fold its value into the accumulator dict.
        value = resolve_field(
            exe_context,
            parent_type,
            source_value,
            fields[response_name]
        )
        if value is Undefined:
            return acc

        if is_thenable(value):
            def store(resolved):
                acc[response_name] = resolved
                return acc

            return value.then(store, None)

        acc[response_name] = value
        return acc

    def chain(prev_promise, response_name):
        # Each field waits for the previous one to settle before resolving.
        return prev_promise.then(lambda acc: append_result(acc, response_name))

    return functools.reduce(
        chain, fields.keys(), Promise.resolve(collections.OrderedDict()))
127
+
128
+
129
def execute_fields(exe_context, parent_type, source_value, fields):
    """Resolve *fields* eagerly, preserving response order.

    Returns a plain OrderedDict when every value is synchronous, otherwise a
    Promise over the dict (resolved once every thenable value settles).
    """
    collected = OrderedDict()
    any_promise = False

    for response_name, field_asts in fields.items():
        value = resolve_field(exe_context, parent_type, source_value, field_asts)
        if value is Undefined:
            # Field not defined on this type: omit it from the response.
            continue

        collected[response_name] = value
        if is_thenable(value):
            any_promise = True

    if any_promise:
        return promise_for_dict(collected)
    return collected
147
+
148
+
149
def resolve_field(exe_context, parent_type, source, field_asts):
    """Resolve one field on *parent_type* and complete its value.

    Returns ``Undefined`` when the schema defines no matching field.
    """
    first_ast = field_asts[0]
    field_name = first_ast.name.value

    field_def = get_field_def(exe_context.schema, parent_type, field_name)
    if not field_def:
        return Undefined

    return_type = field_def.type
    base_resolver = field_def.resolver or default_resolve_fn

    # Wrap the resolver with any configured middleware chain.
    resolver = exe_context.get_field_resolver(base_resolver)

    # Build the argument dict from the field's AST, substituting variables.
    args = exe_context.get_argument_values(field_def, first_ast)

    # Per-execution context value shared by every resolver (commonly the
    # authenticated user or request-scoped caches).
    context = exe_context.context_value

    # Execution-state snapshot handed to the resolver.
    info = ResolveInfo(
        field_name,
        field_asts,
        return_type,
        parent_type,
        schema=exe_context.schema,
        fragments=exe_context.fragments,
        root_value=exe_context.root_value,
        operation=exe_context.operation,
        variable_values=exe_context.variable_values,
    )

    result = resolve_or_error(
        resolver, source, args, context, info, exe_context.executor)

    return complete_value_catching_error(
        exe_context,
        return_type,
        field_asts,
        info,
        result
    )
196
+
197
+
198
def resolve_or_error(resolve_fn, source, args, context, info, executor):
    """Invoke *resolve_fn* through *executor*.

    On failure the exception is logged and *returned* (not raised), with the
    traceback attached as ``.stack``; completion code turns it into a located
    GraphQL error later.
    """
    try:
        return executor.execute(resolve_fn, source, args, context, info)
    except Exception as e:
        # Lazy %-style args keep formatting out of the non-logging path.
        logger.exception(
            "An error occurred while resolving field %s.%s",
            info.parent_type.name, info.field_name)
        e.stack = sys.exc_info()[2]
        return e
207
+
208
+
209
def complete_value_catching_error(exe_context, return_type, field_asts, info, result):
    """Complete *result*, trapping errors for nullable fields.

    Non-null fields get no protection: their errors must propagate so an
    ancestor nullable field can be nulled out instead.  For nullable fields
    any error (sync or from a promise) is appended to the execution context
    and the field resolves to None.
    """
    if isinstance(return_type, GraphQLNonNull):
        return complete_value(exe_context, return_type, field_asts, info, result)

    try:
        completed = complete_value(exe_context, return_type, field_asts, info, result)
        if is_thenable(completed):
            def record(error):
                exe_context.errors.append(error)
                return None

            return completed.catch(record)
        return completed
    except Exception as e:
        exe_context.errors.append(e)
        return None
230
+
231
+
232
def complete_value(exe_context, return_type, field_asts, info, result):
    """
    Implements the instructions for completeValue as defined in the
    "Field entries" section of the spec.

    If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field
    error if that completion returns null, as per the "Nullability" section of the spec.

    If the field type is a List, then this recursively completes the value for the inner type on each item in the
    list.

    If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the
    `serialize` method of GraphQL type definition.

    If the field is an abstract type, determine the runtime type of the value and then complete based on that type.

    Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all
    sub-selections.

    NOTE: the order of the checks below is the contract — thenable first,
    then raw exceptions, then NonNull before the null short-circuit.
    """
    # A promise-like result: wait for it, then re-enter completion with the
    # resolved value; rejections become located GraphQL errors.
    if is_thenable(result):
        return Promise.resolve(result).then(
            lambda resolved: complete_value(
                exe_context,
                return_type,
                field_asts,
                info,
                resolved
            ),
            lambda error: Promise.rejected(GraphQLLocatedError(field_asts, original_error=error))
        )

    # resolve_or_error returns (not raises) resolver exceptions; surface them
    # here with field location attached.
    if isinstance(result, Exception):
        raise GraphQLLocatedError(field_asts, original_error=result)

    # NonNull must be unwrapped BEFORE the null check so a null inner value
    # raises instead of silently returning None.
    if isinstance(return_type, GraphQLNonNull):
        return complete_nonnull_value(exe_context, return_type, field_asts, info, result)

    # If result is null-like, return null.
    if result is None:
        return None

    # If field type is List, complete each item in the list with the inner type
    if isinstance(return_type, GraphQLList):
        return complete_list_value(exe_context, return_type, field_asts, info, result)

    # If field type is Scalar or Enum, serialize to a valid value, returning null if coercion is not possible.
    if isinstance(return_type, (GraphQLScalarType, GraphQLEnumType)):
        return complete_leaf_value(return_type, result)

    # Abstract types (Interface/Union) need their concrete type resolved first.
    if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
        return complete_abstract_value(exe_context, return_type, field_asts, info, result)

    # Object types complete by executing their sub-selection set.
    if isinstance(return_type, GraphQLObjectType):
        return complete_object_value(exe_context, return_type, field_asts, info, result)

    assert False, u'Cannot complete value of unexpected type "{}".'.format(return_type)
291
+
292
+
293
def complete_list_value(exe_context, return_type, field_asts, info, result):
    """Complete every item of a list result against the list's inner type.

    Returns a plain list when all items completed synchronously, otherwise a
    Promise over the whole list.
    """
    assert isinstance(result, Iterable), \
        ('User Error: expected iterable, but did not find one ' +
         'for field {}.{}.').format(info.parent_type, info.field_name)

    inner_type = return_type.of_type
    any_promise = False
    completed = []
    for item in result:
        completed_item = complete_value_catching_error(
            exe_context, inner_type, field_asts, info, item)
        if not any_promise and is_thenable(completed_item):
            any_promise = True
        completed.append(completed_item)

    if any_promise:
        return Promise.all(completed)
    return completed
312
+
313
+
314
def complete_leaf_value(return_type, result):
    """Serialize a Scalar/Enum result via the type's own ``serialize`` hook.

    The type definition decides how (and whether) the raw value coerces to a
    legal output value.
    """
    return return_type.serialize(result)
322
+
323
+
324
def complete_abstract_value(exe_context, return_type, field_asts, info, result):
    """Complete a value of an abstract (Interface/Union) type.

    Determines the concrete runtime Object type for *result*, validates it
    against the schema, then completes as an object value.

    Raises GraphQLError when no valid concrete type can be determined.
    """
    runtime_type = None

    # The concrete type comes from an explicit resolve_type hook when the
    # abstract type provides one, otherwise from probing is_type_of on each
    # possible type.
    if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
        resolver = return_type.resolve_type
        if resolver:
            runtime_type = resolver(result, exe_context.context_value, info)
        else:
            runtime_type = get_default_resolve_type_fn(
                result, exe_context.context_value, info, return_type)

    # Hooks may return a type *name*; look the actual type object up.
    if isinstance(runtime_type, str):
        runtime_type = info.schema.get_type(runtime_type)

    if not isinstance(runtime_type, GraphQLObjectType):
        raise GraphQLError(
            ('Abstract type {} must resolve to an Object type at runtime ' +
             'for field {}.{} with value "{}", received "{}".').format(
                return_type,
                info.parent_type,
                info.field_name,
                result,
                runtime_type,
            ),
            field_asts
        )

    if not exe_context.schema.is_possible_type(return_type, runtime_type):
        raise GraphQLError(
            u'Runtime Object type "{}" is not a possible type for "{}".'.format(
                runtime_type, return_type),
            field_asts
        )

    return complete_object_value(exe_context, runtime_type, field_asts, info, result)
361
+
362
+
363
def get_default_resolve_type_fn(value, context, info, abstract_type):
    """Default type resolver for abstract types.

    Returns the first possible concrete type whose ``is_type_of`` predicate
    accepts *value*, or None when no type claims it (the caller raises in
    that case).

    FIX: the loop variable no longer shadows the builtin ``type``, and the
    no-match case returns None explicitly instead of falling off the end.
    """
    for candidate in info.schema.get_possible_types(abstract_type):
        if callable(candidate.is_type_of) and candidate.is_type_of(value, context, info):
            return candidate
    return None
368
+
369
+
370
def complete_object_value(exe_context, return_type, field_asts, info, result):
    """Complete an Object-typed value by executing its sub-selection set.

    Raises GraphQLError when the type's ``is_type_of`` predicate (if any)
    rejects *result*.
    """
    type_check = return_type.is_type_of
    if type_check and not type_check(result, exe_context.context_value, info):
        raise GraphQLError(
            u'Expected value of type "{}" but got: {}.'.format(
                return_type, type(result).__name__),
            field_asts
        )

    # Gather the merged sub-fields for this object and resolve them.
    subfield_asts = exe_context.get_sub_fields(return_type, field_asts)
    return execute_fields(exe_context, return_type, result, subfield_asts)
383
+
384
+
385
def complete_nonnull_value(exe_context, return_type, field_asts, info, result):
    """Complete a NonNull value by completing its inner type.

    Raises GraphQLError if the inner completion yields None, per the
    "Nullability" section of the GraphQL spec.
    """
    inner = complete_value(
        exe_context, return_type.of_type, field_asts, info, result
    )
    if inner is None:
        raise GraphQLError(
            'Cannot return null for non-nullable field {}.{}.'.format(
                info.parent_type, info.field_name),
            field_asts
        )
    return inner
evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/__config__.cpython-310.pyc ADDED
Binary file (3.61 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.66 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/_distributor_init.cpython-310.pyc ADDED
Binary file (799 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/__pycache__/version.cpython-310.pyc ADDED
Binary file (453 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Deprecation stub: the real scipy.misc contents are gone; importing this
# package now only emits a DeprecationWarning pointing at the removal.
import warnings
warnings.warn(
    "scipy.misc is deprecated and will be removed in 2.0.0",
    DeprecationWarning,
    stacklevel=2  # attribute the warning to the importing module
)
evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/common.cpython-310.pyc ADDED
Binary file (320 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/doccer.cpython-310.pyc ADDED
Binary file (320 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/misc/common.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Deprecation stub: scipy.misc.common no longer provides functionality;
# importing it only emits a DeprecationWarning.
import warnings
warnings.warn(
    "scipy.misc.common is deprecated and will be removed in 2.0.0",
    DeprecationWarning,
    stacklevel=2  # attribute the warning to the importing module
)
evalkit_eagle/lib/python3.10/site-packages/scipy/misc/doccer.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Deprecation stub: scipy.misc.doccer no longer provides functionality;
# importing it only emits a DeprecationWarning.
import warnings
warnings.warn(
    "scipy.misc.doccer is deprecated and will be removed in 2.0.0",
    DeprecationWarning,
    stacklevel=2  # attribute the warning to the importing module
)
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc ADDED
Binary file (44.1 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc ADDED
Binary file (37.2 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc ADDED
Binary file (18.3 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc ADDED
Binary file (642 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc ADDED
Binary file (616 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc ADDED
Binary file (627 Bytes). View file