koichi12 commited on
Commit
db1baab
·
verified ·
1 Parent(s): 761e0a7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__init__.py +155 -0
  2. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/gapic_metadata.json +370 -0
  3. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/gapic_version.py +16 -0
  4. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/py.typed +2 -0
  5. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/__init__.py +15 -0
  6. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__init__.py +22 -0
  7. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/client.py +1541 -0
  8. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/pagers.py +353 -0
  9. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__init__.py +36 -0
  10. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/base.py +259 -0
  16. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/grpc.py +540 -0
  17. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/grpc_asyncio.py +603 -0
  18. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/rest.py +1462 -0
  19. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/rest_base.py +416 -0
  20. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/__init__.py +22 -0
  21. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/async_client.py +1040 -0
  22. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/client.py +1433 -0
  23. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/pagers.py +197 -0
  24. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__init__.py +36 -0
  25. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/base.py +241 -0
  28. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/grpc.py +505 -0
  29. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/grpc_asyncio.py +558 -0
  30. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/rest.py +1340 -0
  31. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/rest_base.py +409 -0
  32. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__init__.py +135 -0
  33. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/__init__.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/citation.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/discuss_service.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/model.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/model_service.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/permission.cpython-311.pyc +0 -0
  39. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/permission_service.cpython-311.pyc +0 -0
  40. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/safety.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/text_service.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/tuned_model.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/citation.py +101 -0
  44. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/discuss_service.py +356 -0
  45. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/model.py +155 -0
  46. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/model_service.py +309 -0
  47. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/permission.py +140 -0
  48. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/permission_service.py +218 -0
  49. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/safety.py +252 -0
  50. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/text_service.py +429 -0
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__init__.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
17
+
18
+ __version__ = package_version.__version__
19
+
20
+
21
+ from .services.discuss_service import DiscussServiceAsyncClient, DiscussServiceClient
22
+ from .services.model_service import ModelServiceAsyncClient, ModelServiceClient
23
+ from .services.permission_service import (
24
+ PermissionServiceAsyncClient,
25
+ PermissionServiceClient,
26
+ )
27
+ from .services.text_service import TextServiceAsyncClient, TextServiceClient
28
+ from .types.citation import CitationMetadata, CitationSource
29
+ from .types.discuss_service import (
30
+ CountMessageTokensRequest,
31
+ CountMessageTokensResponse,
32
+ Example,
33
+ GenerateMessageRequest,
34
+ GenerateMessageResponse,
35
+ Message,
36
+ MessagePrompt,
37
+ )
38
+ from .types.model import Model
39
+ from .types.model_service import (
40
+ CreateTunedModelMetadata,
41
+ CreateTunedModelRequest,
42
+ DeleteTunedModelRequest,
43
+ GetModelRequest,
44
+ GetTunedModelRequest,
45
+ ListModelsRequest,
46
+ ListModelsResponse,
47
+ ListTunedModelsRequest,
48
+ ListTunedModelsResponse,
49
+ UpdateTunedModelRequest,
50
+ )
51
+ from .types.permission import Permission
52
+ from .types.permission_service import (
53
+ CreatePermissionRequest,
54
+ DeletePermissionRequest,
55
+ GetPermissionRequest,
56
+ ListPermissionsRequest,
57
+ ListPermissionsResponse,
58
+ TransferOwnershipRequest,
59
+ TransferOwnershipResponse,
60
+ UpdatePermissionRequest,
61
+ )
62
+ from .types.safety import (
63
+ ContentFilter,
64
+ HarmCategory,
65
+ SafetyFeedback,
66
+ SafetyRating,
67
+ SafetySetting,
68
+ )
69
+ from .types.text_service import (
70
+ BatchEmbedTextRequest,
71
+ BatchEmbedTextResponse,
72
+ CountTextTokensRequest,
73
+ CountTextTokensResponse,
74
+ Embedding,
75
+ EmbedTextRequest,
76
+ EmbedTextResponse,
77
+ GenerateTextRequest,
78
+ GenerateTextResponse,
79
+ TextCompletion,
80
+ TextPrompt,
81
+ )
82
+ from .types.tuned_model import (
83
+ Dataset,
84
+ Hyperparameters,
85
+ TunedModel,
86
+ TunedModelSource,
87
+ TuningExample,
88
+ TuningExamples,
89
+ TuningSnapshot,
90
+ TuningTask,
91
+ )
92
+
93
+ __all__ = (
94
+ "DiscussServiceAsyncClient",
95
+ "ModelServiceAsyncClient",
96
+ "PermissionServiceAsyncClient",
97
+ "TextServiceAsyncClient",
98
+ "BatchEmbedTextRequest",
99
+ "BatchEmbedTextResponse",
100
+ "CitationMetadata",
101
+ "CitationSource",
102
+ "ContentFilter",
103
+ "CountMessageTokensRequest",
104
+ "CountMessageTokensResponse",
105
+ "CountTextTokensRequest",
106
+ "CountTextTokensResponse",
107
+ "CreatePermissionRequest",
108
+ "CreateTunedModelMetadata",
109
+ "CreateTunedModelRequest",
110
+ "Dataset",
111
+ "DeletePermissionRequest",
112
+ "DeleteTunedModelRequest",
113
+ "DiscussServiceClient",
114
+ "EmbedTextRequest",
115
+ "EmbedTextResponse",
116
+ "Embedding",
117
+ "Example",
118
+ "GenerateMessageRequest",
119
+ "GenerateMessageResponse",
120
+ "GenerateTextRequest",
121
+ "GenerateTextResponse",
122
+ "GetModelRequest",
123
+ "GetPermissionRequest",
124
+ "GetTunedModelRequest",
125
+ "HarmCategory",
126
+ "Hyperparameters",
127
+ "ListModelsRequest",
128
+ "ListModelsResponse",
129
+ "ListPermissionsRequest",
130
+ "ListPermissionsResponse",
131
+ "ListTunedModelsRequest",
132
+ "ListTunedModelsResponse",
133
+ "Message",
134
+ "MessagePrompt",
135
+ "Model",
136
+ "ModelServiceClient",
137
+ "Permission",
138
+ "PermissionServiceClient",
139
+ "SafetyFeedback",
140
+ "SafetyRating",
141
+ "SafetySetting",
142
+ "TextCompletion",
143
+ "TextPrompt",
144
+ "TextServiceClient",
145
+ "TransferOwnershipRequest",
146
+ "TransferOwnershipResponse",
147
+ "TunedModel",
148
+ "TunedModelSource",
149
+ "TuningExample",
150
+ "TuningExamples",
151
+ "TuningSnapshot",
152
+ "TuningTask",
153
+ "UpdatePermissionRequest",
154
+ "UpdateTunedModelRequest",
155
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/gapic_metadata.json ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
3
+ "language": "python",
4
+ "libraryPackage": "google.ai.generativelanguage_v1beta3",
5
+ "protoPackage": "google.ai.generativelanguage.v1beta3",
6
+ "schema": "1.0",
7
+ "services": {
8
+ "DiscussService": {
9
+ "clients": {
10
+ "grpc": {
11
+ "libraryClient": "DiscussServiceClient",
12
+ "rpcs": {
13
+ "CountMessageTokens": {
14
+ "methods": [
15
+ "count_message_tokens"
16
+ ]
17
+ },
18
+ "GenerateMessage": {
19
+ "methods": [
20
+ "generate_message"
21
+ ]
22
+ }
23
+ }
24
+ },
25
+ "grpc-async": {
26
+ "libraryClient": "DiscussServiceAsyncClient",
27
+ "rpcs": {
28
+ "CountMessageTokens": {
29
+ "methods": [
30
+ "count_message_tokens"
31
+ ]
32
+ },
33
+ "GenerateMessage": {
34
+ "methods": [
35
+ "generate_message"
36
+ ]
37
+ }
38
+ }
39
+ },
40
+ "rest": {
41
+ "libraryClient": "DiscussServiceClient",
42
+ "rpcs": {
43
+ "CountMessageTokens": {
44
+ "methods": [
45
+ "count_message_tokens"
46
+ ]
47
+ },
48
+ "GenerateMessage": {
49
+ "methods": [
50
+ "generate_message"
51
+ ]
52
+ }
53
+ }
54
+ }
55
+ }
56
+ },
57
+ "ModelService": {
58
+ "clients": {
59
+ "grpc": {
60
+ "libraryClient": "ModelServiceClient",
61
+ "rpcs": {
62
+ "CreateTunedModel": {
63
+ "methods": [
64
+ "create_tuned_model"
65
+ ]
66
+ },
67
+ "DeleteTunedModel": {
68
+ "methods": [
69
+ "delete_tuned_model"
70
+ ]
71
+ },
72
+ "GetModel": {
73
+ "methods": [
74
+ "get_model"
75
+ ]
76
+ },
77
+ "GetTunedModel": {
78
+ "methods": [
79
+ "get_tuned_model"
80
+ ]
81
+ },
82
+ "ListModels": {
83
+ "methods": [
84
+ "list_models"
85
+ ]
86
+ },
87
+ "ListTunedModels": {
88
+ "methods": [
89
+ "list_tuned_models"
90
+ ]
91
+ },
92
+ "UpdateTunedModel": {
93
+ "methods": [
94
+ "update_tuned_model"
95
+ ]
96
+ }
97
+ }
98
+ },
99
+ "grpc-async": {
100
+ "libraryClient": "ModelServiceAsyncClient",
101
+ "rpcs": {
102
+ "CreateTunedModel": {
103
+ "methods": [
104
+ "create_tuned_model"
105
+ ]
106
+ },
107
+ "DeleteTunedModel": {
108
+ "methods": [
109
+ "delete_tuned_model"
110
+ ]
111
+ },
112
+ "GetModel": {
113
+ "methods": [
114
+ "get_model"
115
+ ]
116
+ },
117
+ "GetTunedModel": {
118
+ "methods": [
119
+ "get_tuned_model"
120
+ ]
121
+ },
122
+ "ListModels": {
123
+ "methods": [
124
+ "list_models"
125
+ ]
126
+ },
127
+ "ListTunedModels": {
128
+ "methods": [
129
+ "list_tuned_models"
130
+ ]
131
+ },
132
+ "UpdateTunedModel": {
133
+ "methods": [
134
+ "update_tuned_model"
135
+ ]
136
+ }
137
+ }
138
+ },
139
+ "rest": {
140
+ "libraryClient": "ModelServiceClient",
141
+ "rpcs": {
142
+ "CreateTunedModel": {
143
+ "methods": [
144
+ "create_tuned_model"
145
+ ]
146
+ },
147
+ "DeleteTunedModel": {
148
+ "methods": [
149
+ "delete_tuned_model"
150
+ ]
151
+ },
152
+ "GetModel": {
153
+ "methods": [
154
+ "get_model"
155
+ ]
156
+ },
157
+ "GetTunedModel": {
158
+ "methods": [
159
+ "get_tuned_model"
160
+ ]
161
+ },
162
+ "ListModels": {
163
+ "methods": [
164
+ "list_models"
165
+ ]
166
+ },
167
+ "ListTunedModels": {
168
+ "methods": [
169
+ "list_tuned_models"
170
+ ]
171
+ },
172
+ "UpdateTunedModel": {
173
+ "methods": [
174
+ "update_tuned_model"
175
+ ]
176
+ }
177
+ }
178
+ }
179
+ }
180
+ },
181
+ "PermissionService": {
182
+ "clients": {
183
+ "grpc": {
184
+ "libraryClient": "PermissionServiceClient",
185
+ "rpcs": {
186
+ "CreatePermission": {
187
+ "methods": [
188
+ "create_permission"
189
+ ]
190
+ },
191
+ "DeletePermission": {
192
+ "methods": [
193
+ "delete_permission"
194
+ ]
195
+ },
196
+ "GetPermission": {
197
+ "methods": [
198
+ "get_permission"
199
+ ]
200
+ },
201
+ "ListPermissions": {
202
+ "methods": [
203
+ "list_permissions"
204
+ ]
205
+ },
206
+ "TransferOwnership": {
207
+ "methods": [
208
+ "transfer_ownership"
209
+ ]
210
+ },
211
+ "UpdatePermission": {
212
+ "methods": [
213
+ "update_permission"
214
+ ]
215
+ }
216
+ }
217
+ },
218
+ "grpc-async": {
219
+ "libraryClient": "PermissionServiceAsyncClient",
220
+ "rpcs": {
221
+ "CreatePermission": {
222
+ "methods": [
223
+ "create_permission"
224
+ ]
225
+ },
226
+ "DeletePermission": {
227
+ "methods": [
228
+ "delete_permission"
229
+ ]
230
+ },
231
+ "GetPermission": {
232
+ "methods": [
233
+ "get_permission"
234
+ ]
235
+ },
236
+ "ListPermissions": {
237
+ "methods": [
238
+ "list_permissions"
239
+ ]
240
+ },
241
+ "TransferOwnership": {
242
+ "methods": [
243
+ "transfer_ownership"
244
+ ]
245
+ },
246
+ "UpdatePermission": {
247
+ "methods": [
248
+ "update_permission"
249
+ ]
250
+ }
251
+ }
252
+ },
253
+ "rest": {
254
+ "libraryClient": "PermissionServiceClient",
255
+ "rpcs": {
256
+ "CreatePermission": {
257
+ "methods": [
258
+ "create_permission"
259
+ ]
260
+ },
261
+ "DeletePermission": {
262
+ "methods": [
263
+ "delete_permission"
264
+ ]
265
+ },
266
+ "GetPermission": {
267
+ "methods": [
268
+ "get_permission"
269
+ ]
270
+ },
271
+ "ListPermissions": {
272
+ "methods": [
273
+ "list_permissions"
274
+ ]
275
+ },
276
+ "TransferOwnership": {
277
+ "methods": [
278
+ "transfer_ownership"
279
+ ]
280
+ },
281
+ "UpdatePermission": {
282
+ "methods": [
283
+ "update_permission"
284
+ ]
285
+ }
286
+ }
287
+ }
288
+ }
289
+ },
290
+ "TextService": {
291
+ "clients": {
292
+ "grpc": {
293
+ "libraryClient": "TextServiceClient",
294
+ "rpcs": {
295
+ "BatchEmbedText": {
296
+ "methods": [
297
+ "batch_embed_text"
298
+ ]
299
+ },
300
+ "CountTextTokens": {
301
+ "methods": [
302
+ "count_text_tokens"
303
+ ]
304
+ },
305
+ "EmbedText": {
306
+ "methods": [
307
+ "embed_text"
308
+ ]
309
+ },
310
+ "GenerateText": {
311
+ "methods": [
312
+ "generate_text"
313
+ ]
314
+ }
315
+ }
316
+ },
317
+ "grpc-async": {
318
+ "libraryClient": "TextServiceAsyncClient",
319
+ "rpcs": {
320
+ "BatchEmbedText": {
321
+ "methods": [
322
+ "batch_embed_text"
323
+ ]
324
+ },
325
+ "CountTextTokens": {
326
+ "methods": [
327
+ "count_text_tokens"
328
+ ]
329
+ },
330
+ "EmbedText": {
331
+ "methods": [
332
+ "embed_text"
333
+ ]
334
+ },
335
+ "GenerateText": {
336
+ "methods": [
337
+ "generate_text"
338
+ ]
339
+ }
340
+ }
341
+ },
342
+ "rest": {
343
+ "libraryClient": "TextServiceClient",
344
+ "rpcs": {
345
+ "BatchEmbedText": {
346
+ "methods": [
347
+ "batch_embed_text"
348
+ ]
349
+ },
350
+ "CountTextTokens": {
351
+ "methods": [
352
+ "count_text_tokens"
353
+ ]
354
+ },
355
+ "EmbedText": {
356
+ "methods": [
357
+ "embed_text"
358
+ ]
359
+ },
360
+ "GenerateText": {
361
+ "methods": [
362
+ "generate_text"
363
+ ]
364
+ }
365
+ }
366
+ }
367
+ }
368
+ }
369
+ }
370
+ }
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/gapic_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ __version__ = "0.6.15" # {x-release-please-version}
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/py.typed ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Marker file for PEP 561.
2
+ # The google-ai-generativelanguage package uses inline types.
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .async_client import ModelServiceAsyncClient
17
+ from .client import ModelServiceClient
18
+
19
+ __all__ = (
20
+ "ModelServiceClient",
21
+ "ModelServiceAsyncClient",
22
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/client.py ADDED
@@ -0,0 +1,1541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.api_core import operation # type: ignore
62
+ from google.api_core import operation_async # type: ignore
63
+ from google.longrunning import operations_pb2 # type: ignore
64
+ from google.protobuf import field_mask_pb2 # type: ignore
65
+ from google.protobuf import timestamp_pb2 # type: ignore
66
+
67
+ from google.ai.generativelanguage_v1beta3.services.model_service import pagers
68
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
69
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
70
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
71
+
72
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
73
+ from .transports.grpc import ModelServiceGrpcTransport
74
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
75
+ from .transports.rest import ModelServiceRestTransport
76
+
77
+
78
+ class ModelServiceClientMeta(type):
79
+ """Metaclass for the ModelService client.
80
+
81
+ This provides class-level methods for building and retrieving
82
+ support objects (e.g. transport) without polluting the client instance
83
+ objects.
84
+ """
85
+
86
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
87
+ _transport_registry["grpc"] = ModelServiceGrpcTransport
88
+ _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
89
+ _transport_registry["rest"] = ModelServiceRestTransport
90
+
91
+ def get_transport_class(
92
+ cls,
93
+ label: Optional[str] = None,
94
+ ) -> Type[ModelServiceTransport]:
95
+ """Returns an appropriate transport class.
96
+
97
+ Args:
98
+ label: The name of the desired transport. If none is
99
+ provided, then the first transport in the registry is used.
100
+
101
+ Returns:
102
+ The transport class to use.
103
+ """
104
+ # If a specific transport is requested, return that one.
105
+ if label:
106
+ return cls._transport_registry[label]
107
+
108
+ # No transport is requested; return the default (that is, the first one
109
+ # in the dictionary).
110
+ return next(iter(cls._transport_registry.values()))
111
+
112
+
113
+ class ModelServiceClient(metaclass=ModelServiceClientMeta):
114
+ """Provides methods for getting metadata information about
115
+ Generative Models.
116
+ """
117
+
118
+ @staticmethod
119
+ def _get_default_mtls_endpoint(api_endpoint):
120
+ """Converts api endpoint to mTLS endpoint.
121
+
122
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
123
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
124
+ Args:
125
+ api_endpoint (Optional[str]): the api endpoint to convert.
126
+ Returns:
127
+ str: converted mTLS api endpoint.
128
+ """
129
+ if not api_endpoint:
130
+ return api_endpoint
131
+
132
+ mtls_endpoint_re = re.compile(
133
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
134
+ )
135
+
136
+ m = mtls_endpoint_re.match(api_endpoint)
137
+ name, mtls, sandbox, googledomain = m.groups()
138
+ if mtls or not googledomain:
139
+ return api_endpoint
140
+
141
+ if sandbox:
142
+ return api_endpoint.replace(
143
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
144
+ )
145
+
146
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
147
+
148
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
149
+ DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
150
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
151
+ DEFAULT_ENDPOINT
152
+ )
153
+
154
+ _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
155
+ _DEFAULT_UNIVERSE = "googleapis.com"
156
+
157
+ @classmethod
158
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
159
+ """Creates an instance of this client using the provided credentials
160
+ info.
161
+
162
+ Args:
163
+ info (dict): The service account private key info.
164
+ args: Additional arguments to pass to the constructor.
165
+ kwargs: Additional arguments to pass to the constructor.
166
+
167
+ Returns:
168
+ ModelServiceClient: The constructed client.
169
+ """
170
+ credentials = service_account.Credentials.from_service_account_info(info)
171
+ kwargs["credentials"] = credentials
172
+ return cls(*args, **kwargs)
173
+
174
+ @classmethod
175
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
176
+ """Creates an instance of this client using the provided credentials
177
+ file.
178
+
179
+ Args:
180
+ filename (str): The path to the service account private key json
181
+ file.
182
+ args: Additional arguments to pass to the constructor.
183
+ kwargs: Additional arguments to pass to the constructor.
184
+
185
+ Returns:
186
+ ModelServiceClient: The constructed client.
187
+ """
188
+ credentials = service_account.Credentials.from_service_account_file(filename)
189
+ kwargs["credentials"] = credentials
190
+ return cls(*args, **kwargs)
191
+
192
+ from_service_account_json = from_service_account_file
193
+
194
    @property
    def transport(self) -> ModelServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            ModelServiceTransport: The transport used by the client
            instance.
        """
        # `_transport` is assigned in `__init__` (either the caller-provided
        # instance or one constructed from the resolved endpoint/credentials).
        return self._transport
203
+
204
+ @staticmethod
205
+ def model_path(
206
+ model: str,
207
+ ) -> str:
208
+ """Returns a fully-qualified model string."""
209
+ return "models/{model}".format(
210
+ model=model,
211
+ )
212
+
213
+ @staticmethod
214
+ def parse_model_path(path: str) -> Dict[str, str]:
215
+ """Parses a model path into its component segments."""
216
+ m = re.match(r"^models/(?P<model>.+?)$", path)
217
+ return m.groupdict() if m else {}
218
+
219
+ @staticmethod
220
+ def tuned_model_path(
221
+ tuned_model: str,
222
+ ) -> str:
223
+ """Returns a fully-qualified tuned_model string."""
224
+ return "tunedModels/{tuned_model}".format(
225
+ tuned_model=tuned_model,
226
+ )
227
+
228
+ @staticmethod
229
+ def parse_tuned_model_path(path: str) -> Dict[str, str]:
230
+ """Parses a tuned_model path into its component segments."""
231
+ m = re.match(r"^tunedModels/(?P<tuned_model>.+?)$", path)
232
+ return m.groupdict() if m else {}
233
+
234
+ @staticmethod
235
+ def common_billing_account_path(
236
+ billing_account: str,
237
+ ) -> str:
238
+ """Returns a fully-qualified billing_account string."""
239
+ return "billingAccounts/{billing_account}".format(
240
+ billing_account=billing_account,
241
+ )
242
+
243
+ @staticmethod
244
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
245
+ """Parse a billing_account path into its component segments."""
246
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
247
+ return m.groupdict() if m else {}
248
+
249
+ @staticmethod
250
+ def common_folder_path(
251
+ folder: str,
252
+ ) -> str:
253
+ """Returns a fully-qualified folder string."""
254
+ return "folders/{folder}".format(
255
+ folder=folder,
256
+ )
257
+
258
+ @staticmethod
259
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
260
+ """Parse a folder path into its component segments."""
261
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
262
+ return m.groupdict() if m else {}
263
+
264
+ @staticmethod
265
+ def common_organization_path(
266
+ organization: str,
267
+ ) -> str:
268
+ """Returns a fully-qualified organization string."""
269
+ return "organizations/{organization}".format(
270
+ organization=organization,
271
+ )
272
+
273
+ @staticmethod
274
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
275
+ """Parse a organization path into its component segments."""
276
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
277
+ return m.groupdict() if m else {}
278
+
279
+ @staticmethod
280
+ def common_project_path(
281
+ project: str,
282
+ ) -> str:
283
+ """Returns a fully-qualified project string."""
284
+ return "projects/{project}".format(
285
+ project=project,
286
+ )
287
+
288
+ @staticmethod
289
+ def parse_common_project_path(path: str) -> Dict[str, str]:
290
+ """Parse a project path into its component segments."""
291
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
292
+ return m.groupdict() if m else {}
293
+
294
+ @staticmethod
295
+ def common_location_path(
296
+ project: str,
297
+ location: str,
298
+ ) -> str:
299
+ """Returns a fully-qualified location string."""
300
+ return "projects/{project}/locations/{location}".format(
301
+ project=project,
302
+ location=location,
303
+ )
304
+
305
+ @staticmethod
306
+ def parse_common_location_path(path: str) -> Dict[str, str]:
307
+ """Parse a location path into its component segments."""
308
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
309
+ return m.groupdict() if m else {}
310
+
311
+ @classmethod
312
+ def get_mtls_endpoint_and_cert_source(
313
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
314
+ ):
315
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
316
+
317
+ The client cert source is determined in the following order:
318
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
319
+ client cert source is None.
320
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
321
+ default client cert source exists, use the default one; otherwise the client cert
322
+ source is None.
323
+
324
+ The API endpoint is determined in the following order:
325
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
326
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
327
+ default mTLS endpoint; if the environment variable is "never", use the default API
328
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
329
+ use the default API endpoint.
330
+
331
+ More details can be found at https://google.aip.dev/auth/4114.
332
+
333
+ Args:
334
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
335
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
336
+ in this method.
337
+
338
+ Returns:
339
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
340
+ client cert source to use.
341
+
342
+ Raises:
343
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
344
+ """
345
+
346
+ warnings.warn(
347
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
348
+ DeprecationWarning,
349
+ )
350
+ if client_options is None:
351
+ client_options = client_options_lib.ClientOptions()
352
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
353
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
354
+ if use_client_cert not in ("true", "false"):
355
+ raise ValueError(
356
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
357
+ )
358
+ if use_mtls_endpoint not in ("auto", "never", "always"):
359
+ raise MutualTLSChannelError(
360
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
361
+ )
362
+
363
+ # Figure out the client cert source to use.
364
+ client_cert_source = None
365
+ if use_client_cert == "true":
366
+ if client_options.client_cert_source:
367
+ client_cert_source = client_options.client_cert_source
368
+ elif mtls.has_default_client_cert_source():
369
+ client_cert_source = mtls.default_client_cert_source()
370
+
371
+ # Figure out which api endpoint to use.
372
+ if client_options.api_endpoint is not None:
373
+ api_endpoint = client_options.api_endpoint
374
+ elif use_mtls_endpoint == "always" or (
375
+ use_mtls_endpoint == "auto" and client_cert_source
376
+ ):
377
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
378
+ else:
379
+ api_endpoint = cls.DEFAULT_ENDPOINT
380
+
381
+ return api_endpoint, client_cert_source
382
+
383
+ @staticmethod
384
+ def _read_environment_variables():
385
+ """Returns the environment variables used by the client.
386
+
387
+ Returns:
388
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
389
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
390
+
391
+ Raises:
392
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
393
+ any of ["true", "false"].
394
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
395
+ is not any of ["auto", "never", "always"].
396
+ """
397
+ use_client_cert = os.getenv(
398
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
399
+ ).lower()
400
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
401
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
402
+ if use_client_cert not in ("true", "false"):
403
+ raise ValueError(
404
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
405
+ )
406
+ if use_mtls_endpoint not in ("auto", "never", "always"):
407
+ raise MutualTLSChannelError(
408
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
409
+ )
410
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
411
+
412
+ @staticmethod
413
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
414
+ """Return the client cert source to be used by the client.
415
+
416
+ Args:
417
+ provided_cert_source (bytes): The client certificate source provided.
418
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
419
+
420
+ Returns:
421
+ bytes or None: The client cert source to be used by the client.
422
+ """
423
+ client_cert_source = None
424
+ if use_cert_flag:
425
+ if provided_cert_source:
426
+ client_cert_source = provided_cert_source
427
+ elif mtls.has_default_client_cert_source():
428
+ client_cert_source = mtls.default_client_cert_source()
429
+ return client_cert_source
430
+
431
+ @staticmethod
432
+ def _get_api_endpoint(
433
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
434
+ ):
435
+ """Return the API endpoint used by the client.
436
+
437
+ Args:
438
+ api_override (str): The API endpoint override. If specified, this is always
439
+ the return value of this function and the other arguments are not used.
440
+ client_cert_source (bytes): The client certificate source used by the client.
441
+ universe_domain (str): The universe domain used by the client.
442
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
443
+ Possible values are "always", "auto", or "never".
444
+
445
+ Returns:
446
+ str: The API endpoint to be used by the client.
447
+ """
448
+ if api_override is not None:
449
+ api_endpoint = api_override
450
+ elif use_mtls_endpoint == "always" or (
451
+ use_mtls_endpoint == "auto" and client_cert_source
452
+ ):
453
+ _default_universe = ModelServiceClient._DEFAULT_UNIVERSE
454
+ if universe_domain != _default_universe:
455
+ raise MutualTLSChannelError(
456
+ f"mTLS is not supported in any universe other than {_default_universe}."
457
+ )
458
+ api_endpoint = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
459
+ else:
460
+ api_endpoint = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
461
+ UNIVERSE_DOMAIN=universe_domain
462
+ )
463
+ return api_endpoint
464
+
465
+ @staticmethod
466
+ def _get_universe_domain(
467
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
468
+ ) -> str:
469
+ """Return the universe domain used by the client.
470
+
471
+ Args:
472
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
473
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
474
+
475
+ Returns:
476
+ str: The universe domain to be used by the client.
477
+
478
+ Raises:
479
+ ValueError: If the universe domain is an empty string.
480
+ """
481
+ universe_domain = ModelServiceClient._DEFAULT_UNIVERSE
482
+ if client_universe_domain is not None:
483
+ universe_domain = client_universe_domain
484
+ elif universe_domain_env is not None:
485
+ universe_domain = universe_domain_env
486
+ if len(universe_domain.strip()) == 0:
487
+ raise ValueError("Universe Domain cannot be an empty string.")
488
+ return universe_domain
489
+
490
    def _validate_universe_domain(self):
        """Validates client's and credentials' universe domains are consistent.

        Currently always returns True: validation is intentionally disabled
        (see the NOTE below), but callers still invoke this before each RPC so
        the check can be re-enabled centrally.

        Returns:
            bool: True iff the configured universe domain is valid.

        Raises:
            ValueError: If the configured universe domain is not valid.
        """

        # NOTE (b/349488459): universe validation is disabled until further notice.
        return True
502
+
503
    @property
    def api_endpoint(self):
        """Return the API endpoint used by the client instance.

        Returns:
            str: The API endpoint used by the client instance.
        """
        # Resolved in `__init__` from the transport host or from
        # `_get_api_endpoint` (override / mTLS / universe template).
        return self._api_endpoint
511
+
512
    @property
    def universe_domain(self) -> str:
        """Return the universe domain used by the client instance.

        Returns:
            str: The universe domain used by the client instance.
        """
        # Resolved in `__init__` via `_get_universe_domain` (client option,
        # then environment variable, then the default universe).
        return self._universe_domain
520
+
521
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[
            Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
        ] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the model service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
                The transport to use, or a Callable that constructs and returns a new transport.
                If a Callable is given, it will be called with the same set of initialization
                arguments as used in the ModelServiceTransport constructor.
                If set to None, a transport is chosen automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                Custom options for the client.

                1. The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client when ``transport`` is
                not explicitly provided. Only if this property is not set and
                ``transport`` was not explicitly provided, the endpoint is
                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
                variable, which have one of the following values:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto-switch to the
                default mTLS endpoint if client certificate is present; this is
                the default value).

                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide a client certificate for mTLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

                3. The ``universe_domain`` property can be used to override the
                default "googleapis.com" universe. Note that the ``api_endpoint``
                property still takes precedence; and ``universe_domain`` is
                currently not supported for mTLS.

            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, None, or a ClientOptions.
        self._client_options = client_options
        if isinstance(self._client_options, dict):
            self._client_options = client_options_lib.from_dict(self._client_options)
        if self._client_options is None:
            self._client_options = client_options_lib.ClientOptions()
        self._client_options = cast(
            client_options_lib.ClientOptions, self._client_options
        )

        universe_domain_opt = getattr(self._client_options, "universe_domain", None)

        # Resolve cert usage, mTLS mode, and universe domain from the
        # environment, then combine with the client options.
        (
            self._use_client_cert,
            self._use_mtls_endpoint,
            self._universe_domain_env,
        ) = ModelServiceClient._read_environment_variables()
        self._client_cert_source = ModelServiceClient._get_client_cert_source(
            self._client_options.client_cert_source, self._use_client_cert
        )
        self._universe_domain = ModelServiceClient._get_universe_domain(
            universe_domain_opt, self._universe_domain_env
        )
        self._api_endpoint = None  # updated below, depending on `transport`

        # Initialize the universe domain validation.
        self._is_universe_domain_valid = False

        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
            # Setup logging.
            client_logging.initialize_logging()

        api_key_value = getattr(self._client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        transport_provided = isinstance(transport, ModelServiceTransport)
        if transport_provided:
            # transport is a ModelServiceTransport instance.
            # A pre-built transport already carries credentials and scopes, so
            # providing them again here is rejected as ambiguous.
            if credentials or self._client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if self._client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = cast(ModelServiceTransport, transport)
            self._api_endpoint = self._transport.host

        self._api_endpoint = self._api_endpoint or ModelServiceClient._get_api_endpoint(
            self._client_options.api_endpoint,
            self._client_cert_source,
            self._universe_domain,
            self._use_mtls_endpoint,
        )

        if not transport_provided:
            import google.auth._default  # type: ignore

            # hasattr guard: get_api_key_credentials only exists in newer
            # google-auth releases.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            transport_init: Union[
                Type[ModelServiceTransport], Callable[..., ModelServiceTransport]
            ] = (
                ModelServiceClient.get_transport_class(transport)
                if isinstance(transport, str) or transport is None
                else cast(Callable[..., ModelServiceTransport], transport)
            )
            # initialize with the provided callable or the passed in class
            self._transport = transport_init(
                credentials=credentials,
                credentials_file=self._client_options.credentials_file,
                host=self._api_endpoint,
                scopes=self._client_options.scopes,
                client_cert_source_for_mtls=self._client_cert_source,
                quota_project_id=self._client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=self._client_options.api_audience,
            )

        if "async" not in str(self._transport):
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                std_logging.DEBUG
            ):  # pragma: NO COVER
                _LOGGER.debug(
                    "Created client `google.ai.generativelanguage_v1beta3.ModelServiceClient`.",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                        "universeDomain": getattr(
                            self._transport._credentials, "universe_domain", ""
                        ),
                        "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
                        "credentialsInfo": getattr(
                            self.transport._credentials, "get_cred_info", lambda: None
                        )(),
                    }
                    if hasattr(self._transport, "_credentials")
                    else {
                        "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                        "credentialsType": None,
                    },
                )
695
+
696
+ def get_model(
697
+ self,
698
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
699
+ *,
700
+ name: Optional[str] = None,
701
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
702
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
703
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
704
+ ) -> model.Model:
705
+ r"""Gets information about a specific Model.
706
+
707
+ .. code-block:: python
708
+
709
+ # This snippet has been automatically generated and should be regarded as a
710
+ # code template only.
711
+ # It will require modifications to work:
712
+ # - It may require correct/in-range values for request initialization.
713
+ # - It may require specifying regional endpoints when creating the service
714
+ # client as shown in:
715
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
716
+ from google.ai import generativelanguage_v1beta3
717
+
718
+ def sample_get_model():
719
+ # Create a client
720
+ client = generativelanguage_v1beta3.ModelServiceClient()
721
+
722
+ # Initialize request argument(s)
723
+ request = generativelanguage_v1beta3.GetModelRequest(
724
+ name="name_value",
725
+ )
726
+
727
+ # Make the request
728
+ response = client.get_model(request=request)
729
+
730
+ # Handle the response
731
+ print(response)
732
+
733
+ Args:
734
+ request (Union[google.ai.generativelanguage_v1beta3.types.GetModelRequest, dict]):
735
+ The request object. Request for getting information about
736
+ a specific Model.
737
+ name (str):
738
+ Required. The resource name of the model.
739
+
740
+ This name should match a model name returned by the
741
+ ``ListModels`` method.
742
+
743
+ Format: ``models/{model}``
744
+
745
+ This corresponds to the ``name`` field
746
+ on the ``request`` instance; if ``request`` is provided, this
747
+ should not be set.
748
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
749
+ should be retried.
750
+ timeout (float): The timeout for this request.
751
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
752
+ sent along with the request as metadata. Normally, each value must be of type `str`,
753
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
754
+ be of type `bytes`.
755
+
756
+ Returns:
757
+ google.ai.generativelanguage_v1beta3.types.Model:
758
+ Information about a Generative
759
+ Language Model.
760
+
761
+ """
762
+ # Create or coerce a protobuf request object.
763
+ # - Quick check: If we got a request object, we should *not* have
764
+ # gotten any keyword arguments that map to the request.
765
+ has_flattened_params = any([name])
766
+ if request is not None and has_flattened_params:
767
+ raise ValueError(
768
+ "If the `request` argument is set, then none of "
769
+ "the individual field arguments should be set."
770
+ )
771
+
772
+ # - Use the request object if provided (there's no risk of modifying the input as
773
+ # there are no flattened fields), or create one.
774
+ if not isinstance(request, model_service.GetModelRequest):
775
+ request = model_service.GetModelRequest(request)
776
+ # If we have keyword arguments corresponding to fields on the
777
+ # request, apply these.
778
+ if name is not None:
779
+ request.name = name
780
+
781
+ # Wrap the RPC method; this adds retry and timeout information,
782
+ # and friendly error handling.
783
+ rpc = self._transport._wrapped_methods[self._transport.get_model]
784
+
785
+ # Certain fields should be provided within the metadata header;
786
+ # add these here.
787
+ metadata = tuple(metadata) + (
788
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
789
+ )
790
+
791
+ # Validate the universe domain.
792
+ self._validate_universe_domain()
793
+
794
+ # Send the request.
795
+ response = rpc(
796
+ request,
797
+ retry=retry,
798
+ timeout=timeout,
799
+ metadata=metadata,
800
+ )
801
+
802
+ # Done; return the response.
803
+ return response
804
+
805
+ def list_models(
806
+ self,
807
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
808
+ *,
809
+ page_size: Optional[int] = None,
810
+ page_token: Optional[str] = None,
811
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
812
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
813
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
814
+ ) -> pagers.ListModelsPager:
815
+ r"""Lists models available through the API.
816
+
817
+ .. code-block:: python
818
+
819
+ # This snippet has been automatically generated and should be regarded as a
820
+ # code template only.
821
+ # It will require modifications to work:
822
+ # - It may require correct/in-range values for request initialization.
823
+ # - It may require specifying regional endpoints when creating the service
824
+ # client as shown in:
825
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
826
+ from google.ai import generativelanguage_v1beta3
827
+
828
+ def sample_list_models():
829
+ # Create a client
830
+ client = generativelanguage_v1beta3.ModelServiceClient()
831
+
832
+ # Initialize request argument(s)
833
+ request = generativelanguage_v1beta3.ListModelsRequest(
834
+ )
835
+
836
+ # Make the request
837
+ page_result = client.list_models(request=request)
838
+
839
+ # Handle the response
840
+ for response in page_result:
841
+ print(response)
842
+
843
+ Args:
844
+ request (Union[google.ai.generativelanguage_v1beta3.types.ListModelsRequest, dict]):
845
+ The request object. Request for listing all Models.
846
+ page_size (int):
847
+ The maximum number of ``Models`` to return (per page).
848
+
849
+ The service may return fewer models. If unspecified, at
850
+ most 50 models will be returned per page. This method
851
+ returns at most 1000 models per page, even if you pass a
852
+ larger page_size.
853
+
854
+ This corresponds to the ``page_size`` field
855
+ on the ``request`` instance; if ``request`` is provided, this
856
+ should not be set.
857
+ page_token (str):
858
+ A page token, received from a previous ``ListModels``
859
+ call.
860
+
861
+ Provide the ``page_token`` returned by one request as an
862
+ argument to the next request to retrieve the next page.
863
+
864
+ When paginating, all other parameters provided to
865
+ ``ListModels`` must match the call that provided the
866
+ page token.
867
+
868
+ This corresponds to the ``page_token`` field
869
+ on the ``request`` instance; if ``request`` is provided, this
870
+ should not be set.
871
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
872
+ should be retried.
873
+ timeout (float): The timeout for this request.
874
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
875
+ sent along with the request as metadata. Normally, each value must be of type `str`,
876
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
877
+ be of type `bytes`.
878
+
879
+ Returns:
880
+ google.ai.generativelanguage_v1beta3.services.model_service.pagers.ListModelsPager:
881
+ Response from ListModel containing a paginated list of
882
+ Models.
883
+
884
+ Iterating over this object will yield results and
885
+ resolve additional pages automatically.
886
+
887
+ """
888
+ # Create or coerce a protobuf request object.
889
+ # - Quick check: If we got a request object, we should *not* have
890
+ # gotten any keyword arguments that map to the request.
891
+ has_flattened_params = any([page_size, page_token])
892
+ if request is not None and has_flattened_params:
893
+ raise ValueError(
894
+ "If the `request` argument is set, then none of "
895
+ "the individual field arguments should be set."
896
+ )
897
+
898
+ # - Use the request object if provided (there's no risk of modifying the input as
899
+ # there are no flattened fields), or create one.
900
+ if not isinstance(request, model_service.ListModelsRequest):
901
+ request = model_service.ListModelsRequest(request)
902
+ # If we have keyword arguments corresponding to fields on the
903
+ # request, apply these.
904
+ if page_size is not None:
905
+ request.page_size = page_size
906
+ if page_token is not None:
907
+ request.page_token = page_token
908
+
909
+ # Wrap the RPC method; this adds retry and timeout information,
910
+ # and friendly error handling.
911
+ rpc = self._transport._wrapped_methods[self._transport.list_models]
912
+
913
+ # Validate the universe domain.
914
+ self._validate_universe_domain()
915
+
916
+ # Send the request.
917
+ response = rpc(
918
+ request,
919
+ retry=retry,
920
+ timeout=timeout,
921
+ metadata=metadata,
922
+ )
923
+
924
+ # This method is paged; wrap the response in a pager, which provides
925
+ # an `__iter__` convenience method.
926
+ response = pagers.ListModelsPager(
927
+ method=rpc,
928
+ request=request,
929
+ response=response,
930
+ retry=retry,
931
+ timeout=timeout,
932
+ metadata=metadata,
933
+ )
934
+
935
+ # Done; return the response.
936
+ return response
937
+
938
+ def get_tuned_model(
939
+ self,
940
+ request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None,
941
+ *,
942
+ name: Optional[str] = None,
943
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
944
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
945
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
946
+ ) -> tuned_model.TunedModel:
947
+ r"""Gets information about a specific TunedModel.
948
+
949
+ .. code-block:: python
950
+
951
+ # This snippet has been automatically generated and should be regarded as a
952
+ # code template only.
953
+ # It will require modifications to work:
954
+ # - It may require correct/in-range values for request initialization.
955
+ # - It may require specifying regional endpoints when creating the service
956
+ # client as shown in:
957
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
958
+ from google.ai import generativelanguage_v1beta3
959
+
960
+ def sample_get_tuned_model():
961
+ # Create a client
962
+ client = generativelanguage_v1beta3.ModelServiceClient()
963
+
964
+ # Initialize request argument(s)
965
+ request = generativelanguage_v1beta3.GetTunedModelRequest(
966
+ name="name_value",
967
+ )
968
+
969
+ # Make the request
970
+ response = client.get_tuned_model(request=request)
971
+
972
+ # Handle the response
973
+ print(response)
974
+
975
+ Args:
976
+ request (Union[google.ai.generativelanguage_v1beta3.types.GetTunedModelRequest, dict]):
977
+ The request object. Request for getting information about
978
+ a specific Model.
979
+ name (str):
980
+ Required. The resource name of the model.
981
+
982
+ Format: ``tunedModels/my-model-id``
983
+
984
+ This corresponds to the ``name`` field
985
+ on the ``request`` instance; if ``request`` is provided, this
986
+ should not be set.
987
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
988
+ should be retried.
989
+ timeout (float): The timeout for this request.
990
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
991
+ sent along with the request as metadata. Normally, each value must be of type `str`,
992
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
993
+ be of type `bytes`.
994
+
995
+ Returns:
996
+ google.ai.generativelanguage_v1beta3.types.TunedModel:
997
+ A fine-tuned model created using
998
+ ModelService.CreateTunedModel.
999
+
1000
+ """
1001
+ # Create or coerce a protobuf request object.
1002
+ # - Quick check: If we got a request object, we should *not* have
1003
+ # gotten any keyword arguments that map to the request.
1004
+ has_flattened_params = any([name])
1005
+ if request is not None and has_flattened_params:
1006
+ raise ValueError(
1007
+ "If the `request` argument is set, then none of "
1008
+ "the individual field arguments should be set."
1009
+ )
1010
+
1011
+ # - Use the request object if provided (there's no risk of modifying the input as
1012
+ # there are no flattened fields), or create one.
1013
+ if not isinstance(request, model_service.GetTunedModelRequest):
1014
+ request = model_service.GetTunedModelRequest(request)
1015
+ # If we have keyword arguments corresponding to fields on the
1016
+ # request, apply these.
1017
+ if name is not None:
1018
+ request.name = name
1019
+
1020
+ # Wrap the RPC method; this adds retry and timeout information,
1021
+ # and friendly error handling.
1022
+ rpc = self._transport._wrapped_methods[self._transport.get_tuned_model]
1023
+
1024
+ # Certain fields should be provided within the metadata header;
1025
+ # add these here.
1026
+ metadata = tuple(metadata) + (
1027
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1028
+ )
1029
+
1030
+ # Validate the universe domain.
1031
+ self._validate_universe_domain()
1032
+
1033
+ # Send the request.
1034
+ response = rpc(
1035
+ request,
1036
+ retry=retry,
1037
+ timeout=timeout,
1038
+ metadata=metadata,
1039
+ )
1040
+
1041
+ # Done; return the response.
1042
+ return response
1043
+
1044
    def list_tuned_models(
        self,
        request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None,
        *,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> pagers.ListTunedModelsPager:
        r"""Lists tuned models owned by the user.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.ai import generativelanguage_v1beta3

            def sample_list_tuned_models():
                # Create a client
                client = generativelanguage_v1beta3.ModelServiceClient()

                # Initialize request argument(s)
                request = generativelanguage_v1beta3.ListTunedModelsRequest(
                )

                # Make the request
                page_result = client.list_tuned_models(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.ai.generativelanguage_v1beta3.types.ListTunedModelsRequest, dict]):
                The request object. Request for listing TunedModels.
            page_size (int):
                Optional. The maximum number of ``TunedModels`` to
                return (per page). The service may return fewer tuned
                models.

                If unspecified, at most 10 tuned models will be
                returned. This method returns at most 1000 models per
                page, even if you pass a larger page_size.

                This corresponds to the ``page_size`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            page_token (str):
                Optional. A page token, received from a previous
                ``ListTunedModels`` call.

                Provide the ``page_token`` returned by one request as an
                argument to the next request to retrieve the next page.

                When paginating, all other parameters provided to
                ``ListTunedModels`` must match the call that provided
                the page token.

                This corresponds to the ``page_token`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            google.ai.generativelanguage_v1beta3.services.model_service.pagers.ListTunedModelsPager:
                Response from ListTunedModels containing a paginated
                list of Models.

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # - Quick check: If we got a request object, we should *not* have
        #   gotten any keyword arguments that map to the request.
        has_flattened_params = any([page_size, page_token])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # - Use the request object if provided (there's no risk of modifying the input as
        #   there are no flattened fields), or create one.
        if not isinstance(request, model_service.ListTunedModelsRequest):
            request = model_service.ListTunedModelsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if page_size is not None:
                request.page_size = page_size
            if page_token is not None:
                request.page_token = page_token

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_tuned_models]

        # (Unlike the resource-scoped methods above, no routing-header
        # metadata is attached here — ListTunedModels is not keyed by a
        # resource name.)

        # Validate the universe domain.
        self._validate_universe_domain()

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListTunedModelsPager(
            method=rpc,
            request=request,
            response=response,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
1177
+
1178
    def create_tuned_model(
        self,
        request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None,
        *,
        tuned_model: Optional[gag_tuned_model.TunedModel] = None,
        tuned_model_id: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operation.Operation:
        r"""Creates a tuned model. Intermediate tuning progress (if any) is
        accessed through the [google.longrunning.Operations] service.

        Status and results can be accessed through the Operations
        service. Example: GET
        /v1/tunedModels/az2mb0bpw6i/operations/000-111-222

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.ai import generativelanguage_v1beta3

            def sample_create_tuned_model():
                # Create a client
                client = generativelanguage_v1beta3.ModelServiceClient()

                # Initialize request argument(s)
                tuned_model = generativelanguage_v1beta3.TunedModel()
                tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
                tuned_model.tuning_task.training_data.examples.examples.output = "output_value"

                request = generativelanguage_v1beta3.CreateTunedModelRequest(
                    tuned_model=tuned_model,
                )

                # Make the request
                operation = client.create_tuned_model(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.ai.generativelanguage_v1beta3.types.CreateTunedModelRequest, dict]):
                The request object. Request to create a TunedModel.
            tuned_model (google.ai.generativelanguage_v1beta3.types.TunedModel):
                Required. The tuned model to create.
                This corresponds to the ``tuned_model`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            tuned_model_id (str):
                Optional. The unique id for the tuned model if
                specified. This value should be up to 40 characters, the
                first character must be a letter, the last could be a
                letter or a number. The id must match the regular
                expression: `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?.

                This corresponds to the ``tuned_model_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.ai.generativelanguage_v1beta3.types.TunedModel`
                A fine-tuned model created using
                ModelService.CreateTunedModel.

        """
        # Create or coerce a protobuf request object.
        # - Quick check: If we got a request object, we should *not* have
        #   gotten any keyword arguments that map to the request.
        has_flattened_params = any([tuned_model, tuned_model_id])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # - Use the request object if provided (there's no risk of modifying the input as
        #   there are no flattened fields), or create one.
        if not isinstance(request, model_service.CreateTunedModelRequest):
            request = model_service.CreateTunedModelRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if tuned_model is not None:
                request.tuned_model = tuned_model
            if tuned_model_id is not None:
                request.tuned_model_id = tuned_model_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_tuned_model]

        # Validate the universe domain.
        self._validate_universe_domain()

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future. The future resolves to a
        # TunedModel; intermediate operation state is surfaced as
        # CreateTunedModelMetadata.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            gag_tuned_model.TunedModel,
            metadata_type=model_service.CreateTunedModelMetadata,
        )

        # Done; return the response.
        return response
1311
+
1312
    def update_tuned_model(
        self,
        request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None,
        *,
        tuned_model: Optional[gag_tuned_model.TunedModel] = None,
        update_mask: Optional[field_mask_pb2.FieldMask] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> gag_tuned_model.TunedModel:
        r"""Updates a tuned model.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.ai import generativelanguage_v1beta3

            def sample_update_tuned_model():
                # Create a client
                client = generativelanguage_v1beta3.ModelServiceClient()

                # Initialize request argument(s)
                tuned_model = generativelanguage_v1beta3.TunedModel()
                tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
                tuned_model.tuning_task.training_data.examples.examples.output = "output_value"

                request = generativelanguage_v1beta3.UpdateTunedModelRequest(
                    tuned_model=tuned_model,
                )

                # Make the request
                response = client.update_tuned_model(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.ai.generativelanguage_v1beta3.types.UpdateTunedModelRequest, dict]):
                The request object. Request to update a TunedModel.
            tuned_model (google.ai.generativelanguage_v1beta3.types.TunedModel):
                Required. The tuned model to update.
                This corresponds to the ``tuned_model`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                Required. The list of fields to
                update.

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            google.ai.generativelanguage_v1beta3.types.TunedModel:
                A fine-tuned model created using
                ModelService.CreateTunedModel.

        """
        # Create or coerce a protobuf request object.
        # - Quick check: If we got a request object, we should *not* have
        #   gotten any keyword arguments that map to the request.
        has_flattened_params = any([tuned_model, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # - Use the request object if provided (there's no risk of modifying the input as
        #   there are no flattened fields), or create one.
        if not isinstance(request, model_service.UpdateTunedModelRequest):
            request = model_service.UpdateTunedModelRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if tuned_model is not None:
                request.tuned_model = tuned_model
            if update_mask is not None:
                request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_tuned_model]

        # Certain fields should be provided within the metadata header;
        # add these here. The routing key is the nested resource name
        # ``tuned_model.name``.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("tuned_model.name", request.tuned_model.name),)
            ),
        )

        # Validate the universe domain.
        self._validate_universe_domain()

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
1429
+
1430
    def delete_tuned_model(
        self,
        request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> None:
        r"""Deletes a tuned model.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.ai import generativelanguage_v1beta3

            def sample_delete_tuned_model():
                # Create a client
                client = generativelanguage_v1beta3.ModelServiceClient()

                # Initialize request argument(s)
                request = generativelanguage_v1beta3.DeleteTunedModelRequest(
                    name="name_value",
                )

                # Make the request
                client.delete_tuned_model(request=request)

        Args:
            request (Union[google.ai.generativelanguage_v1beta3.types.DeleteTunedModelRequest, dict]):
                The request object. Request to delete a TunedModel.
            name (str):
                Required. The resource name of the model. Format:
                ``tunedModels/my-model-id``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        # Create or coerce a protobuf request object.
        # - Quick check: If we got a request object, we should *not* have
        #   gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # - Use the request object if provided (there's no risk of modifying the input as
        #   there are no flattened fields), or create one.
        if not isinstance(request, model_service.DeleteTunedModelRequest):
            request = model_service.DeleteTunedModelRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete_tuned_model]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._validate_universe_domain()

        # Send the request. Delete has no response payload, so the RPC
        # result is deliberately discarded and the method returns None.
        rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
1521
+
1522
    def __enter__(self) -> "ModelServiceClient":
        # Support `with ModelServiceClient() as client:` usage; the paired
        # __exit__ releases the transport.
        return self
1524
+
1525
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Exceptions are not suppressed: this method returns None, so any
        # exception raised inside the `with` block propagates to the caller.
        self.transport.close()
1534
+
1535
+
1536
# Default client info carrying this package's version; presumably attached to
# outgoing API request headers by gapic_v1 — confirm against google-api-core.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


# Public API of this module.
__all__ = ("ModelServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/pagers.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from typing import (
17
+ Any,
18
+ AsyncIterator,
19
+ Awaitable,
20
+ Callable,
21
+ Iterator,
22
+ Optional,
23
+ Sequence,
24
+ Tuple,
25
+ Union,
26
+ )
27
+
28
+ from google.api_core import gapic_v1
29
+ from google.api_core import retry as retries
30
+ from google.api_core import retry_async as retries_async
31
+
32
+ try:
33
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
34
+ OptionalAsyncRetry = Union[
35
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
36
+ ]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
40
+
41
+ from google.ai.generativelanguage_v1beta3.types import model, model_service, tuned_model
42
+
43
+
44
class ListModelsPager:
    """A pager for iterating through ``list_models`` requests.

    Thinly wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListModelsResponse` and
    exposes ``__iter__`` over its ``models`` field, transparently issuing
    further ``ListModels`` requests whenever the current response carries a
    ``next_page_token``.

    Attribute access falls through to the most recently fetched response, so
    all the usual response attributes remain available on the pager; when
    multiple requests are made, only the latest response is retained.
    """

    def __init__(
        self,
        method: Callable[..., model_service.ListModelsResponse],
        request: model_service.ListModelsRequest,
        response: model_service.ListModelsResponse,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The RPC method originally called, reused to
                fetch subsequent pages.
            request (google.ai.generativelanguage_v1beta3.types.ListModelsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListModelsResponse):
                The initial response object.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Coerce to a request message so page_token can be mutated in place.
        self._request = model_service.ListModelsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[model_service.ListModelsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __iter__(self) -> Iterator[model.Model]:
        for page in self.pages:
            for item in page.models:
                yield item

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
118
+
119
+
120
class ListModelsAsyncPager:
    """A pager for iterating through ``list_models`` requests.

    Thinly wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListModelsResponse` and
    exposes ``__aiter__`` over its ``models`` field, transparently awaiting
    further ``ListModels`` requests whenever the current response carries a
    ``next_page_token``.

    Attribute access falls through to the most recently fetched response, so
    all the usual response attributes remain available on the pager; when
    multiple requests are made, only the latest response is retained.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[model_service.ListModelsResponse]],
        request: model_service.ListModelsRequest,
        response: model_service.ListModelsResponse,
        *,
        retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The RPC coroutine originally called, reused to
                fetch subsequent pages.
            request (google.ai.generativelanguage_v1beta3.types.ListModelsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListModelsResponse):
                The initial response object.
            retry (google.api_core.retry.AsyncRetry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Coerce to a request message so page_token can be mutated in place.
        self._request = model_service.ListModelsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[model.Model]:
        async def _flatten():
            async for page in self.pages:
                for item in page.models:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
198
+
199
+
200
class ListTunedModelsPager:
    """A pager for iterating through ``list_tuned_models`` requests.

    Thinly wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListTunedModelsResponse`
    and exposes ``__iter__`` over its ``tuned_models`` field, transparently
    issuing further ``ListTunedModels`` requests whenever the current response
    carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response, so
    all the usual response attributes remain available on the pager; when
    multiple requests are made, only the latest response is retained.
    """

    def __init__(
        self,
        method: Callable[..., model_service.ListTunedModelsResponse],
        request: model_service.ListTunedModelsRequest,
        response: model_service.ListTunedModelsResponse,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The RPC method originally called, reused to
                fetch subsequent pages.
            request (google.ai.generativelanguage_v1beta3.types.ListTunedModelsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListTunedModelsResponse):
                The initial response object.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Coerce to a request message so page_token can be mutated in place.
        self._request = model_service.ListTunedModelsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[model_service.ListTunedModelsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __iter__(self) -> Iterator[tuned_model.TunedModel]:
        for page in self.pages:
            for item in page.tuned_models:
                yield item

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
274
+
275
+
276
class ListTunedModelsAsyncPager:
    """An async pager over the results of ``list_tuned_models`` calls.

    Wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListTunedModelsResponse`
    and exposes ``__aiter__`` so callers can asynchronously iterate over the
    ``tuned_models`` field across pages.

    Whenever a page is exhausted and the current response carries a
    ``next_page_token``, another ``ListTunedModels`` request is issued
    transparently and iteration continues over the ``tuned_models`` field of
    the following response.

    All the usual
    :class:`google.ai.generativelanguage_v1beta3.types.ListTunedModelsResponse`
    attributes are available on the pager. Only the most recently fetched
    response is retained, so attribute lookups reflect the latest page.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[model_service.ListTunedModelsResponse]],
        request: model_service.ListTunedModelsRequest,
        response: model_service.ListTunedModelsResponse,
        *,
        retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.ai.generativelanguage_v1beta3.types.ListTunedModelsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListTunedModelsResponse):
                The initial response object.
            retry (google.api_core.retry.AsyncRetry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Re-wrap so the pager owns a mutable copy whose page_token it can
        # advance without mutating the caller's request object.
        self._request = model_service.ListTunedModelsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[model_service.ListTunedModelsResponse]:
        """Asynchronously yield each full response page, fetching lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[tuned_model.TunedModel]:
        async def _flatten():
            # Flatten pages into a single stream of TunedModel items.
            async for page in self.pages:
                for item in page.tuned_models:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import ModelServiceTransport
from .grpc import ModelServiceGrpcTransport
from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport
from .rest import ModelServiceRestInterceptor, ModelServiceRestTransport

# Compile a registry of transports.
# Maps a transport name ("grpc", "grpc_asyncio", "rest") to the class that
# implements it; OrderedDict preserves the insertion order shown here.
_transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
_transport_registry["grpc"] = ModelServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
_transport_registry["rest"] = ModelServiceRestTransport

# Public names re-exported by this subpackage.
__all__ = (
    "ModelServiceTransport",
    "ModelServiceGrpcTransport",
    "ModelServiceGrpcAsyncIOTransport",
    "ModelServiceRestTransport",
    "ModelServiceRestInterceptor",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (10.5 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (23.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (26.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (55.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (19.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/base.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1, operations_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+
29
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
30
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
31
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
32
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
33
+
34
# Default client-info metadata sent with every request (carries the
# package's gapic version in the user-agent string).
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class ModelServiceTransport(abc.ABC):
    """Abstract transport class for ModelService.

    Handles credential resolution and common configuration; concrete
    subclasses implement the RPC properties declared below.
    """

    # No default OAuth scopes are requested for this service.
    AUTH_SCOPES = ()

    # Default API endpoint; ":443" is appended in __init__ when the host
    # string carries no explicit port.
    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The intended audience applied to
                credentials that support ``with_gdch_audience``; defaults to
                ``host`` when unset.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes
        # Subclasses (e.g. a transport given a pre-built channel) may set
        # _ignore_credentials before calling super().__init__.
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        # Resolved "host:port" string computed in __init__.
        return self._host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Each RPC callable is wrapped with gapic_v1.method.wrap_method,
        # which applies no default timeout and attaches client_info
        # metadata to outgoing requests.
        self._wrapped_methods = {
            self.get_model: gapic_v1.method.wrap_method(
                self.get_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_models: gapic_v1.method.wrap_method(
                self.list_models,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_tuned_model: gapic_v1.method.wrap_method(
                self.get_tuned_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_tuned_models: gapic_v1.method.wrap_method(
                self.list_tuned_models,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_tuned_model: gapic_v1.method.wrap_method(
                self.create_tuned_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_tuned_model: gapic_v1.method.wrap_method(
                self.update_tuned_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_tuned_model: gapic_v1.method.wrap_method(
                self.delete_tuned_model,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # --- Abstract RPC surface -------------------------------------------
    # Each property below returns the transport-specific callable for one
    # RPC; sync transports return the plain result type, async transports
    # return an Awaitable of it (hence the Union in the annotations).

    @property
    def get_model(
        self,
    ) -> Callable[
        [model_service.GetModelRequest], Union[model.Model, Awaitable[model.Model]]
    ]:
        raise NotImplementedError()

    @property
    def list_models(
        self,
    ) -> Callable[
        [model_service.ListModelsRequest],
        Union[
            model_service.ListModelsResponse,
            Awaitable[model_service.ListModelsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_tuned_model(
        self,
    ) -> Callable[
        [model_service.GetTunedModelRequest],
        Union[tuned_model.TunedModel, Awaitable[tuned_model.TunedModel]],
    ]:
        raise NotImplementedError()

    @property
    def list_tuned_models(
        self,
    ) -> Callable[
        [model_service.ListTunedModelsRequest],
        Union[
            model_service.ListTunedModelsResponse,
            Awaitable[model_service.ListTunedModelsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def create_tuned_model(
        self,
    ) -> Callable[
        [model_service.CreateTunedModelRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def update_tuned_model(
        self,
    ) -> Callable[
        [model_service.UpdateTunedModelRequest],
        Union[gag_tuned_model.TunedModel, Awaitable[gag_tuned_model.TunedModel]],
    ]:
        raise NotImplementedError()

    @property
    def delete_tuned_model(
        self,
    ) -> Callable[
        [model_service.DeleteTunedModelRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short transport identifier supplied by subclasses — presumably
        # matching the registry keys in transports/__init__.py ("grpc",
        # "grpc_asyncio", "rest"); confirm against the subclasses.
        raise NotImplementedError()


__all__ = ("ModelServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/grpc.py ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers, operations_v1
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+ from google.protobuf.json_format import MessageToJson
29
+ import google.protobuf.message
30
+ import grpc # type: ignore
31
+ import proto # type: ignore
32
+
33
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
34
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
35
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
38
+
39
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    # Older google-api-core releases do not ship ``client_logging``;
    # degrade gracefully by disabling debug request/response logging.
    CLIENT_LOGGING_SUPPORTED = False

# Module-level logger used by the gRPC logging interceptor below.
_LOGGER = std_logging.getLogger(__name__)
47
+
48
+
49
class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):  # pragma: NO COVER
    """Debug-logs unary-unary gRPC requests and responses.

    Active only when ``client_logging`` is importable and the module
    logger is enabled for DEBUG; otherwise the interceptor is a
    pass-through around ``continuation``.
    """

    def intercept_unary_unary(self, continuation, client_call_details, request):
        # Evaluate once so request and response logging agree.
        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        )
        if logging_enabled:  # pragma: NO COVER
            request_metadata = client_call_details.metadata
            # Serialize the request: proto-plus messages via their own
            # to_json, raw protobuf via MessageToJson, anything else via a
            # pickled repr as a last resort.
            if isinstance(request, proto.Message):
                request_payload = type(request).to_json(request)
            elif isinstance(request, google.protobuf.message.Message):
                request_payload = MessageToJson(request)
            else:
                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"

            # Decode any bytes-valued metadata so the log record is JSON-safe.
            request_metadata = {
                key: value.decode("utf-8") if isinstance(value, bytes) else value
                for key, value in request_metadata
            }
            grpc_request = {
                "payload": request_payload,
                "requestMethod": "grpc",
                "metadata": dict(request_metadata),
            }
            _LOGGER.debug(
                f"Sending request for {client_call_details.method}",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": client_call_details.method,
                    "request": grpc_request,
                    "metadata": grpc_request["metadata"],
                },
            )

        response = continuation(client_call_details, request)
        if logging_enabled:  # pragma: NO COVER
            response_metadata = response.trailing_metadata()
            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
            metadata = (
                dict([(k, str(v)) for k, v in response_metadata])
                if response_metadata
                else None
            )
            result = response.result()
            # Mirror the request-side serialization strategy for the result.
            if isinstance(result, proto.Message):
                response_payload = type(result).to_json(result)
            elif isinstance(result, google.protobuf.message.Message):
                response_payload = MessageToJson(result)
            else:
                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
            grpc_response = {
                "payload": response_payload,
                "metadata": metadata,
                "status": "OK",
            }
            _LOGGER.debug(
                f"Received response for {client_call_details.method}.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": client_call_details.method,
                    "response": grpc_response,
                    "metadata": grpc_response["metadata"],
                },
            )
        return response
113
+
114
+
115
+ class ModelServiceGrpcTransport(ModelServiceTransport):
116
+ """gRPC backend transport for ModelService.
117
+
118
+ Provides methods for getting metadata information about
119
+ Generative Models.
120
+
121
+ This class defines the same methods as the primary client, so the
122
+ primary client can load the underlying transport implementation
123
+ and call it.
124
+
125
+ It sends protocol buffers over the wire using gRPC (which is built on
126
+ top of HTTP/2); the ``grpcio`` package must be installed.
127
+ """
128
+
129
+ _stubs: Dict[str, Callable]
130
+
131
+ def __init__(
132
+ self,
133
+ *,
134
+ host: str = "generativelanguage.googleapis.com",
135
+ credentials: Optional[ga_credentials.Credentials] = None,
136
+ credentials_file: Optional[str] = None,
137
+ scopes: Optional[Sequence[str]] = None,
138
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
139
+ api_mtls_endpoint: Optional[str] = None,
140
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
141
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
142
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
143
+ quota_project_id: Optional[str] = None,
144
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
145
+ always_use_jwt_access: Optional[bool] = False,
146
+ api_audience: Optional[str] = None,
147
+ ) -> None:
148
+ """Instantiate the transport.
149
+
150
+ Args:
151
+ host (Optional[str]):
152
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
153
+ credentials (Optional[google.auth.credentials.Credentials]): The
154
+ authorization credentials to attach to requests. These
155
+ credentials identify the application to the service; if none
156
+ are specified, the client will attempt to ascertain the
157
+ credentials from the environment.
158
+ This argument is ignored if a ``channel`` instance is provided.
159
+ credentials_file (Optional[str]): A file with credentials that can
160
+ be loaded with :func:`google.auth.load_credentials_from_file`.
161
+ This argument is ignored if a ``channel`` instance is provided.
162
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
163
+ ignored if a ``channel`` instance is provided.
164
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
165
+ A ``Channel`` instance through which to make calls, or a Callable
166
+ that constructs and returns one. If set to None, ``self.create_channel``
167
+ is used to create the channel. If a Callable is given, it will be called
168
+ with the same arguments as used in ``self.create_channel``.
169
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
170
+ If provided, it overrides the ``host`` argument and tries to create
171
+ a mutual TLS channel with client SSL credentials from
172
+ ``client_cert_source`` or application default SSL credentials.
173
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
174
+ Deprecated. A callback to provide client SSL certificate bytes and
175
+ private key bytes, both in PEM format. It is ignored if
176
+ ``api_mtls_endpoint`` is None.
177
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
178
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
179
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
180
+ A callback to provide client certificate bytes and private key bytes,
181
+ both in PEM format. It is used to configure a mutual TLS channel. It is
182
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
183
+ quota_project_id (Optional[str]): An optional project to use for billing
184
+ and quota.
185
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
186
+ The client info used to send a user-agent string along with
187
+ API requests. If ``None``, then default info will be used.
188
+ Generally, you only need to set this if you're developing
189
+ your own client library.
190
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
191
+ be used for service account credentials.
192
+
193
+ Raises:
194
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
195
+ creation failed for any reason.
196
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
197
+ and ``credentials_file`` are passed.
198
+ """
199
+ self._grpc_channel = None
200
+ self._ssl_channel_credentials = ssl_channel_credentials
201
+ self._stubs: Dict[str, Callable] = {}
202
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
203
+
204
+ if api_mtls_endpoint:
205
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
206
+ if client_cert_source:
207
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
208
+
209
+ if isinstance(channel, grpc.Channel):
210
+ # Ignore credentials if a channel was passed.
211
+ credentials = None
212
+ self._ignore_credentials = True
213
+ # If a channel was explicitly provided, set it.
214
+ self._grpc_channel = channel
215
+ self._ssl_channel_credentials = None
216
+
217
+ else:
218
+ if api_mtls_endpoint:
219
+ host = api_mtls_endpoint
220
+
221
+ # Create SSL credentials with client_cert_source or application
222
+ # default SSL credentials.
223
+ if client_cert_source:
224
+ cert, key = client_cert_source()
225
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
226
+ certificate_chain=cert, private_key=key
227
+ )
228
+ else:
229
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
230
+
231
+ else:
232
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
233
+ cert, key = client_cert_source_for_mtls()
234
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
235
+ certificate_chain=cert, private_key=key
236
+ )
237
+
238
+ # The base transport sets the host, credentials and scopes
239
+ super().__init__(
240
+ host=host,
241
+ credentials=credentials,
242
+ credentials_file=credentials_file,
243
+ scopes=scopes,
244
+ quota_project_id=quota_project_id,
245
+ client_info=client_info,
246
+ always_use_jwt_access=always_use_jwt_access,
247
+ api_audience=api_audience,
248
+ )
249
+
250
+ if not self._grpc_channel:
251
+ # initialize with the provided callable or the default channel
252
+ channel_init = channel or type(self).create_channel
253
+ self._grpc_channel = channel_init(
254
+ self._host,
255
+ # use the credentials which are saved
256
+ credentials=self._credentials,
257
+ # Set ``credentials_file`` to ``None`` here as
258
+ # the credentials that we saved earlier should be used.
259
+ credentials_file=None,
260
+ scopes=self._scopes,
261
+ ssl_credentials=self._ssl_channel_credentials,
262
+ quota_project_id=quota_project_id,
263
+ options=[
264
+ ("grpc.max_send_message_length", -1),
265
+ ("grpc.max_receive_message_length", -1),
266
+ ],
267
+ )
268
+
269
+ self._interceptor = _LoggingClientInterceptor()
270
+ self._logged_channel = grpc.intercept_channel(
271
+ self._grpc_channel, self._interceptor
272
+ )
273
+
274
+ # Wrap messages. This must be done after self._logged_channel exists
275
+ self._prep_wrapped_messages(client_info)
276
+
277
+ @classmethod
278
+ def create_channel(
279
+ cls,
280
+ host: str = "generativelanguage.googleapis.com",
281
+ credentials: Optional[ga_credentials.Credentials] = None,
282
+ credentials_file: Optional[str] = None,
283
+ scopes: Optional[Sequence[str]] = None,
284
+ quota_project_id: Optional[str] = None,
285
+ **kwargs,
286
+ ) -> grpc.Channel:
287
+ """Create and return a gRPC channel object.
288
+ Args:
289
+ host (Optional[str]): The host for the channel to use.
290
+ credentials (Optional[~.Credentials]): The
291
+ authorization credentials to attach to requests. These
292
+ credentials identify this application to the service. If
293
+ none are specified, the client will attempt to ascertain
294
+ the credentials from the environment.
295
+ credentials_file (Optional[str]): A file with credentials that can
296
+ be loaded with :func:`google.auth.load_credentials_from_file`.
297
+ This argument is mutually exclusive with credentials.
298
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
299
+ service. These are only used when credentials are not specified and
300
+ are passed to :func:`google.auth.default`.
301
+ quota_project_id (Optional[str]): An optional project to use for billing
302
+ and quota.
303
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
304
+ channel creation.
305
+ Returns:
306
+ grpc.Channel: A gRPC channel object.
307
+
308
+ Raises:
309
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
310
+ and ``credentials_file`` are passed.
311
+ """
312
+
313
+ return grpc_helpers.create_channel(
314
+ host,
315
+ credentials=credentials,
316
+ credentials_file=credentials_file,
317
+ quota_project_id=quota_project_id,
318
+ default_scopes=cls.AUTH_SCOPES,
319
+ scopes=scopes,
320
+ default_host=cls.DEFAULT_HOST,
321
+ **kwargs,
322
+ )
323
+
324
+ @property
325
+ def grpc_channel(self) -> grpc.Channel:
326
+ """Return the channel designed to connect to this service."""
327
+ return self._grpc_channel
328
+
329
+ @property
330
+ def operations_client(self) -> operations_v1.OperationsClient:
331
+ """Create the client designed to process long-running operations.
332
+
333
+ This property caches on the instance; repeated calls return the same
334
+ client.
335
+ """
336
+ # Quick check: Only create a new client if we do not already have one.
337
+ if self._operations_client is None:
338
+ self._operations_client = operations_v1.OperationsClient(
339
+ self._logged_channel
340
+ )
341
+
342
+ # Return the client from cache.
343
+ return self._operations_client
344
+
345
+ @property
346
+ def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
347
+ r"""Return a callable for the get model method over gRPC.
348
+
349
+ Gets information about a specific Model.
350
+
351
+ Returns:
352
+ Callable[[~.GetModelRequest],
353
+ ~.Model]:
354
+ A function that, when called, will call the underlying RPC
355
+ on the server.
356
+ """
357
+ # Generate a "stub function" on-the-fly which will actually make
358
+ # the request.
359
+ # gRPC handles serialization and deserialization, so we just need
360
+ # to pass in the functions for each.
361
+ if "get_model" not in self._stubs:
362
+ self._stubs["get_model"] = self._logged_channel.unary_unary(
363
+ "/google.ai.generativelanguage.v1beta3.ModelService/GetModel",
364
+ request_serializer=model_service.GetModelRequest.serialize,
365
+ response_deserializer=model.Model.deserialize,
366
+ )
367
+ return self._stubs["get_model"]
368
+
369
+ @property
370
+ def list_models(
371
+ self,
372
+ ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
373
+ r"""Return a callable for the list models method over gRPC.
374
+
375
+ Lists models available through the API.
376
+
377
+ Returns:
378
+ Callable[[~.ListModelsRequest],
379
+ ~.ListModelsResponse]:
380
+ A function that, when called, will call the underlying RPC
381
+ on the server.
382
+ """
383
+ # Generate a "stub function" on-the-fly which will actually make
384
+ # the request.
385
+ # gRPC handles serialization and deserialization, so we just need
386
+ # to pass in the functions for each.
387
+ if "list_models" not in self._stubs:
388
+ self._stubs["list_models"] = self._logged_channel.unary_unary(
389
+ "/google.ai.generativelanguage.v1beta3.ModelService/ListModels",
390
+ request_serializer=model_service.ListModelsRequest.serialize,
391
+ response_deserializer=model_service.ListModelsResponse.deserialize,
392
+ )
393
+ return self._stubs["list_models"]
394
+
395
+ @property
396
+ def get_tuned_model(
397
+ self,
398
+ ) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]:
399
+ r"""Return a callable for the get tuned model method over gRPC.
400
+
401
+ Gets information about a specific TunedModel.
402
+
403
+ Returns:
404
+ Callable[[~.GetTunedModelRequest],
405
+ ~.TunedModel]:
406
+ A function that, when called, will call the underlying RPC
407
+ on the server.
408
+ """
409
+ # Generate a "stub function" on-the-fly which will actually make
410
+ # the request.
411
+ # gRPC handles serialization and deserialization, so we just need
412
+ # to pass in the functions for each.
413
+ if "get_tuned_model" not in self._stubs:
414
+ self._stubs["get_tuned_model"] = self._logged_channel.unary_unary(
415
+ "/google.ai.generativelanguage.v1beta3.ModelService/GetTunedModel",
416
+ request_serializer=model_service.GetTunedModelRequest.serialize,
417
+ response_deserializer=tuned_model.TunedModel.deserialize,
418
+ )
419
+ return self._stubs["get_tuned_model"]
420
+
421
+ @property
422
+ def list_tuned_models(
423
+ self,
424
+ ) -> Callable[
425
+ [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse
426
+ ]:
427
+ r"""Return a callable for the list tuned models method over gRPC.
428
+
429
+ Lists tuned models owned by the user.
430
+
431
+ Returns:
432
+ Callable[[~.ListTunedModelsRequest],
433
+ ~.ListTunedModelsResponse]:
434
+ A function that, when called, will call the underlying RPC
435
+ on the server.
436
+ """
437
+ # Generate a "stub function" on-the-fly which will actually make
438
+ # the request.
439
+ # gRPC handles serialization and deserialization, so we just need
440
+ # to pass in the functions for each.
441
+ if "list_tuned_models" not in self._stubs:
442
+ self._stubs["list_tuned_models"] = self._logged_channel.unary_unary(
443
+ "/google.ai.generativelanguage.v1beta3.ModelService/ListTunedModels",
444
+ request_serializer=model_service.ListTunedModelsRequest.serialize,
445
+ response_deserializer=model_service.ListTunedModelsResponse.deserialize,
446
+ )
447
+ return self._stubs["list_tuned_models"]
448
+
449
+ @property
450
+ def create_tuned_model(
451
+ self,
452
+ ) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]:
453
+ r"""Return a callable for the create tuned model method over gRPC.
454
+
455
+ Creates a tuned model. Intermediate tuning progress (if any) is
456
+ accessed through the [google.longrunning.Operations] service.
457
+
458
+ Status and results can be accessed through the Operations
459
+ service. Example: GET
460
+ /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
461
+
462
+ Returns:
463
+ Callable[[~.CreateTunedModelRequest],
464
+ ~.Operation]:
465
+ A function that, when called, will call the underlying RPC
466
+ on the server.
467
+ """
468
+ # Generate a "stub function" on-the-fly which will actually make
469
+ # the request.
470
+ # gRPC handles serialization and deserialization, so we just need
471
+ # to pass in the functions for each.
472
+ if "create_tuned_model" not in self._stubs:
473
+ self._stubs["create_tuned_model"] = self._logged_channel.unary_unary(
474
+ "/google.ai.generativelanguage.v1beta3.ModelService/CreateTunedModel",
475
+ request_serializer=model_service.CreateTunedModelRequest.serialize,
476
+ response_deserializer=operations_pb2.Operation.FromString,
477
+ )
478
+ return self._stubs["create_tuned_model"]
479
+
480
+ @property
481
+ def update_tuned_model(
482
+ self,
483
+ ) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]:
484
+ r"""Return a callable for the update tuned model method over gRPC.
485
+
486
+ Updates a tuned model.
487
+
488
+ Returns:
489
+ Callable[[~.UpdateTunedModelRequest],
490
+ ~.TunedModel]:
491
+ A function that, when called, will call the underlying RPC
492
+ on the server.
493
+ """
494
+ # Generate a "stub function" on-the-fly which will actually make
495
+ # the request.
496
+ # gRPC handles serialization and deserialization, so we just need
497
+ # to pass in the functions for each.
498
+ if "update_tuned_model" not in self._stubs:
499
+ self._stubs["update_tuned_model"] = self._logged_channel.unary_unary(
500
+ "/google.ai.generativelanguage.v1beta3.ModelService/UpdateTunedModel",
501
+ request_serializer=model_service.UpdateTunedModelRequest.serialize,
502
+ response_deserializer=gag_tuned_model.TunedModel.deserialize,
503
+ )
504
+ return self._stubs["update_tuned_model"]
505
+
506
+ @property
507
+ def delete_tuned_model(
508
+ self,
509
+ ) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]:
510
+ r"""Return a callable for the delete tuned model method over gRPC.
511
+
512
+ Deletes a tuned model.
513
+
514
+ Returns:
515
+ Callable[[~.DeleteTunedModelRequest],
516
+ ~.Empty]:
517
+ A function that, when called, will call the underlying RPC
518
+ on the server.
519
+ """
520
+ # Generate a "stub function" on-the-fly which will actually make
521
+ # the request.
522
+ # gRPC handles serialization and deserialization, so we just need
523
+ # to pass in the functions for each.
524
+ if "delete_tuned_model" not in self._stubs:
525
+ self._stubs["delete_tuned_model"] = self._logged_channel.unary_unary(
526
+ "/google.ai.generativelanguage.v1beta3.ModelService/DeleteTunedModel",
527
+ request_serializer=model_service.DeleteTunedModelRequest.serialize,
528
+ response_deserializer=empty_pb2.Empty.FromString,
529
+ )
530
+ return self._stubs["delete_tuned_model"]
531
+
532
+ def close(self):
533
+ self._logged_channel.close()
534
+
535
+ @property
536
+ def kind(self) -> str:
537
+ return "grpc"
538
+
539
+
540
+ __all__ = ("ModelServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf import empty_pb2 # type: ignore
30
+ from google.protobuf.json_format import MessageToJson
31
+ import google.protobuf.message
32
+ import grpc # type: ignore
33
+ from grpc.experimental import aio # type: ignore
34
+ import proto # type: ignore
35
+
36
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
37
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
38
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
39
+
40
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
41
+ from .grpc import ModelServiceGrpcTransport
42
+
43
+ try:
44
+ from google.api_core import client_logging # type: ignore
45
+
46
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
47
+ except ImportError: # pragma: NO COVER
48
+ CLIENT_LOGGING_SUPPORTED = False
49
+
50
+ _LOGGER = std_logging.getLogger(__name__)
51
+
52
+
53
+ class _LoggingClientAIOInterceptor(
54
+ grpc.aio.UnaryUnaryClientInterceptor
55
+ ): # pragma: NO COVER
56
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
57
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
58
+ std_logging.DEBUG
59
+ )
60
+ if logging_enabled: # pragma: NO COVER
61
+ request_metadata = client_call_details.metadata
62
+ if isinstance(request, proto.Message):
63
+ request_payload = type(request).to_json(request)
64
+ elif isinstance(request, google.protobuf.message.Message):
65
+ request_payload = MessageToJson(request)
66
+ else:
67
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
68
+
69
+ request_metadata = {
70
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
71
+ for key, value in request_metadata
72
+ }
73
+ grpc_request = {
74
+ "payload": request_payload,
75
+ "requestMethod": "grpc",
76
+ "metadata": dict(request_metadata),
77
+ }
78
+ _LOGGER.debug(
79
+ f"Sending request for {client_call_details.method}",
80
+ extra={
81
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
82
+ "rpcName": str(client_call_details.method),
83
+ "request": grpc_request,
84
+ "metadata": grpc_request["metadata"],
85
+ },
86
+ )
87
+ response = await continuation(client_call_details, request)
88
+ if logging_enabled: # pragma: NO COVER
89
+ response_metadata = await response.trailing_metadata()
90
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
91
+ metadata = (
92
+ dict([(k, str(v)) for k, v in response_metadata])
93
+ if response_metadata
94
+ else None
95
+ )
96
+ result = await response
97
+ if isinstance(result, proto.Message):
98
+ response_payload = type(result).to_json(result)
99
+ elif isinstance(result, google.protobuf.message.Message):
100
+ response_payload = MessageToJson(result)
101
+ else:
102
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
103
+ grpc_response = {
104
+ "payload": response_payload,
105
+ "metadata": metadata,
106
+ "status": "OK",
107
+ }
108
+ _LOGGER.debug(
109
+ f"Received response to rpc {client_call_details.method}.",
110
+ extra={
111
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
112
+ "rpcName": str(client_call_details.method),
113
+ "response": grpc_response,
114
+ "metadata": grpc_response["metadata"],
115
+ },
116
+ )
117
+ return response
118
+
119
+
120
+ class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport):
121
+ """gRPC AsyncIO backend transport for ModelService.
122
+
123
+ Provides methods for getting metadata information about
124
+ Generative Models.
125
+
126
+ This class defines the same methods as the primary client, so the
127
+ primary client can load the underlying transport implementation
128
+ and call it.
129
+
130
+ It sends protocol buffers over the wire using gRPC (which is built on
131
+ top of HTTP/2); the ``grpcio`` package must be installed.
132
+ """
133
+
134
+ _grpc_channel: aio.Channel
135
+ _stubs: Dict[str, Callable] = {}
136
+
137
+ @classmethod
138
+ def create_channel(
139
+ cls,
140
+ host: str = "generativelanguage.googleapis.com",
141
+ credentials: Optional[ga_credentials.Credentials] = None,
142
+ credentials_file: Optional[str] = None,
143
+ scopes: Optional[Sequence[str]] = None,
144
+ quota_project_id: Optional[str] = None,
145
+ **kwargs,
146
+ ) -> aio.Channel:
147
+ """Create and return a gRPC AsyncIO channel object.
148
+ Args:
149
+ host (Optional[str]): The host for the channel to use.
150
+ credentials (Optional[~.Credentials]): The
151
+ authorization credentials to attach to requests. These
152
+ credentials identify this application to the service. If
153
+ none are specified, the client will attempt to ascertain
154
+ the credentials from the environment.
155
+ credentials_file (Optional[str]): A file with credentials that can
156
+ be loaded with :func:`google.auth.load_credentials_from_file`.
157
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
158
+ service. These are only used when credentials are not specified and
159
+ are passed to :func:`google.auth.default`.
160
+ quota_project_id (Optional[str]): An optional project to use for billing
161
+ and quota.
162
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
163
+ channel creation.
164
+ Returns:
165
+ aio.Channel: A gRPC AsyncIO channel object.
166
+ """
167
+
168
+ return grpc_helpers_async.create_channel(
169
+ host,
170
+ credentials=credentials,
171
+ credentials_file=credentials_file,
172
+ quota_project_id=quota_project_id,
173
+ default_scopes=cls.AUTH_SCOPES,
174
+ scopes=scopes,
175
+ default_host=cls.DEFAULT_HOST,
176
+ **kwargs,
177
+ )
178
+
179
+ def __init__(
180
+ self,
181
+ *,
182
+ host: str = "generativelanguage.googleapis.com",
183
+ credentials: Optional[ga_credentials.Credentials] = None,
184
+ credentials_file: Optional[str] = None,
185
+ scopes: Optional[Sequence[str]] = None,
186
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
187
+ api_mtls_endpoint: Optional[str] = None,
188
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
189
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
190
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
191
+ quota_project_id: Optional[str] = None,
192
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
193
+ always_use_jwt_access: Optional[bool] = False,
194
+ api_audience: Optional[str] = None,
195
+ ) -> None:
196
+ """Instantiate the transport.
197
+
198
+ Args:
199
+ host (Optional[str]):
200
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
201
+ credentials (Optional[google.auth.credentials.Credentials]): The
202
+ authorization credentials to attach to requests. These
203
+ credentials identify the application to the service; if none
204
+ are specified, the client will attempt to ascertain the
205
+ credentials from the environment.
206
+ This argument is ignored if a ``channel`` instance is provided.
207
+ credentials_file (Optional[str]): A file with credentials that can
208
+ be loaded with :func:`google.auth.load_credentials_from_file`.
209
+ This argument is ignored if a ``channel`` instance is provided.
210
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
211
+ service. These are only used when credentials are not specified and
212
+ are passed to :func:`google.auth.default`.
213
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
214
+ A ``Channel`` instance through which to make calls, or a Callable
215
+ that constructs and returns one. If set to None, ``self.create_channel``
216
+ is used to create the channel. If a Callable is given, it will be called
217
+ with the same arguments as used in ``self.create_channel``.
218
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
219
+ If provided, it overrides the ``host`` argument and tries to create
220
+ a mutual TLS channel with client SSL credentials from
221
+ ``client_cert_source`` or application default SSL credentials.
222
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
223
+ Deprecated. A callback to provide client SSL certificate bytes and
224
+ private key bytes, both in PEM format. It is ignored if
225
+ ``api_mtls_endpoint`` is None.
226
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
227
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
228
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
229
+ A callback to provide client certificate bytes and private key bytes,
230
+ both in PEM format. It is used to configure a mutual TLS channel. It is
231
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
232
+ quota_project_id (Optional[str]): An optional project to use for billing
233
+ and quota.
234
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
235
+ The client info used to send a user-agent string along with
236
+ API requests. If ``None``, then default info will be used.
237
+ Generally, you only need to set this if you're developing
238
+ your own client library.
239
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
240
+ be used for service account credentials.
241
+
242
+ Raises:
243
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
244
+ creation failed for any reason.
245
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
246
+ and ``credentials_file`` are passed.
247
+ """
248
+ self._grpc_channel = None
249
+ self._ssl_channel_credentials = ssl_channel_credentials
250
+ self._stubs: Dict[str, Callable] = {}
251
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
252
+
253
+ if api_mtls_endpoint:
254
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
255
+ if client_cert_source:
256
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
257
+
258
+ if isinstance(channel, aio.Channel):
259
+ # Ignore credentials if a channel was passed.
260
+ credentials = None
261
+ self._ignore_credentials = True
262
+ # If a channel was explicitly provided, set it.
263
+ self._grpc_channel = channel
264
+ self._ssl_channel_credentials = None
265
+ else:
266
+ if api_mtls_endpoint:
267
+ host = api_mtls_endpoint
268
+
269
+ # Create SSL credentials with client_cert_source or application
270
+ # default SSL credentials.
271
+ if client_cert_source:
272
+ cert, key = client_cert_source()
273
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
274
+ certificate_chain=cert, private_key=key
275
+ )
276
+ else:
277
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
278
+
279
+ else:
280
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
281
+ cert, key = client_cert_source_for_mtls()
282
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
283
+ certificate_chain=cert, private_key=key
284
+ )
285
+
286
+ # The base transport sets the host, credentials and scopes
287
+ super().__init__(
288
+ host=host,
289
+ credentials=credentials,
290
+ credentials_file=credentials_file,
291
+ scopes=scopes,
292
+ quota_project_id=quota_project_id,
293
+ client_info=client_info,
294
+ always_use_jwt_access=always_use_jwt_access,
295
+ api_audience=api_audience,
296
+ )
297
+
298
+ if not self._grpc_channel:
299
+ # initialize with the provided callable or the default channel
300
+ channel_init = channel or type(self).create_channel
301
+ self._grpc_channel = channel_init(
302
+ self._host,
303
+ # use the credentials which are saved
304
+ credentials=self._credentials,
305
+ # Set ``credentials_file`` to ``None`` here as
306
+ # the credentials that we saved earlier should be used.
307
+ credentials_file=None,
308
+ scopes=self._scopes,
309
+ ssl_credentials=self._ssl_channel_credentials,
310
+ quota_project_id=quota_project_id,
311
+ options=[
312
+ ("grpc.max_send_message_length", -1),
313
+ ("grpc.max_receive_message_length", -1),
314
+ ],
315
+ )
316
+
317
+ self._interceptor = _LoggingClientAIOInterceptor()
318
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
319
+ self._logged_channel = self._grpc_channel
320
+ self._wrap_with_kind = (
321
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
322
+ )
323
+ # Wrap messages. This must be done after self._logged_channel exists
324
+ self._prep_wrapped_messages(client_info)
325
+
326
+ @property
327
+ def grpc_channel(self) -> aio.Channel:
328
+ """Create the channel designed to connect to this service.
329
+
330
+ This property caches on the instance; repeated calls return
331
+ the same channel.
332
+ """
333
+ # Return the channel from cache.
334
+ return self._grpc_channel
335
+
336
+ @property
337
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
338
+ """Create the client designed to process long-running operations.
339
+
340
+ This property caches on the instance; repeated calls return the same
341
+ client.
342
+ """
343
+ # Quick check: Only create a new client if we do not already have one.
344
+ if self._operations_client is None:
345
+ self._operations_client = operations_v1.OperationsAsyncClient(
346
+ self._logged_channel
347
+ )
348
+
349
+ # Return the client from cache.
350
+ return self._operations_client
351
+
352
+ @property
353
+ def get_model(
354
+ self,
355
+ ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]:
356
+ r"""Return a callable for the get model method over gRPC.
357
+
358
+ Gets information about a specific Model.
359
+
360
+ Returns:
361
+ Callable[[~.GetModelRequest],
362
+ Awaitable[~.Model]]:
363
+ A function that, when called, will call the underlying RPC
364
+ on the server.
365
+ """
366
+ # Generate a "stub function" on-the-fly which will actually make
367
+ # the request.
368
+ # gRPC handles serialization and deserialization, so we just need
369
+ # to pass in the functions for each.
370
+ if "get_model" not in self._stubs:
371
+ self._stubs["get_model"] = self._logged_channel.unary_unary(
372
+ "/google.ai.generativelanguage.v1beta3.ModelService/GetModel",
373
+ request_serializer=model_service.GetModelRequest.serialize,
374
+ response_deserializer=model.Model.deserialize,
375
+ )
376
+ return self._stubs["get_model"]
377
+
378
+ @property
379
+ def list_models(
380
+ self,
381
+ ) -> Callable[
382
+ [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse]
383
+ ]:
384
+ r"""Return a callable for the list models method over gRPC.
385
+
386
+ Lists models available through the API.
387
+
388
+ Returns:
389
+ Callable[[~.ListModelsRequest],
390
+ Awaitable[~.ListModelsResponse]]:
391
+ A function that, when called, will call the underlying RPC
392
+ on the server.
393
+ """
394
+ # Generate a "stub function" on-the-fly which will actually make
395
+ # the request.
396
+ # gRPC handles serialization and deserialization, so we just need
397
+ # to pass in the functions for each.
398
+ if "list_models" not in self._stubs:
399
+ self._stubs["list_models"] = self._logged_channel.unary_unary(
400
+ "/google.ai.generativelanguage.v1beta3.ModelService/ListModels",
401
+ request_serializer=model_service.ListModelsRequest.serialize,
402
+ response_deserializer=model_service.ListModelsResponse.deserialize,
403
+ )
404
+ return self._stubs["list_models"]
405
+
406
+ @property
407
+ def get_tuned_model(
408
+ self,
409
+ ) -> Callable[
410
+ [model_service.GetTunedModelRequest], Awaitable[tuned_model.TunedModel]
411
+ ]:
412
+ r"""Return a callable for the get tuned model method over gRPC.
413
+
414
+ Gets information about a specific TunedModel.
415
+
416
+ Returns:
417
+ Callable[[~.GetTunedModelRequest],
418
+ Awaitable[~.TunedModel]]:
419
+ A function that, when called, will call the underlying RPC
420
+ on the server.
421
+ """
422
+ # Generate a "stub function" on-the-fly which will actually make
423
+ # the request.
424
+ # gRPC handles serialization and deserialization, so we just need
425
+ # to pass in the functions for each.
426
+ if "get_tuned_model" not in self._stubs:
427
+ self._stubs["get_tuned_model"] = self._logged_channel.unary_unary(
428
+ "/google.ai.generativelanguage.v1beta3.ModelService/GetTunedModel",
429
+ request_serializer=model_service.GetTunedModelRequest.serialize,
430
+ response_deserializer=tuned_model.TunedModel.deserialize,
431
+ )
432
+ return self._stubs["get_tuned_model"]
433
+
434
+ @property
435
+ def list_tuned_models(
436
+ self,
437
+ ) -> Callable[
438
+ [model_service.ListTunedModelsRequest],
439
+ Awaitable[model_service.ListTunedModelsResponse],
440
+ ]:
441
+ r"""Return a callable for the list tuned models method over gRPC.
442
+
443
+ Lists tuned models owned by the user.
444
+
445
+ Returns:
446
+ Callable[[~.ListTunedModelsRequest],
447
+ Awaitable[~.ListTunedModelsResponse]]:
448
+ A function that, when called, will call the underlying RPC
449
+ on the server.
450
+ """
451
+ # Generate a "stub function" on-the-fly which will actually make
452
+ # the request.
453
+ # gRPC handles serialization and deserialization, so we just need
454
+ # to pass in the functions for each.
455
+ if "list_tuned_models" not in self._stubs:
456
+ self._stubs["list_tuned_models"] = self._logged_channel.unary_unary(
457
+ "/google.ai.generativelanguage.v1beta3.ModelService/ListTunedModels",
458
+ request_serializer=model_service.ListTunedModelsRequest.serialize,
459
+ response_deserializer=model_service.ListTunedModelsResponse.deserialize,
460
+ )
461
+ return self._stubs["list_tuned_models"]
462
+
463
+ @property
464
+ def create_tuned_model(
465
+ self,
466
+ ) -> Callable[
467
+ [model_service.CreateTunedModelRequest], Awaitable[operations_pb2.Operation]
468
+ ]:
469
+ r"""Return a callable for the create tuned model method over gRPC.
470
+
471
+ Creates a tuned model. Intermediate tuning progress (if any) is
472
+ accessed through the [google.longrunning.Operations] service.
473
+
474
+ Status and results can be accessed through the Operations
475
+ service. Example: GET
476
+ /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
477
+
478
+ Returns:
479
+ Callable[[~.CreateTunedModelRequest],
480
+ Awaitable[~.Operation]]:
481
+ A function that, when called, will call the underlying RPC
482
+ on the server.
483
+ """
484
+ # Generate a "stub function" on-the-fly which will actually make
485
+ # the request.
486
+ # gRPC handles serialization and deserialization, so we just need
487
+ # to pass in the functions for each.
488
+ if "create_tuned_model" not in self._stubs:
489
+ self._stubs["create_tuned_model"] = self._logged_channel.unary_unary(
490
+ "/google.ai.generativelanguage.v1beta3.ModelService/CreateTunedModel",
491
+ request_serializer=model_service.CreateTunedModelRequest.serialize,
492
+ response_deserializer=operations_pb2.Operation.FromString,
493
+ )
494
+ return self._stubs["create_tuned_model"]
495
+
496
+ @property
497
+ def update_tuned_model(
498
+ self,
499
+ ) -> Callable[
500
+ [model_service.UpdateTunedModelRequest], Awaitable[gag_tuned_model.TunedModel]
501
+ ]:
502
+ r"""Return a callable for the update tuned model method over gRPC.
503
+
504
+ Updates a tuned model.
505
+
506
+ Returns:
507
+ Callable[[~.UpdateTunedModelRequest],
508
+ Awaitable[~.TunedModel]]:
509
+ A function that, when called, will call the underlying RPC
510
+ on the server.
511
+ """
512
+ # Generate a "stub function" on-the-fly which will actually make
513
+ # the request.
514
+ # gRPC handles serialization and deserialization, so we just need
515
+ # to pass in the functions for each.
516
+ if "update_tuned_model" not in self._stubs:
517
+ self._stubs["update_tuned_model"] = self._logged_channel.unary_unary(
518
+ "/google.ai.generativelanguage.v1beta3.ModelService/UpdateTunedModel",
519
+ request_serializer=model_service.UpdateTunedModelRequest.serialize,
520
+ response_deserializer=gag_tuned_model.TunedModel.deserialize,
521
+ )
522
+ return self._stubs["update_tuned_model"]
523
+
524
+ @property
525
+ def delete_tuned_model(
526
+ self,
527
+ ) -> Callable[[model_service.DeleteTunedModelRequest], Awaitable[empty_pb2.Empty]]:
528
+ r"""Return a callable for the delete tuned model method over gRPC.
529
+
530
+ Deletes a tuned model.
531
+
532
+ Returns:
533
+ Callable[[~.DeleteTunedModelRequest],
534
+ Awaitable[~.Empty]]:
535
+ A function that, when called, will call the underlying RPC
536
+ on the server.
537
+ """
538
+ # Generate a "stub function" on-the-fly which will actually make
539
+ # the request.
540
+ # gRPC handles serialization and deserialization, so we just need
541
+ # to pass in the functions for each.
542
+ if "delete_tuned_model" not in self._stubs:
543
+ self._stubs["delete_tuned_model"] = self._logged_channel.unary_unary(
544
+ "/google.ai.generativelanguage.v1beta3.ModelService/DeleteTunedModel",
545
+ request_serializer=model_service.DeleteTunedModelRequest.serialize,
546
+ response_deserializer=empty_pb2.Empty.FromString,
547
+ )
548
+ return self._stubs["delete_tuned_model"]
549
+
550
+ def _prep_wrapped_messages(self, client_info):
551
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
552
+ self._wrapped_methods = {
553
+ self.get_model: self._wrap_method(
554
+ self.get_model,
555
+ default_timeout=None,
556
+ client_info=client_info,
557
+ ),
558
+ self.list_models: self._wrap_method(
559
+ self.list_models,
560
+ default_timeout=None,
561
+ client_info=client_info,
562
+ ),
563
+ self.get_tuned_model: self._wrap_method(
564
+ self.get_tuned_model,
565
+ default_timeout=None,
566
+ client_info=client_info,
567
+ ),
568
+ self.list_tuned_models: self._wrap_method(
569
+ self.list_tuned_models,
570
+ default_timeout=None,
571
+ client_info=client_info,
572
+ ),
573
+ self.create_tuned_model: self._wrap_method(
574
+ self.create_tuned_model,
575
+ default_timeout=None,
576
+ client_info=client_info,
577
+ ),
578
+ self.update_tuned_model: self._wrap_method(
579
+ self.update_tuned_model,
580
+ default_timeout=None,
581
+ client_info=client_info,
582
+ ),
583
+ self.delete_tuned_model: self._wrap_method(
584
+ self.delete_tuned_model,
585
+ default_timeout=None,
586
+ client_info=client_info,
587
+ ),
588
+ }
589
+
590
+ def _wrap_method(self, func, *args, **kwargs):
591
+ if self._wrap_with_kind: # pragma: NO COVER
592
+ kwargs["kind"] = self.kind
593
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
594
+
595
+ def close(self):
596
+ return self._logged_channel.close()
597
+
598
+ @property
599
+ def kind(self) -> str:
600
+ return "grpc_asyncio"
601
+
602
+
603
+ __all__ = ("ModelServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/rest.py ADDED
@@ -0,0 +1,1462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, operations_v1, rest_helpers, rest_streaming
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.longrunning import operations_pb2 # type: ignore
28
+ from google.protobuf import empty_pb2 # type: ignore
29
+ from google.protobuf import json_format
30
+ from requests import __version__ as requests_version
31
+
32
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
33
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
34
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
35
+
36
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
37
+ from .rest_base import _BaseModelServiceRestTransport
38
+
39
# OptionalRetry: accept a Retry object, the gapic "method default" sentinel,
# or None.  Older google-api-core releases lack gapic_v1.method._MethodDefault,
# in which case fall back to a plain ``object`` placeholder.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore

# Structured request/response debug logging is only available on recent
# google-api-core; probe for the feature at import time.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = logging.getLogger(__name__)

# Client metadata sent with every request; rest_version records the local
# ``requests`` version in place of a gRPC version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=f"requests@{requests_version}",
)
58
+
59
+
60
class ModelServiceRestInterceptor:
    """Interceptor for ModelService.

    Interceptors are used to manipulate requests, request metadata, and responses
    in arbitrary ways.
    Example use cases include:
    * Logging
    * Verifying requests according to service or custom semantics
    * Stripping extraneous information from responses

    These use cases and more can be enabled by injecting an
    instance of a custom subclass when constructing the ModelServiceRestTransport.

    .. code-block:: python
        class MyCustomModelServiceInterceptor(ModelServiceRestInterceptor):
            def pre_create_tuned_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_create_tuned_model(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_delete_tuned_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def pre_get_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_get_model(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_get_tuned_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_get_tuned_model(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_list_models(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_list_models(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_list_tuned_models(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_list_tuned_models(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_update_tuned_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_update_tuned_model(self, response):
                logging.log(f"Received response: {response}")
                return response

        transport = ModelServiceRestTransport(interceptor=MyCustomModelServiceInterceptor())
        client = ModelServiceClient(transport=transport)


    """

    # Contract: every pre_* hook receives ``(request, metadata)`` and must
    # return the (possibly modified) pair; every post_* hook receives the RPC
    # response and must return it.  All default implementations below are
    # pass-throughs.  delete_tuned_model has no post_* hook because the RPC
    # returns Empty.

    def pre_create_tuned_model(
        self,
        request: model_service.CreateTunedModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.CreateTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for create_tuned_model

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_create_tuned_model(
        self, response: operations_pb2.Operation
    ) -> operations_pb2.Operation:
        """Post-rpc interceptor for create_tuned_model

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response

    def pre_delete_tuned_model(
        self,
        request: model_service.DeleteTunedModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.DeleteTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for delete_tuned_model

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def pre_get_model(
        self,
        request: model_service.GetModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[model_service.GetModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
        """Pre-rpc interceptor for get_model

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_get_model(self, response: model.Model) -> model.Model:
        """Post-rpc interceptor for get_model

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response

    def pre_get_tuned_model(
        self,
        request: model_service.GetTunedModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.GetTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for get_tuned_model

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_get_tuned_model(
        self, response: tuned_model.TunedModel
    ) -> tuned_model.TunedModel:
        """Post-rpc interceptor for get_tuned_model

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response

    def pre_list_models(
        self,
        request: model_service.ListModelsRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.ListModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for list_models

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_list_models(
        self, response: model_service.ListModelsResponse
    ) -> model_service.ListModelsResponse:
        """Post-rpc interceptor for list_models

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response

    def pre_list_tuned_models(
        self,
        request: model_service.ListTunedModelsRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.ListTunedModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for list_tuned_models

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_list_tuned_models(
        self, response: model_service.ListTunedModelsResponse
    ) -> model_service.ListTunedModelsResponse:
        """Post-rpc interceptor for list_tuned_models

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response

    def pre_update_tuned_model(
        self,
        request: model_service.UpdateTunedModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.UpdateTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for update_tuned_model

        Override in a subclass to manipulate the request or metadata
        before they are sent to the ModelService server.
        """
        return request, metadata

    def post_update_tuned_model(
        self, response: gag_tuned_model.TunedModel
    ) -> gag_tuned_model.TunedModel:
        """Post-rpc interceptor for update_tuned_model

        Override in a subclass to manipulate the response
        after it is returned by the ModelService server but before
        it is returned to user code.
        """
        return response
292
+
293
+
294
@dataclasses.dataclass
class ModelServiceRestStub:
    """Shared state mixed into each per-RPC stub class on the REST transport."""

    # Authorized HTTP session used to issue the request.
    _session: AuthorizedSession
    # Scheme + hostname prefix joined with the transcoded URI.
    _host: str
    # Interceptor whose pre_*/post_* hooks wrap each call.
    _interceptor: ModelServiceRestInterceptor
299
+
300
+
301
+ class ModelServiceRestTransport(_BaseModelServiceRestTransport):
302
+ """REST backend synchronous transport for ModelService.
303
+
304
+ Provides methods for getting metadata information about
305
+ Generative Models.
306
+
307
+ This class defines the same methods as the primary client, so the
308
+ primary client can load the underlying transport implementation
309
+ and call it.
310
+
311
+ It sends JSON representations of protocol buffers over HTTP/1.1
312
+ """
313
+
314
+ def __init__(
315
+ self,
316
+ *,
317
+ host: str = "generativelanguage.googleapis.com",
318
+ credentials: Optional[ga_credentials.Credentials] = None,
319
+ credentials_file: Optional[str] = None,
320
+ scopes: Optional[Sequence[str]] = None,
321
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
322
+ quota_project_id: Optional[str] = None,
323
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
324
+ always_use_jwt_access: Optional[bool] = False,
325
+ url_scheme: str = "https",
326
+ interceptor: Optional[ModelServiceRestInterceptor] = None,
327
+ api_audience: Optional[str] = None,
328
+ ) -> None:
329
+ """Instantiate the transport.
330
+
331
+ Args:
332
+ host (Optional[str]):
333
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
334
+ credentials (Optional[google.auth.credentials.Credentials]): The
335
+ authorization credentials to attach to requests. These
336
+ credentials identify the application to the service; if none
337
+ are specified, the client will attempt to ascertain the
338
+ credentials from the environment.
339
+
340
+ credentials_file (Optional[str]): A file with credentials that can
341
+ be loaded with :func:`google.auth.load_credentials_from_file`.
342
+ This argument is ignored if ``channel`` is provided.
343
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
344
+ ignored if ``channel`` is provided.
345
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
346
+ certificate to configure mutual TLS HTTP channel. It is ignored
347
+ if ``channel`` is provided.
348
+ quota_project_id (Optional[str]): An optional project to use for billing
349
+ and quota.
350
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
351
+ The client info used to send a user-agent string along with
352
+ API requests. If ``None``, then default info will be used.
353
+ Generally, you only need to set this if you are developing
354
+ your own client library.
355
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
356
+ be used for service account credentials.
357
+ url_scheme: the protocol scheme for the API endpoint. Normally
358
+ "https", but for testing or local servers,
359
+ "http" can be specified.
360
+ """
361
+ # Run the base constructor
362
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
363
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
364
+ # credentials object
365
+ super().__init__(
366
+ host=host,
367
+ credentials=credentials,
368
+ client_info=client_info,
369
+ always_use_jwt_access=always_use_jwt_access,
370
+ url_scheme=url_scheme,
371
+ api_audience=api_audience,
372
+ )
373
+ self._session = AuthorizedSession(
374
+ self._credentials, default_host=self.DEFAULT_HOST
375
+ )
376
+ self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
377
+ if client_cert_source_for_mtls:
378
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
379
+ self._interceptor = interceptor or ModelServiceRestInterceptor()
380
+ self._prep_wrapped_messages(client_info)
381
+
382
+ @property
383
+ def operations_client(self) -> operations_v1.AbstractOperationsClient:
384
+ """Create the client designed to process long-running operations.
385
+
386
+ This property caches on the instance; repeated calls return the same
387
+ client.
388
+ """
389
+ # Only create a new client if we do not already have one.
390
+ if self._operations_client is None:
391
+ http_options: Dict[str, List[Dict[str, str]]] = {}
392
+
393
+ rest_transport = operations_v1.OperationsRestTransport(
394
+ host=self._host,
395
+ # use the credentials which are saved
396
+ credentials=self._credentials,
397
+ scopes=self._scopes,
398
+ http_options=http_options,
399
+ path_prefix="v1beta3",
400
+ )
401
+
402
+ self._operations_client = operations_v1.AbstractOperationsClient(
403
+ transport=rest_transport
404
+ )
405
+
406
+ # Return the client from cache.
407
+ return self._operations_client
408
+
409
+ class _CreateTunedModel(
410
+ _BaseModelServiceRestTransport._BaseCreateTunedModel, ModelServiceRestStub
411
+ ):
412
+ def __hash__(self):
413
+ return hash("ModelServiceRestTransport.CreateTunedModel")
414
+
415
+ @staticmethod
416
+ def _get_response(
417
+ host,
418
+ metadata,
419
+ query_params,
420
+ session,
421
+ timeout,
422
+ transcoded_request,
423
+ body=None,
424
+ ):
425
+ uri = transcoded_request["uri"]
426
+ method = transcoded_request["method"]
427
+ headers = dict(metadata)
428
+ headers["Content-Type"] = "application/json"
429
+ response = getattr(session, method)(
430
+ "{host}{uri}".format(host=host, uri=uri),
431
+ timeout=timeout,
432
+ headers=headers,
433
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
434
+ data=body,
435
+ )
436
+ return response
437
+
438
+ def __call__(
439
+ self,
440
+ request: model_service.CreateTunedModelRequest,
441
+ *,
442
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
443
+ timeout: Optional[float] = None,
444
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
445
+ ) -> operations_pb2.Operation:
446
+ r"""Call the create tuned model method over HTTP.
447
+
448
+ Args:
449
+ request (~.model_service.CreateTunedModelRequest):
450
+ The request object. Request to create a TunedModel.
451
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
452
+ should be retried.
453
+ timeout (float): The timeout for this request.
454
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
455
+ sent along with the request as metadata. Normally, each value must be of type `str`,
456
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
457
+ be of type `bytes`.
458
+
459
+ Returns:
460
+ ~.operations_pb2.Operation:
461
+ This resource represents a
462
+ long-running operation that is the
463
+ result of a network API call.
464
+
465
+ """
466
+
467
+ http_options = (
468
+ _BaseModelServiceRestTransport._BaseCreateTunedModel._get_http_options()
469
+ )
470
+
471
+ request, metadata = self._interceptor.pre_create_tuned_model(
472
+ request, metadata
473
+ )
474
+ transcoded_request = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_transcoded_request(
475
+ http_options, request
476
+ )
477
+
478
+ body = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_request_body_json(
479
+ transcoded_request
480
+ )
481
+
482
+ # Jsonify the query params
483
+ query_params = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_query_params_json(
484
+ transcoded_request
485
+ )
486
+
487
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
488
+ logging.DEBUG
489
+ ): # pragma: NO COVER
490
+ request_url = "{host}{uri}".format(
491
+ host=self._host, uri=transcoded_request["uri"]
492
+ )
493
+ method = transcoded_request["method"]
494
+ try:
495
+ request_payload = json_format.MessageToJson(request)
496
+ except:
497
+ request_payload = None
498
+ http_request = {
499
+ "payload": request_payload,
500
+ "requestMethod": method,
501
+ "requestUrl": request_url,
502
+ "headers": dict(metadata),
503
+ }
504
+ _LOGGER.debug(
505
+ f"Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.CreateTunedModel",
506
+ extra={
507
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
508
+ "rpcName": "CreateTunedModel",
509
+ "httpRequest": http_request,
510
+ "metadata": http_request["headers"],
511
+ },
512
+ )
513
+
514
+ # Send the request
515
+ response = ModelServiceRestTransport._CreateTunedModel._get_response(
516
+ self._host,
517
+ metadata,
518
+ query_params,
519
+ self._session,
520
+ timeout,
521
+ transcoded_request,
522
+ body,
523
+ )
524
+
525
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
526
+ # subclass.
527
+ if response.status_code >= 400:
528
+ raise core_exceptions.from_http_response(response)
529
+
530
+ # Return the response
531
+ resp = operations_pb2.Operation()
532
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
533
+
534
+ resp = self._interceptor.post_create_tuned_model(resp)
535
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
536
+ logging.DEBUG
537
+ ): # pragma: NO COVER
538
+ try:
539
+ response_payload = json_format.MessageToJson(resp)
540
+ except:
541
+ response_payload = None
542
+ http_response = {
543
+ "payload": response_payload,
544
+ "headers": dict(response.headers),
545
+ "status": response.status_code,
546
+ }
547
+ _LOGGER.debug(
548
+ "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.create_tuned_model",
549
+ extra={
550
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
551
+ "rpcName": "CreateTunedModel",
552
+ "metadata": http_response["headers"],
553
+ "httpResponse": http_response,
554
+ },
555
+ )
556
+ return resp
557
+
558
+ class _DeleteTunedModel(
559
+ _BaseModelServiceRestTransport._BaseDeleteTunedModel, ModelServiceRestStub
560
+ ):
561
+ def __hash__(self):
562
+ return hash("ModelServiceRestTransport.DeleteTunedModel")
563
+
564
+ @staticmethod
565
+ def _get_response(
566
+ host,
567
+ metadata,
568
+ query_params,
569
+ session,
570
+ timeout,
571
+ transcoded_request,
572
+ body=None,
573
+ ):
574
+ uri = transcoded_request["uri"]
575
+ method = transcoded_request["method"]
576
+ headers = dict(metadata)
577
+ headers["Content-Type"] = "application/json"
578
+ response = getattr(session, method)(
579
+ "{host}{uri}".format(host=host, uri=uri),
580
+ timeout=timeout,
581
+ headers=headers,
582
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
583
+ )
584
+ return response
585
+
586
+ def __call__(
587
+ self,
588
+ request: model_service.DeleteTunedModelRequest,
589
+ *,
590
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
591
+ timeout: Optional[float] = None,
592
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
593
+ ):
594
+ r"""Call the delete tuned model method over HTTP.
595
+
596
+ Args:
597
+ request (~.model_service.DeleteTunedModelRequest):
598
+ The request object. Request to delete a TunedModel.
599
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
600
+ should be retried.
601
+ timeout (float): The timeout for this request.
602
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
603
+ sent along with the request as metadata. Normally, each value must be of type `str`,
604
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
605
+ be of type `bytes`.
606
+ """
607
+
608
+ http_options = (
609
+ _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_http_options()
610
+ )
611
+
612
+ request, metadata = self._interceptor.pre_delete_tuned_model(
613
+ request, metadata
614
+ )
615
+ transcoded_request = _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_transcoded_request(
616
+ http_options, request
617
+ )
618
+
619
+ # Jsonify the query params
620
+ query_params = _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_query_params_json(
621
+ transcoded_request
622
+ )
623
+
624
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
625
+ logging.DEBUG
626
+ ): # pragma: NO COVER
627
+ request_url = "{host}{uri}".format(
628
+ host=self._host, uri=transcoded_request["uri"]
629
+ )
630
+ method = transcoded_request["method"]
631
+ try:
632
+ request_payload = json_format.MessageToJson(request)
633
+ except:
634
+ request_payload = None
635
+ http_request = {
636
+ "payload": request_payload,
637
+ "requestMethod": method,
638
+ "requestUrl": request_url,
639
+ "headers": dict(metadata),
640
+ }
641
+ _LOGGER.debug(
642
+ f"Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.DeleteTunedModel",
643
+ extra={
644
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
645
+ "rpcName": "DeleteTunedModel",
646
+ "httpRequest": http_request,
647
+ "metadata": http_request["headers"],
648
+ },
649
+ )
650
+
651
+ # Send the request
652
+ response = ModelServiceRestTransport._DeleteTunedModel._get_response(
653
+ self._host,
654
+ metadata,
655
+ query_params,
656
+ self._session,
657
+ timeout,
658
+ transcoded_request,
659
+ )
660
+
661
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
662
+ # subclass.
663
+ if response.status_code >= 400:
664
+ raise core_exceptions.from_http_response(response)
665
+
666
+ class _GetModel(_BaseModelServiceRestTransport._BaseGetModel, ModelServiceRestStub):
667
+ def __hash__(self):
668
+ return hash("ModelServiceRestTransport.GetModel")
669
+
670
+ @staticmethod
671
+ def _get_response(
672
+ host,
673
+ metadata,
674
+ query_params,
675
+ session,
676
+ timeout,
677
+ transcoded_request,
678
+ body=None,
679
+ ):
680
+ uri = transcoded_request["uri"]
681
+ method = transcoded_request["method"]
682
+ headers = dict(metadata)
683
+ headers["Content-Type"] = "application/json"
684
+ response = getattr(session, method)(
685
+ "{host}{uri}".format(host=host, uri=uri),
686
+ timeout=timeout,
687
+ headers=headers,
688
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
689
+ )
690
+ return response
691
+
692
+ def __call__(
693
+ self,
694
+ request: model_service.GetModelRequest,
695
+ *,
696
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
697
+ timeout: Optional[float] = None,
698
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
699
+ ) -> model.Model:
700
+ r"""Call the get model method over HTTP.
701
+
702
+ Args:
703
+ request (~.model_service.GetModelRequest):
704
+ The request object. Request for getting information about
705
+ a specific Model.
706
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
707
+ should be retried.
708
+ timeout (float): The timeout for this request.
709
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
710
+ sent along with the request as metadata. Normally, each value must be of type `str`,
711
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
712
+ be of type `bytes`.
713
+
714
+ Returns:
715
+ ~.model.Model:
716
+ Information about a Generative
717
+ Language Model.
718
+
719
+ """
720
+
721
+ http_options = (
722
+ _BaseModelServiceRestTransport._BaseGetModel._get_http_options()
723
+ )
724
+
725
+ request, metadata = self._interceptor.pre_get_model(request, metadata)
726
+ transcoded_request = (
727
+ _BaseModelServiceRestTransport._BaseGetModel._get_transcoded_request(
728
+ http_options, request
729
+ )
730
+ )
731
+
732
+ # Jsonify the query params
733
+ query_params = (
734
+ _BaseModelServiceRestTransport._BaseGetModel._get_query_params_json(
735
+ transcoded_request
736
+ )
737
+ )
738
+
739
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
740
+ logging.DEBUG
741
+ ): # pragma: NO COVER
742
+ request_url = "{host}{uri}".format(
743
+ host=self._host, uri=transcoded_request["uri"]
744
+ )
745
+ method = transcoded_request["method"]
746
+ try:
747
+ request_payload = type(request).to_json(request)
748
+ except:
749
+ request_payload = None
750
+ http_request = {
751
+ "payload": request_payload,
752
+ "requestMethod": method,
753
+ "requestUrl": request_url,
754
+ "headers": dict(metadata),
755
+ }
756
+ _LOGGER.debug(
757
+ f"Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.GetModel",
758
+ extra={
759
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
760
+ "rpcName": "GetModel",
761
+ "httpRequest": http_request,
762
+ "metadata": http_request["headers"],
763
+ },
764
+ )
765
+
766
+ # Send the request
767
+ response = ModelServiceRestTransport._GetModel._get_response(
768
+ self._host,
769
+ metadata,
770
+ query_params,
771
+ self._session,
772
+ timeout,
773
+ transcoded_request,
774
+ )
775
+
776
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
777
+ # subclass.
778
+ if response.status_code >= 400:
779
+ raise core_exceptions.from_http_response(response)
780
+
781
+ # Return the response
782
+ resp = model.Model()
783
+ pb_resp = model.Model.pb(resp)
784
+
785
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
786
+
787
+ resp = self._interceptor.post_get_model(resp)
788
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
789
+ logging.DEBUG
790
+ ): # pragma: NO COVER
791
+ try:
792
+ response_payload = model.Model.to_json(response)
793
+ except:
794
+ response_payload = None
795
+ http_response = {
796
+ "payload": response_payload,
797
+ "headers": dict(response.headers),
798
+ "status": response.status_code,
799
+ }
800
+ _LOGGER.debug(
801
+ "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.get_model",
802
+ extra={
803
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
804
+ "rpcName": "GetModel",
805
+ "metadata": http_response["headers"],
806
+ "httpResponse": http_response,
807
+ },
808
+ )
809
+ return resp
810
+
811
class _GetTunedModel(
    _BaseModelServiceRestTransport._BaseGetTunedModel, ModelServiceRestStub
):
    """Synchronous REST stub for the ``GetTunedModel`` RPC."""

    def __hash__(self):
        return hash("ModelServiceRestTransport.GetTunedModel")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call (method + URI) through the shared
        # authorized session.  ``body`` is unused for this GET-style RPC but
        # kept for signature parity with body-carrying stubs.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: model_service.GetTunedModelRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> tuned_model.TunedModel:
        r"""Call the get tuned model method over HTTP.

        Args:
            request (~.model_service.GetTunedModelRequest):
                The request object. Request for getting information about
                a specific Model.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.tuned_model.TunedModel:
                A fine-tuned model created using
                ModelService.CreateTunedModel.
        """

        http_options = (
            _BaseModelServiceRestTransport._BaseGetTunedModel._get_http_options()
        )

        request, metadata = self._interceptor.pre_get_tuned_model(request, metadata)
        transcoded_request = _BaseModelServiceRestTransport._BaseGetTunedModel._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseModelServiceRestTransport._BaseGetTunedModel._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Debug logging must never break the RPC; was a bare
                # ``except:`` which also swallowed KeyboardInterrupt.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.GetTunedModel",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "GetTunedModel",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = ModelServiceRestTransport._GetTunedModel._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the JSON payload into a proto-plus wrapper.
        resp = tuned_model.TunedModel()
        pb_resp = tuned_model.TunedModel.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_get_tuned_model(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fixed: serialize the parsed message ``resp`` — the raw HTTP
                # ``response`` object is not a proto, so the original call
                # always raised and logged ``payload: None``.
                response_payload = tuned_model.TunedModel.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.get_tuned_model",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "GetTunedModel",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
953
+
954
class _ListModels(
    _BaseModelServiceRestTransport._BaseListModels, ModelServiceRestStub
):
    """Synchronous REST stub for the ``ListModels`` RPC."""

    def __hash__(self):
        return hash("ModelServiceRestTransport.ListModels")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call through the shared authorized
        # session; ``body`` is unused for this GET-style RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: model_service.ListModelsRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> model_service.ListModelsResponse:
        r"""Call the list models method over HTTP.

        Args:
            request (~.model_service.ListModelsRequest):
                The request object. Request for listing all Models.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.model_service.ListModelsResponse:
                Response from ``ListModel`` containing a paginated list
                of Models.
        """

        http_options = (
            _BaseModelServiceRestTransport._BaseListModels._get_http_options()
        )

        request, metadata = self._interceptor.pre_list_models(request, metadata)
        transcoded_request = (
            _BaseModelServiceRestTransport._BaseListModels._get_transcoded_request(
                http_options, request
            )
        )

        # Jsonify the query params
        query_params = (
            _BaseModelServiceRestTransport._BaseListModels._get_query_params_json(
                transcoded_request
            )
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Debug logging must never break the RPC; was a bare
                # ``except:`` which also swallowed KeyboardInterrupt.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.ListModels",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "ListModels",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = ModelServiceRestTransport._ListModels._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the JSON payload into a proto-plus wrapper.
        resp = model_service.ListModelsResponse()
        pb_resp = model_service.ListModelsResponse.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_list_models(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fixed: serialize the parsed message ``resp`` — the raw HTTP
                # ``response`` object is not a proto, so the original call
                # always raised and logged ``payload: None``.
                response_payload = model_service.ListModelsResponse.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.list_models",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "ListModels",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
1101
+
1102
class _ListTunedModels(
    _BaseModelServiceRestTransport._BaseListTunedModels, ModelServiceRestStub
):
    """Synchronous REST stub for the ``ListTunedModels`` RPC."""

    def __hash__(self):
        return hash("ModelServiceRestTransport.ListTunedModels")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call through the shared authorized
        # session; ``body`` is unused for this GET-style RPC.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: model_service.ListTunedModelsRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> model_service.ListTunedModelsResponse:
        r"""Call the list tuned models method over HTTP.

        Args:
            request (~.model_service.ListTunedModelsRequest):
                The request object. Request for listing TunedModels.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.model_service.ListTunedModelsResponse:
                Response from ``ListTunedModels`` containing a paginated
                list of Models.
        """

        http_options = (
            _BaseModelServiceRestTransport._BaseListTunedModels._get_http_options()
        )

        request, metadata = self._interceptor.pre_list_tuned_models(
            request, metadata
        )
        transcoded_request = _BaseModelServiceRestTransport._BaseListTunedModels._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseModelServiceRestTransport._BaseListTunedModels._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Debug logging must never break the RPC; was a bare
                # ``except:`` which also swallowed KeyboardInterrupt.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.ListTunedModels",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "ListTunedModels",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = ModelServiceRestTransport._ListTunedModels._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the JSON payload into a proto-plus wrapper.
        resp = model_service.ListTunedModelsResponse()
        pb_resp = model_service.ListTunedModelsResponse.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_list_tuned_models(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fixed: serialize the parsed message ``resp`` — the raw HTTP
                # ``response`` object is not a proto, so the original call
                # always raised and logged ``payload: None``.
                response_payload = model_service.ListTunedModelsResponse.to_json(
                    resp
                )
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.list_tuned_models",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "ListTunedModels",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
1247
+
1248
class _UpdateTunedModel(
    _BaseModelServiceRestTransport._BaseUpdateTunedModel, ModelServiceRestStub
):
    """Synchronous REST stub for the ``UpdateTunedModel`` RPC."""

    def __hash__(self):
        return hash("ModelServiceRestTransport.UpdateTunedModel")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call through the shared authorized
        # session; ``body`` carries the JSON-encoded TunedModel patch.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: model_service.UpdateTunedModelRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> gag_tuned_model.TunedModel:
        r"""Call the update tuned model method over HTTP.

        Args:
            request (~.model_service.UpdateTunedModelRequest):
                The request object. Request to update a TunedModel.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.gag_tuned_model.TunedModel:
                A fine-tuned model created using
                ModelService.CreateTunedModel.
        """

        http_options = (
            _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_http_options()
        )

        request, metadata = self._interceptor.pre_update_tuned_model(
            request, metadata
        )
        transcoded_request = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_transcoded_request(
            http_options, request
        )

        body = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            except Exception:
                # Debug logging must never break the RPC; was a bare
                # ``except:`` which also swallowed KeyboardInterrupt.
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.ModelServiceClient.UpdateTunedModel",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "UpdateTunedModel",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = ModelServiceRestTransport._UpdateTunedModel._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the JSON payload into a proto-plus wrapper.
        resp = gag_tuned_model.TunedModel()
        pb_resp = gag_tuned_model.TunedModel.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_update_tuned_model(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fixed: serialize the parsed message ``resp`` — the raw HTTP
                # ``response`` object is not a proto, so the original call
                # always raised and logged ``payload: None``.
                response_payload = gag_tuned_model.TunedModel.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.ModelServiceClient.update_tuned_model",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
                    "rpcName": "UpdateTunedModel",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
1397
+
1398
@property
def create_tuned_model(
    self,
) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._CreateTunedModel(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1405
+
1406
@property
def delete_tuned_model(
    self,
) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._DeleteTunedModel(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1413
+
1414
@property
def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._GetModel(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1419
+
1420
@property
def get_tuned_model(
    self,
) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._GetTunedModel(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1427
+
1428
@property
def list_models(
    self,
) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._ListModels(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1435
+
1436
@property
def list_tuned_models(
    self,
) -> Callable[
    [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse
]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._ListTunedModels(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1445
+
1446
@property
def update_tuned_model(
    self,
) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]:
    # mypy cannot verify that the stub instance is callable with this
    # signature (the C++ analogue would need a dynamic_cast), so silence it.
    stub = self._UpdateTunedModel(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1453
+
1454
@property
def kind(self) -> str:
    """Identify this transport flavor: REST/JSON over HTTP/1.1."""
    return "rest"
1457
+
1458
def close(self):
    """Release the underlying authorized HTTP session."""
    self._session.close()
1460
+
1461
+
1462
# Public surface of this module: only the concrete REST transport class.
__all__ = ("ModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/rest_base.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.longrunning import operations_pb2 # type: ignore
22
+ from google.protobuf import empty_pb2 # type: ignore
23
+ from google.protobuf import json_format
24
+
25
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
26
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
27
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
28
+
29
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
30
+
31
+
32
+ class _BaseModelServiceRestTransport(ModelServiceTransport):
33
+ """Base REST backend transport for ModelService.
34
+
35
+ Note: This class is not meant to be used directly. Use its sync and
36
+ async sub-classes instead.
37
+
38
+ This class defines the same methods as the primary client, so the
39
+ primary client can load the underlying transport implementation
40
+ and call it.
41
+
42
+ It sends JSON representations of protocol buffers over HTTP/1.1
43
+ """
44
+
45
def __init__(
    self,
    *,
    host: str = "generativelanguage.googleapis.com",
    credentials: Optional[Any] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
    url_scheme: str = "https",
    api_audience: Optional[str] = None,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            Hostname to connect to
            (default: 'generativelanguage.googleapis.com').
        credentials (Optional[Any]): Authorization credentials attached
            to requests; they identify the application to the service.
            When omitted, the client attempts to pick up credentials
            from the environment.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            Client info used to build the user-agent string sent with
            API requests. ``None`` selects the defaults; only set this
            when developing your own client library.
        always_use_jwt_access (Optional[bool]): Whether a self-signed
            JWT should be used for service-account credentials.
        url_scheme: Protocol scheme for the API endpoint — normally
            "https", but "http" may be given for testing or local
            servers.
    """
    # Split an optional scheme prefix off the configured host.
    parsed = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
    if parsed is None:  # pragma: NO COVER
        raise ValueError(f"Unexpected hostname structure: {host}")

    # Prepend the requested scheme only when the host did not already
    # carry one.
    if not parsed.groupdict()["scheme"]:
        host = f"{url_scheme}://{host}"

    super().__init__(
        host=host,
        credentials=credentials,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
        api_audience=api_audience,
    )
93
+
94
class _BaseCreateTunedModel:
    """Shared REST plumbing (routing, transcoding) for ``CreateTunedModel``."""

    def __hash__(self):  # pragma: NO COVER
        # Fixed: raise instead of return — ``__hash__`` must yield an int,
        # and the original returned a NotImplementedError *instance*.
        raise NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required query params the caller left unset.
        return {
            k: v
            for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    @staticmethod
    def _get_http_options():
        http_options: List[Dict[str, str]] = [
            {
                "method": "post",
                "uri": "/v1beta3/tunedModels",
                "body": "tuned_model",
            },
        ]
        return http_options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        pb_request = model_service.CreateTunedModelRequest.pb(request)
        transcoded_request = path_template.transcode(http_options, pb_request)
        return transcoded_request

    @staticmethod
    def _get_request_body_json(transcoded_request):
        # Jsonify the request body
        body = json_format.MessageToJson(
            transcoded_request["body"], use_integers_for_enums=True
        )
        return body

    @staticmethod
    def _get_query_params_json(transcoded_request):
        query_params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
        )
        query_params.update(
            _BaseModelServiceRestTransport._BaseCreateTunedModel._get_unset_required_fields(
                query_params
            )
        )

        query_params["$alt"] = "json;enum-encoding=int"
        return query_params
150
+
151
class _BaseDeleteTunedModel:
    """Shared REST plumbing (routing, transcoding) for ``DeleteTunedModel``."""

    def __hash__(self):  # pragma: NO COVER
        # Fixed: raise instead of return — ``__hash__`` must yield an int,
        # and the original returned a NotImplementedError *instance*.
        raise NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required query params the caller left unset.
        return {
            k: v
            for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    @staticmethod
    def _get_http_options():
        http_options: List[Dict[str, str]] = [
            {
                "method": "delete",
                "uri": "/v1beta3/{name=tunedModels/*}",
            },
        ]
        return http_options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        pb_request = model_service.DeleteTunedModelRequest.pb(request)
        transcoded_request = path_template.transcode(http_options, pb_request)
        return transcoded_request

    @staticmethod
    def _get_query_params_json(transcoded_request):
        query_params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
        )
        query_params.update(
            _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_unset_required_fields(
                query_params
            )
        )

        query_params["$alt"] = "json;enum-encoding=int"
        return query_params
197
+
198
class _BaseGetModel:
    """Shared REST plumbing (routing, transcoding) for ``GetModel``."""

    def __hash__(self):  # pragma: NO COVER
        # Fixed: raise instead of return — ``__hash__`` must yield an int,
        # and the original returned a NotImplementedError *instance*.
        raise NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required query params the caller left unset.
        return {
            k: v
            for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    @staticmethod
    def _get_http_options():
        http_options: List[Dict[str, str]] = [
            {
                "method": "get",
                "uri": "/v1beta3/{name=models/*}",
            },
        ]
        return http_options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        pb_request = model_service.GetModelRequest.pb(request)
        transcoded_request = path_template.transcode(http_options, pb_request)
        return transcoded_request

    @staticmethod
    def _get_query_params_json(transcoded_request):
        query_params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
        )
        query_params.update(
            _BaseModelServiceRestTransport._BaseGetModel._get_unset_required_fields(
                query_params
            )
        )

        query_params["$alt"] = "json;enum-encoding=int"
        return query_params
244
+
245
class _BaseGetTunedModel:
    """Shared REST plumbing (routing, transcoding) for ``GetTunedModel``."""

    def __hash__(self):  # pragma: NO COVER
        # Fixed: raise instead of return — ``__hash__`` must yield an int,
        # and the original returned a NotImplementedError *instance*.
        raise NotImplementedError("__hash__ must be implemented.")

    __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

    @classmethod
    def _get_unset_required_fields(cls, message_dict):
        # Defaults for required query params the caller left unset.
        return {
            k: v
            for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
            if k not in message_dict
        }

    @staticmethod
    def _get_http_options():
        http_options: List[Dict[str, str]] = [
            {
                "method": "get",
                "uri": "/v1beta3/{name=tunedModels/*}",
            },
        ]
        return http_options

    @staticmethod
    def _get_transcoded_request(http_options, request):
        pb_request = model_service.GetTunedModelRequest.pb(request)
        transcoded_request = path_template.transcode(http_options, pb_request)
        return transcoded_request

    @staticmethod
    def _get_query_params_json(transcoded_request):
        query_params = json.loads(
            json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
        )
        query_params.update(
            _BaseModelServiceRestTransport._BaseGetTunedModel._get_unset_required_fields(
                query_params
            )
        )

        query_params["$alt"] = "json;enum-encoding=int"
        return query_params
291
+
292
+ class _BaseListModels:
293
+ def __hash__(self): # pragma: NO COVER
294
+ return NotImplementedError("__hash__ must be implemented.")
295
+
296
+ @staticmethod
297
+ def _get_http_options():
298
+ http_options: List[Dict[str, str]] = [
299
+ {
300
+ "method": "get",
301
+ "uri": "/v1beta3/models",
302
+ },
303
+ ]
304
+ return http_options
305
+
306
+ @staticmethod
307
+ def _get_transcoded_request(http_options, request):
308
+ pb_request = model_service.ListModelsRequest.pb(request)
309
+ transcoded_request = path_template.transcode(http_options, pb_request)
310
+ return transcoded_request
311
+
312
+ @staticmethod
313
+ def _get_query_params_json(transcoded_request):
314
+ query_params = json.loads(
315
+ json_format.MessageToJson(
316
+ transcoded_request["query_params"],
317
+ use_integers_for_enums=True,
318
+ )
319
+ )
320
+
321
+ query_params["$alt"] = "json;enum-encoding=int"
322
+ return query_params
323
+
324
+ class _BaseListTunedModels:
325
+ def __hash__(self): # pragma: NO COVER
326
+ return NotImplementedError("__hash__ must be implemented.")
327
+
328
+ @staticmethod
329
+ def _get_http_options():
330
+ http_options: List[Dict[str, str]] = [
331
+ {
332
+ "method": "get",
333
+ "uri": "/v1beta3/tunedModels",
334
+ },
335
+ ]
336
+ return http_options
337
+
338
+ @staticmethod
339
+ def _get_transcoded_request(http_options, request):
340
+ pb_request = model_service.ListTunedModelsRequest.pb(request)
341
+ transcoded_request = path_template.transcode(http_options, pb_request)
342
+ return transcoded_request
343
+
344
+ @staticmethod
345
+ def _get_query_params_json(transcoded_request):
346
+ query_params = json.loads(
347
+ json_format.MessageToJson(
348
+ transcoded_request["query_params"],
349
+ use_integers_for_enums=True,
350
+ )
351
+ )
352
+
353
+ query_params["$alt"] = "json;enum-encoding=int"
354
+ return query_params
355
+
356
+ class _BaseUpdateTunedModel:
357
+ def __hash__(self): # pragma: NO COVER
358
+ return NotImplementedError("__hash__ must be implemented.")
359
+
360
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
361
+ "updateMask": {},
362
+ }
363
+
364
+ @classmethod
365
+ def _get_unset_required_fields(cls, message_dict):
366
+ return {
367
+ k: v
368
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
369
+ if k not in message_dict
370
+ }
371
+
372
+ @staticmethod
373
+ def _get_http_options():
374
+ http_options: List[Dict[str, str]] = [
375
+ {
376
+ "method": "patch",
377
+ "uri": "/v1beta3/{tuned_model.name=tunedModels/*}",
378
+ "body": "tuned_model",
379
+ },
380
+ ]
381
+ return http_options
382
+
383
+ @staticmethod
384
+ def _get_transcoded_request(http_options, request):
385
+ pb_request = model_service.UpdateTunedModelRequest.pb(request)
386
+ transcoded_request = path_template.transcode(http_options, pb_request)
387
+ return transcoded_request
388
+
389
+ @staticmethod
390
+ def _get_request_body_json(transcoded_request):
391
+ # Jsonify the request body
392
+
393
+ body = json_format.MessageToJson(
394
+ transcoded_request["body"], use_integers_for_enums=True
395
+ )
396
+ return body
397
+
398
+ @staticmethod
399
+ def _get_query_params_json(transcoded_request):
400
+ query_params = json.loads(
401
+ json_format.MessageToJson(
402
+ transcoded_request["query_params"],
403
+ use_integers_for_enums=True,
404
+ )
405
+ )
406
+ query_params.update(
407
+ _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_unset_required_fields(
408
+ query_params
409
+ )
410
+ )
411
+
412
+ query_params["$alt"] = "json;enum-encoding=int"
413
+ return query_params
414
+
415
+
416
+ __all__ = ("_BaseModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .async_client import PermissionServiceAsyncClient
17
+ from .client import PermissionServiceClient
18
+
19
+ __all__ = (
20
+ "PermissionServiceClient",
21
+ "PermissionServiceAsyncClient",
22
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/async_client.py ADDED
@@ -0,0 +1,1040 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.longrunning import operations_pb2 # type: ignore
47
+ from google.protobuf import field_mask_pb2 # type: ignore
48
+
49
+ from google.ai.generativelanguage_v1beta3.services.permission_service import pagers
50
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
51
+ from google.ai.generativelanguage_v1beta3.types import permission
52
+ from google.ai.generativelanguage_v1beta3.types import permission_service
53
+
54
+ from .client import PermissionServiceClient
55
+ from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
56
+ from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
57
+
58
+ try:
59
+ from google.api_core import client_logging # type: ignore
60
+
61
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
62
+ except ImportError: # pragma: NO COVER
63
+ CLIENT_LOGGING_SUPPORTED = False
64
+
65
+ _LOGGER = std_logging.getLogger(__name__)
66
+
67
+
68
+ class PermissionServiceAsyncClient:
69
+ """Provides methods for managing permissions to PaLM API
70
+ resources.
71
+ """
72
+
73
+ _client: PermissionServiceClient
74
+
75
+ # Copy defaults from the synchronous client for use here.
76
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
77
+ DEFAULT_ENDPOINT = PermissionServiceClient.DEFAULT_ENDPOINT
78
+ DEFAULT_MTLS_ENDPOINT = PermissionServiceClient.DEFAULT_MTLS_ENDPOINT
79
+ _DEFAULT_ENDPOINT_TEMPLATE = PermissionServiceClient._DEFAULT_ENDPOINT_TEMPLATE
80
+ _DEFAULT_UNIVERSE = PermissionServiceClient._DEFAULT_UNIVERSE
81
+
82
+ permission_path = staticmethod(PermissionServiceClient.permission_path)
83
+ parse_permission_path = staticmethod(PermissionServiceClient.parse_permission_path)
84
+ tuned_model_path = staticmethod(PermissionServiceClient.tuned_model_path)
85
+ parse_tuned_model_path = staticmethod(
86
+ PermissionServiceClient.parse_tuned_model_path
87
+ )
88
+ common_billing_account_path = staticmethod(
89
+ PermissionServiceClient.common_billing_account_path
90
+ )
91
+ parse_common_billing_account_path = staticmethod(
92
+ PermissionServiceClient.parse_common_billing_account_path
93
+ )
94
+ common_folder_path = staticmethod(PermissionServiceClient.common_folder_path)
95
+ parse_common_folder_path = staticmethod(
96
+ PermissionServiceClient.parse_common_folder_path
97
+ )
98
+ common_organization_path = staticmethod(
99
+ PermissionServiceClient.common_organization_path
100
+ )
101
+ parse_common_organization_path = staticmethod(
102
+ PermissionServiceClient.parse_common_organization_path
103
+ )
104
+ common_project_path = staticmethod(PermissionServiceClient.common_project_path)
105
+ parse_common_project_path = staticmethod(
106
+ PermissionServiceClient.parse_common_project_path
107
+ )
108
+ common_location_path = staticmethod(PermissionServiceClient.common_location_path)
109
+ parse_common_location_path = staticmethod(
110
+ PermissionServiceClient.parse_common_location_path
111
+ )
112
+
113
+ @classmethod
114
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
115
+ """Creates an instance of this client using the provided credentials
116
+ info.
117
+
118
+ Args:
119
+ info (dict): The service account private key info.
120
+ args: Additional arguments to pass to the constructor.
121
+ kwargs: Additional arguments to pass to the constructor.
122
+
123
+ Returns:
124
+ PermissionServiceAsyncClient: The constructed client.
125
+ """
126
+ return PermissionServiceClient.from_service_account_info.__func__(PermissionServiceAsyncClient, info, *args, **kwargs) # type: ignore
127
+
128
+ @classmethod
129
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
130
+ """Creates an instance of this client using the provided credentials
131
+ file.
132
+
133
+ Args:
134
+ filename (str): The path to the service account private key json
135
+ file.
136
+ args: Additional arguments to pass to the constructor.
137
+ kwargs: Additional arguments to pass to the constructor.
138
+
139
+ Returns:
140
+ PermissionServiceAsyncClient: The constructed client.
141
+ """
142
+ return PermissionServiceClient.from_service_account_file.__func__(PermissionServiceAsyncClient, filename, *args, **kwargs) # type: ignore
143
+
144
+ from_service_account_json = from_service_account_file
145
+
146
+ @classmethod
147
+ def get_mtls_endpoint_and_cert_source(
148
+ cls, client_options: Optional[ClientOptions] = None
149
+ ):
150
+ """Return the API endpoint and client cert source for mutual TLS.
151
+
152
+ The client cert source is determined in the following order:
153
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
154
+ client cert source is None.
155
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
156
+ default client cert source exists, use the default one; otherwise the client cert
157
+ source is None.
158
+
159
+ The API endpoint is determined in the following order:
160
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
161
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
162
+ default mTLS endpoint; if the environment variable is "never", use the default API
163
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
164
+ use the default API endpoint.
165
+
166
+ More details can be found at https://google.aip.dev/auth/4114.
167
+
168
+ Args:
169
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
170
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
171
+ in this method.
172
+
173
+ Returns:
174
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
175
+ client cert source to use.
176
+
177
+ Raises:
178
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
179
+ """
180
+ return PermissionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
181
+
182
+ @property
183
+ def transport(self) -> PermissionServiceTransport:
184
+ """Returns the transport used by the client instance.
185
+
186
+ Returns:
187
+ PermissionServiceTransport: The transport used by the client instance.
188
+ """
189
+ return self._client.transport
190
+
191
+ @property
192
+ def api_endpoint(self):
193
+ """Return the API endpoint used by the client instance.
194
+
195
+ Returns:
196
+ str: The API endpoint used by the client instance.
197
+ """
198
+ return self._client._api_endpoint
199
+
200
+ @property
201
+ def universe_domain(self) -> str:
202
+ """Return the universe domain used by the client instance.
203
+
204
+ Returns:
205
+ str: The universe domain used
206
+ by the client instance.
207
+ """
208
+ return self._client._universe_domain
209
+
210
+ get_transport_class = PermissionServiceClient.get_transport_class
211
+
212
+ def __init__(
213
+ self,
214
+ *,
215
+ credentials: Optional[ga_credentials.Credentials] = None,
216
+ transport: Optional[
217
+ Union[
218
+ str,
219
+ PermissionServiceTransport,
220
+ Callable[..., PermissionServiceTransport],
221
+ ]
222
+ ] = "grpc_asyncio",
223
+ client_options: Optional[ClientOptions] = None,
224
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
225
+ ) -> None:
226
+ """Instantiates the permission service async client.
227
+
228
+ Args:
229
+ credentials (Optional[google.auth.credentials.Credentials]): The
230
+ authorization credentials to attach to requests. These
231
+ credentials identify the application to the service; if none
232
+ are specified, the client will attempt to ascertain the
233
+ credentials from the environment.
234
+ transport (Optional[Union[str,PermissionServiceTransport,Callable[..., PermissionServiceTransport]]]):
235
+ The transport to use, or a Callable that constructs and returns a new transport to use.
236
+ If a Callable is given, it will be called with the same set of initialization
237
+ arguments as used in the PermissionServiceTransport constructor.
238
+ If set to None, a transport is chosen automatically.
239
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
240
+ Custom options for the client.
241
+
242
+ 1. The ``api_endpoint`` property can be used to override the
243
+ default endpoint provided by the client when ``transport`` is
244
+ not explicitly provided. Only if this property is not set and
245
+ ``transport`` was not explicitly provided, the endpoint is
246
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
247
+ variable, which have one of the following values:
248
+ "always" (always use the default mTLS endpoint), "never" (always
249
+ use the default regular endpoint) and "auto" (auto-switch to the
250
+ default mTLS endpoint if client certificate is present; this is
251
+ the default value).
252
+
253
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
254
+ is "true", then the ``client_cert_source`` property can be used
255
+ to provide a client certificate for mTLS transport. If
256
+ not provided, the default SSL client certificate will be used if
257
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
258
+ set, no client certificate will be used.
259
+
260
+ 3. The ``universe_domain`` property can be used to override the
261
+ default "googleapis.com" universe. Note that ``api_endpoint``
262
+ property still takes precedence; and ``universe_domain`` is
263
+ currently not supported for mTLS.
264
+
265
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
266
+ The client info used to send a user-agent string along with
267
+ API requests. If ``None``, then default info will be used.
268
+ Generally, you only need to set this if you're developing
269
+ your own client library.
270
+
271
+ Raises:
272
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
273
+ creation failed for any reason.
274
+ """
275
+ self._client = PermissionServiceClient(
276
+ credentials=credentials,
277
+ transport=transport,
278
+ client_options=client_options,
279
+ client_info=client_info,
280
+ )
281
+
282
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
283
+ std_logging.DEBUG
284
+ ): # pragma: NO COVER
285
+ _LOGGER.debug(
286
+ "Created client `google.ai.generativelanguage_v1beta3.PermissionServiceAsyncClient`.",
287
+ extra={
288
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
289
+ "universeDomain": getattr(
290
+ self._client._transport._credentials, "universe_domain", ""
291
+ ),
292
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
293
+ "credentialsInfo": getattr(
294
+ self.transport._credentials, "get_cred_info", lambda: None
295
+ )(),
296
+ }
297
+ if hasattr(self._client._transport, "_credentials")
298
+ else {
299
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
300
+ "credentialsType": None,
301
+ },
302
+ )
303
+
304
+ async def create_permission(
305
+ self,
306
+ request: Optional[
307
+ Union[permission_service.CreatePermissionRequest, dict]
308
+ ] = None,
309
+ *,
310
+ parent: Optional[str] = None,
311
+ permission: Optional[gag_permission.Permission] = None,
312
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
313
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
314
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
315
+ ) -> gag_permission.Permission:
316
+ r"""Create a permission to a specific resource.
317
+
318
+ .. code-block:: python
319
+
320
+ # This snippet has been automatically generated and should be regarded as a
321
+ # code template only.
322
+ # It will require modifications to work:
323
+ # - It may require correct/in-range values for request initialization.
324
+ # - It may require specifying regional endpoints when creating the service
325
+ # client as shown in:
326
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
327
+ from google.ai import generativelanguage_v1beta3
328
+
329
+ async def sample_create_permission():
330
+ # Create a client
331
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
332
+
333
+ # Initialize request argument(s)
334
+ request = generativelanguage_v1beta3.CreatePermissionRequest(
335
+ parent="parent_value",
336
+ )
337
+
338
+ # Make the request
339
+ response = await client.create_permission(request=request)
340
+
341
+ # Handle the response
342
+ print(response)
343
+
344
+ Args:
345
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.CreatePermissionRequest, dict]]):
346
+ The request object. Request to create a ``Permission``.
347
+ parent (:class:`str`):
348
+ Required. The parent resource of the ``Permission``.
349
+ Format: tunedModels/{tuned_model}
350
+
351
+ This corresponds to the ``parent`` field
352
+ on the ``request`` instance; if ``request`` is provided, this
353
+ should not be set.
354
+ permission (:class:`google.ai.generativelanguage_v1beta3.types.Permission`):
355
+ Required. The permission to create.
356
+ This corresponds to the ``permission`` field
357
+ on the ``request`` instance; if ``request`` is provided, this
358
+ should not be set.
359
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
360
+ should be retried.
361
+ timeout (float): The timeout for this request.
362
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
363
+ sent along with the request as metadata. Normally, each value must be of type `str`,
364
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
365
+ be of type `bytes`.
366
+
367
+ Returns:
368
+ google.ai.generativelanguage_v1beta3.types.Permission:
369
+ Permission resource grants user,
370
+ group or the rest of the world access to
371
+ the PaLM API resource (e.g. a tuned
372
+ model, file).
373
+
374
+ A role is a collection of permitted
375
+ operations that allows users to perform
376
+ specific actions on PaLM API resources.
377
+ To make them available to users, groups,
378
+ or service accounts, you assign roles.
379
+ When you assign a role, you grant
380
+ permissions that the role contains.
381
+
382
+ There are three concentric roles. Each
383
+ role is a superset of the previous
384
+ role's permitted operations:
385
+
386
+ - reader can use the resource (e.g.
387
+ tuned model) for inference
388
+ - writer has reader's permissions and
389
+ additionally can edit and share
390
+ - owner has writer's permissions and
391
+ additionally can delete
392
+
393
+ """
394
+ # Create or coerce a protobuf request object.
395
+ # - Quick check: If we got a request object, we should *not* have
396
+ # gotten any keyword arguments that map to the request.
397
+ has_flattened_params = any([parent, permission])
398
+ if request is not None and has_flattened_params:
399
+ raise ValueError(
400
+ "If the `request` argument is set, then none of "
401
+ "the individual field arguments should be set."
402
+ )
403
+
404
+ # - Use the request object if provided (there's no risk of modifying the input as
405
+ # there are no flattened fields), or create one.
406
+ if not isinstance(request, permission_service.CreatePermissionRequest):
407
+ request = permission_service.CreatePermissionRequest(request)
408
+
409
+ # If we have keyword arguments corresponding to fields on the
410
+ # request, apply these.
411
+ if parent is not None:
412
+ request.parent = parent
413
+ if permission is not None:
414
+ request.permission = permission
415
+
416
+ # Wrap the RPC method; this adds retry and timeout information,
417
+ # and friendly error handling.
418
+ rpc = self._client._transport._wrapped_methods[
419
+ self._client._transport.create_permission
420
+ ]
421
+
422
+ # Certain fields should be provided within the metadata header;
423
+ # add these here.
424
+ metadata = tuple(metadata) + (
425
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
426
+ )
427
+
428
+ # Validate the universe domain.
429
+ self._client._validate_universe_domain()
430
+
431
+ # Send the request.
432
+ response = await rpc(
433
+ request,
434
+ retry=retry,
435
+ timeout=timeout,
436
+ metadata=metadata,
437
+ )
438
+
439
+ # Done; return the response.
440
+ return response
441
+
442
+ async def get_permission(
443
+ self,
444
+ request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None,
445
+ *,
446
+ name: Optional[str] = None,
447
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
448
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
449
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
450
+ ) -> permission.Permission:
451
+ r"""Gets information about a specific Permission.
452
+
453
+ .. code-block:: python
454
+
455
+ # This snippet has been automatically generated and should be regarded as a
456
+ # code template only.
457
+ # It will require modifications to work:
458
+ # - It may require correct/in-range values for request initialization.
459
+ # - It may require specifying regional endpoints when creating the service
460
+ # client as shown in:
461
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
462
+ from google.ai import generativelanguage_v1beta3
463
+
464
+ async def sample_get_permission():
465
+ # Create a client
466
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
467
+
468
+ # Initialize request argument(s)
469
+ request = generativelanguage_v1beta3.GetPermissionRequest(
470
+ name="name_value",
471
+ )
472
+
473
+ # Make the request
474
+ response = await client.get_permission(request=request)
475
+
476
+ # Handle the response
477
+ print(response)
478
+
479
+ Args:
480
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.GetPermissionRequest, dict]]):
481
+ The request object. Request for getting information about a specific
482
+ ``Permission``.
483
+ name (:class:`str`):
484
+ Required. The resource name of the permission.
485
+
486
+ Format:
487
+ ``tunedModels/{tuned_model}permissions/{permission}``
488
+
489
+ This corresponds to the ``name`` field
490
+ on the ``request`` instance; if ``request`` is provided, this
491
+ should not be set.
492
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
493
+ should be retried.
494
+ timeout (float): The timeout for this request.
495
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
496
+ sent along with the request as metadata. Normally, each value must be of type `str`,
497
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
498
+ be of type `bytes`.
499
+
500
+ Returns:
501
+ google.ai.generativelanguage_v1beta3.types.Permission:
502
+ Permission resource grants user,
503
+ group or the rest of the world access to
504
+ the PaLM API resource (e.g. a tuned
505
+ model, file).
506
+
507
+ A role is a collection of permitted
508
+ operations that allows users to perform
509
+ specific actions on PaLM API resources.
510
+ To make them available to users, groups,
511
+ or service accounts, you assign roles.
512
+ When you assign a role, you grant
513
+ permissions that the role contains.
514
+
515
+ There are three concentric roles. Each
516
+ role is a superset of the previous
517
+ role's permitted operations:
518
+
519
+ - reader can use the resource (e.g.
520
+ tuned model) for inference
521
+ - writer has reader's permissions and
522
+ additionally can edit and share
523
+ - owner has writer's permissions and
524
+ additionally can delete
525
+
526
+ """
527
+ # Create or coerce a protobuf request object.
528
+ # - Quick check: If we got a request object, we should *not* have
529
+ # gotten any keyword arguments that map to the request.
530
+ has_flattened_params = any([name])
531
+ if request is not None and has_flattened_params:
532
+ raise ValueError(
533
+ "If the `request` argument is set, then none of "
534
+ "the individual field arguments should be set."
535
+ )
536
+
537
+ # - Use the request object if provided (there's no risk of modifying the input as
538
+ # there are no flattened fields), or create one.
539
+ if not isinstance(request, permission_service.GetPermissionRequest):
540
+ request = permission_service.GetPermissionRequest(request)
541
+
542
+ # If we have keyword arguments corresponding to fields on the
543
+ # request, apply these.
544
+ if name is not None:
545
+ request.name = name
546
+
547
+ # Wrap the RPC method; this adds retry and timeout information,
548
+ # and friendly error handling.
549
+ rpc = self._client._transport._wrapped_methods[
550
+ self._client._transport.get_permission
551
+ ]
552
+
553
+ # Certain fields should be provided within the metadata header;
554
+ # add these here.
555
+ metadata = tuple(metadata) + (
556
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
557
+ )
558
+
559
+ # Validate the universe domain.
560
+ self._client._validate_universe_domain()
561
+
562
+ # Send the request.
563
+ response = await rpc(
564
+ request,
565
+ retry=retry,
566
+ timeout=timeout,
567
+ metadata=metadata,
568
+ )
569
+
570
+ # Done; return the response.
571
+ return response
572
+
573
+ async def list_permissions(
574
+ self,
575
+ request: Optional[
576
+ Union[permission_service.ListPermissionsRequest, dict]
577
+ ] = None,
578
+ *,
579
+ parent: Optional[str] = None,
580
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
581
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
582
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
583
+ ) -> pagers.ListPermissionsAsyncPager:
584
+ r"""Lists permissions for the specific resource.
585
+
586
+ .. code-block:: python
587
+
588
+ # This snippet has been automatically generated and should be regarded as a
589
+ # code template only.
590
+ # It will require modifications to work:
591
+ # - It may require correct/in-range values for request initialization.
592
+ # - It may require specifying regional endpoints when creating the service
593
+ # client as shown in:
594
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
595
+ from google.ai import generativelanguage_v1beta3
596
+
597
+ async def sample_list_permissions():
598
+ # Create a client
599
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
600
+
601
+ # Initialize request argument(s)
602
+ request = generativelanguage_v1beta3.ListPermissionsRequest(
603
+ parent="parent_value",
604
+ )
605
+
606
+ # Make the request
607
+ page_result = client.list_permissions(request=request)
608
+
609
+ # Handle the response
610
+ async for response in page_result:
611
+ print(response)
612
+
613
+ Args:
614
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.ListPermissionsRequest, dict]]):
615
+ The request object. Request for listing permissions.
616
+ parent (:class:`str`):
617
+ Required. The parent resource of the permissions.
618
+ Format: tunedModels/{tuned_model}
619
+
620
+ This corresponds to the ``parent`` field
621
+ on the ``request`` instance; if ``request`` is provided, this
622
+ should not be set.
623
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
624
+ should be retried.
625
+ timeout (float): The timeout for this request.
626
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
627
+ sent along with the request as metadata. Normally, each value must be of type `str`,
628
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
629
+ be of type `bytes`.
630
+
631
+ Returns:
632
+ google.ai.generativelanguage_v1beta3.services.permission_service.pagers.ListPermissionsAsyncPager:
633
+ Response from ListPermissions containing a paginated list of
634
+ permissions.
635
+
636
+ Iterating over this object will yield results and
637
+ resolve additional pages automatically.
638
+
639
+ """
640
+ # Create or coerce a protobuf request object.
641
+ # - Quick check: If we got a request object, we should *not* have
642
+ # gotten any keyword arguments that map to the request.
643
+ has_flattened_params = any([parent])
644
+ if request is not None and has_flattened_params:
645
+ raise ValueError(
646
+ "If the `request` argument is set, then none of "
647
+ "the individual field arguments should be set."
648
+ )
649
+
650
+ # - Use the request object if provided (there's no risk of modifying the input as
651
+ # there are no flattened fields), or create one.
652
+ if not isinstance(request, permission_service.ListPermissionsRequest):
653
+ request = permission_service.ListPermissionsRequest(request)
654
+
655
+ # If we have keyword arguments corresponding to fields on the
656
+ # request, apply these.
657
+ if parent is not None:
658
+ request.parent = parent
659
+
660
+ # Wrap the RPC method; this adds retry and timeout information,
661
+ # and friendly error handling.
662
+ rpc = self._client._transport._wrapped_methods[
663
+ self._client._transport.list_permissions
664
+ ]
665
+
666
+ # Certain fields should be provided within the metadata header;
667
+ # add these here.
668
+ metadata = tuple(metadata) + (
669
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
670
+ )
671
+
672
+ # Validate the universe domain.
673
+ self._client._validate_universe_domain()
674
+
675
+ # Send the request.
676
+ response = await rpc(
677
+ request,
678
+ retry=retry,
679
+ timeout=timeout,
680
+ metadata=metadata,
681
+ )
682
+
683
+ # This method is paged; wrap the response in a pager, which provides
684
+ # an `__aiter__` convenience method.
685
+ response = pagers.ListPermissionsAsyncPager(
686
+ method=rpc,
687
+ request=request,
688
+ response=response,
689
+ retry=retry,
690
+ timeout=timeout,
691
+ metadata=metadata,
692
+ )
693
+
694
+ # Done; return the response.
695
+ return response
696
+
697
+ async def update_permission(
698
+ self,
699
+ request: Optional[
700
+ Union[permission_service.UpdatePermissionRequest, dict]
701
+ ] = None,
702
+ *,
703
+ permission: Optional[gag_permission.Permission] = None,
704
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
705
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
706
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
707
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
708
+ ) -> gag_permission.Permission:
709
+ r"""Updates the permission.
710
+
711
+ .. code-block:: python
712
+
713
+ # This snippet has been automatically generated and should be regarded as a
714
+ # code template only.
715
+ # It will require modifications to work:
716
+ # - It may require correct/in-range values for request initialization.
717
+ # - It may require specifying regional endpoints when creating the service
718
+ # client as shown in:
719
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
720
+ from google.ai import generativelanguage_v1beta3
721
+
722
+ async def sample_update_permission():
723
+ # Create a client
724
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
725
+
726
+ # Initialize request argument(s)
727
+ request = generativelanguage_v1beta3.UpdatePermissionRequest(
728
+ )
729
+
730
+ # Make the request
731
+ response = await client.update_permission(request=request)
732
+
733
+ # Handle the response
734
+ print(response)
735
+
736
+ Args:
737
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.UpdatePermissionRequest, dict]]):
738
+ The request object. Request to update the ``Permission``.
739
+ permission (:class:`google.ai.generativelanguage_v1beta3.types.Permission`):
740
+ Required. The permission to update.
741
+
742
+ The permission's ``name`` field is used to identify the
743
+ permission to update.
744
+
745
+ This corresponds to the ``permission`` field
746
+ on the ``request`` instance; if ``request`` is provided, this
747
+ should not be set.
748
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
749
+ Required. The list of fields to update. Accepted ones:
750
+
751
+ - role (``Permission.role`` field)
752
+
753
+ This corresponds to the ``update_mask`` field
754
+ on the ``request`` instance; if ``request`` is provided, this
755
+ should not be set.
756
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
757
+ should be retried.
758
+ timeout (float): The timeout for this request.
759
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
760
+ sent along with the request as metadata. Normally, each value must be of type `str`,
761
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
762
+ be of type `bytes`.
763
+
764
+ Returns:
765
+ google.ai.generativelanguage_v1beta3.types.Permission:
766
+ Permission resource grants user,
767
+ group or the rest of the world access to
768
+ the PaLM API resource (e.g. a tuned
769
+ model, file).
770
+
771
+ A role is a collection of permitted
772
+ operations that allows users to perform
773
+ specific actions on PaLM API resources.
774
+ To make them available to users, groups,
775
+ or service accounts, you assign roles.
776
+ When you assign a role, you grant
777
+ permissions that the role contains.
778
+
779
+ There are three concentric roles. Each
780
+ role is a superset of the previous
781
+ role's permitted operations:
782
+
783
+ - reader can use the resource (e.g.
784
+ tuned model) for inference
785
+ - writer has reader's permissions and
786
+ additionally can edit and share
787
+ - owner has writer's permissions and
788
+ additionally can delete
789
+
790
+ """
791
+ # Create or coerce a protobuf request object.
792
+ # - Quick check: If we got a request object, we should *not* have
793
+ # gotten any keyword arguments that map to the request.
794
+ has_flattened_params = any([permission, update_mask])
795
+ if request is not None and has_flattened_params:
796
+ raise ValueError(
797
+ "If the `request` argument is set, then none of "
798
+ "the individual field arguments should be set."
799
+ )
800
+
801
+ # - Use the request object if provided (there's no risk of modifying the input as
802
+ # there are no flattened fields), or create one.
803
+ if not isinstance(request, permission_service.UpdatePermissionRequest):
804
+ request = permission_service.UpdatePermissionRequest(request)
805
+
806
+ # If we have keyword arguments corresponding to fields on the
807
+ # request, apply these.
808
+ if permission is not None:
809
+ request.permission = permission
810
+ if update_mask is not None:
811
+ request.update_mask = update_mask
812
+
813
+ # Wrap the RPC method; this adds retry and timeout information,
814
+ # and friendly error handling.
815
+ rpc = self._client._transport._wrapped_methods[
816
+ self._client._transport.update_permission
817
+ ]
818
+
819
+ # Certain fields should be provided within the metadata header;
820
+ # add these here.
821
+ metadata = tuple(metadata) + (
822
+ gapic_v1.routing_header.to_grpc_metadata(
823
+ (("permission.name", request.permission.name),)
824
+ ),
825
+ )
826
+
827
+ # Validate the universe domain.
828
+ self._client._validate_universe_domain()
829
+
830
+ # Send the request.
831
+ response = await rpc(
832
+ request,
833
+ retry=retry,
834
+ timeout=timeout,
835
+ metadata=metadata,
836
+ )
837
+
838
+ # Done; return the response.
839
+ return response
840
+
841
+ async def delete_permission(
842
+ self,
843
+ request: Optional[
844
+ Union[permission_service.DeletePermissionRequest, dict]
845
+ ] = None,
846
+ *,
847
+ name: Optional[str] = None,
848
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
849
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
850
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
851
+ ) -> None:
852
+ r"""Deletes the permission.
853
+
854
+ .. code-block:: python
855
+
856
+ # This snippet has been automatically generated and should be regarded as a
857
+ # code template only.
858
+ # It will require modifications to work:
859
+ # - It may require correct/in-range values for request initialization.
860
+ # - It may require specifying regional endpoints when creating the service
861
+ # client as shown in:
862
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
863
+ from google.ai import generativelanguage_v1beta3
864
+
865
+ async def sample_delete_permission():
866
+ # Create a client
867
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
868
+
869
+ # Initialize request argument(s)
870
+ request = generativelanguage_v1beta3.DeletePermissionRequest(
871
+ name="name_value",
872
+ )
873
+
874
+ # Make the request
875
+ await client.delete_permission(request=request)
876
+
877
+ Args:
878
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.DeletePermissionRequest, dict]]):
879
+ The request object. Request to delete the ``Permission``.
880
+ name (:class:`str`):
881
+ Required. The resource name of the permission. Format:
882
+ ``tunedModels/{tuned_model}/permissions/{permission}``
883
+
884
+ This corresponds to the ``name`` field
885
+ on the ``request`` instance; if ``request`` is provided, this
886
+ should not be set.
887
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
888
+ should be retried.
889
+ timeout (float): The timeout for this request.
890
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
891
+ sent along with the request as metadata. Normally, each value must be of type `str`,
892
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
893
+ be of type `bytes`.
894
+ """
895
+ # Create or coerce a protobuf request object.
896
+ # - Quick check: If we got a request object, we should *not* have
897
+ # gotten any keyword arguments that map to the request.
898
+ has_flattened_params = any([name])
899
+ if request is not None and has_flattened_params:
900
+ raise ValueError(
901
+ "If the `request` argument is set, then none of "
902
+ "the individual field arguments should be set."
903
+ )
904
+
905
+ # - Use the request object if provided (there's no risk of modifying the input as
906
+ # there are no flattened fields), or create one.
907
+ if not isinstance(request, permission_service.DeletePermissionRequest):
908
+ request = permission_service.DeletePermissionRequest(request)
909
+
910
+ # If we have keyword arguments corresponding to fields on the
911
+ # request, apply these.
912
+ if name is not None:
913
+ request.name = name
914
+
915
+ # Wrap the RPC method; this adds retry and timeout information,
916
+ # and friendly error handling.
917
+ rpc = self._client._transport._wrapped_methods[
918
+ self._client._transport.delete_permission
919
+ ]
920
+
921
+ # Certain fields should be provided within the metadata header;
922
+ # add these here.
923
+ metadata = tuple(metadata) + (
924
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
925
+ )
926
+
927
+ # Validate the universe domain.
928
+ self._client._validate_universe_domain()
929
+
930
+ # Send the request.
931
+ await rpc(
932
+ request,
933
+ retry=retry,
934
+ timeout=timeout,
935
+ metadata=metadata,
936
+ )
937
+
938
+ async def transfer_ownership(
939
+ self,
940
+ request: Optional[
941
+ Union[permission_service.TransferOwnershipRequest, dict]
942
+ ] = None,
943
+ *,
944
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
945
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
946
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
947
+ ) -> permission_service.TransferOwnershipResponse:
948
+ r"""Transfers ownership of the tuned model.
949
+ This is the only way to change ownership of the tuned
950
+ model. The current owner will be downgraded to writer
951
+ role.
952
+
953
+ .. code-block:: python
954
+
955
+ # This snippet has been automatically generated and should be regarded as a
956
+ # code template only.
957
+ # It will require modifications to work:
958
+ # - It may require correct/in-range values for request initialization.
959
+ # - It may require specifying regional endpoints when creating the service
960
+ # client as shown in:
961
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
962
+ from google.ai import generativelanguage_v1beta3
963
+
964
+ async def sample_transfer_ownership():
965
+ # Create a client
966
+ client = generativelanguage_v1beta3.PermissionServiceAsyncClient()
967
+
968
+ # Initialize request argument(s)
969
+ request = generativelanguage_v1beta3.TransferOwnershipRequest(
970
+ name="name_value",
971
+ email_address="email_address_value",
972
+ )
973
+
974
+ # Make the request
975
+ response = await client.transfer_ownership(request=request)
976
+
977
+ # Handle the response
978
+ print(response)
979
+
980
+ Args:
981
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.TransferOwnershipRequest, dict]]):
982
+ The request object. Request to transfer the ownership of
983
+ the tuned model.
984
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
985
+ should be retried.
986
+ timeout (float): The timeout for this request.
987
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
988
+ sent along with the request as metadata. Normally, each value must be of type `str`,
989
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
990
+ be of type `bytes`.
991
+
992
+ Returns:
993
+ google.ai.generativelanguage_v1beta3.types.TransferOwnershipResponse:
994
+ Response from TransferOwnership.
995
+ """
996
+ # Create or coerce a protobuf request object.
997
+ # - Use the request object if provided (there's no risk of modifying the input as
998
+ # there are no flattened fields), or create one.
999
+ if not isinstance(request, permission_service.TransferOwnershipRequest):
1000
+ request = permission_service.TransferOwnershipRequest(request)
1001
+
1002
+ # Wrap the RPC method; this adds retry and timeout information,
1003
+ # and friendly error handling.
1004
+ rpc = self._client._transport._wrapped_methods[
1005
+ self._client._transport.transfer_ownership
1006
+ ]
1007
+
1008
+ # Certain fields should be provided within the metadata header;
1009
+ # add these here.
1010
+ metadata = tuple(metadata) + (
1011
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1012
+ )
1013
+
1014
+ # Validate the universe domain.
1015
+ self._client._validate_universe_domain()
1016
+
1017
+ # Send the request.
1018
+ response = await rpc(
1019
+ request,
1020
+ retry=retry,
1021
+ timeout=timeout,
1022
+ metadata=metadata,
1023
+ )
1024
+
1025
+ # Done; return the response.
1026
+ return response
1027
+
1028
+ async def __aenter__(self) -> "PermissionServiceAsyncClient":
1029
+ return self
1030
+
1031
+ async def __aexit__(self, exc_type, exc, tb):
1032
+ await self.transport.close()
1033
+
1034
+
1035
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
1036
+ gapic_version=package_version.__version__
1037
+ )
1038
+
1039
+
1040
+ __all__ = ("PermissionServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/client.py ADDED
@@ -0,0 +1,1433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.longrunning import operations_pb2 # type: ignore
62
+ from google.protobuf import field_mask_pb2 # type: ignore
63
+
64
+ from google.ai.generativelanguage_v1beta3.services.permission_service import pagers
65
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
66
+ from google.ai.generativelanguage_v1beta3.types import permission
67
+ from google.ai.generativelanguage_v1beta3.types import permission_service
68
+
69
+ from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
70
+ from .transports.grpc import PermissionServiceGrpcTransport
71
+ from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
72
+ from .transports.rest import PermissionServiceRestTransport
73
+
74
+
75
+ class PermissionServiceClientMeta(type):
76
+ """Metaclass for the PermissionService client.
77
+
78
+ This provides class-level methods for building and retrieving
79
+ support objects (e.g. transport) without polluting the client instance
80
+ objects.
81
+ """
82
+
83
+ _transport_registry = (
84
+ OrderedDict()
85
+ ) # type: Dict[str, Type[PermissionServiceTransport]]
86
+ _transport_registry["grpc"] = PermissionServiceGrpcTransport
87
+ _transport_registry["grpc_asyncio"] = PermissionServiceGrpcAsyncIOTransport
88
+ _transport_registry["rest"] = PermissionServiceRestTransport
89
+
90
+ def get_transport_class(
91
+ cls,
92
+ label: Optional[str] = None,
93
+ ) -> Type[PermissionServiceTransport]:
94
+ """Returns an appropriate transport class.
95
+
96
+ Args:
97
+ label: The name of the desired transport. If none is
98
+ provided, then the first transport in the registry is used.
99
+
100
+ Returns:
101
+ The transport class to use.
102
+ """
103
+ # If a specific transport is requested, return that one.
104
+ if label:
105
+ return cls._transport_registry[label]
106
+
107
+ # No transport is requested; return the default (that is, the first one
108
+ # in the dictionary).
109
+ return next(iter(cls._transport_registry.values()))
110
+
111
+
112
+ class PermissionServiceClient(metaclass=PermissionServiceClientMeta):
113
+ """Provides methods for managing permissions to PaLM API
114
+ resources.
115
+ """
116
+
117
+ @staticmethod
118
+ def _get_default_mtls_endpoint(api_endpoint):
119
+ """Converts api endpoint to mTLS endpoint.
120
+
121
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
122
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
123
+ Args:
124
+ api_endpoint (Optional[str]): the api endpoint to convert.
125
+ Returns:
126
+ str: converted mTLS api endpoint.
127
+ """
128
+ if not api_endpoint:
129
+ return api_endpoint
130
+
131
+ mtls_endpoint_re = re.compile(
132
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
133
+ )
134
+
135
+ m = mtls_endpoint_re.match(api_endpoint)
136
+ name, mtls, sandbox, googledomain = m.groups()
137
+ if mtls or not googledomain:
138
+ return api_endpoint
139
+
140
+ if sandbox:
141
+ return api_endpoint.replace(
142
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
143
+ )
144
+
145
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
146
+
147
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
148
+ DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
149
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
150
+ DEFAULT_ENDPOINT
151
+ )
152
+
153
+ _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
154
+ _DEFAULT_UNIVERSE = "googleapis.com"
155
+
156
+ @classmethod
157
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
158
+ """Creates an instance of this client using the provided credentials
159
+ info.
160
+
161
+ Args:
162
+ info (dict): The service account private key info.
163
+ args: Additional arguments to pass to the constructor.
164
+ kwargs: Additional arguments to pass to the constructor.
165
+
166
+ Returns:
167
+ PermissionServiceClient: The constructed client.
168
+ """
169
+ credentials = service_account.Credentials.from_service_account_info(info)
170
+ kwargs["credentials"] = credentials
171
+ return cls(*args, **kwargs)
172
+
173
+ @classmethod
174
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
175
+ """Creates an instance of this client using the provided credentials
176
+ file.
177
+
178
+ Args:
179
+ filename (str): The path to the service account private key json
180
+ file.
181
+ args: Additional arguments to pass to the constructor.
182
+ kwargs: Additional arguments to pass to the constructor.
183
+
184
+ Returns:
185
+ PermissionServiceClient: The constructed client.
186
+ """
187
+ credentials = service_account.Credentials.from_service_account_file(filename)
188
+ kwargs["credentials"] = credentials
189
+ return cls(*args, **kwargs)
190
+
191
+ from_service_account_json = from_service_account_file
192
+
193
+ @property
194
+ def transport(self) -> PermissionServiceTransport:
195
+ """Returns the transport used by the client instance.
196
+
197
+ Returns:
198
+ PermissionServiceTransport: The transport used by the client
199
+ instance.
200
+ """
201
+ return self._transport
202
+
203
+ @staticmethod
204
+ def permission_path(
205
+ tuned_model: str,
206
+ permission: str,
207
+ ) -> str:
208
+ """Returns a fully-qualified permission string."""
209
+ return "tunedModels/{tuned_model}/permissions/{permission}".format(
210
+ tuned_model=tuned_model,
211
+ permission=permission,
212
+ )
213
+
214
+ @staticmethod
215
+ def parse_permission_path(path: str) -> Dict[str, str]:
216
+ """Parses a permission path into its component segments."""
217
+ m = re.match(
218
+ r"^tunedModels/(?P<tuned_model>.+?)/permissions/(?P<permission>.+?)$", path
219
+ )
220
+ return m.groupdict() if m else {}
221
+
222
+ @staticmethod
223
+ def tuned_model_path(
224
+ tuned_model: str,
225
+ ) -> str:
226
+ """Returns a fully-qualified tuned_model string."""
227
+ return "tunedModels/{tuned_model}".format(
228
+ tuned_model=tuned_model,
229
+ )
230
+
231
+ @staticmethod
232
+ def parse_tuned_model_path(path: str) -> Dict[str, str]:
233
+ """Parses a tuned_model path into its component segments."""
234
+ m = re.match(r"^tunedModels/(?P<tuned_model>.+?)$", path)
235
+ return m.groupdict() if m else {}
236
+
237
+ @staticmethod
238
+ def common_billing_account_path(
239
+ billing_account: str,
240
+ ) -> str:
241
+ """Returns a fully-qualified billing_account string."""
242
+ return "billingAccounts/{billing_account}".format(
243
+ billing_account=billing_account,
244
+ )
245
+
246
+ @staticmethod
247
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
248
+ """Parse a billing_account path into its component segments."""
249
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
250
+ return m.groupdict() if m else {}
251
+
252
+ @staticmethod
253
+ def common_folder_path(
254
+ folder: str,
255
+ ) -> str:
256
+ """Returns a fully-qualified folder string."""
257
+ return "folders/{folder}".format(
258
+ folder=folder,
259
+ )
260
+
261
+ @staticmethod
262
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
263
+ """Parse a folder path into its component segments."""
264
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
265
+ return m.groupdict() if m else {}
266
+
267
+ @staticmethod
268
+ def common_organization_path(
269
+ organization: str,
270
+ ) -> str:
271
+ """Returns a fully-qualified organization string."""
272
+ return "organizations/{organization}".format(
273
+ organization=organization,
274
+ )
275
+
276
+ @staticmethod
277
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
278
+ """Parse a organization path into its component segments."""
279
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
280
+ return m.groupdict() if m else {}
281
+
282
+ @staticmethod
283
+ def common_project_path(
284
+ project: str,
285
+ ) -> str:
286
+ """Returns a fully-qualified project string."""
287
+ return "projects/{project}".format(
288
+ project=project,
289
+ )
290
+
291
+ @staticmethod
292
+ def parse_common_project_path(path: str) -> Dict[str, str]:
293
+ """Parse a project path into its component segments."""
294
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
295
+ return m.groupdict() if m else {}
296
+
297
+ @staticmethod
298
+ def common_location_path(
299
+ project: str,
300
+ location: str,
301
+ ) -> str:
302
+ """Returns a fully-qualified location string."""
303
+ return "projects/{project}/locations/{location}".format(
304
+ project=project,
305
+ location=location,
306
+ )
307
+
308
+ @staticmethod
309
+ def parse_common_location_path(path: str) -> Dict[str, str]:
310
+ """Parse a location path into its component segments."""
311
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
312
+ return m.groupdict() if m else {}
313
+
314
+ @classmethod
315
+ def get_mtls_endpoint_and_cert_source(
316
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
317
+ ):
318
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
319
+
320
+ The client cert source is determined in the following order:
321
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
322
+ client cert source is None.
323
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
324
+ default client cert source exists, use the default one; otherwise the client cert
325
+ source is None.
326
+
327
+ The API endpoint is determined in the following order:
328
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
329
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
330
+ default mTLS endpoint; if the environment variable is "never", use the default API
331
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
332
+ use the default API endpoint.
333
+
334
+ More details can be found at https://google.aip.dev/auth/4114.
335
+
336
+ Args:
337
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
338
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
339
+ in this method.
340
+
341
+ Returns:
342
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
343
+ client cert source to use.
344
+
345
+ Raises:
346
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
347
+ """
348
+
349
+ warnings.warn(
350
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
351
+ DeprecationWarning,
352
+ )
353
+ if client_options is None:
354
+ client_options = client_options_lib.ClientOptions()
355
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
356
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
357
+ if use_client_cert not in ("true", "false"):
358
+ raise ValueError(
359
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
360
+ )
361
+ if use_mtls_endpoint not in ("auto", "never", "always"):
362
+ raise MutualTLSChannelError(
363
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
364
+ )
365
+
366
+ # Figure out the client cert source to use.
367
+ client_cert_source = None
368
+ if use_client_cert == "true":
369
+ if client_options.client_cert_source:
370
+ client_cert_source = client_options.client_cert_source
371
+ elif mtls.has_default_client_cert_source():
372
+ client_cert_source = mtls.default_client_cert_source()
373
+
374
+ # Figure out which api endpoint to use.
375
+ if client_options.api_endpoint is not None:
376
+ api_endpoint = client_options.api_endpoint
377
+ elif use_mtls_endpoint == "always" or (
378
+ use_mtls_endpoint == "auto" and client_cert_source
379
+ ):
380
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
381
+ else:
382
+ api_endpoint = cls.DEFAULT_ENDPOINT
383
+
384
+ return api_endpoint, client_cert_source
385
+
386
+ @staticmethod
387
+ def _read_environment_variables():
388
+ """Returns the environment variables used by the client.
389
+
390
+ Returns:
391
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
392
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
393
+
394
+ Raises:
395
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
396
+ any of ["true", "false"].
397
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
398
+ is not any of ["auto", "never", "always"].
399
+ """
400
+ use_client_cert = os.getenv(
401
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
402
+ ).lower()
403
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
404
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
405
+ if use_client_cert not in ("true", "false"):
406
+ raise ValueError(
407
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
408
+ )
409
+ if use_mtls_endpoint not in ("auto", "never", "always"):
410
+ raise MutualTLSChannelError(
411
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
412
+ )
413
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
414
+
415
+ @staticmethod
416
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
417
+ """Return the client cert source to be used by the client.
418
+
419
+ Args:
420
+ provided_cert_source (bytes): The client certificate source provided.
421
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
422
+
423
+ Returns:
424
+ bytes or None: The client cert source to be used by the client.
425
+ """
426
+ client_cert_source = None
427
+ if use_cert_flag:
428
+ if provided_cert_source:
429
+ client_cert_source = provided_cert_source
430
+ elif mtls.has_default_client_cert_source():
431
+ client_cert_source = mtls.default_client_cert_source()
432
+ return client_cert_source
433
+
434
+ @staticmethod
435
+ def _get_api_endpoint(
436
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
437
+ ):
438
+ """Return the API endpoint used by the client.
439
+
440
+ Args:
441
+ api_override (str): The API endpoint override. If specified, this is always
442
+ the return value of this function and the other arguments are not used.
443
+ client_cert_source (bytes): The client certificate source used by the client.
444
+ universe_domain (str): The universe domain used by the client.
445
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
446
+ Possible values are "always", "auto", or "never".
447
+
448
+ Returns:
449
+ str: The API endpoint to be used by the client.
450
+ """
451
+ if api_override is not None:
452
+ api_endpoint = api_override
453
+ elif use_mtls_endpoint == "always" or (
454
+ use_mtls_endpoint == "auto" and client_cert_source
455
+ ):
456
+ _default_universe = PermissionServiceClient._DEFAULT_UNIVERSE
457
+ if universe_domain != _default_universe:
458
+ raise MutualTLSChannelError(
459
+ f"mTLS is not supported in any universe other than {_default_universe}."
460
+ )
461
+ api_endpoint = PermissionServiceClient.DEFAULT_MTLS_ENDPOINT
462
+ else:
463
+ api_endpoint = PermissionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
464
+ UNIVERSE_DOMAIN=universe_domain
465
+ )
466
+ return api_endpoint
467
+
468
+ @staticmethod
469
+ def _get_universe_domain(
470
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
471
+ ) -> str:
472
+ """Return the universe domain used by the client.
473
+
474
+ Args:
475
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
476
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
477
+
478
+ Returns:
479
+ str: The universe domain to be used by the client.
480
+
481
+ Raises:
482
+ ValueError: If the universe domain is an empty string.
483
+ """
484
+ universe_domain = PermissionServiceClient._DEFAULT_UNIVERSE
485
+ if client_universe_domain is not None:
486
+ universe_domain = client_universe_domain
487
+ elif universe_domain_env is not None:
488
+ universe_domain = universe_domain_env
489
+ if len(universe_domain.strip()) == 0:
490
+ raise ValueError("Universe Domain cannot be an empty string.")
491
+ return universe_domain
492
+
493
+ def _validate_universe_domain(self):
494
+ """Validates client's and credentials' universe domains are consistent.
495
+
496
+ Returns:
497
+ bool: True iff the configured universe domain is valid.
498
+
499
+ Raises:
500
+ ValueError: If the configured universe domain is not valid.
501
+ """
502
+
503
+ # NOTE (b/349488459): universe validation is disabled until further notice.
504
+ return True
505
+
506
+ @property
507
+ def api_endpoint(self):
508
+ """Return the API endpoint used by the client instance.
509
+
510
+ Returns:
511
+ str: The API endpoint used by the client instance.
512
+ """
513
+ return self._api_endpoint
514
+
515
+ @property
516
+ def universe_domain(self) -> str:
517
+ """Return the universe domain used by the client instance.
518
+
519
+ Returns:
520
+ str: The universe domain used by the client instance.
521
+ """
522
+ return self._universe_domain
523
+
524
+ def __init__(
525
+ self,
526
+ *,
527
+ credentials: Optional[ga_credentials.Credentials] = None,
528
+ transport: Optional[
529
+ Union[
530
+ str,
531
+ PermissionServiceTransport,
532
+ Callable[..., PermissionServiceTransport],
533
+ ]
534
+ ] = None,
535
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
536
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
537
+ ) -> None:
538
+ """Instantiates the permission service client.
539
+
540
+ Args:
541
+ credentials (Optional[google.auth.credentials.Credentials]): The
542
+ authorization credentials to attach to requests. These
543
+ credentials identify the application to the service; if none
544
+ are specified, the client will attempt to ascertain the
545
+ credentials from the environment.
546
+ transport (Optional[Union[str,PermissionServiceTransport,Callable[..., PermissionServiceTransport]]]):
547
+ The transport to use, or a Callable that constructs and returns a new transport.
548
+ If a Callable is given, it will be called with the same set of initialization
549
+ arguments as used in the PermissionServiceTransport constructor.
550
+ If set to None, a transport is chosen automatically.
551
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
552
+ Custom options for the client.
553
+
554
+ 1. The ``api_endpoint`` property can be used to override the
555
+ default endpoint provided by the client when ``transport`` is
556
+ not explicitly provided. Only if this property is not set and
557
+ ``transport`` was not explicitly provided, the endpoint is
558
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
559
+ variable, which have one of the following values:
560
+ "always" (always use the default mTLS endpoint), "never" (always
561
+ use the default regular endpoint) and "auto" (auto-switch to the
562
+ default mTLS endpoint if client certificate is present; this is
563
+ the default value).
564
+
565
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
566
+ is "true", then the ``client_cert_source`` property can be used
567
+ to provide a client certificate for mTLS transport. If
568
+ not provided, the default SSL client certificate will be used if
569
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
570
+ set, no client certificate will be used.
571
+
572
+ 3. The ``universe_domain`` property can be used to override the
573
+ default "googleapis.com" universe. Note that the ``api_endpoint``
574
+ property still takes precedence; and ``universe_domain`` is
575
+ currently not supported for mTLS.
576
+
577
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
578
+ The client info used to send a user-agent string along with
579
+ API requests. If ``None``, then default info will be used.
580
+ Generally, you only need to set this if you're developing
581
+ your own client library.
582
+
583
+ Raises:
584
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
585
+ creation failed for any reason.
586
+ """
587
+ self._client_options = client_options
588
+ if isinstance(self._client_options, dict):
589
+ self._client_options = client_options_lib.from_dict(self._client_options)
590
+ if self._client_options is None:
591
+ self._client_options = client_options_lib.ClientOptions()
592
+ self._client_options = cast(
593
+ client_options_lib.ClientOptions, self._client_options
594
+ )
595
+
596
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
597
+
598
+ (
599
+ self._use_client_cert,
600
+ self._use_mtls_endpoint,
601
+ self._universe_domain_env,
602
+ ) = PermissionServiceClient._read_environment_variables()
603
+ self._client_cert_source = PermissionServiceClient._get_client_cert_source(
604
+ self._client_options.client_cert_source, self._use_client_cert
605
+ )
606
+ self._universe_domain = PermissionServiceClient._get_universe_domain(
607
+ universe_domain_opt, self._universe_domain_env
608
+ )
609
+ self._api_endpoint = None # updated below, depending on `transport`
610
+
611
+ # Initialize the universe domain validation.
612
+ self._is_universe_domain_valid = False
613
+
614
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
615
+ # Setup logging.
616
+ client_logging.initialize_logging()
617
+
618
+ api_key_value = getattr(self._client_options, "api_key", None)
619
+ if api_key_value and credentials:
620
+ raise ValueError(
621
+ "client_options.api_key and credentials are mutually exclusive"
622
+ )
623
+
624
+ # Save or instantiate the transport.
625
+ # Ordinarily, we provide the transport, but allowing a custom transport
626
+ # instance provides an extensibility point for unusual situations.
627
+ transport_provided = isinstance(transport, PermissionServiceTransport)
628
+ if transport_provided:
629
+ # transport is a PermissionServiceTransport instance.
630
+ if credentials or self._client_options.credentials_file or api_key_value:
631
+ raise ValueError(
632
+ "When providing a transport instance, "
633
+ "provide its credentials directly."
634
+ )
635
+ if self._client_options.scopes:
636
+ raise ValueError(
637
+ "When providing a transport instance, provide its scopes "
638
+ "directly."
639
+ )
640
+ self._transport = cast(PermissionServiceTransport, transport)
641
+ self._api_endpoint = self._transport.host
642
+
643
+ self._api_endpoint = (
644
+ self._api_endpoint
645
+ or PermissionServiceClient._get_api_endpoint(
646
+ self._client_options.api_endpoint,
647
+ self._client_cert_source,
648
+ self._universe_domain,
649
+ self._use_mtls_endpoint,
650
+ )
651
+ )
652
+
653
+ if not transport_provided:
654
+ import google.auth._default # type: ignore
655
+
656
+ if api_key_value and hasattr(
657
+ google.auth._default, "get_api_key_credentials"
658
+ ):
659
+ credentials = google.auth._default.get_api_key_credentials(
660
+ api_key_value
661
+ )
662
+
663
+ transport_init: Union[
664
+ Type[PermissionServiceTransport],
665
+ Callable[..., PermissionServiceTransport],
666
+ ] = (
667
+ PermissionServiceClient.get_transport_class(transport)
668
+ if isinstance(transport, str) or transport is None
669
+ else cast(Callable[..., PermissionServiceTransport], transport)
670
+ )
671
+ # initialize with the provided callable or the passed in class
672
+ self._transport = transport_init(
673
+ credentials=credentials,
674
+ credentials_file=self._client_options.credentials_file,
675
+ host=self._api_endpoint,
676
+ scopes=self._client_options.scopes,
677
+ client_cert_source_for_mtls=self._client_cert_source,
678
+ quota_project_id=self._client_options.quota_project_id,
679
+ client_info=client_info,
680
+ always_use_jwt_access=True,
681
+ api_audience=self._client_options.api_audience,
682
+ )
683
+
684
+ if "async" not in str(self._transport):
685
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
686
+ std_logging.DEBUG
687
+ ): # pragma: NO COVER
688
+ _LOGGER.debug(
689
+ "Created client `google.ai.generativelanguage_v1beta3.PermissionServiceClient`.",
690
+ extra={
691
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
692
+ "universeDomain": getattr(
693
+ self._transport._credentials, "universe_domain", ""
694
+ ),
695
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
696
+ "credentialsInfo": getattr(
697
+ self.transport._credentials, "get_cred_info", lambda: None
698
+ )(),
699
+ }
700
+ if hasattr(self._transport, "_credentials")
701
+ else {
702
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
703
+ "credentialsType": None,
704
+ },
705
+ )
706
+
707
+ def create_permission(
708
+ self,
709
+ request: Optional[
710
+ Union[permission_service.CreatePermissionRequest, dict]
711
+ ] = None,
712
+ *,
713
+ parent: Optional[str] = None,
714
+ permission: Optional[gag_permission.Permission] = None,
715
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
716
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
717
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
718
+ ) -> gag_permission.Permission:
719
+ r"""Create a permission to a specific resource.
720
+
721
+ .. code-block:: python
722
+
723
+ # This snippet has been automatically generated and should be regarded as a
724
+ # code template only.
725
+ # It will require modifications to work:
726
+ # - It may require correct/in-range values for request initialization.
727
+ # - It may require specifying regional endpoints when creating the service
728
+ # client as shown in:
729
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
730
+ from google.ai import generativelanguage_v1beta3
731
+
732
+ def sample_create_permission():
733
+ # Create a client
734
+ client = generativelanguage_v1beta3.PermissionServiceClient()
735
+
736
+ # Initialize request argument(s)
737
+ request = generativelanguage_v1beta3.CreatePermissionRequest(
738
+ parent="parent_value",
739
+ )
740
+
741
+ # Make the request
742
+ response = client.create_permission(request=request)
743
+
744
+ # Handle the response
745
+ print(response)
746
+
747
+ Args:
748
+ request (Union[google.ai.generativelanguage_v1beta3.types.CreatePermissionRequest, dict]):
749
+ The request object. Request to create a ``Permission``.
750
+ parent (str):
751
+ Required. The parent resource of the ``Permission``.
752
+ Format: tunedModels/{tuned_model}
753
+
754
+ This corresponds to the ``parent`` field
755
+ on the ``request`` instance; if ``request`` is provided, this
756
+ should not be set.
757
+ permission (google.ai.generativelanguage_v1beta3.types.Permission):
758
+ Required. The permission to create.
759
+ This corresponds to the ``permission`` field
760
+ on the ``request`` instance; if ``request`` is provided, this
761
+ should not be set.
762
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
763
+ should be retried.
764
+ timeout (float): The timeout for this request.
765
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
766
+ sent along with the request as metadata. Normally, each value must be of type `str`,
767
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
768
+ be of type `bytes`.
769
+
770
+ Returns:
771
+ google.ai.generativelanguage_v1beta3.types.Permission:
772
+ Permission resource grants user,
773
+ group or the rest of the world access to
774
+ the PaLM API resource (e.g. a tuned
775
+ model, file).
776
+
777
+ A role is a collection of permitted
778
+ operations that allows users to perform
779
+ specific actions on PaLM API resources.
780
+ To make them available to users, groups,
781
+ or service accounts, you assign roles.
782
+ When you assign a role, you grant
783
+ permissions that the role contains.
784
+
785
+ There are three concentric roles. Each
786
+ role is a superset of the previous
787
+ role's permitted operations:
788
+
789
+ - reader can use the resource (e.g.
790
+ tuned model) for inference
791
+ - writer has reader's permissions and
792
+ additionally can edit and share
793
+ - owner has writer's permissions and
794
+ additionally can delete
795
+
796
+ """
797
+ # Create or coerce a protobuf request object.
798
+ # - Quick check: If we got a request object, we should *not* have
799
+ # gotten any keyword arguments that map to the request.
800
+ has_flattened_params = any([parent, permission])
801
+ if request is not None and has_flattened_params:
802
+ raise ValueError(
803
+ "If the `request` argument is set, then none of "
804
+ "the individual field arguments should be set."
805
+ )
806
+
807
+ # - Use the request object if provided (there's no risk of modifying the input as
808
+ # there are no flattened fields), or create one.
809
+ if not isinstance(request, permission_service.CreatePermissionRequest):
810
+ request = permission_service.CreatePermissionRequest(request)
811
+ # If we have keyword arguments corresponding to fields on the
812
+ # request, apply these.
813
+ if parent is not None:
814
+ request.parent = parent
815
+ if permission is not None:
816
+ request.permission = permission
817
+
818
+ # Wrap the RPC method; this adds retry and timeout information,
819
+ # and friendly error handling.
820
+ rpc = self._transport._wrapped_methods[self._transport.create_permission]
821
+
822
+ # Certain fields should be provided within the metadata header;
823
+ # add these here.
824
+ metadata = tuple(metadata) + (
825
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
826
+ )
827
+
828
+ # Validate the universe domain.
829
+ self._validate_universe_domain()
830
+
831
+ # Send the request.
832
+ response = rpc(
833
+ request,
834
+ retry=retry,
835
+ timeout=timeout,
836
+ metadata=metadata,
837
+ )
838
+
839
+ # Done; return the response.
840
+ return response
841
+
842
+ def get_permission(
843
+ self,
844
+ request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None,
845
+ *,
846
+ name: Optional[str] = None,
847
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
848
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
849
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
850
+ ) -> permission.Permission:
851
+ r"""Gets information about a specific Permission.
852
+
853
+ .. code-block:: python
854
+
855
+ # This snippet has been automatically generated and should be regarded as a
856
+ # code template only.
857
+ # It will require modifications to work:
858
+ # - It may require correct/in-range values for request initialization.
859
+ # - It may require specifying regional endpoints when creating the service
860
+ # client as shown in:
861
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
862
+ from google.ai import generativelanguage_v1beta3
863
+
864
+ def sample_get_permission():
865
+ # Create a client
866
+ client = generativelanguage_v1beta3.PermissionServiceClient()
867
+
868
+ # Initialize request argument(s)
869
+ request = generativelanguage_v1beta3.GetPermissionRequest(
870
+ name="name_value",
871
+ )
872
+
873
+ # Make the request
874
+ response = client.get_permission(request=request)
875
+
876
+ # Handle the response
877
+ print(response)
878
+
879
+ Args:
880
+ request (Union[google.ai.generativelanguage_v1beta3.types.GetPermissionRequest, dict]):
881
+ The request object. Request for getting information about a specific
882
+ ``Permission``.
883
+ name (str):
884
+ Required. The resource name of the permission.
885
+
886
+ Format:
887
+ ``tunedModels/{tuned_model}permissions/{permission}``
888
+
889
+ This corresponds to the ``name`` field
890
+ on the ``request`` instance; if ``request`` is provided, this
891
+ should not be set.
892
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
893
+ should be retried.
894
+ timeout (float): The timeout for this request.
895
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
896
+ sent along with the request as metadata. Normally, each value must be of type `str`,
897
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
898
+ be of type `bytes`.
899
+
900
+ Returns:
901
+ google.ai.generativelanguage_v1beta3.types.Permission:
902
+ Permission resource grants user,
903
+ group or the rest of the world access to
904
+ the PaLM API resource (e.g. a tuned
905
+ model, file).
906
+
907
+ A role is a collection of permitted
908
+ operations that allows users to perform
909
+ specific actions on PaLM API resources.
910
+ To make them available to users, groups,
911
+ or service accounts, you assign roles.
912
+ When you assign a role, you grant
913
+ permissions that the role contains.
914
+
915
+ There are three concentric roles. Each
916
+ role is a superset of the previous
917
+ role's permitted operations:
918
+
919
+ - reader can use the resource (e.g.
920
+ tuned model) for inference
921
+ - writer has reader's permissions and
922
+ additionally can edit and share
923
+ - owner has writer's permissions and
924
+ additionally can delete
925
+
926
+ """
927
+ # Create or coerce a protobuf request object.
928
+ # - Quick check: If we got a request object, we should *not* have
929
+ # gotten any keyword arguments that map to the request.
930
+ has_flattened_params = any([name])
931
+ if request is not None and has_flattened_params:
932
+ raise ValueError(
933
+ "If the `request` argument is set, then none of "
934
+ "the individual field arguments should be set."
935
+ )
936
+
937
+ # - Use the request object if provided (there's no risk of modifying the input as
938
+ # there are no flattened fields), or create one.
939
+ if not isinstance(request, permission_service.GetPermissionRequest):
940
+ request = permission_service.GetPermissionRequest(request)
941
+ # If we have keyword arguments corresponding to fields on the
942
+ # request, apply these.
943
+ if name is not None:
944
+ request.name = name
945
+
946
+ # Wrap the RPC method; this adds retry and timeout information,
947
+ # and friendly error handling.
948
+ rpc = self._transport._wrapped_methods[self._transport.get_permission]
949
+
950
+ # Certain fields should be provided within the metadata header;
951
+ # add these here.
952
+ metadata = tuple(metadata) + (
953
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
954
+ )
955
+
956
+ # Validate the universe domain.
957
+ self._validate_universe_domain()
958
+
959
+ # Send the request.
960
+ response = rpc(
961
+ request,
962
+ retry=retry,
963
+ timeout=timeout,
964
+ metadata=metadata,
965
+ )
966
+
967
+ # Done; return the response.
968
+ return response
969
+
970
+ def list_permissions(
971
+ self,
972
+ request: Optional[
973
+ Union[permission_service.ListPermissionsRequest, dict]
974
+ ] = None,
975
+ *,
976
+ parent: Optional[str] = None,
977
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
978
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
979
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
980
+ ) -> pagers.ListPermissionsPager:
981
+ r"""Lists permissions for the specific resource.
982
+
983
+ .. code-block:: python
984
+
985
+ # This snippet has been automatically generated and should be regarded as a
986
+ # code template only.
987
+ # It will require modifications to work:
988
+ # - It may require correct/in-range values for request initialization.
989
+ # - It may require specifying regional endpoints when creating the service
990
+ # client as shown in:
991
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
992
+ from google.ai import generativelanguage_v1beta3
993
+
994
+ def sample_list_permissions():
995
+ # Create a client
996
+ client = generativelanguage_v1beta3.PermissionServiceClient()
997
+
998
+ # Initialize request argument(s)
999
+ request = generativelanguage_v1beta3.ListPermissionsRequest(
1000
+ parent="parent_value",
1001
+ )
1002
+
1003
+ # Make the request
1004
+ page_result = client.list_permissions(request=request)
1005
+
1006
+ # Handle the response
1007
+ for response in page_result:
1008
+ print(response)
1009
+
1010
+ Args:
1011
+ request (Union[google.ai.generativelanguage_v1beta3.types.ListPermissionsRequest, dict]):
1012
+ The request object. Request for listing permissions.
1013
+ parent (str):
1014
+ Required. The parent resource of the permissions.
1015
+ Format: tunedModels/{tuned_model}
1016
+
1017
+ This corresponds to the ``parent`` field
1018
+ on the ``request`` instance; if ``request`` is provided, this
1019
+ should not be set.
1020
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1021
+ should be retried.
1022
+ timeout (float): The timeout for this request.
1023
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1024
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1025
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1026
+ be of type `bytes`.
1027
+
1028
+ Returns:
1029
+ google.ai.generativelanguage_v1beta3.services.permission_service.pagers.ListPermissionsPager:
1030
+ Response from ListPermissions containing a paginated list of
1031
+ permissions.
1032
+
1033
+ Iterating over this object will yield results and
1034
+ resolve additional pages automatically.
1035
+
1036
+ """
1037
+ # Create or coerce a protobuf request object.
1038
+ # - Quick check: If we got a request object, we should *not* have
1039
+ # gotten any keyword arguments that map to the request.
1040
+ has_flattened_params = any([parent])
1041
+ if request is not None and has_flattened_params:
1042
+ raise ValueError(
1043
+ "If the `request` argument is set, then none of "
1044
+ "the individual field arguments should be set."
1045
+ )
1046
+
1047
+ # - Use the request object if provided (there's no risk of modifying the input as
1048
+ # there are no flattened fields), or create one.
1049
+ if not isinstance(request, permission_service.ListPermissionsRequest):
1050
+ request = permission_service.ListPermissionsRequest(request)
1051
+ # If we have keyword arguments corresponding to fields on the
1052
+ # request, apply these.
1053
+ if parent is not None:
1054
+ request.parent = parent
1055
+
1056
+ # Wrap the RPC method; this adds retry and timeout information,
1057
+ # and friendly error handling.
1058
+ rpc = self._transport._wrapped_methods[self._transport.list_permissions]
1059
+
1060
+ # Certain fields should be provided within the metadata header;
1061
+ # add these here.
1062
+ metadata = tuple(metadata) + (
1063
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
1064
+ )
1065
+
1066
+ # Validate the universe domain.
1067
+ self._validate_universe_domain()
1068
+
1069
+ # Send the request.
1070
+ response = rpc(
1071
+ request,
1072
+ retry=retry,
1073
+ timeout=timeout,
1074
+ metadata=metadata,
1075
+ )
1076
+
1077
+ # This method is paged; wrap the response in a pager, which provides
1078
+ # an `__iter__` convenience method.
1079
+ response = pagers.ListPermissionsPager(
1080
+ method=rpc,
1081
+ request=request,
1082
+ response=response,
1083
+ retry=retry,
1084
+ timeout=timeout,
1085
+ metadata=metadata,
1086
+ )
1087
+
1088
+ # Done; return the response.
1089
+ return response
1090
+
1091
+ def update_permission(
1092
+ self,
1093
+ request: Optional[
1094
+ Union[permission_service.UpdatePermissionRequest, dict]
1095
+ ] = None,
1096
+ *,
1097
+ permission: Optional[gag_permission.Permission] = None,
1098
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
1099
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1100
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1101
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1102
+ ) -> gag_permission.Permission:
1103
+ r"""Updates the permission.
1104
+
1105
+ .. code-block:: python
1106
+
1107
+ # This snippet has been automatically generated and should be regarded as a
1108
+ # code template only.
1109
+ # It will require modifications to work:
1110
+ # - It may require correct/in-range values for request initialization.
1111
+ # - It may require specifying regional endpoints when creating the service
1112
+ # client as shown in:
1113
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1114
+ from google.ai import generativelanguage_v1beta3
1115
+
1116
+ def sample_update_permission():
1117
+ # Create a client
1118
+ client = generativelanguage_v1beta3.PermissionServiceClient()
1119
+
1120
+ # Initialize request argument(s)
1121
+ request = generativelanguage_v1beta3.UpdatePermissionRequest(
1122
+ )
1123
+
1124
+ # Make the request
1125
+ response = client.update_permission(request=request)
1126
+
1127
+ # Handle the response
1128
+ print(response)
1129
+
1130
+ Args:
1131
+ request (Union[google.ai.generativelanguage_v1beta3.types.UpdatePermissionRequest, dict]):
1132
+ The request object. Request to update the ``Permission``.
1133
+ permission (google.ai.generativelanguage_v1beta3.types.Permission):
1134
+ Required. The permission to update.
1135
+
1136
+ The permission's ``name`` field is used to identify the
1137
+ permission to update.
1138
+
1139
+ This corresponds to the ``permission`` field
1140
+ on the ``request`` instance; if ``request`` is provided, this
1141
+ should not be set.
1142
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
1143
+ Required. The list of fields to update. Accepted ones:
1144
+
1145
+ - role (``Permission.role`` field)
1146
+
1147
+ This corresponds to the ``update_mask`` field
1148
+ on the ``request`` instance; if ``request`` is provided, this
1149
+ should not be set.
1150
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1151
+ should be retried.
1152
+ timeout (float): The timeout for this request.
1153
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1154
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1155
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1156
+ be of type `bytes`.
1157
+
1158
+ Returns:
1159
+ google.ai.generativelanguage_v1beta3.types.Permission:
1160
+ Permission resource grants user,
1161
+ group or the rest of the world access to
1162
+ the PaLM API resource (e.g. a tuned
1163
+ model, file).
1164
+
1165
+ A role is a collection of permitted
1166
+ operations that allows users to perform
1167
+ specific actions on PaLM API resources.
1168
+ To make them available to users, groups,
1169
+ or service accounts, you assign roles.
1170
+ When you assign a role, you grant
1171
+ permissions that the role contains.
1172
+
1173
+ There are three concentric roles. Each
1174
+ role is a superset of the previous
1175
+ role's permitted operations:
1176
+
1177
+ - reader can use the resource (e.g.
1178
+ tuned model) for inference
1179
+ - writer has reader's permissions and
1180
+ additionally can edit and share
1181
+ - owner has writer's permissions and
1182
+ additionally can delete
1183
+
1184
+ """
1185
+ # Create or coerce a protobuf request object.
1186
+ # - Quick check: If we got a request object, we should *not* have
1187
+ # gotten any keyword arguments that map to the request.
1188
+ has_flattened_params = any([permission, update_mask])
1189
+ if request is not None and has_flattened_params:
1190
+ raise ValueError(
1191
+ "If the `request` argument is set, then none of "
1192
+ "the individual field arguments should be set."
1193
+ )
1194
+
1195
+ # - Use the request object if provided (there's no risk of modifying the input as
1196
+ # there are no flattened fields), or create one.
1197
+ if not isinstance(request, permission_service.UpdatePermissionRequest):
1198
+ request = permission_service.UpdatePermissionRequest(request)
1199
+ # If we have keyword arguments corresponding to fields on the
1200
+ # request, apply these.
1201
+ if permission is not None:
1202
+ request.permission = permission
1203
+ if update_mask is not None:
1204
+ request.update_mask = update_mask
1205
+
1206
+ # Wrap the RPC method; this adds retry and timeout information,
1207
+ # and friendly error handling.
1208
+ rpc = self._transport._wrapped_methods[self._transport.update_permission]
1209
+
1210
+ # Certain fields should be provided within the metadata header;
1211
+ # add these here.
1212
+ metadata = tuple(metadata) + (
1213
+ gapic_v1.routing_header.to_grpc_metadata(
1214
+ (("permission.name", request.permission.name),)
1215
+ ),
1216
+ )
1217
+
1218
+ # Validate the universe domain.
1219
+ self._validate_universe_domain()
1220
+
1221
+ # Send the request.
1222
+ response = rpc(
1223
+ request,
1224
+ retry=retry,
1225
+ timeout=timeout,
1226
+ metadata=metadata,
1227
+ )
1228
+
1229
+ # Done; return the response.
1230
+ return response
1231
+
1232
+ def delete_permission(
1233
+ self,
1234
+ request: Optional[
1235
+ Union[permission_service.DeletePermissionRequest, dict]
1236
+ ] = None,
1237
+ *,
1238
+ name: Optional[str] = None,
1239
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1240
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1241
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1242
+ ) -> None:
1243
+ r"""Deletes the permission.
1244
+
1245
+ .. code-block:: python
1246
+
1247
+ # This snippet has been automatically generated and should be regarded as a
1248
+ # code template only.
1249
+ # It will require modifications to work:
1250
+ # - It may require correct/in-range values for request initialization.
1251
+ # - It may require specifying regional endpoints when creating the service
1252
+ # client as shown in:
1253
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1254
+ from google.ai import generativelanguage_v1beta3
1255
+
1256
+ def sample_delete_permission():
1257
+ # Create a client
1258
+ client = generativelanguage_v1beta3.PermissionServiceClient()
1259
+
1260
+ # Initialize request argument(s)
1261
+ request = generativelanguage_v1beta3.DeletePermissionRequest(
1262
+ name="name_value",
1263
+ )
1264
+
1265
+ # Make the request
1266
+ client.delete_permission(request=request)
1267
+
1268
+ Args:
1269
+ request (Union[google.ai.generativelanguage_v1beta3.types.DeletePermissionRequest, dict]):
1270
+ The request object. Request to delete the ``Permission``.
1271
+ name (str):
1272
+ Required. The resource name of the permission. Format:
1273
+ ``tunedModels/{tuned_model}/permissions/{permission}``
1274
+
1275
+ This corresponds to the ``name`` field
1276
+ on the ``request`` instance; if ``request`` is provided, this
1277
+ should not be set.
1278
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1279
+ should be retried.
1280
+ timeout (float): The timeout for this request.
1281
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1282
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1283
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1284
+ be of type `bytes`.
1285
+ """
1286
+ # Create or coerce a protobuf request object.
1287
+ # - Quick check: If we got a request object, we should *not* have
1288
+ # gotten any keyword arguments that map to the request.
1289
+ has_flattened_params = any([name])
1290
+ if request is not None and has_flattened_params:
1291
+ raise ValueError(
1292
+ "If the `request` argument is set, then none of "
1293
+ "the individual field arguments should be set."
1294
+ )
1295
+
1296
+ # - Use the request object if provided (there's no risk of modifying the input as
1297
+ # there are no flattened fields), or create one.
1298
+ if not isinstance(request, permission_service.DeletePermissionRequest):
1299
+ request = permission_service.DeletePermissionRequest(request)
1300
+ # If we have keyword arguments corresponding to fields on the
1301
+ # request, apply these.
1302
+ if name is not None:
1303
+ request.name = name
1304
+
1305
+ # Wrap the RPC method; this adds retry and timeout information,
1306
+ # and friendly error handling.
1307
+ rpc = self._transport._wrapped_methods[self._transport.delete_permission]
1308
+
1309
+ # Certain fields should be provided within the metadata header;
1310
+ # add these here.
1311
+ metadata = tuple(metadata) + (
1312
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1313
+ )
1314
+
1315
+ # Validate the universe domain.
1316
+ self._validate_universe_domain()
1317
+
1318
+ # Send the request.
1319
+ rpc(
1320
+ request,
1321
+ retry=retry,
1322
+ timeout=timeout,
1323
+ metadata=metadata,
1324
+ )
1325
+
1326
+ def transfer_ownership(
1327
+ self,
1328
+ request: Optional[
1329
+ Union[permission_service.TransferOwnershipRequest, dict]
1330
+ ] = None,
1331
+ *,
1332
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1333
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1334
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1335
+ ) -> permission_service.TransferOwnershipResponse:
1336
+ r"""Transfers ownership of the tuned model.
1337
+ This is the only way to change ownership of the tuned
1338
+ model. The current owner will be downgraded to writer
1339
+ role.
1340
+
1341
+ .. code-block:: python
1342
+
1343
+ # This snippet has been automatically generated and should be regarded as a
1344
+ # code template only.
1345
+ # It will require modifications to work:
1346
+ # - It may require correct/in-range values for request initialization.
1347
+ # - It may require specifying regional endpoints when creating the service
1348
+ # client as shown in:
1349
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1350
+ from google.ai import generativelanguage_v1beta3
1351
+
1352
+ def sample_transfer_ownership():
1353
+ # Create a client
1354
+ client = generativelanguage_v1beta3.PermissionServiceClient()
1355
+
1356
+ # Initialize request argument(s)
1357
+ request = generativelanguage_v1beta3.TransferOwnershipRequest(
1358
+ name="name_value",
1359
+ email_address="email_address_value",
1360
+ )
1361
+
1362
+ # Make the request
1363
+ response = client.transfer_ownership(request=request)
1364
+
1365
+ # Handle the response
1366
+ print(response)
1367
+
1368
+ Args:
1369
+ request (Union[google.ai.generativelanguage_v1beta3.types.TransferOwnershipRequest, dict]):
1370
+ The request object. Request to transfer the ownership of
1371
+ the tuned model.
1372
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1373
+ should be retried.
1374
+ timeout (float): The timeout for this request.
1375
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1376
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1377
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1378
+ be of type `bytes`.
1379
+
1380
+ Returns:
1381
+ google.ai.generativelanguage_v1beta3.types.TransferOwnershipResponse:
1382
+ Response from TransferOwnership.
1383
+ """
1384
+ # Create or coerce a protobuf request object.
1385
+ # - Use the request object if provided (there's no risk of modifying the input as
1386
+ # there are no flattened fields), or create one.
1387
+ if not isinstance(request, permission_service.TransferOwnershipRequest):
1388
+ request = permission_service.TransferOwnershipRequest(request)
1389
+
1390
+ # Wrap the RPC method; this adds retry and timeout information,
1391
+ # and friendly error handling.
1392
+ rpc = self._transport._wrapped_methods[self._transport.transfer_ownership]
1393
+
1394
+ # Certain fields should be provided within the metadata header;
1395
+ # add these here.
1396
+ metadata = tuple(metadata) + (
1397
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1398
+ )
1399
+
1400
+ # Validate the universe domain.
1401
+ self._validate_universe_domain()
1402
+
1403
+ # Send the request.
1404
+ response = rpc(
1405
+ request,
1406
+ retry=retry,
1407
+ timeout=timeout,
1408
+ metadata=metadata,
1409
+ )
1410
+
1411
+ # Done; return the response.
1412
+ return response
1413
+
1414
+ def __enter__(self) -> "PermissionServiceClient":
1415
+ return self
1416
+
1417
+ def __exit__(self, type, value, traceback):
1418
+ """Releases underlying transport's resources.
1419
+
1420
+ .. warning::
1421
+ ONLY use as a context manager if the transport is NOT shared
1422
+ with other clients! Exiting the with block will CLOSE the transport
1423
+ and may cause errors in other clients!
1424
+ """
1425
+ self.transport.close()
1426
+
1427
+
1428
# Client metadata reported to the service alongside each request, stamped
# with this package's version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)

__all__ = ("PermissionServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/pagers.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from typing import (
17
+ Any,
18
+ AsyncIterator,
19
+ Awaitable,
20
+ Callable,
21
+ Iterator,
22
+ Optional,
23
+ Sequence,
24
+ Tuple,
25
+ Union,
26
+ )
27
+
28
+ from google.api_core import gapic_v1
29
+ from google.api_core import retry as retries
30
+ from google.api_core import retry_async as retries_async
31
+
32
+ try:
33
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
34
+ OptionalAsyncRetry = Union[
35
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
36
+ ]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
40
+
41
+ from google.ai.generativelanguage_v1beta3.types import permission, permission_service
42
+
43
+
44
class ListPermissionsPager:
    """A pager for iterating through ``list_permissions`` requests.

    Thinly wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse`
    and exposes ``__iter__`` over its ``permissions`` field, transparently
    fetching further pages (by re-invoking the original method with an
    updated ``page_token``) as iteration proceeds.

    All the usual :class:`google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., permission_service.ListPermissionsResponse],
        request: permission_service.ListPermissionsRequest,
        response: permission_service.ListPermissionsResponse,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.ai.generativelanguage_v1beta3.types.ListPermissionsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse):
                The initial response object.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = permission_service.ListPermissionsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[permission_service.ListPermissionsResponse]:
        """Yield each page of results, fetching subsequent pages lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __iter__(self) -> Iterator[permission.Permission]:
        return (item for page in self.pages for item in page.permissions)

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
118
+
119
+
120
class ListPermissionsAsyncPager:
    """A pager for iterating through ``list_permissions`` requests.

    Thinly wraps an initial
    :class:`google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse`
    and exposes ``__aiter__`` over its ``permissions`` field, transparently
    awaiting further pages (by re-invoking the original coroutine method with
    an updated ``page_token``) as iteration proceeds.

    All the usual :class:`google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[permission_service.ListPermissionsResponse]],
        request: permission_service.ListPermissionsRequest,
        response: permission_service.ListPermissionsResponse,
        *,
        retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.ai.generativelanguage_v1beta3.types.ListPermissionsRequest):
                The initial request object.
            response (google.ai.generativelanguage_v1beta3.types.ListPermissionsResponse):
                The initial response object.
            retry (google.api_core.retry.AsyncRetry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = permission_service.ListPermissionsRequest(request)
        self._response = response
        self._retry = retry
        self._timeout = timeout
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[permission_service.ListPermissionsResponse]:
        """Asynchronously yield each page, fetching subsequent pages lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(
                self._request,
                retry=self._retry,
                timeout=self._timeout,
                metadata=self._metadata,
            )
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[permission.Permission]:
        async def _flatten():
            async for page in self.pages:
                for item in page.permissions:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
from collections import OrderedDict
from typing import Dict, Type

from .base import PermissionServiceTransport
from .grpc import PermissionServiceGrpcTransport
from .grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
from .rest import PermissionServiceRestInterceptor, PermissionServiceRestTransport

# Registry mapping transport names ("grpc", "grpc_asyncio", "rest") to their
# implementations; insertion order is preserved for deterministic selection.
_transport_registry: Dict[str, Type[PermissionServiceTransport]] = OrderedDict(
    (
        ("grpc", PermissionServiceGrpcTransport),
        ("grpc_asyncio", PermissionServiceGrpcAsyncIOTransport),
        ("rest", PermissionServiceRestTransport),
    )
)

__all__ = (
    "PermissionServiceTransport",
    "PermissionServiceGrpcTransport",
    "PermissionServiceGrpcAsyncIOTransport",
    "PermissionServiceRestTransport",
    "PermissionServiceRestInterceptor",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (24.4 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (50.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/base.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+
29
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
30
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
31
+ from google.ai.generativelanguage_v1beta3.types import permission
32
+ from google.ai.generativelanguage_v1beta3.types import permission_service
33
+
34
# Default client metadata, stamped with this package's version, attached to
# outgoing requests as part of the user-agent string.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
37
+
38
+
39
class PermissionServiceTransport(abc.ABC):
    """Abstract transport class for PermissionService.

    Concrete transports (gRPC, gRPC-asyncio, REST) implement the RPC
    properties declared below; this base class handles credential
    resolution and host normalization.
    """

    # This service declares no OAuth scopes of its own.
    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        scope_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Remember the requested scopes for subclasses.
        self._scopes = scopes
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # Resolve credentials: an explicit object and a credentials file are
        # mutually exclusive; otherwise fall back to the environment default.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scope_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scope_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # Service-account credentials prefer self-signed JWTs when the caller
        # allows it and the installed auth library supports it.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Normalize the host: default to port 443 (HTTPS) when unspecified.
        self._host = host if ":" in host else host + ":443"

    @property
    def host(self):
        return self._host

    def _prep_wrapped_messages(self, client_info):
        # Wrap every RPC once, up front, with retry/timeout handling.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.create_permission,
                self.get_permission,
                self.list_permissions,
                self.update_permission,
                self.delete_permission,
                self.transfer_ownership,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def create_permission(
        self,
    ) -> Callable[
        [permission_service.CreatePermissionRequest],
        Union[gag_permission.Permission, Awaitable[gag_permission.Permission]],
    ]:
        raise NotImplementedError()

    @property
    def get_permission(
        self,
    ) -> Callable[
        [permission_service.GetPermissionRequest],
        Union[permission.Permission, Awaitable[permission.Permission]],
    ]:
        raise NotImplementedError()

    @property
    def list_permissions(
        self,
    ) -> Callable[
        [permission_service.ListPermissionsRequest],
        Union[
            permission_service.ListPermissionsResponse,
            Awaitable[permission_service.ListPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def update_permission(
        self,
    ) -> Callable[
        [permission_service.UpdatePermissionRequest],
        Union[gag_permission.Permission, Awaitable[gag_permission.Permission]],
    ]:
        raise NotImplementedError()

    @property
    def delete_permission(
        self,
    ) -> Callable[
        [permission_service.DeletePermissionRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def transfer_ownership(
        self,
    ) -> Callable[
        [permission_service.TransferOwnershipRequest],
        Union[
            permission_service.TransferOwnershipResponse,
            Awaitable[permission_service.TransferOwnershipResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()
239
+
240
+
241
# Public surface of this module.
__all__ = ("PermissionServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/grpc.py ADDED
@@ -0,0 +1,505 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+ from google.protobuf.json_format import MessageToJson
29
+ import google.protobuf.message
30
+ import grpc # type: ignore
31
+ import proto # type: ignore
32
+
33
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
34
+ from google.ai.generativelanguage_v1beta3.types import permission
35
+ from google.ai.generativelanguage_v1beta3.types import permission_service
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
38
+
39
+ try:
40
+ from google.api_core import client_logging # type: ignore
41
+
42
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
43
+ except ImportError: # pragma: NO COVER
44
+ CLIENT_LOGGING_SUPPORTED = False
45
+
46
+ _LOGGER = std_logging.getLogger(__name__)
47
+
48
+
49
+ class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
50
+ def intercept_unary_unary(self, continuation, client_call_details, request):
51
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
52
+ std_logging.DEBUG
53
+ )
54
+ if logging_enabled: # pragma: NO COVER
55
+ request_metadata = client_call_details.metadata
56
+ if isinstance(request, proto.Message):
57
+ request_payload = type(request).to_json(request)
58
+ elif isinstance(request, google.protobuf.message.Message):
59
+ request_payload = MessageToJson(request)
60
+ else:
61
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
62
+
63
+ request_metadata = {
64
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
65
+ for key, value in request_metadata
66
+ }
67
+ grpc_request = {
68
+ "payload": request_payload,
69
+ "requestMethod": "grpc",
70
+ "metadata": dict(request_metadata),
71
+ }
72
+ _LOGGER.debug(
73
+ f"Sending request for {client_call_details.method}",
74
+ extra={
75
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
76
+ "rpcName": client_call_details.method,
77
+ "request": grpc_request,
78
+ "metadata": grpc_request["metadata"],
79
+ },
80
+ )
81
+
82
+ response = continuation(client_call_details, request)
83
+ if logging_enabled: # pragma: NO COVER
84
+ response_metadata = response.trailing_metadata()
85
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
86
+ metadata = (
87
+ dict([(k, str(v)) for k, v in response_metadata])
88
+ if response_metadata
89
+ else None
90
+ )
91
+ result = response.result()
92
+ if isinstance(result, proto.Message):
93
+ response_payload = type(result).to_json(result)
94
+ elif isinstance(result, google.protobuf.message.Message):
95
+ response_payload = MessageToJson(result)
96
+ else:
97
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
98
+ grpc_response = {
99
+ "payload": response_payload,
100
+ "metadata": metadata,
101
+ "status": "OK",
102
+ }
103
+ _LOGGER.debug(
104
+ f"Received response for {client_call_details.method}.",
105
+ extra={
106
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
107
+ "rpcName": client_call_details.method,
108
+ "response": grpc_response,
109
+ "metadata": grpc_response["metadata"],
110
+ },
111
+ )
112
+ return response
113
+
114
+
115
+ class PermissionServiceGrpcTransport(PermissionServiceTransport):
116
+ """gRPC backend transport for PermissionService.
117
+
118
+ Provides methods for managing permissions to PaLM API
119
+ resources.
120
+
121
+ This class defines the same methods as the primary client, so the
122
+ primary client can load the underlying transport implementation
123
+ and call it.
124
+
125
+ It sends protocol buffers over the wire using gRPC (which is built on
126
+ top of HTTP/2); the ``grpcio`` package must be installed.
127
+ """
128
+
129
+ _stubs: Dict[str, Callable]
130
+
131
+ def __init__(
132
+ self,
133
+ *,
134
+ host: str = "generativelanguage.googleapis.com",
135
+ credentials: Optional[ga_credentials.Credentials] = None,
136
+ credentials_file: Optional[str] = None,
137
+ scopes: Optional[Sequence[str]] = None,
138
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
139
+ api_mtls_endpoint: Optional[str] = None,
140
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
141
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
142
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
143
+ quota_project_id: Optional[str] = None,
144
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
145
+ always_use_jwt_access: Optional[bool] = False,
146
+ api_audience: Optional[str] = None,
147
+ ) -> None:
148
+ """Instantiate the transport.
149
+
150
+ Args:
151
+ host (Optional[str]):
152
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
153
+ credentials (Optional[google.auth.credentials.Credentials]): The
154
+ authorization credentials to attach to requests. These
155
+ credentials identify the application to the service; if none
156
+ are specified, the client will attempt to ascertain the
157
+ credentials from the environment.
158
+ This argument is ignored if a ``channel`` instance is provided.
159
+ credentials_file (Optional[str]): A file with credentials that can
160
+ be loaded with :func:`google.auth.load_credentials_from_file`.
161
+ This argument is ignored if a ``channel`` instance is provided.
162
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
163
+ ignored if a ``channel`` instance is provided.
164
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
165
+ A ``Channel`` instance through which to make calls, or a Callable
166
+ that constructs and returns one. If set to None, ``self.create_channel``
167
+ is used to create the channel. If a Callable is given, it will be called
168
+ with the same arguments as used in ``self.create_channel``.
169
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
170
+ If provided, it overrides the ``host`` argument and tries to create
171
+ a mutual TLS channel with client SSL credentials from
172
+ ``client_cert_source`` or application default SSL credentials.
173
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
174
+ Deprecated. A callback to provide client SSL certificate bytes and
175
+ private key bytes, both in PEM format. It is ignored if
176
+ ``api_mtls_endpoint`` is None.
177
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
178
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
179
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
180
+ A callback to provide client certificate bytes and private key bytes,
181
+ both in PEM format. It is used to configure a mutual TLS channel. It is
182
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
183
+ quota_project_id (Optional[str]): An optional project to use for billing
184
+ and quota.
185
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
186
+ The client info used to send a user-agent string along with
187
+ API requests. If ``None``, then default info will be used.
188
+ Generally, you only need to set this if you're developing
189
+ your own client library.
190
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
191
+ be used for service account credentials.
192
+
193
+ Raises:
194
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
195
+ creation failed for any reason.
196
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
197
+ and ``credentials_file`` are passed.
198
+ """
199
+ self._grpc_channel = None
200
+ self._ssl_channel_credentials = ssl_channel_credentials
201
+ self._stubs: Dict[str, Callable] = {}
202
+
203
+ if api_mtls_endpoint:
204
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
205
+ if client_cert_source:
206
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
207
+
208
+ if isinstance(channel, grpc.Channel):
209
+ # Ignore credentials if a channel was passed.
210
+ credentials = None
211
+ self._ignore_credentials = True
212
+ # If a channel was explicitly provided, set it.
213
+ self._grpc_channel = channel
214
+ self._ssl_channel_credentials = None
215
+
216
+ else:
217
+ if api_mtls_endpoint:
218
+ host = api_mtls_endpoint
219
+
220
+ # Create SSL credentials with client_cert_source or application
221
+ # default SSL credentials.
222
+ if client_cert_source:
223
+ cert, key = client_cert_source()
224
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
225
+ certificate_chain=cert, private_key=key
226
+ )
227
+ else:
228
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
229
+
230
+ else:
231
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
232
+ cert, key = client_cert_source_for_mtls()
233
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
234
+ certificate_chain=cert, private_key=key
235
+ )
236
+
237
+ # The base transport sets the host, credentials and scopes
238
+ super().__init__(
239
+ host=host,
240
+ credentials=credentials,
241
+ credentials_file=credentials_file,
242
+ scopes=scopes,
243
+ quota_project_id=quota_project_id,
244
+ client_info=client_info,
245
+ always_use_jwt_access=always_use_jwt_access,
246
+ api_audience=api_audience,
247
+ )
248
+
249
+ if not self._grpc_channel:
250
+ # initialize with the provided callable or the default channel
251
+ channel_init = channel or type(self).create_channel
252
+ self._grpc_channel = channel_init(
253
+ self._host,
254
+ # use the credentials which are saved
255
+ credentials=self._credentials,
256
+ # Set ``credentials_file`` to ``None`` here as
257
+ # the credentials that we saved earlier should be used.
258
+ credentials_file=None,
259
+ scopes=self._scopes,
260
+ ssl_credentials=self._ssl_channel_credentials,
261
+ quota_project_id=quota_project_id,
262
+ options=[
263
+ ("grpc.max_send_message_length", -1),
264
+ ("grpc.max_receive_message_length", -1),
265
+ ],
266
+ )
267
+
268
+ self._interceptor = _LoggingClientInterceptor()
269
+ self._logged_channel = grpc.intercept_channel(
270
+ self._grpc_channel, self._interceptor
271
+ )
272
+
273
+ # Wrap messages. This must be done after self._logged_channel exists
274
+ self._prep_wrapped_messages(client_info)
275
+
276
+ @classmethod
277
+ def create_channel(
278
+ cls,
279
+ host: str = "generativelanguage.googleapis.com",
280
+ credentials: Optional[ga_credentials.Credentials] = None,
281
+ credentials_file: Optional[str] = None,
282
+ scopes: Optional[Sequence[str]] = None,
283
+ quota_project_id: Optional[str] = None,
284
+ **kwargs,
285
+ ) -> grpc.Channel:
286
+ """Create and return a gRPC channel object.
287
+ Args:
288
+ host (Optional[str]): The host for the channel to use.
289
+ credentials (Optional[~.Credentials]): The
290
+ authorization credentials to attach to requests. These
291
+ credentials identify this application to the service. If
292
+ none are specified, the client will attempt to ascertain
293
+ the credentials from the environment.
294
+ credentials_file (Optional[str]): A file with credentials that can
295
+ be loaded with :func:`google.auth.load_credentials_from_file`.
296
+ This argument is mutually exclusive with credentials.
297
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
298
+ service. These are only used when credentials are not specified and
299
+ are passed to :func:`google.auth.default`.
300
+ quota_project_id (Optional[str]): An optional project to use for billing
301
+ and quota.
302
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
303
+ channel creation.
304
+ Returns:
305
+ grpc.Channel: A gRPC channel object.
306
+
307
+ Raises:
308
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
309
+ and ``credentials_file`` are passed.
310
+ """
311
+
312
+ return grpc_helpers.create_channel(
313
+ host,
314
+ credentials=credentials,
315
+ credentials_file=credentials_file,
316
+ quota_project_id=quota_project_id,
317
+ default_scopes=cls.AUTH_SCOPES,
318
+ scopes=scopes,
319
+ default_host=cls.DEFAULT_HOST,
320
+ **kwargs,
321
+ )
322
+
323
+ @property
324
+ def grpc_channel(self) -> grpc.Channel:
325
+ """Return the channel designed to connect to this service."""
326
+ return self._grpc_channel
327
+
328
+ @property
329
+ def create_permission(
330
+ self,
331
+ ) -> Callable[
332
+ [permission_service.CreatePermissionRequest], gag_permission.Permission
333
+ ]:
334
+ r"""Return a callable for the create permission method over gRPC.
335
+
336
+ Create a permission to a specific resource.
337
+
338
+ Returns:
339
+ Callable[[~.CreatePermissionRequest],
340
+ ~.Permission]:
341
+ A function that, when called, will call the underlying RPC
342
+ on the server.
343
+ """
344
+ # Generate a "stub function" on-the-fly which will actually make
345
+ # the request.
346
+ # gRPC handles serialization and deserialization, so we just need
347
+ # to pass in the functions for each.
348
+ if "create_permission" not in self._stubs:
349
+ self._stubs["create_permission"] = self._logged_channel.unary_unary(
350
+ "/google.ai.generativelanguage.v1beta3.PermissionService/CreatePermission",
351
+ request_serializer=permission_service.CreatePermissionRequest.serialize,
352
+ response_deserializer=gag_permission.Permission.deserialize,
353
+ )
354
+ return self._stubs["create_permission"]
355
+
356
+ @property
357
+ def get_permission(
358
+ self,
359
+ ) -> Callable[[permission_service.GetPermissionRequest], permission.Permission]:
360
+ r"""Return a callable for the get permission method over gRPC.
361
+
362
+ Gets information about a specific Permission.
363
+
364
+ Returns:
365
+ Callable[[~.GetPermissionRequest],
366
+ ~.Permission]:
367
+ A function that, when called, will call the underlying RPC
368
+ on the server.
369
+ """
370
+ # Generate a "stub function" on-the-fly which will actually make
371
+ # the request.
372
+ # gRPC handles serialization and deserialization, so we just need
373
+ # to pass in the functions for each.
374
+ if "get_permission" not in self._stubs:
375
+ self._stubs["get_permission"] = self._logged_channel.unary_unary(
376
+ "/google.ai.generativelanguage.v1beta3.PermissionService/GetPermission",
377
+ request_serializer=permission_service.GetPermissionRequest.serialize,
378
+ response_deserializer=permission.Permission.deserialize,
379
+ )
380
+ return self._stubs["get_permission"]
381
+
382
+ @property
383
+ def list_permissions(
384
+ self,
385
+ ) -> Callable[
386
+ [permission_service.ListPermissionsRequest],
387
+ permission_service.ListPermissionsResponse,
388
+ ]:
389
+ r"""Return a callable for the list permissions method over gRPC.
390
+
391
+ Lists permissions for the specific resource.
392
+
393
+ Returns:
394
+ Callable[[~.ListPermissionsRequest],
395
+ ~.ListPermissionsResponse]:
396
+ A function that, when called, will call the underlying RPC
397
+ on the server.
398
+ """
399
+ # Generate a "stub function" on-the-fly which will actually make
400
+ # the request.
401
+ # gRPC handles serialization and deserialization, so we just need
402
+ # to pass in the functions for each.
403
+ if "list_permissions" not in self._stubs:
404
+ self._stubs["list_permissions"] = self._logged_channel.unary_unary(
405
+ "/google.ai.generativelanguage.v1beta3.PermissionService/ListPermissions",
406
+ request_serializer=permission_service.ListPermissionsRequest.serialize,
407
+ response_deserializer=permission_service.ListPermissionsResponse.deserialize,
408
+ )
409
+ return self._stubs["list_permissions"]
410
+
411
+ @property
412
+ def update_permission(
413
+ self,
414
+ ) -> Callable[
415
+ [permission_service.UpdatePermissionRequest], gag_permission.Permission
416
+ ]:
417
+ r"""Return a callable for the update permission method over gRPC.
418
+
419
+ Updates the permission.
420
+
421
+ Returns:
422
+ Callable[[~.UpdatePermissionRequest],
423
+ ~.Permission]:
424
+ A function that, when called, will call the underlying RPC
425
+ on the server.
426
+ """
427
+ # Generate a "stub function" on-the-fly which will actually make
428
+ # the request.
429
+ # gRPC handles serialization and deserialization, so we just need
430
+ # to pass in the functions for each.
431
+ if "update_permission" not in self._stubs:
432
+ self._stubs["update_permission"] = self._logged_channel.unary_unary(
433
+ "/google.ai.generativelanguage.v1beta3.PermissionService/UpdatePermission",
434
+ request_serializer=permission_service.UpdatePermissionRequest.serialize,
435
+ response_deserializer=gag_permission.Permission.deserialize,
436
+ )
437
+ return self._stubs["update_permission"]
438
+
439
+ @property
440
+ def delete_permission(
441
+ self,
442
+ ) -> Callable[[permission_service.DeletePermissionRequest], empty_pb2.Empty]:
443
+ r"""Return a callable for the delete permission method over gRPC.
444
+
445
+ Deletes the permission.
446
+
447
+ Returns:
448
+ Callable[[~.DeletePermissionRequest],
449
+ ~.Empty]:
450
+ A function that, when called, will call the underlying RPC
451
+ on the server.
452
+ """
453
+ # Generate a "stub function" on-the-fly which will actually make
454
+ # the request.
455
+ # gRPC handles serialization and deserialization, so we just need
456
+ # to pass in the functions for each.
457
+ if "delete_permission" not in self._stubs:
458
+ self._stubs["delete_permission"] = self._logged_channel.unary_unary(
459
+ "/google.ai.generativelanguage.v1beta3.PermissionService/DeletePermission",
460
+ request_serializer=permission_service.DeletePermissionRequest.serialize,
461
+ response_deserializer=empty_pb2.Empty.FromString,
462
+ )
463
+ return self._stubs["delete_permission"]
464
+
465
+ @property
466
+ def transfer_ownership(
467
+ self,
468
+ ) -> Callable[
469
+ [permission_service.TransferOwnershipRequest],
470
+ permission_service.TransferOwnershipResponse,
471
+ ]:
472
+ r"""Return a callable for the transfer ownership method over gRPC.
473
+
474
+ Transfers ownership of the tuned model.
475
+ This is the only way to change ownership of the tuned
476
+ model. The current owner will be downgraded to writer
477
+ role.
478
+
479
+ Returns:
480
+ Callable[[~.TransferOwnershipRequest],
481
+ ~.TransferOwnershipResponse]:
482
+ A function that, when called, will call the underlying RPC
483
+ on the server.
484
+ """
485
+ # Generate a "stub function" on-the-fly which will actually make
486
+ # the request.
487
+ # gRPC handles serialization and deserialization, so we just need
488
+ # to pass in the functions for each.
489
+ if "transfer_ownership" not in self._stubs:
490
+ self._stubs["transfer_ownership"] = self._logged_channel.unary_unary(
491
+ "/google.ai.generativelanguage.v1beta3.PermissionService/TransferOwnership",
492
+ request_serializer=permission_service.TransferOwnershipRequest.serialize,
493
+ response_deserializer=permission_service.TransferOwnershipResponse.deserialize,
494
+ )
495
+ return self._stubs["transfer_ownership"]
496
+
497
+ def close(self):
498
+ self._logged_channel.close()
499
+
500
+ @property
501
+ def kind(self) -> str:
502
+ return "grpc"
503
+
504
+
505
+ __all__ = ("PermissionServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,558 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf import empty_pb2 # type: ignore
30
+ from google.protobuf.json_format import MessageToJson
31
+ import google.protobuf.message
32
+ import grpc # type: ignore
33
+ from grpc.experimental import aio # type: ignore
34
+ import proto # type: ignore
35
+
36
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
37
+ from google.ai.generativelanguage_v1beta3.types import permission
38
+ from google.ai.generativelanguage_v1beta3.types import permission_service
39
+
40
+ from .base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
41
+ from .grpc import PermissionServiceGrpcTransport
42
+
43
+ try:
44
+ from google.api_core import client_logging # type: ignore
45
+
46
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
47
+ except ImportError: # pragma: NO COVER
48
+ CLIENT_LOGGING_SUPPORTED = False
49
+
50
+ _LOGGER = std_logging.getLogger(__name__)
51
+
52
+
53
+ class _LoggingClientAIOInterceptor(
54
+ grpc.aio.UnaryUnaryClientInterceptor
55
+ ): # pragma: NO COVER
56
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
57
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
58
+ std_logging.DEBUG
59
+ )
60
+ if logging_enabled: # pragma: NO COVER
61
+ request_metadata = client_call_details.metadata
62
+ if isinstance(request, proto.Message):
63
+ request_payload = type(request).to_json(request)
64
+ elif isinstance(request, google.protobuf.message.Message):
65
+ request_payload = MessageToJson(request)
66
+ else:
67
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
68
+
69
+ request_metadata = {
70
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
71
+ for key, value in request_metadata
72
+ }
73
+ grpc_request = {
74
+ "payload": request_payload,
75
+ "requestMethod": "grpc",
76
+ "metadata": dict(request_metadata),
77
+ }
78
+ _LOGGER.debug(
79
+ f"Sending request for {client_call_details.method}",
80
+ extra={
81
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
82
+ "rpcName": str(client_call_details.method),
83
+ "request": grpc_request,
84
+ "metadata": grpc_request["metadata"],
85
+ },
86
+ )
87
+ response = await continuation(client_call_details, request)
88
+ if logging_enabled: # pragma: NO COVER
89
+ response_metadata = await response.trailing_metadata()
90
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
91
+ metadata = (
92
+ dict([(k, str(v)) for k, v in response_metadata])
93
+ if response_metadata
94
+ else None
95
+ )
96
+ result = await response
97
+ if isinstance(result, proto.Message):
98
+ response_payload = type(result).to_json(result)
99
+ elif isinstance(result, google.protobuf.message.Message):
100
+ response_payload = MessageToJson(result)
101
+ else:
102
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
103
+ grpc_response = {
104
+ "payload": response_payload,
105
+ "metadata": metadata,
106
+ "status": "OK",
107
+ }
108
+ _LOGGER.debug(
109
+ f"Received response to rpc {client_call_details.method}.",
110
+ extra={
111
+ "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
112
+ "rpcName": str(client_call_details.method),
113
+ "response": grpc_response,
114
+ "metadata": grpc_response["metadata"],
115
+ },
116
+ )
117
+ return response
118
+
119
+
120
+ class PermissionServiceGrpcAsyncIOTransport(PermissionServiceTransport):
121
+ """gRPC AsyncIO backend transport for PermissionService.
122
+
123
+ Provides methods for managing permissions to PaLM API
124
+ resources.
125
+
126
+ This class defines the same methods as the primary client, so the
127
+ primary client can load the underlying transport implementation
128
+ and call it.
129
+
130
+ It sends protocol buffers over the wire using gRPC (which is built on
131
+ top of HTTP/2); the ``grpcio`` package must be installed.
132
+ """
133
+
134
+ _grpc_channel: aio.Channel
135
+ _stubs: Dict[str, Callable] = {}
136
+
137
+ @classmethod
138
+ def create_channel(
139
+ cls,
140
+ host: str = "generativelanguage.googleapis.com",
141
+ credentials: Optional[ga_credentials.Credentials] = None,
142
+ credentials_file: Optional[str] = None,
143
+ scopes: Optional[Sequence[str]] = None,
144
+ quota_project_id: Optional[str] = None,
145
+ **kwargs,
146
+ ) -> aio.Channel:
147
+ """Create and return a gRPC AsyncIO channel object.
148
+ Args:
149
+ host (Optional[str]): The host for the channel to use.
150
+ credentials (Optional[~.Credentials]): The
151
+ authorization credentials to attach to requests. These
152
+ credentials identify this application to the service. If
153
+ none are specified, the client will attempt to ascertain
154
+ the credentials from the environment.
155
+ credentials_file (Optional[str]): A file with credentials that can
156
+ be loaded with :func:`google.auth.load_credentials_from_file`.
157
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
158
+ service. These are only used when credentials are not specified and
159
+ are passed to :func:`google.auth.default`.
160
+ quota_project_id (Optional[str]): An optional project to use for billing
161
+ and quota.
162
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
163
+ channel creation.
164
+ Returns:
165
+ aio.Channel: A gRPC AsyncIO channel object.
166
+ """
167
+
168
+ return grpc_helpers_async.create_channel(
169
+ host,
170
+ credentials=credentials,
171
+ credentials_file=credentials_file,
172
+ quota_project_id=quota_project_id,
173
+ default_scopes=cls.AUTH_SCOPES,
174
+ scopes=scopes,
175
+ default_host=cls.DEFAULT_HOST,
176
+ **kwargs,
177
+ )
178
+
179
+ def __init__(
180
+ self,
181
+ *,
182
+ host: str = "generativelanguage.googleapis.com",
183
+ credentials: Optional[ga_credentials.Credentials] = None,
184
+ credentials_file: Optional[str] = None,
185
+ scopes: Optional[Sequence[str]] = None,
186
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
187
+ api_mtls_endpoint: Optional[str] = None,
188
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
189
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
190
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
191
+ quota_project_id: Optional[str] = None,
192
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
193
+ always_use_jwt_access: Optional[bool] = False,
194
+ api_audience: Optional[str] = None,
195
+ ) -> None:
196
+ """Instantiate the transport.
197
+
198
+ Args:
199
+ host (Optional[str]):
200
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
201
+ credentials (Optional[google.auth.credentials.Credentials]): The
202
+ authorization credentials to attach to requests. These
203
+ credentials identify the application to the service; if none
204
+ are specified, the client will attempt to ascertain the
205
+ credentials from the environment.
206
+ This argument is ignored if a ``channel`` instance is provided.
207
+ credentials_file (Optional[str]): A file with credentials that can
208
+ be loaded with :func:`google.auth.load_credentials_from_file`.
209
+ This argument is ignored if a ``channel`` instance is provided.
210
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
211
+ service. These are only used when credentials are not specified and
212
+ are passed to :func:`google.auth.default`.
213
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
214
+ A ``Channel`` instance through which to make calls, or a Callable
215
+ that constructs and returns one. If set to None, ``self.create_channel``
216
+ is used to create the channel. If a Callable is given, it will be called
217
+ with the same arguments as used in ``self.create_channel``.
218
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
219
+ If provided, it overrides the ``host`` argument and tries to create
220
+ a mutual TLS channel with client SSL credentials from
221
+ ``client_cert_source`` or application default SSL credentials.
222
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
223
+ Deprecated. A callback to provide client SSL certificate bytes and
224
+ private key bytes, both in PEM format. It is ignored if
225
+ ``api_mtls_endpoint`` is None.
226
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
227
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
228
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
229
+ A callback to provide client certificate bytes and private key bytes,
230
+ both in PEM format. It is used to configure a mutual TLS channel. It is
231
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
232
+ quota_project_id (Optional[str]): An optional project to use for billing
233
+ and quota.
234
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
235
+ The client info used to send a user-agent string along with
236
+ API requests. If ``None``, then default info will be used.
237
+ Generally, you only need to set this if you're developing
238
+ your own client library.
239
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
240
+ be used for service account credentials.
241
+
242
+ Raises:
243
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
244
+ creation failed for any reason.
245
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
246
+ and ``credentials_file`` are passed.
247
+ """
248
+ self._grpc_channel = None
249
+ self._ssl_channel_credentials = ssl_channel_credentials
250
+ self._stubs: Dict[str, Callable] = {}
251
+
252
+ if api_mtls_endpoint:
253
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
254
+ if client_cert_source:
255
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
256
+
257
+ if isinstance(channel, aio.Channel):
258
+ # Ignore credentials if a channel was passed.
259
+ credentials = None
260
+ self._ignore_credentials = True
261
+ # If a channel was explicitly provided, set it.
262
+ self._grpc_channel = channel
263
+ self._ssl_channel_credentials = None
264
+ else:
265
+ if api_mtls_endpoint:
266
+ host = api_mtls_endpoint
267
+
268
+ # Create SSL credentials with client_cert_source or application
269
+ # default SSL credentials.
270
+ if client_cert_source:
271
+ cert, key = client_cert_source()
272
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
273
+ certificate_chain=cert, private_key=key
274
+ )
275
+ else:
276
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
277
+
278
+ else:
279
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
280
+ cert, key = client_cert_source_for_mtls()
281
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
282
+ certificate_chain=cert, private_key=key
283
+ )
284
+
285
+ # The base transport sets the host, credentials and scopes
286
+ super().__init__(
287
+ host=host,
288
+ credentials=credentials,
289
+ credentials_file=credentials_file,
290
+ scopes=scopes,
291
+ quota_project_id=quota_project_id,
292
+ client_info=client_info,
293
+ always_use_jwt_access=always_use_jwt_access,
294
+ api_audience=api_audience,
295
+ )
296
+
297
+ if not self._grpc_channel:
298
+ # initialize with the provided callable or the default channel
299
+ channel_init = channel or type(self).create_channel
300
+ self._grpc_channel = channel_init(
301
+ self._host,
302
+ # use the credentials which are saved
303
+ credentials=self._credentials,
304
+ # Set ``credentials_file`` to ``None`` here as
305
+ # the credentials that we saved earlier should be used.
306
+ credentials_file=None,
307
+ scopes=self._scopes,
308
+ ssl_credentials=self._ssl_channel_credentials,
309
+ quota_project_id=quota_project_id,
310
+ options=[
311
+ ("grpc.max_send_message_length", -1),
312
+ ("grpc.max_receive_message_length", -1),
313
+ ],
314
+ )
315
+
316
+ self._interceptor = _LoggingClientAIOInterceptor()
317
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
318
+ self._logged_channel = self._grpc_channel
319
+ self._wrap_with_kind = (
320
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
321
+ )
322
+ # Wrap messages. This must be done after self._logged_channel exists
323
+ self._prep_wrapped_messages(client_info)
324
+
325
+ @property
326
+ def grpc_channel(self) -> aio.Channel:
327
+ """Create the channel designed to connect to this service.
328
+
329
+ This property caches on the instance; repeated calls return
330
+ the same channel.
331
+ """
332
+ # Return the channel from cache.
333
+ return self._grpc_channel
334
+
335
+ @property
336
+ def create_permission(
337
+ self,
338
+ ) -> Callable[
339
+ [permission_service.CreatePermissionRequest],
340
+ Awaitable[gag_permission.Permission],
341
+ ]:
342
+ r"""Return a callable for the create permission method over gRPC.
343
+
344
+ Create a permission to a specific resource.
345
+
346
+ Returns:
347
+ Callable[[~.CreatePermissionRequest],
348
+ Awaitable[~.Permission]]:
349
+ A function that, when called, will call the underlying RPC
350
+ on the server.
351
+ """
352
+ # Generate a "stub function" on-the-fly which will actually make
353
+ # the request.
354
+ # gRPC handles serialization and deserialization, so we just need
355
+ # to pass in the functions for each.
356
+ if "create_permission" not in self._stubs:
357
+ self._stubs["create_permission"] = self._logged_channel.unary_unary(
358
+ "/google.ai.generativelanguage.v1beta3.PermissionService/CreatePermission",
359
+ request_serializer=permission_service.CreatePermissionRequest.serialize,
360
+ response_deserializer=gag_permission.Permission.deserialize,
361
+ )
362
+ return self._stubs["create_permission"]
363
+
364
+ @property
365
+ def get_permission(
366
+ self,
367
+ ) -> Callable[
368
+ [permission_service.GetPermissionRequest], Awaitable[permission.Permission]
369
+ ]:
370
+ r"""Return a callable for the get permission method over gRPC.
371
+
372
+ Gets information about a specific Permission.
373
+
374
+ Returns:
375
+ Callable[[~.GetPermissionRequest],
376
+ Awaitable[~.Permission]]:
377
+ A function that, when called, will call the underlying RPC
378
+ on the server.
379
+ """
380
+ # Generate a "stub function" on-the-fly which will actually make
381
+ # the request.
382
+ # gRPC handles serialization and deserialization, so we just need
383
+ # to pass in the functions for each.
384
+ if "get_permission" not in self._stubs:
385
+ self._stubs["get_permission"] = self._logged_channel.unary_unary(
386
+ "/google.ai.generativelanguage.v1beta3.PermissionService/GetPermission",
387
+ request_serializer=permission_service.GetPermissionRequest.serialize,
388
+ response_deserializer=permission.Permission.deserialize,
389
+ )
390
+ return self._stubs["get_permission"]
391
+
392
+ @property
393
+ def list_permissions(
394
+ self,
395
+ ) -> Callable[
396
+ [permission_service.ListPermissionsRequest],
397
+ Awaitable[permission_service.ListPermissionsResponse],
398
+ ]:
399
+ r"""Return a callable for the list permissions method over gRPC.
400
+
401
+ Lists permissions for the specific resource.
402
+
403
+ Returns:
404
+ Callable[[~.ListPermissionsRequest],
405
+ Awaitable[~.ListPermissionsResponse]]:
406
+ A function that, when called, will call the underlying RPC
407
+ on the server.
408
+ """
409
+ # Generate a "stub function" on-the-fly which will actually make
410
+ # the request.
411
+ # gRPC handles serialization and deserialization, so we just need
412
+ # to pass in the functions for each.
413
+ if "list_permissions" not in self._stubs:
414
+ self._stubs["list_permissions"] = self._logged_channel.unary_unary(
415
+ "/google.ai.generativelanguage.v1beta3.PermissionService/ListPermissions",
416
+ request_serializer=permission_service.ListPermissionsRequest.serialize,
417
+ response_deserializer=permission_service.ListPermissionsResponse.deserialize,
418
+ )
419
+ return self._stubs["list_permissions"]
420
+
421
+ @property
422
+ def update_permission(
423
+ self,
424
+ ) -> Callable[
425
+ [permission_service.UpdatePermissionRequest],
426
+ Awaitable[gag_permission.Permission],
427
+ ]:
428
+ r"""Return a callable for the update permission method over gRPC.
429
+
430
+ Updates the permission.
431
+
432
+ Returns:
433
+ Callable[[~.UpdatePermissionRequest],
434
+ Awaitable[~.Permission]]:
435
+ A function that, when called, will call the underlying RPC
436
+ on the server.
437
+ """
438
+ # Generate a "stub function" on-the-fly which will actually make
439
+ # the request.
440
+ # gRPC handles serialization and deserialization, so we just need
441
+ # to pass in the functions for each.
442
+ if "update_permission" not in self._stubs:
443
+ self._stubs["update_permission"] = self._logged_channel.unary_unary(
444
+ "/google.ai.generativelanguage.v1beta3.PermissionService/UpdatePermission",
445
+ request_serializer=permission_service.UpdatePermissionRequest.serialize,
446
+ response_deserializer=gag_permission.Permission.deserialize,
447
+ )
448
+ return self._stubs["update_permission"]
449
+
450
+ @property
451
+ def delete_permission(
452
+ self,
453
+ ) -> Callable[
454
+ [permission_service.DeletePermissionRequest], Awaitable[empty_pb2.Empty]
455
+ ]:
456
+ r"""Return a callable for the delete permission method over gRPC.
457
+
458
+ Deletes the permission.
459
+
460
+ Returns:
461
+ Callable[[~.DeletePermissionRequest],
462
+ Awaitable[~.Empty]]:
463
+ A function that, when called, will call the underlying RPC
464
+ on the server.
465
+ """
466
+ # Generate a "stub function" on-the-fly which will actually make
467
+ # the request.
468
+ # gRPC handles serialization and deserialization, so we just need
469
+ # to pass in the functions for each.
470
+ if "delete_permission" not in self._stubs:
471
+ self._stubs["delete_permission"] = self._logged_channel.unary_unary(
472
+ "/google.ai.generativelanguage.v1beta3.PermissionService/DeletePermission",
473
+ request_serializer=permission_service.DeletePermissionRequest.serialize,
474
+ response_deserializer=empty_pb2.Empty.FromString,
475
+ )
476
+ return self._stubs["delete_permission"]
477
+
478
+ @property
479
+ def transfer_ownership(
480
+ self,
481
+ ) -> Callable[
482
+ [permission_service.TransferOwnershipRequest],
483
+ Awaitable[permission_service.TransferOwnershipResponse],
484
+ ]:
485
+ r"""Return a callable for the transfer ownership method over gRPC.
486
+
487
+ Transfers ownership of the tuned model.
488
+ This is the only way to change ownership of the tuned
489
+ model. The current owner will be downgraded to writer
490
+ role.
491
+
492
+ Returns:
493
+ Callable[[~.TransferOwnershipRequest],
494
+ Awaitable[~.TransferOwnershipResponse]]:
495
+ A function that, when called, will call the underlying RPC
496
+ on the server.
497
+ """
498
+ # Generate a "stub function" on-the-fly which will actually make
499
+ # the request.
500
+ # gRPC handles serialization and deserialization, so we just need
501
+ # to pass in the functions for each.
502
+ if "transfer_ownership" not in self._stubs:
503
+ self._stubs["transfer_ownership"] = self._logged_channel.unary_unary(
504
+ "/google.ai.generativelanguage.v1beta3.PermissionService/TransferOwnership",
505
+ request_serializer=permission_service.TransferOwnershipRequest.serialize,
506
+ response_deserializer=permission_service.TransferOwnershipResponse.deserialize,
507
+ )
508
+ return self._stubs["transfer_ownership"]
509
+
510
+ def _prep_wrapped_messages(self, client_info):
511
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
512
+ self._wrapped_methods = {
513
+ self.create_permission: self._wrap_method(
514
+ self.create_permission,
515
+ default_timeout=None,
516
+ client_info=client_info,
517
+ ),
518
+ self.get_permission: self._wrap_method(
519
+ self.get_permission,
520
+ default_timeout=None,
521
+ client_info=client_info,
522
+ ),
523
+ self.list_permissions: self._wrap_method(
524
+ self.list_permissions,
525
+ default_timeout=None,
526
+ client_info=client_info,
527
+ ),
528
+ self.update_permission: self._wrap_method(
529
+ self.update_permission,
530
+ default_timeout=None,
531
+ client_info=client_info,
532
+ ),
533
+ self.delete_permission: self._wrap_method(
534
+ self.delete_permission,
535
+ default_timeout=None,
536
+ client_info=client_info,
537
+ ),
538
+ self.transfer_ownership: self._wrap_method(
539
+ self.transfer_ownership,
540
+ default_timeout=None,
541
+ client_info=client_info,
542
+ ),
543
+ }
544
+
545
    def _wrap_method(self, func, *args, **kwargs):
        # Compatibility shim: newer google-api-core versions accept a
        # ``kind`` argument on wrap_method; only pass it when the installed
        # version supports it (detected once in __init__ via inspect).
        if self._wrap_with_kind:  # pragma: NO COVER
            kwargs["kind"] = self.kind
        return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
549
+
550
    def close(self):
        """Close the underlying (logged) gRPC channel; returns the channel's close result."""
        return self._logged_channel.close()
552
+
553
+ @property
554
+ def kind(self) -> str:
555
+ return "grpc_asyncio"
556
+
557
+
558
# Public API of this module: only the async transport class is exported.
__all__ = ("PermissionServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/rest.py ADDED
@@ -0,0 +1,1340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import exceptions as core_exceptions
23
+ from google.api_core import gapic_v1, rest_helpers, rest_streaming
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.longrunning import operations_pb2 # type: ignore
28
+ from google.protobuf import empty_pb2 # type: ignore
29
+ from google.protobuf import json_format
30
+ from requests import __version__ as requests_version
31
+
32
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
33
+ from google.ai.generativelanguage_v1beta3.types import permission
34
+ from google.ai.generativelanguage_v1beta3.types import permission_service
35
+
36
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
37
+ from .rest_base import _BasePermissionServiceRestTransport
38
+
39
# Older google-api-core releases lack ``gapic_v1.method._MethodDefault``;
# fall back to a looser alias so retry parameters still type-check.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore

# ``client_logging`` is only present in newer google-api-core versions;
# the flag gates the optional debug logging of HTTP requests/responses.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = logging.getLogger(__name__)

# Client info advertised in the user-agent: REST transport reports the
# ``requests`` version instead of a gRPC version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=f"requests@{requests_version}",
)
58
+
59
+
60
class PermissionServiceRestInterceptor:
    """Interceptor for PermissionService.

    Interceptors are used to manipulate requests, request metadata, and responses
    in arbitrary ways.
    Example use cases include:
    * Logging
    * Verifying requests according to service or custom semantics
    * Stripping extraneous information from responses

    These use cases and more can be enabled by injecting an
    instance of a custom subclass when constructing the PermissionServiceRestTransport.

    .. code-block:: python
        class MyCustomPermissionServiceInterceptor(PermissionServiceRestInterceptor):
            def pre_create_permission(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_create_permission(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_delete_permission(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def pre_get_permission(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_get_permission(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_list_permissions(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_list_permissions(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_transfer_ownership(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_transfer_ownership(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_update_permission(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_update_permission(self, response):
                logging.log(f"Received response: {response}")
                return response

        transport = PermissionServiceRestTransport(interceptor=MyCustomPermissionServiceInterceptor())
        client = PermissionServiceClient(transport=transport)


    """

    # All hooks below are identity pass-throughs; subclasses override the
    # ones they care about. ``delete_permission`` has no post-hook because
    # that RPC returns google.protobuf.Empty — there is no response body
    # to manipulate.

    def pre_create_permission(
        self,
        request: permission_service.CreatePermissionRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.CreatePermissionRequest,
        Sequence[Tuple[str, Union[str, bytes]]],
    ]:
        """Pre-rpc interceptor for create_permission

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def post_create_permission(
        self, response: gag_permission.Permission
    ) -> gag_permission.Permission:
        """Post-rpc interceptor for create_permission

        Override in a subclass to manipulate the response
        after it is returned by the PermissionService server but before
        it is returned to user code.
        """
        return response

    def pre_delete_permission(
        self,
        request: permission_service.DeletePermissionRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.DeletePermissionRequest,
        Sequence[Tuple[str, Union[str, bytes]]],
    ]:
        """Pre-rpc interceptor for delete_permission

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def pre_get_permission(
        self,
        request: permission_service.GetPermissionRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.GetPermissionRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Pre-rpc interceptor for get_permission

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def post_get_permission(
        self, response: permission.Permission
    ) -> permission.Permission:
        """Post-rpc interceptor for get_permission

        Override in a subclass to manipulate the response
        after it is returned by the PermissionService server but before
        it is returned to user code.
        """
        return response

    def pre_list_permissions(
        self,
        request: permission_service.ListPermissionsRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.ListPermissionsRequest,
        Sequence[Tuple[str, Union[str, bytes]]],
    ]:
        """Pre-rpc interceptor for list_permissions

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def post_list_permissions(
        self, response: permission_service.ListPermissionsResponse
    ) -> permission_service.ListPermissionsResponse:
        """Post-rpc interceptor for list_permissions

        Override in a subclass to manipulate the response
        after it is returned by the PermissionService server but before
        it is returned to user code.
        """
        return response

    def pre_transfer_ownership(
        self,
        request: permission_service.TransferOwnershipRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.TransferOwnershipRequest,
        Sequence[Tuple[str, Union[str, bytes]]],
    ]:
        """Pre-rpc interceptor for transfer_ownership

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def post_transfer_ownership(
        self, response: permission_service.TransferOwnershipResponse
    ) -> permission_service.TransferOwnershipResponse:
        """Post-rpc interceptor for transfer_ownership

        Override in a subclass to manipulate the response
        after it is returned by the PermissionService server but before
        it is returned to user code.
        """
        return response

    def pre_update_permission(
        self,
        request: permission_service.UpdatePermissionRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        permission_service.UpdatePermissionRequest,
        Sequence[Tuple[str, Union[str, bytes]]],
    ]:
        """Pre-rpc interceptor for update_permission

        Override in a subclass to manipulate the request or metadata
        before they are sent to the PermissionService server.
        """
        return request, metadata

    def post_update_permission(
        self, response: gag_permission.Permission
    ) -> gag_permission.Permission:
        """Post-rpc interceptor for update_permission

        Override in a subclass to manipulate the response
        after it is returned by the PermissionService server but before
        it is returned to user code.
        """
        return response
268
+
269
+
270
@dataclasses.dataclass
class PermissionServiceRestStub:
    # Shared state mixed into each per-RPC callable class on the REST
    # transport: the authorized HTTP session, the target host, and the
    # interceptor that provides the pre/post hooks.
    _session: AuthorizedSession
    _host: str
    _interceptor: PermissionServiceRestInterceptor
275
+
276
+
277
+ class PermissionServiceRestTransport(_BasePermissionServiceRestTransport):
278
+ """REST backend synchronous transport for PermissionService.
279
+
280
+ Provides methods for managing permissions to PaLM API
281
+ resources.
282
+
283
+ This class defines the same methods as the primary client, so the
284
+ primary client can load the underlying transport implementation
285
+ and call it.
286
+
287
+ It sends JSON representations of protocol buffers over HTTP/1.1
288
+ """
289
+
290
    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[PermissionServiceRestInterceptor] = None,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.

            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        # NOTE(review): ``credentials_file``, ``scopes`` and ``quota_project_id``
        # are accepted above but not forwarded to the base constructor here —
        # see the TODOs; confirm against the base transport before relying on them.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            url_scheme=url_scheme,
            api_audience=api_audience,
        )
        # All RPCs go through one authorized requests.Session bound to the
        # resolved credentials.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        # Fall back to the no-op interceptor so per-RPC hook calls are
        # unconditional.
        self._interceptor = interceptor or PermissionServiceRestInterceptor()
        self._prep_wrapped_messages(client_info)
356
+
357
+ class _CreatePermission(
358
+ _BasePermissionServiceRestTransport._BaseCreatePermission,
359
+ PermissionServiceRestStub,
360
+ ):
361
+ def __hash__(self):
362
+ return hash("PermissionServiceRestTransport.CreatePermission")
363
+
364
+ @staticmethod
365
+ def _get_response(
366
+ host,
367
+ metadata,
368
+ query_params,
369
+ session,
370
+ timeout,
371
+ transcoded_request,
372
+ body=None,
373
+ ):
374
+ uri = transcoded_request["uri"]
375
+ method = transcoded_request["method"]
376
+ headers = dict(metadata)
377
+ headers["Content-Type"] = "application/json"
378
+ response = getattr(session, method)(
379
+ "{host}{uri}".format(host=host, uri=uri),
380
+ timeout=timeout,
381
+ headers=headers,
382
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
383
+ data=body,
384
+ )
385
+ return response
386
+
387
def __call__(
    self,
    request: permission_service.CreatePermissionRequest,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> gag_permission.Permission:
    r"""Call the create permission method over HTTP.

    Args:
        request (~.permission_service.CreatePermissionRequest):
            The request object. Request to create a ``Permission``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
            sent along with the request as metadata. Normally, each value must be of type `str`,
            but for metadata keys ending with the suffix `-bin`, the corresponding values must
            be of type `bytes`.

    Returns:
        ~.gag_permission.Permission:
            Permission resource granting a user, group, or everyone
            access to a PaLM API resource (e.g. a tuned model or
            file). Roles are concentric — reader can use the
            resource, writer can additionally edit and share, owner
            can additionally delete.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the server
            responds with an HTTP status >= 400.
    """

    http_options = (
        _BasePermissionServiceRestTransport._BaseCreatePermission._get_http_options()
    )

    request, metadata = self._interceptor.pre_create_permission(
        request, metadata
    )
    transcoded_request = _BasePermissionServiceRestTransport._BaseCreatePermission._get_transcoded_request(
        http_options, request
    )

    body = _BasePermissionServiceRestTransport._BaseCreatePermission._get_request_body_json(
        transcoded_request
    )

    # Jsonify the query params
    query_params = _BasePermissionServiceRestTransport._BaseCreatePermission._get_query_params_json(
        transcoded_request
    )

    if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
        logging.DEBUG
    ):  # pragma: NO COVER
        request_url = "{host}{uri}".format(
            host=self._host, uri=transcoded_request["uri"]
        )
        method = transcoded_request["method"]
        try:
            request_payload = type(request).to_json(request)
        # Narrowed from a bare ``except:`` so debug logging cannot swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            request_payload = None
        http_request = {
            "payload": request_payload,
            "requestMethod": method,
            "requestUrl": request_url,
            "headers": dict(metadata),
        }
        _LOGGER.debug(
            "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.CreatePermission",
            extra={
                "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                "rpcName": "CreatePermission",
                "httpRequest": http_request,
                "metadata": http_request["headers"],
            },
        )

    # Send the request
    response = PermissionServiceRestTransport._CreatePermission._get_response(
        self._host,
        metadata,
        query_params,
        self._session,
        timeout,
        transcoded_request,
        body,
    )

    # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
    # subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize the HTTP body into the proto-plus response message.
    resp = gag_permission.Permission()
    pb_resp = gag_permission.Permission.pb(resp)

    json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

    resp = self._interceptor.post_create_permission(resp)
    if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
        logging.DEBUG
    ):  # pragma: NO COVER
        try:
            # Serialize the parsed proto ``resp`` — not the raw HTTP
            # ``response`` object, which ``to_json`` cannot handle and
            # which previously made this payload always log as None.
            response_payload = gag_permission.Permission.to_json(resp)
        except Exception:
            response_payload = None
        http_response = {
            "payload": response_payload,
            "headers": dict(response.headers),
            "status": response.status_code,
        }
        _LOGGER.debug(
            "Received response for google.ai.generativelanguage_v1beta3.PermissionServiceClient.create_permission",
            extra={
                "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                "rpcName": "CreatePermission",
                "metadata": http_response["headers"],
                "httpResponse": http_response,
            },
        )
    return resp
528
+
529
class _DeletePermission(
    _BasePermissionServiceRestTransport._BaseDeletePermission,
    PermissionServiceRestStub,
):
    """REST stub for the DeletePermission RPC; the call returns no payload."""

    def __hash__(self):
        return hash("PermissionServiceRestTransport.DeletePermission")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call; DELETE carries no request body.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: permission_service.DeletePermissionRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the delete permission method over HTTP.

        Args:
            request (~.permission_service.DeletePermissionRequest):
                The request object. Request to delete the ``Permission``.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the server
                responds with an HTTP status >= 400.
        """

        http_options = (
            _BasePermissionServiceRestTransport._BaseDeletePermission._get_http_options()
        )

        request, metadata = self._interceptor.pre_delete_permission(
            request, metadata
        )
        transcoded_request = _BasePermissionServiceRestTransport._BaseDeletePermission._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BasePermissionServiceRestTransport._BaseDeletePermission._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Use the proto-plus serializer, matching the sibling stubs;
                # json_format.MessageToJson expects a raw protobuf message
                # and would always fail here for a proto-plus request.
                request_payload = type(request).to_json(request)
            # Narrowed from a bare ``except:`` so debug logging cannot
            # swallow KeyboardInterrupt/SystemExit.
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.DeletePermission",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "DeletePermission",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = PermissionServiceRestTransport._DeletePermission._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
637
+
638
class _GetPermission(
    _BasePermissionServiceRestTransport._BaseGetPermission,
    PermissionServiceRestStub,
):
    """REST stub for the GetPermission RPC."""

    def __hash__(self):
        return hash("PermissionServiceRestTransport.GetPermission")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call; GET carries no request body.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: permission_service.GetPermissionRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> permission.Permission:
        r"""Call the get permission method over HTTP.

        Args:
            request (~.permission_service.GetPermissionRequest):
                The request object. Request for getting information about a specific
                ``Permission``.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.permission.Permission:
                Permission resource granting a user, group, or
                everyone access to a PaLM API resource (e.g. a tuned
                model or file). Roles are concentric — reader can use
                the resource, writer can additionally edit and share,
                owner can additionally delete.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the server
                responds with an HTTP status >= 400.
        """

        http_options = (
            _BasePermissionServiceRestTransport._BaseGetPermission._get_http_options()
        )

        request, metadata = self._interceptor.pre_get_permission(request, metadata)
        transcoded_request = _BasePermissionServiceRestTransport._BaseGetPermission._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BasePermissionServiceRestTransport._BaseGetPermission._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Narrowed from a bare ``except:`` so debug logging cannot
            # swallow KeyboardInterrupt/SystemExit.
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.GetPermission",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "GetPermission",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = PermissionServiceRestTransport._GetPermission._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the proto-plus response message.
        resp = permission.Permission()
        pb_resp = permission.Permission.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_get_permission(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto ``resp`` — not the raw HTTP
                # ``response`` object, which ``to_json`` cannot handle and
                # which previously made this payload always log as None.
                response_payload = permission.Permission.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.PermissionServiceClient.get_permission",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "GetPermission",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
802
+
803
class _ListPermissions(
    _BasePermissionServiceRestTransport._BaseListPermissions,
    PermissionServiceRestStub,
):
    """REST stub for the ListPermissions RPC (paginated)."""

    def __hash__(self):
        return hash("PermissionServiceRestTransport.ListPermissions")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call; list requests carry no body.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: permission_service.ListPermissionsRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> permission_service.ListPermissionsResponse:
        r"""Call the list permissions method over HTTP.

        Args:
            request (~.permission_service.ListPermissionsRequest):
                The request object. Request for listing permissions.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.permission_service.ListPermissionsResponse:
                Response from ``ListPermissions`` containing a paginated
                list of permissions.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the server
                responds with an HTTP status >= 400.
        """

        http_options = (
            _BasePermissionServiceRestTransport._BaseListPermissions._get_http_options()
        )

        request, metadata = self._interceptor.pre_list_permissions(
            request, metadata
        )
        transcoded_request = _BasePermissionServiceRestTransport._BaseListPermissions._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BasePermissionServiceRestTransport._BaseListPermissions._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Narrowed from a bare ``except:`` so debug logging cannot
            # swallow KeyboardInterrupt/SystemExit.
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.ListPermissions",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "ListPermissions",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = PermissionServiceRestTransport._ListPermissions._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the proto-plus response message.
        resp = permission_service.ListPermissionsResponse()
        pb_resp = permission_service.ListPermissionsResponse.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_list_permissions(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto ``resp`` — not the raw HTTP
                # ``response`` object, which ``to_json`` cannot handle and
                # which previously made this payload always log as None.
                response_payload = (
                    permission_service.ListPermissionsResponse.to_json(resp)
                )
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.PermissionServiceClient.list_permissions",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "ListPermissions",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
949
+
950
class _TransferOwnership(
    _BasePermissionServiceRestTransport._BaseTransferOwnership,
    PermissionServiceRestStub,
):
    """REST stub for the TransferOwnership RPC."""

    def __hash__(self):
        return hash("PermissionServiceRestTransport.TransferOwnership")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call with the JSON request body.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: permission_service.TransferOwnershipRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> permission_service.TransferOwnershipResponse:
        r"""Call the transfer ownership method over HTTP.

        Args:
            request (~.permission_service.TransferOwnershipRequest):
                The request object. Request to transfer the ownership of
                the tuned model.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.permission_service.TransferOwnershipResponse:
                Response from ``TransferOwnership``.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the server
                responds with an HTTP status >= 400.
        """

        http_options = (
            _BasePermissionServiceRestTransport._BaseTransferOwnership._get_http_options()
        )

        request, metadata = self._interceptor.pre_transfer_ownership(
            request, metadata
        )
        transcoded_request = _BasePermissionServiceRestTransport._BaseTransferOwnership._get_transcoded_request(
            http_options, request
        )

        body = _BasePermissionServiceRestTransport._BaseTransferOwnership._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BasePermissionServiceRestTransport._BaseTransferOwnership._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Narrowed from a bare ``except:`` so debug logging cannot
            # swallow KeyboardInterrupt/SystemExit.
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.TransferOwnership",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "TransferOwnership",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = PermissionServiceRestTransport._TransferOwnership._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the proto-plus response message.
        resp = permission_service.TransferOwnershipResponse()
        pb_resp = permission_service.TransferOwnershipResponse.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_transfer_ownership(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto ``resp`` — not the raw HTTP
                # ``response`` object, which ``to_json`` cannot handle and
                # which previously made this payload always log as None.
                response_payload = (
                    permission_service.TransferOwnershipResponse.to_json(resp)
                )
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.PermissionServiceClient.transfer_ownership",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "TransferOwnership",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
1101
+
1102
class _UpdatePermission(
    _BasePermissionServiceRestTransport._BaseUpdatePermission,
    PermissionServiceRestStub,
):
    """REST stub for the UpdatePermission RPC."""

    def __hash__(self):
        return hash("PermissionServiceRestTransport.UpdatePermission")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Issue the transcoded HTTP call with the JSON request body.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: permission_service.UpdatePermissionRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> gag_permission.Permission:
        r"""Call the update permission method over HTTP.

        Args:
            request (~.permission_service.UpdatePermissionRequest):
                The request object. Request to update the ``Permission``.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.gag_permission.Permission:
                Permission resource granting a user, group, or
                everyone access to a PaLM API resource (e.g. a tuned
                model or file). Roles are concentric — reader can use
                the resource, writer can additionally edit and share,
                owner can additionally delete.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the server
                responds with an HTTP status >= 400.
        """

        http_options = (
            _BasePermissionServiceRestTransport._BaseUpdatePermission._get_http_options()
        )

        request, metadata = self._interceptor.pre_update_permission(
            request, metadata
        )
        transcoded_request = _BasePermissionServiceRestTransport._BaseUpdatePermission._get_transcoded_request(
            http_options, request
        )

        body = _BasePermissionServiceRestTransport._BaseUpdatePermission._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BasePermissionServiceRestTransport._BaseUpdatePermission._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Narrowed from a bare ``except:`` so debug logging cannot
            # swallow KeyboardInterrupt/SystemExit.
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.ai.generativelanguage_v1beta3.PermissionServiceClient.UpdatePermission",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "UpdatePermission",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = PermissionServiceRestTransport._UpdatePermission._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Deserialize the HTTP body into the proto-plus response message.
        resp = gag_permission.Permission()
        pb_resp = gag_permission.Permission.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_update_permission(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Serialize the parsed proto ``resp`` — not the raw HTTP
                # ``response`` object, which ``to_json`` cannot handle and
                # which previously made this payload always log as None.
                response_payload = gag_permission.Permission.to_json(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.ai.generativelanguage_v1beta3.PermissionServiceClient.update_permission",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.PermissionService",
                    "rpcName": "UpdatePermission",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
1273
+
1274
@property
def create_permission(
    self,
) -> Callable[
    [permission_service.CreatePermissionRequest], gag_permission.Permission
]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._CreatePermission(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1283
+
1284
@property
def delete_permission(
    self,
) -> Callable[[permission_service.DeletePermissionRequest], empty_pb2.Empty]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._DeletePermission(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1291
+
1292
@property
def get_permission(
    self,
) -> Callable[[permission_service.GetPermissionRequest], permission.Permission]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._GetPermission(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1299
+
1300
@property
def list_permissions(
    self,
) -> Callable[
    [permission_service.ListPermissionsRequest],
    permission_service.ListPermissionsResponse,
]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._ListPermissions(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1310
+
1311
@property
def transfer_ownership(
    self,
) -> Callable[
    [permission_service.TransferOwnershipRequest],
    permission_service.TransferOwnershipResponse,
]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._TransferOwnership(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1321
+
1322
@property
def update_permission(
    self,
) -> Callable[
    [permission_service.UpdatePermissionRequest], gag_permission.Permission
]:
    # Each stub instance is itself the callable for this RPC; mypy cannot
    # see that it satisfies the declared Callable, hence the ignore.
    stub = self._UpdatePermission(self._session, self._host, self._interceptor)
    return stub  # type: ignore
1331
+
1332
@property
def kind(self) -> str:
    """Identify this transport implementation as REST-based."""
    return "rest"
1335
+
1336
def close(self):
    """Release the underlying HTTP session held by this transport."""
    self._session.close()
1338
+
1339
+
1340
+ __all__ = ("PermissionServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/transports/rest_base.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.longrunning import operations_pb2 # type: ignore
22
+ from google.protobuf import empty_pb2 # type: ignore
23
+ from google.protobuf import json_format
24
+
25
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
26
+ from google.ai.generativelanguage_v1beta3.types import permission
27
+ from google.ai.generativelanguage_v1beta3.types import permission_service
28
+
29
+ from .base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
30
+
31
+
32
+ class _BasePermissionServiceRestTransport(PermissionServiceTransport):
33
+ """Base REST backend transport for PermissionService.
34
+
35
+ Note: This class is not meant to be used directly. Use its sync and
36
+ async sub-classes instead.
37
+
38
+ This class defines the same methods as the primary client, so the
39
+ primary client can load the underlying transport implementation
40
+ and call it.
41
+
42
+ It sends JSON representations of protocol buffers over HTTP/1.1
43
+ """
44
+
45
+ def __init__(
46
+ self,
47
+ *,
48
+ host: str = "generativelanguage.googleapis.com",
49
+ credentials: Optional[Any] = None,
50
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
51
+ always_use_jwt_access: Optional[bool] = False,
52
+ url_scheme: str = "https",
53
+ api_audience: Optional[str] = None,
54
+ ) -> None:
55
+ """Instantiate the transport.
56
+ Args:
57
+ host (Optional[str]):
58
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
59
+ credentials (Optional[Any]): The
60
+ authorization credentials to attach to requests. These
61
+ credentials identify the application to the service; if none
62
+ are specified, the client will attempt to ascertain the
63
+ credentials from the environment.
64
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
65
+ The client info used to send a user-agent string along with
66
+ API requests. If ``None``, then default info will be used.
67
+ Generally, you only need to set this if you are developing
68
+ your own client library.
69
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
70
+ be used for service account credentials.
71
+ url_scheme: the protocol scheme for the API endpoint. Normally
72
+ "https", but for testing or local servers,
73
+ "http" can be specified.
74
+ """
75
+ # Run the base constructor
76
+ maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
77
+ if maybe_url_match is None:
78
+ raise ValueError(
79
+ f"Unexpected hostname structure: {host}"
80
+ ) # pragma: NO COVER
81
+
82
+ url_match_items = maybe_url_match.groupdict()
83
+
84
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
85
+
86
+ super().__init__(
87
+ host=host,
88
+ credentials=credentials,
89
+ client_info=client_info,
90
+ always_use_jwt_access=always_use_jwt_access,
91
+ api_audience=api_audience,
92
+ )
93
+
94
+ class _BaseCreatePermission:
95
+ def __hash__(self): # pragma: NO COVER
96
+ return NotImplementedError("__hash__ must be implemented.")
97
+
98
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
99
+
100
+ @classmethod
101
+ def _get_unset_required_fields(cls, message_dict):
102
+ return {
103
+ k: v
104
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
105
+ if k not in message_dict
106
+ }
107
+
108
+ @staticmethod
109
+ def _get_http_options():
110
+ http_options: List[Dict[str, str]] = [
111
+ {
112
+ "method": "post",
113
+ "uri": "/v1beta3/{parent=tunedModels/*}/permissions",
114
+ "body": "permission",
115
+ },
116
+ ]
117
+ return http_options
118
+
119
+ @staticmethod
120
+ def _get_transcoded_request(http_options, request):
121
+ pb_request = permission_service.CreatePermissionRequest.pb(request)
122
+ transcoded_request = path_template.transcode(http_options, pb_request)
123
+ return transcoded_request
124
+
125
+ @staticmethod
126
+ def _get_request_body_json(transcoded_request):
127
+ # Jsonify the request body
128
+
129
+ body = json_format.MessageToJson(
130
+ transcoded_request["body"], use_integers_for_enums=True
131
+ )
132
+ return body
133
+
134
+ @staticmethod
135
+ def _get_query_params_json(transcoded_request):
136
+ query_params = json.loads(
137
+ json_format.MessageToJson(
138
+ transcoded_request["query_params"],
139
+ use_integers_for_enums=True,
140
+ )
141
+ )
142
+ query_params.update(
143
+ _BasePermissionServiceRestTransport._BaseCreatePermission._get_unset_required_fields(
144
+ query_params
145
+ )
146
+ )
147
+
148
+ query_params["$alt"] = "json;enum-encoding=int"
149
+ return query_params
150
+
151
+ class _BaseDeletePermission:
152
+ def __hash__(self): # pragma: NO COVER
153
+ return NotImplementedError("__hash__ must be implemented.")
154
+
155
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
156
+
157
+ @classmethod
158
+ def _get_unset_required_fields(cls, message_dict):
159
+ return {
160
+ k: v
161
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
162
+ if k not in message_dict
163
+ }
164
+
165
+ @staticmethod
166
+ def _get_http_options():
167
+ http_options: List[Dict[str, str]] = [
168
+ {
169
+ "method": "delete",
170
+ "uri": "/v1beta3/{name=tunedModels/*/permissions/*}",
171
+ },
172
+ ]
173
+ return http_options
174
+
175
+ @staticmethod
176
+ def _get_transcoded_request(http_options, request):
177
+ pb_request = permission_service.DeletePermissionRequest.pb(request)
178
+ transcoded_request = path_template.transcode(http_options, pb_request)
179
+ return transcoded_request
180
+
181
+ @staticmethod
182
+ def _get_query_params_json(transcoded_request):
183
+ query_params = json.loads(
184
+ json_format.MessageToJson(
185
+ transcoded_request["query_params"],
186
+ use_integers_for_enums=True,
187
+ )
188
+ )
189
+ query_params.update(
190
+ _BasePermissionServiceRestTransport._BaseDeletePermission._get_unset_required_fields(
191
+ query_params
192
+ )
193
+ )
194
+
195
+ query_params["$alt"] = "json;enum-encoding=int"
196
+ return query_params
197
+
198
+ class _BaseGetPermission:
199
+ def __hash__(self): # pragma: NO COVER
200
+ return NotImplementedError("__hash__ must be implemented.")
201
+
202
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
203
+
204
+ @classmethod
205
+ def _get_unset_required_fields(cls, message_dict):
206
+ return {
207
+ k: v
208
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
209
+ if k not in message_dict
210
+ }
211
+
212
+ @staticmethod
213
+ def _get_http_options():
214
+ http_options: List[Dict[str, str]] = [
215
+ {
216
+ "method": "get",
217
+ "uri": "/v1beta3/{name=tunedModels/*/permissions/*}",
218
+ },
219
+ ]
220
+ return http_options
221
+
222
+ @staticmethod
223
+ def _get_transcoded_request(http_options, request):
224
+ pb_request = permission_service.GetPermissionRequest.pb(request)
225
+ transcoded_request = path_template.transcode(http_options, pb_request)
226
+ return transcoded_request
227
+
228
+ @staticmethod
229
+ def _get_query_params_json(transcoded_request):
230
+ query_params = json.loads(
231
+ json_format.MessageToJson(
232
+ transcoded_request["query_params"],
233
+ use_integers_for_enums=True,
234
+ )
235
+ )
236
+ query_params.update(
237
+ _BasePermissionServiceRestTransport._BaseGetPermission._get_unset_required_fields(
238
+ query_params
239
+ )
240
+ )
241
+
242
+ query_params["$alt"] = "json;enum-encoding=int"
243
+ return query_params
244
+
245
+ class _BaseListPermissions:
246
+ def __hash__(self): # pragma: NO COVER
247
+ return NotImplementedError("__hash__ must be implemented.")
248
+
249
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
250
+
251
+ @classmethod
252
+ def _get_unset_required_fields(cls, message_dict):
253
+ return {
254
+ k: v
255
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
256
+ if k not in message_dict
257
+ }
258
+
259
+ @staticmethod
260
+ def _get_http_options():
261
+ http_options: List[Dict[str, str]] = [
262
+ {
263
+ "method": "get",
264
+ "uri": "/v1beta3/{parent=tunedModels/*}/permissions",
265
+ },
266
+ ]
267
+ return http_options
268
+
269
+ @staticmethod
270
+ def _get_transcoded_request(http_options, request):
271
+ pb_request = permission_service.ListPermissionsRequest.pb(request)
272
+ transcoded_request = path_template.transcode(http_options, pb_request)
273
+ return transcoded_request
274
+
275
+ @staticmethod
276
+ def _get_query_params_json(transcoded_request):
277
+ query_params = json.loads(
278
+ json_format.MessageToJson(
279
+ transcoded_request["query_params"],
280
+ use_integers_for_enums=True,
281
+ )
282
+ )
283
+ query_params.update(
284
+ _BasePermissionServiceRestTransport._BaseListPermissions._get_unset_required_fields(
285
+ query_params
286
+ )
287
+ )
288
+
289
+ query_params["$alt"] = "json;enum-encoding=int"
290
+ return query_params
291
+
292
+ class _BaseTransferOwnership:
293
+ def __hash__(self): # pragma: NO COVER
294
+ return NotImplementedError("__hash__ must be implemented.")
295
+
296
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
297
+
298
+ @classmethod
299
+ def _get_unset_required_fields(cls, message_dict):
300
+ return {
301
+ k: v
302
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
303
+ if k not in message_dict
304
+ }
305
+
306
+ @staticmethod
307
+ def _get_http_options():
308
+ http_options: List[Dict[str, str]] = [
309
+ {
310
+ "method": "post",
311
+ "uri": "/v1beta3/{name=tunedModels/*}:transferOwnership",
312
+ "body": "*",
313
+ },
314
+ ]
315
+ return http_options
316
+
317
+ @staticmethod
318
+ def _get_transcoded_request(http_options, request):
319
+ pb_request = permission_service.TransferOwnershipRequest.pb(request)
320
+ transcoded_request = path_template.transcode(http_options, pb_request)
321
+ return transcoded_request
322
+
323
+ @staticmethod
324
+ def _get_request_body_json(transcoded_request):
325
+ # Jsonify the request body
326
+
327
+ body = json_format.MessageToJson(
328
+ transcoded_request["body"], use_integers_for_enums=True
329
+ )
330
+ return body
331
+
332
+ @staticmethod
333
+ def _get_query_params_json(transcoded_request):
334
+ query_params = json.loads(
335
+ json_format.MessageToJson(
336
+ transcoded_request["query_params"],
337
+ use_integers_for_enums=True,
338
+ )
339
+ )
340
+ query_params.update(
341
+ _BasePermissionServiceRestTransport._BaseTransferOwnership._get_unset_required_fields(
342
+ query_params
343
+ )
344
+ )
345
+
346
+ query_params["$alt"] = "json;enum-encoding=int"
347
+ return query_params
348
+
349
+ class _BaseUpdatePermission:
350
+ def __hash__(self): # pragma: NO COVER
351
+ return NotImplementedError("__hash__ must be implemented.")
352
+
353
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
354
+ "updateMask": {},
355
+ }
356
+
357
+ @classmethod
358
+ def _get_unset_required_fields(cls, message_dict):
359
+ return {
360
+ k: v
361
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
362
+ if k not in message_dict
363
+ }
364
+
365
+ @staticmethod
366
+ def _get_http_options():
367
+ http_options: List[Dict[str, str]] = [
368
+ {
369
+ "method": "patch",
370
+ "uri": "/v1beta3/{permission.name=tunedModels/*/permissions/*}",
371
+ "body": "permission",
372
+ },
373
+ ]
374
+ return http_options
375
+
376
+ @staticmethod
377
+ def _get_transcoded_request(http_options, request):
378
+ pb_request = permission_service.UpdatePermissionRequest.pb(request)
379
+ transcoded_request = path_template.transcode(http_options, pb_request)
380
+ return transcoded_request
381
+
382
+ @staticmethod
383
+ def _get_request_body_json(transcoded_request):
384
+ # Jsonify the request body
385
+
386
+ body = json_format.MessageToJson(
387
+ transcoded_request["body"], use_integers_for_enums=True
388
+ )
389
+ return body
390
+
391
+ @staticmethod
392
+ def _get_query_params_json(transcoded_request):
393
+ query_params = json.loads(
394
+ json_format.MessageToJson(
395
+ transcoded_request["query_params"],
396
+ use_integers_for_enums=True,
397
+ )
398
+ )
399
+ query_params.update(
400
+ _BasePermissionServiceRestTransport._BaseUpdatePermission._get_unset_required_fields(
401
+ query_params
402
+ )
403
+ )
404
+
405
+ query_params["$alt"] = "json;enum-encoding=int"
406
+ return query_params
407
+
408
+
409
+ __all__ = ("_BasePermissionServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__init__.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .citation import CitationMetadata, CitationSource
17
+ from .discuss_service import (
18
+ CountMessageTokensRequest,
19
+ CountMessageTokensResponse,
20
+ Example,
21
+ GenerateMessageRequest,
22
+ GenerateMessageResponse,
23
+ Message,
24
+ MessagePrompt,
25
+ )
26
+ from .model import Model
27
+ from .model_service import (
28
+ CreateTunedModelMetadata,
29
+ CreateTunedModelRequest,
30
+ DeleteTunedModelRequest,
31
+ GetModelRequest,
32
+ GetTunedModelRequest,
33
+ ListModelsRequest,
34
+ ListModelsResponse,
35
+ ListTunedModelsRequest,
36
+ ListTunedModelsResponse,
37
+ UpdateTunedModelRequest,
38
+ )
39
+ from .permission import Permission
40
+ from .permission_service import (
41
+ CreatePermissionRequest,
42
+ DeletePermissionRequest,
43
+ GetPermissionRequest,
44
+ ListPermissionsRequest,
45
+ ListPermissionsResponse,
46
+ TransferOwnershipRequest,
47
+ TransferOwnershipResponse,
48
+ UpdatePermissionRequest,
49
+ )
50
+ from .safety import (
51
+ ContentFilter,
52
+ HarmCategory,
53
+ SafetyFeedback,
54
+ SafetyRating,
55
+ SafetySetting,
56
+ )
57
+ from .text_service import (
58
+ BatchEmbedTextRequest,
59
+ BatchEmbedTextResponse,
60
+ CountTextTokensRequest,
61
+ CountTextTokensResponse,
62
+ Embedding,
63
+ EmbedTextRequest,
64
+ EmbedTextResponse,
65
+ GenerateTextRequest,
66
+ GenerateTextResponse,
67
+ TextCompletion,
68
+ TextPrompt,
69
+ )
70
+ from .tuned_model import (
71
+ Dataset,
72
+ Hyperparameters,
73
+ TunedModel,
74
+ TunedModelSource,
75
+ TuningExample,
76
+ TuningExamples,
77
+ TuningSnapshot,
78
+ TuningTask,
79
+ )
80
+
81
+ __all__ = (
82
+ "CitationMetadata",
83
+ "CitationSource",
84
+ "CountMessageTokensRequest",
85
+ "CountMessageTokensResponse",
86
+ "Example",
87
+ "GenerateMessageRequest",
88
+ "GenerateMessageResponse",
89
+ "Message",
90
+ "MessagePrompt",
91
+ "Model",
92
+ "CreateTunedModelMetadata",
93
+ "CreateTunedModelRequest",
94
+ "DeleteTunedModelRequest",
95
+ "GetModelRequest",
96
+ "GetTunedModelRequest",
97
+ "ListModelsRequest",
98
+ "ListModelsResponse",
99
+ "ListTunedModelsRequest",
100
+ "ListTunedModelsResponse",
101
+ "UpdateTunedModelRequest",
102
+ "Permission",
103
+ "CreatePermissionRequest",
104
+ "DeletePermissionRequest",
105
+ "GetPermissionRequest",
106
+ "ListPermissionsRequest",
107
+ "ListPermissionsResponse",
108
+ "TransferOwnershipRequest",
109
+ "TransferOwnershipResponse",
110
+ "UpdatePermissionRequest",
111
+ "ContentFilter",
112
+ "SafetyFeedback",
113
+ "SafetyRating",
114
+ "SafetySetting",
115
+ "HarmCategory",
116
+ "BatchEmbedTextRequest",
117
+ "BatchEmbedTextResponse",
118
+ "CountTextTokensRequest",
119
+ "CountTextTokensResponse",
120
+ "Embedding",
121
+ "EmbedTextRequest",
122
+ "EmbedTextResponse",
123
+ "GenerateTextRequest",
124
+ "GenerateTextResponse",
125
+ "TextCompletion",
126
+ "TextPrompt",
127
+ "Dataset",
128
+ "Hyperparameters",
129
+ "TunedModel",
130
+ "TunedModelSource",
131
+ "TuningExample",
132
+ "TuningExamples",
133
+ "TuningSnapshot",
134
+ "TuningTask",
135
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (2.88 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/citation.cpython-311.pyc ADDED
Binary file (3.36 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/discuss_service.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/model.cpython-311.pyc ADDED
Binary file (5.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/model_service.cpython-311.pyc ADDED
Binary file (12 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/permission.cpython-311.pyc ADDED
Binary file (5.12 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/permission_service.cpython-311.pyc ADDED
Binary file (8.4 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/safety.cpython-311.pyc ADDED
Binary file (9.87 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/text_service.cpython-311.pyc ADDED
Binary file (17.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/__pycache__/tuned_model.cpython-311.pyc ADDED
Binary file (15.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/citation.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta3",
24
+ manifest={
25
+ "CitationMetadata",
26
+ "CitationSource",
27
+ },
28
+ )
29
+
30
+
31
+ class CitationMetadata(proto.Message):
32
+ r"""A collection of source attributions for a piece of content.
33
+
34
+ Attributes:
35
+ citation_sources (MutableSequence[google.ai.generativelanguage_v1beta3.types.CitationSource]):
36
+ Citations to sources for a specific response.
37
+ """
38
+
39
+ citation_sources: MutableSequence["CitationSource"] = proto.RepeatedField(
40
+ proto.MESSAGE,
41
+ number=1,
42
+ message="CitationSource",
43
+ )
44
+
45
+
46
+ class CitationSource(proto.Message):
47
+ r"""A citation to a source for a portion of a specific response.
48
+
49
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
50
+
51
+ Attributes:
52
+ start_index (int):
53
+ Optional. Start of segment of the response
54
+ that is attributed to this source.
55
+
56
+ Index indicates the start of the segment,
57
+ measured in bytes.
58
+
59
+ This field is a member of `oneof`_ ``_start_index``.
60
+ end_index (int):
61
+ Optional. End of the attributed segment,
62
+ exclusive.
63
+
64
+ This field is a member of `oneof`_ ``_end_index``.
65
+ uri (str):
66
+ Optional. URI that is attributed as a source
67
+ for a portion of the text.
68
+
69
+ This field is a member of `oneof`_ ``_uri``.
70
+ license_ (str):
71
+ Optional. License for the GitHub project that
72
+ is attributed as a source for segment.
73
+
74
+ License info is required for code citations.
75
+
76
+ This field is a member of `oneof`_ ``_license``.
77
+ """
78
+
79
+ start_index: int = proto.Field(
80
+ proto.INT32,
81
+ number=1,
82
+ optional=True,
83
+ )
84
+ end_index: int = proto.Field(
85
+ proto.INT32,
86
+ number=2,
87
+ optional=True,
88
+ )
89
+ uri: str = proto.Field(
90
+ proto.STRING,
91
+ number=3,
92
+ optional=True,
93
+ )
94
+ license_: str = proto.Field(
95
+ proto.STRING,
96
+ number=4,
97
+ optional=True,
98
+ )
99
+
100
+
101
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/discuss_service.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta3.types import citation, safety
23
+
24
+ __protobuf__ = proto.module(
25
+ package="google.ai.generativelanguage.v1beta3",
26
+ manifest={
27
+ "GenerateMessageRequest",
28
+ "GenerateMessageResponse",
29
+ "Message",
30
+ "MessagePrompt",
31
+ "Example",
32
+ "CountMessageTokensRequest",
33
+ "CountMessageTokensResponse",
34
+ },
35
+ )
36
+
37
+
38
+ class GenerateMessageRequest(proto.Message):
39
+ r"""Request to generate a message response from the model.
40
+
41
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
42
+
43
+ Attributes:
44
+ model (str):
45
+ Required. The name of the model to use.
46
+
47
+ Format: ``name=models/{model}``.
48
+ prompt (google.ai.generativelanguage_v1beta3.types.MessagePrompt):
49
+ Required. The structured textual input given
50
+ to the model as a prompt.
51
+ Given a
52
+ prompt, the model will return what it predicts
53
+ is the next message in the discussion.
54
+ temperature (float):
55
+ Optional. Controls the randomness of the output.
56
+
57
+ Values can range over ``[0.0,1.0]``, inclusive. A value
58
+ closer to ``1.0`` will produce responses that are more
59
+ varied, while a value closer to ``0.0`` will typically
60
+ result in less surprising responses from the model.
61
+
62
+ This field is a member of `oneof`_ ``_temperature``.
63
+ candidate_count (int):
64
+ Optional. The number of generated response messages to
65
+ return.
66
+
67
+ This value must be between ``[1, 8]``, inclusive. If unset,
68
+ this will default to ``1``.
69
+
70
+ This field is a member of `oneof`_ ``_candidate_count``.
71
+ top_p (float):
72
+ Optional. The maximum cumulative probability of tokens to
73
+ consider when sampling.
74
+
75
+ The model uses combined Top-k and nucleus sampling.
76
+
77
+ Nucleus sampling considers the smallest set of tokens whose
78
+ probability sum is at least ``top_p``.
79
+
80
+ This field is a member of `oneof`_ ``_top_p``.
81
+ top_k (int):
82
+ Optional. The maximum number of tokens to consider when
83
+ sampling.
84
+
85
+ The model uses combined Top-k and nucleus sampling.
86
+
87
+ Top-k sampling considers the set of ``top_k`` most probable
88
+ tokens.
89
+
90
+ This field is a member of `oneof`_ ``_top_k``.
91
+ """
92
+
93
+ model: str = proto.Field(
94
+ proto.STRING,
95
+ number=1,
96
+ )
97
+ prompt: "MessagePrompt" = proto.Field(
98
+ proto.MESSAGE,
99
+ number=2,
100
+ message="MessagePrompt",
101
+ )
102
+ temperature: float = proto.Field(
103
+ proto.FLOAT,
104
+ number=3,
105
+ optional=True,
106
+ )
107
+ candidate_count: int = proto.Field(
108
+ proto.INT32,
109
+ number=4,
110
+ optional=True,
111
+ )
112
+ top_p: float = proto.Field(
113
+ proto.FLOAT,
114
+ number=5,
115
+ optional=True,
116
+ )
117
+ top_k: int = proto.Field(
118
+ proto.INT32,
119
+ number=6,
120
+ optional=True,
121
+ )
122
+
123
+
124
+ class GenerateMessageResponse(proto.Message):
125
+ r"""The response from the model.
126
+
127
+ This includes candidate messages and
128
+ conversation history in the form of chronologically-ordered
129
+ messages.
130
+
131
+ Attributes:
132
+ candidates (MutableSequence[google.ai.generativelanguage_v1beta3.types.Message]):
133
+ Candidate response messages from the model.
134
+ messages (MutableSequence[google.ai.generativelanguage_v1beta3.types.Message]):
135
+ The conversation history used by the model.
136
+ filters (MutableSequence[google.ai.generativelanguage_v1beta3.types.ContentFilter]):
137
+ A set of content filtering metadata for the prompt and
138
+ response text.
139
+
140
+ This indicates which ``SafetyCategory``\ (s) blocked a
141
+ candidate from this response, the lowest ``HarmProbability``
142
+ that triggered a block, and the HarmThreshold setting for
143
+ that category.
144
+ """
145
+
146
+ candidates: MutableSequence["Message"] = proto.RepeatedField(
147
+ proto.MESSAGE,
148
+ number=1,
149
+ message="Message",
150
+ )
151
+ messages: MutableSequence["Message"] = proto.RepeatedField(
152
+ proto.MESSAGE,
153
+ number=2,
154
+ message="Message",
155
+ )
156
+ filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField(
157
+ proto.MESSAGE,
158
+ number=3,
159
+ message=safety.ContentFilter,
160
+ )
161
+
162
+
163
+ class Message(proto.Message):
164
+ r"""The base unit of structured text.
165
+
166
+ A ``Message`` includes an ``author`` and the ``content`` of the
167
+ ``Message``.
168
+
169
+ The ``author`` is used to tag messages when they are fed to the
170
+ model as text.
171
+
172
+
173
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
174
+
175
+ Attributes:
176
+ author (str):
177
+ Optional. The author of this Message.
178
+
179
+ This serves as a key for tagging
180
+ the content of this Message when it is fed to
181
+ the model as text.
182
+
183
+ The author can be any alphanumeric string.
184
+ content (str):
185
+ Required. The text content of the structured ``Message``.
186
+ citation_metadata (google.ai.generativelanguage_v1beta3.types.CitationMetadata):
187
+ Output only. Citation information for model-generated
188
+ ``content`` in this ``Message``.
189
+
190
+ If this ``Message`` was generated as output from the model,
191
+ this field may be populated with attribution information for
192
+ any text included in the ``content``. This field is used
193
+ only on output.
194
+
195
+ This field is a member of `oneof`_ ``_citation_metadata``.
196
+ """
197
+
198
+ author: str = proto.Field(
199
+ proto.STRING,
200
+ number=1,
201
+ )
202
+ content: str = proto.Field(
203
+ proto.STRING,
204
+ number=2,
205
+ )
206
+ citation_metadata: citation.CitationMetadata = proto.Field(
207
+ proto.MESSAGE,
208
+ number=3,
209
+ optional=True,
210
+ message=citation.CitationMetadata,
211
+ )
212
+
213
+
214
+ class MessagePrompt(proto.Message):
215
+ r"""All of the structured input text passed to the model as a prompt.
216
+
217
+ A ``MessagePrompt`` contains a structured set of fields that provide
218
+ context for the conversation, examples of user input/model output
219
+ message pairs that prime the model to respond in different ways, and
220
+ the conversation history or list of messages representing the
221
+ alternating turns of the conversation between the user and the
222
+ model.
223
+
224
+ Attributes:
225
+ context (str):
226
+ Optional. Text that should be provided to the model first to
227
+ ground the response.
228
+
229
+ If not empty, this ``context`` will be given to the model
230
+ first before the ``examples`` and ``messages``. When using a
231
+ ``context`` be sure to provide it with every request to
232
+ maintain continuity.
233
+
234
+ This field can be a description of your prompt to the model
235
+ to help provide context and guide the responses. Examples:
236
+ "Translate the phrase from English to French." or "Given a
237
+ statement, classify the sentiment as happy, sad or neutral."
238
+
239
+ Anything included in this field will take precedence over
240
+ message history if the total input size exceeds the model's
241
+ ``input_token_limit`` and the input request is truncated.
242
+ examples (MutableSequence[google.ai.generativelanguage_v1beta3.types.Example]):
243
+ Optional. Examples of what the model should generate.
244
+
245
+ This includes both user input and the response that the
246
+ model should emulate.
247
+
248
+ These ``examples`` are treated identically to conversation
249
+ messages except that they take precedence over the history
250
+ in ``messages``: If the total input size exceeds the model's
251
+ ``input_token_limit`` the input will be truncated. Items
252
+ will be dropped from ``messages`` before ``examples``.
253
+ messages (MutableSequence[google.ai.generativelanguage_v1beta3.types.Message]):
254
+ Required. A snapshot of the recent conversation history
255
+ sorted chronologically.
256
+
257
+ Turns alternate between two authors.
258
+
259
+ If the total input size exceeds the model's
260
+ ``input_token_limit`` the input will be truncated: The
261
+ oldest items will be dropped from ``messages``.
262
+ """
263
+
264
+ context: str = proto.Field(
265
+ proto.STRING,
266
+ number=1,
267
+ )
268
+ examples: MutableSequence["Example"] = proto.RepeatedField(
269
+ proto.MESSAGE,
270
+ number=2,
271
+ message="Example",
272
+ )
273
+ messages: MutableSequence["Message"] = proto.RepeatedField(
274
+ proto.MESSAGE,
275
+ number=3,
276
+ message="Message",
277
+ )
278
+
279
+
280
+ class Example(proto.Message):
281
+ r"""An input/output example used to instruct the Model.
282
+
283
+ It demonstrates how the model should respond or format its
284
+ response.
285
+
286
+ Attributes:
287
+ input (google.ai.generativelanguage_v1beta3.types.Message):
288
+ Required. An example of an input ``Message`` from the user.
289
+ output (google.ai.generativelanguage_v1beta3.types.Message):
290
+ Required. An example of what the model should
291
+ output given the input.
292
+ """
293
+
294
+ input: "Message" = proto.Field(
295
+ proto.MESSAGE,
296
+ number=1,
297
+ message="Message",
298
+ )
299
+ output: "Message" = proto.Field(
300
+ proto.MESSAGE,
301
+ number=2,
302
+ message="Message",
303
+ )
304
+
305
+
306
+ class CountMessageTokensRequest(proto.Message):
307
+ r"""Counts the number of tokens in the ``prompt`` sent to a model.
308
+
309
+ Models may tokenize text differently, so each model may return a
310
+ different ``token_count``.
311
+
312
+ Attributes:
313
+ model (str):
314
+ Required. The model's resource name. This serves as an ID
315
+ for the Model to use.
316
+
317
+ This name should match a model name returned by the
318
+ ``ListModels`` method.
319
+
320
+ Format: ``models/{model}``
321
+ prompt (google.ai.generativelanguage_v1beta3.types.MessagePrompt):
322
+ Required. The prompt, whose token count is to
323
+ be returned.
324
+ """
325
+
326
+ model: str = proto.Field(
327
+ proto.STRING,
328
+ number=1,
329
+ )
330
+ prompt: "MessagePrompt" = proto.Field(
331
+ proto.MESSAGE,
332
+ number=2,
333
+ message="MessagePrompt",
334
+ )
335
+
336
+
337
+ class CountMessageTokensResponse(proto.Message):
338
+ r"""A response from ``CountMessageTokens``.
339
+
340
+ It returns the model's ``token_count`` for the ``prompt``.
341
+
342
+ Attributes:
343
+ token_count (int):
344
+ The number of tokens that the ``model`` tokenizes the
345
+ ``prompt`` into.
346
+
347
+ Always non-negative.
348
+ """
349
+
350
+ token_count: int = proto.Field(
351
+ proto.INT32,
352
+ number=1,
353
+ )
354
+
355
+
356
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/model.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta3",
24
+ manifest={
25
+ "Model",
26
+ },
27
+ )
28
+
29
+
30
+ class Model(proto.Message):
31
+ r"""Information about a Generative Language Model.
32
+
33
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
34
+
35
+ Attributes:
36
+ name (str):
37
+ Required. The resource name of the ``Model``.
38
+
39
+ Format: ``models/{model}`` with a ``{model}`` naming
40
+ convention of:
41
+
42
+ - "{base_model_id}-{version}"
43
+
44
+ Examples:
45
+
46
+ - ``models/chat-bison-001``
47
+ base_model_id (str):
48
+ Required. The name of the base model, pass this to the
49
+ generation request.
50
+
51
+ Examples:
52
+
53
+ - ``chat-bison``
54
+ version (str):
55
+ Required. The version number of the model.
56
+
57
+ This represents the major version
58
+ display_name (str):
59
+ The human-readable name of the model. E.g.
60
+ "Chat Bison".
61
+ The name can be up to 128 characters long and
62
+ can consist of any UTF-8 characters.
63
+ description (str):
64
+ A short description of the model.
65
+ input_token_limit (int):
66
+ Maximum number of input tokens allowed for
67
+ this model.
68
+ output_token_limit (int):
69
+ Maximum number of output tokens available for
70
+ this model.
71
+ supported_generation_methods (MutableSequence[str]):
72
+ The model's supported generation methods.
73
+
74
+ The method names are defined as Pascal case strings, such as
75
+ ``generateMessage`` which correspond to API methods.
76
+ temperature (float):
77
+ Controls the randomness of the output.
78
+
79
+ Values can range over ``[0.0,1.0]``, inclusive. A value
80
+ closer to ``1.0`` will produce responses that are more
81
+ varied, while a value closer to ``0.0`` will typically
82
+ result in less surprising responses from the model. This
83
+ value specifies default to be used by the backend while
84
+ making the call to the model.
85
+
86
+ This field is a member of `oneof`_ ``_temperature``.
87
+ top_p (float):
88
+ For Nucleus sampling.
89
+
90
+ Nucleus sampling considers the smallest set of tokens whose
91
+ probability sum is at least ``top_p``. This value specifies
92
+ default to be used by the backend while making the call to
93
+ the model.
94
+
95
+ This field is a member of `oneof`_ ``_top_p``.
96
+ top_k (int):
97
+ For Top-k sampling.
98
+
99
+ Top-k sampling considers the set of ``top_k`` most probable
100
+ tokens. This value specifies default to be used by the
101
+ backend while making the call to the model.
102
+
103
+ This field is a member of `oneof`_ ``_top_k``.
104
+ """
105
+
106
+ name: str = proto.Field(
107
+ proto.STRING,
108
+ number=1,
109
+ )
110
+ base_model_id: str = proto.Field(
111
+ proto.STRING,
112
+ number=2,
113
+ )
114
+ version: str = proto.Field(
115
+ proto.STRING,
116
+ number=3,
117
+ )
118
+ display_name: str = proto.Field(
119
+ proto.STRING,
120
+ number=4,
121
+ )
122
+ description: str = proto.Field(
123
+ proto.STRING,
124
+ number=5,
125
+ )
126
+ input_token_limit: int = proto.Field(
127
+ proto.INT32,
128
+ number=6,
129
+ )
130
+ output_token_limit: int = proto.Field(
131
+ proto.INT32,
132
+ number=7,
133
+ )
134
+ supported_generation_methods: MutableSequence[str] = proto.RepeatedField(
135
+ proto.STRING,
136
+ number=8,
137
+ )
138
+ temperature: float = proto.Field(
139
+ proto.FLOAT,
140
+ number=9,
141
+ optional=True,
142
+ )
143
+ top_p: float = proto.Field(
144
+ proto.FLOAT,
145
+ number=10,
146
+ optional=True,
147
+ )
148
+ top_k: int = proto.Field(
149
+ proto.INT32,
150
+ number=11,
151
+ optional=True,
152
+ )
153
+
154
+
155
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/model_service.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
24
+ from google.ai.generativelanguage_v1beta3.types import model
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1beta3",
28
+ manifest={
29
+ "GetModelRequest",
30
+ "ListModelsRequest",
31
+ "ListModelsResponse",
32
+ "GetTunedModelRequest",
33
+ "ListTunedModelsRequest",
34
+ "ListTunedModelsResponse",
35
+ "CreateTunedModelRequest",
36
+ "CreateTunedModelMetadata",
37
+ "UpdateTunedModelRequest",
38
+ "DeleteTunedModelRequest",
39
+ },
40
+ )
41
+
42
+
43
+ class GetModelRequest(proto.Message):
44
+ r"""Request for getting information about a specific Model.
45
+
46
+ Attributes:
47
+ name (str):
48
+ Required. The resource name of the model.
49
+
50
+ This name should match a model name returned by the
51
+ ``ListModels`` method.
52
+
53
+ Format: ``models/{model}``
54
+ """
55
+
56
+ name: str = proto.Field(
57
+ proto.STRING,
58
+ number=1,
59
+ )
60
+
61
+
62
+ class ListModelsRequest(proto.Message):
63
+ r"""Request for listing all Models.
64
+
65
+ Attributes:
66
+ page_size (int):
67
+ The maximum number of ``Models`` to return (per page).
68
+
69
+ The service may return fewer models. If unspecified, at most
70
+ 50 models will be returned per page. This method returns at
71
+ most 1000 models per page, even if you pass a larger
72
+ page_size.
73
+ page_token (str):
74
+ A page token, received from a previous ``ListModels`` call.
75
+
76
+ Provide the ``page_token`` returned by one request as an
77
+ argument to the next request to retrieve the next page.
78
+
79
+ When paginating, all other parameters provided to
80
+ ``ListModels`` must match the call that provided the page
81
+ token.
82
+ """
83
+
84
+ page_size: int = proto.Field(
85
+ proto.INT32,
86
+ number=2,
87
+ )
88
+ page_token: str = proto.Field(
89
+ proto.STRING,
90
+ number=3,
91
+ )
92
+
93
+
94
+ class ListModelsResponse(proto.Message):
95
+ r"""Response from ``ListModel`` containing a paginated list of Models.
96
+
97
+ Attributes:
98
+ models (MutableSequence[google.ai.generativelanguage_v1beta3.types.Model]):
99
+ The returned Models.
100
+ next_page_token (str):
101
+ A token, which can be sent as ``page_token`` to retrieve the
102
+ next page.
103
+
104
+ If this field is omitted, there are no more pages.
105
+ """
106
+
107
+ @property
108
+ def raw_page(self):
109
+ return self
110
+
111
+ models: MutableSequence[model.Model] = proto.RepeatedField(
112
+ proto.MESSAGE,
113
+ number=1,
114
+ message=model.Model,
115
+ )
116
+ next_page_token: str = proto.Field(
117
+ proto.STRING,
118
+ number=2,
119
+ )
120
+
121
+
122
+ class GetTunedModelRequest(proto.Message):
123
+ r"""Request for getting information about a specific Model.
124
+
125
+ Attributes:
126
+ name (str):
127
+ Required. The resource name of the model.
128
+
129
+ Format: ``tunedModels/my-model-id``
130
+ """
131
+
132
+ name: str = proto.Field(
133
+ proto.STRING,
134
+ number=1,
135
+ )
136
+
137
+
138
+ class ListTunedModelsRequest(proto.Message):
139
+ r"""Request for listing TunedModels.
140
+
141
+ Attributes:
142
+ page_size (int):
143
+ Optional. The maximum number of ``TunedModels`` to return
144
+ (per page). The service may return fewer tuned models.
145
+
146
+ If unspecified, at most 10 tuned models will be returned.
147
+ This method returns at most 1000 models per page, even if
148
+ you pass a larger page_size.
149
+ page_token (str):
150
+ Optional. A page token, received from a previous
151
+ ``ListTunedModels`` call.
152
+
153
+ Provide the ``page_token`` returned by one request as an
154
+ argument to the next request to retrieve the next page.
155
+
156
+ When paginating, all other parameters provided to
157
+ ``ListTunedModels`` must match the call that provided the
158
+ page token.
159
+ """
160
+
161
+ page_size: int = proto.Field(
162
+ proto.INT32,
163
+ number=1,
164
+ )
165
+ page_token: str = proto.Field(
166
+ proto.STRING,
167
+ number=2,
168
+ )
169
+
170
+
171
+ class ListTunedModelsResponse(proto.Message):
172
+ r"""Response from ``ListTunedModels`` containing a paginated list of
173
+ Models.
174
+
175
+ Attributes:
176
+ tuned_models (MutableSequence[google.ai.generativelanguage_v1beta3.types.TunedModel]):
177
+ The returned Models.
178
+ next_page_token (str):
179
+ A token, which can be sent as ``page_token`` to retrieve the
180
+ next page.
181
+
182
+ If this field is omitted, there are no more pages.
183
+ """
184
+
185
+ @property
186
+ def raw_page(self):
187
+ return self
188
+
189
+ tuned_models: MutableSequence[gag_tuned_model.TunedModel] = proto.RepeatedField(
190
+ proto.MESSAGE,
191
+ number=1,
192
+ message=gag_tuned_model.TunedModel,
193
+ )
194
+ next_page_token: str = proto.Field(
195
+ proto.STRING,
196
+ number=2,
197
+ )
198
+
199
+
200
+ class CreateTunedModelRequest(proto.Message):
201
+ r"""Request to create a TunedModel.
202
+
203
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
204
+
205
+ Attributes:
206
+ tuned_model_id (str):
207
+ Optional. The unique id for the tuned model if specified.
208
+ This value should be up to 40 characters, the first
209
+ character must be a letter, the last could be a letter or a
210
+ number. The id must match the regular expression:
211
+ `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?.
212
+
213
+ This field is a member of `oneof`_ ``_tuned_model_id``.
214
+ tuned_model (google.ai.generativelanguage_v1beta3.types.TunedModel):
215
+ Required. The tuned model to create.
216
+ """
217
+
218
+ tuned_model_id: str = proto.Field(
219
+ proto.STRING,
220
+ number=1,
221
+ optional=True,
222
+ )
223
+ tuned_model: gag_tuned_model.TunedModel = proto.Field(
224
+ proto.MESSAGE,
225
+ number=2,
226
+ message=gag_tuned_model.TunedModel,
227
+ )
228
+
229
+
230
+ class CreateTunedModelMetadata(proto.Message):
231
+ r"""Metadata about the state and progress of creating a tuned
232
+ model returned from the long-running operation
233
+
234
+ Attributes:
235
+ tuned_model (str):
236
+ Name of the tuned model associated with the
237
+ tuning operation.
238
+ total_steps (int):
239
+ The total number of tuning steps.
240
+ completed_steps (int):
241
+ The number of steps completed.
242
+ completed_percent (float):
243
+ The completed percentage for the tuning
244
+ operation.
245
+ snapshots (MutableSequence[google.ai.generativelanguage_v1beta3.types.TuningSnapshot]):
246
+ Metrics collected during tuning.
247
+ """
248
+
249
+ tuned_model: str = proto.Field(
250
+ proto.STRING,
251
+ number=5,
252
+ )
253
+ total_steps: int = proto.Field(
254
+ proto.INT32,
255
+ number=1,
256
+ )
257
+ completed_steps: int = proto.Field(
258
+ proto.INT32,
259
+ number=2,
260
+ )
261
+ completed_percent: float = proto.Field(
262
+ proto.FLOAT,
263
+ number=3,
264
+ )
265
+ snapshots: MutableSequence[gag_tuned_model.TuningSnapshot] = proto.RepeatedField(
266
+ proto.MESSAGE,
267
+ number=4,
268
+ message=gag_tuned_model.TuningSnapshot,
269
+ )
270
+
271
+
272
+ class UpdateTunedModelRequest(proto.Message):
273
+ r"""Request to update a TunedModel.
274
+
275
+ Attributes:
276
+ tuned_model (google.ai.generativelanguage_v1beta3.types.TunedModel):
277
+ Required. The tuned model to update.
278
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
279
+ Required. The list of fields to update.
280
+ """
281
+
282
+ tuned_model: gag_tuned_model.TunedModel = proto.Field(
283
+ proto.MESSAGE,
284
+ number=1,
285
+ message=gag_tuned_model.TunedModel,
286
+ )
287
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
288
+ proto.MESSAGE,
289
+ number=2,
290
+ message=field_mask_pb2.FieldMask,
291
+ )
292
+
293
+
294
+ class DeleteTunedModelRequest(proto.Message):
295
+ r"""Request to delete a TunedModel.
296
+
297
+ Attributes:
298
+ name (str):
299
+ Required. The resource name of the model. Format:
300
+ ``tunedModels/my-model-id``
301
+ """
302
+
303
+ name: str = proto.Field(
304
+ proto.STRING,
305
+ number=1,
306
+ )
307
+
308
+
309
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/permission.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta3",
24
+ manifest={
25
+ "Permission",
26
+ },
27
+ )
28
+
29
+
30
+ class Permission(proto.Message):
31
+ r"""Permission resource grants user, group or the rest of the
32
+ world access to the PaLM API resource (e.g. a tuned model,
33
+ file).
34
+
35
+ A role is a collection of permitted operations that allows users
36
+ to perform specific actions on PaLM API resources. To make them
37
+ available to users, groups, or service accounts, you assign
38
+ roles. When you assign a role, you grant permissions that the
39
+ role contains.
40
+
41
+ There are three concentric roles. Each role is a superset of the
42
+ previous role's permitted operations:
43
+
44
+ - reader can use the resource (e.g. tuned model) for inference
45
+ - writer has reader's permissions and additionally can edit and
46
+ share
47
+ - owner has writer's permissions and additionally can delete
48
+
49
+
50
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
51
+
52
+ Attributes:
53
+ name (str):
54
+ Output only. The permission name. A unique name will be
55
+ generated on create. Example:
56
+ tunedModels/{tuned_model}permssions/{permission} Output
57
+ only.
58
+ grantee_type (google.ai.generativelanguage_v1beta3.types.Permission.GranteeType):
59
+ Required. Immutable. The type of the grantee.
60
+
61
+ This field is a member of `oneof`_ ``_grantee_type``.
62
+ email_address (str):
63
+ Optional. Immutable. The email address of the
64
+ user of group which this permission refers.
65
+ Field is not set when permission's grantee type
66
+ is EVERYONE.
67
+
68
+ This field is a member of `oneof`_ ``_email_address``.
69
+ role (google.ai.generativelanguage_v1beta3.types.Permission.Role):
70
+ Required. The role granted by this
71
+ permission.
72
+
73
+ This field is a member of `oneof`_ ``_role``.
74
+ """
75
+
76
+ class GranteeType(proto.Enum):
77
+ r"""Defines types of the grantee of this permission.
78
+
79
+ Values:
80
+ GRANTEE_TYPE_UNSPECIFIED (0):
81
+ The default value. This value is unused.
82
+ USER (1):
83
+ Represents a user. When set, you must provide email_address
84
+ for the user.
85
+ GROUP (2):
86
+ Represents a group. When set, you must provide email_address
87
+ for the group.
88
+ EVERYONE (3):
89
+ Represents access to everyone. No extra
90
+ information is required.
91
+ """
92
+ GRANTEE_TYPE_UNSPECIFIED = 0
93
+ USER = 1
94
+ GROUP = 2
95
+ EVERYONE = 3
96
+
97
+ class Role(proto.Enum):
98
+ r"""Defines the role granted by this permission.
99
+
100
+ Values:
101
+ ROLE_UNSPECIFIED (0):
102
+ The default value. This value is unused.
103
+ OWNER (1):
104
+ Owner can use, update, share and delete the
105
+ resource.
106
+ WRITER (2):
107
+ Writer can use, update and share the
108
+ resource.
109
+ READER (3):
110
+ Reader can use the resource.
111
+ """
112
+ ROLE_UNSPECIFIED = 0
113
+ OWNER = 1
114
+ WRITER = 2
115
+ READER = 3
116
+
117
+ name: str = proto.Field(
118
+ proto.STRING,
119
+ number=1,
120
+ )
121
+ grantee_type: GranteeType = proto.Field(
122
+ proto.ENUM,
123
+ number=2,
124
+ optional=True,
125
+ enum=GranteeType,
126
+ )
127
+ email_address: str = proto.Field(
128
+ proto.STRING,
129
+ number=3,
130
+ optional=True,
131
+ )
132
+ role: Role = proto.Field(
133
+ proto.ENUM,
134
+ number=4,
135
+ optional=True,
136
+ enum=Role,
137
+ )
138
+
139
+
140
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/permission_service.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1beta3.types import permission as gag_permission
24
+
25
+ __protobuf__ = proto.module(
26
+ package="google.ai.generativelanguage.v1beta3",
27
+ manifest={
28
+ "CreatePermissionRequest",
29
+ "GetPermissionRequest",
30
+ "ListPermissionsRequest",
31
+ "ListPermissionsResponse",
32
+ "UpdatePermissionRequest",
33
+ "DeletePermissionRequest",
34
+ "TransferOwnershipRequest",
35
+ "TransferOwnershipResponse",
36
+ },
37
+ )
38
+
39
+
40
+ class CreatePermissionRequest(proto.Message):
41
+ r"""Request to create a ``Permission``.
42
+
43
+ Attributes:
44
+ parent (str):
45
+ Required. The parent resource of the ``Permission``. Format:
46
+ tunedModels/{tuned_model}
47
+ permission (google.ai.generativelanguage_v1beta3.types.Permission):
48
+ Required. The permission to create.
49
+ """
50
+
51
+ parent: str = proto.Field(
52
+ proto.STRING,
53
+ number=1,
54
+ )
55
+ permission: gag_permission.Permission = proto.Field(
56
+ proto.MESSAGE,
57
+ number=2,
58
+ message=gag_permission.Permission,
59
+ )
60
+
61
+
62
+ class GetPermissionRequest(proto.Message):
63
+ r"""Request for getting information about a specific ``Permission``.
64
+
65
+ Attributes:
66
+ name (str):
67
+ Required. The resource name of the permission.
68
+
69
+ Format:
70
+ ``tunedModels/{tuned_model}permissions/{permission}``
71
+ """
72
+
73
+ name: str = proto.Field(
74
+ proto.STRING,
75
+ number=1,
76
+ )
77
+
78
+
79
+ class ListPermissionsRequest(proto.Message):
80
+ r"""Request for listing permissions.
81
+
82
+ Attributes:
83
+ parent (str):
84
+ Required. The parent resource of the permissions. Format:
85
+ tunedModels/{tuned_model}
86
+ page_size (int):
87
+ Optional. The maximum number of ``Permission``\ s to return
88
+ (per page). The service may return fewer permissions.
89
+
90
+ If unspecified, at most 10 permissions will be returned.
91
+ This method returns at most 1000 permissions per page, even
92
+ if you pass larger page_size.
93
+ page_token (str):
94
+ Optional. A page token, received from a previous
95
+ ``ListPermissions`` call.
96
+
97
+ Provide the ``page_token`` returned by one request as an
98
+ argument to the next request to retrieve the next page.
99
+
100
+ When paginating, all other parameters provided to
101
+ ``ListPermissions`` must match the call that provided the
102
+ page token.
103
+ """
104
+
105
+ parent: str = proto.Field(
106
+ proto.STRING,
107
+ number=1,
108
+ )
109
+ page_size: int = proto.Field(
110
+ proto.INT32,
111
+ number=2,
112
+ )
113
+ page_token: str = proto.Field(
114
+ proto.STRING,
115
+ number=3,
116
+ )
117
+
118
+
119
+ class ListPermissionsResponse(proto.Message):
120
+ r"""Response from ``ListPermissions`` containing a paginated list of
121
+ permissions.
122
+
123
+ Attributes:
124
+ permissions (MutableSequence[google.ai.generativelanguage_v1beta3.types.Permission]):
125
+ Returned permissions.
126
+ next_page_token (str):
127
+ A token, which can be sent as ``page_token`` to retrieve the
128
+ next page.
129
+
130
+ If this field is omitted, there are no more pages.
131
+ """
132
+
133
+ @property
134
+ def raw_page(self):
135
+ return self
136
+
137
+ permissions: MutableSequence[gag_permission.Permission] = proto.RepeatedField(
138
+ proto.MESSAGE,
139
+ number=1,
140
+ message=gag_permission.Permission,
141
+ )
142
+ next_page_token: str = proto.Field(
143
+ proto.STRING,
144
+ number=2,
145
+ )
146
+
147
+
148
+ class UpdatePermissionRequest(proto.Message):
149
+ r"""Request to update the ``Permission``.
150
+
151
+ Attributes:
152
+ permission (google.ai.generativelanguage_v1beta3.types.Permission):
153
+ Required. The permission to update.
154
+
155
+ The permission's ``name`` field is used to identify the
156
+ permission to update.
157
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
158
+ Required. The list of fields to update. Accepted ones:
159
+
160
+ - role (``Permission.role`` field)
161
+ """
162
+
163
+ permission: gag_permission.Permission = proto.Field(
164
+ proto.MESSAGE,
165
+ number=1,
166
+ message=gag_permission.Permission,
167
+ )
168
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
169
+ proto.MESSAGE,
170
+ number=2,
171
+ message=field_mask_pb2.FieldMask,
172
+ )
173
+
174
+
175
+ class DeletePermissionRequest(proto.Message):
176
+ r"""Request to delete the ``Permission``.
177
+
178
+ Attributes:
179
+ name (str):
180
+ Required. The resource name of the permission. Format:
181
+ ``tunedModels/{tuned_model}/permissions/{permission}``
182
+ """
183
+
184
+ name: str = proto.Field(
185
+ proto.STRING,
186
+ number=1,
187
+ )
188
+
189
+
190
+ class TransferOwnershipRequest(proto.Message):
191
+ r"""Request to transfer the ownership of the tuned model.
192
+
193
+ Attributes:
194
+ name (str):
195
+ Required. The resource name of the tuned model to transfer
196
+ ownership .
197
+
198
+ Format: ``tunedModels/my-model-id``
199
+ email_address (str):
200
+ Required. The email address of the user to
201
+ whom the tuned model is being transferred to.
202
+ """
203
+
204
+ name: str = proto.Field(
205
+ proto.STRING,
206
+ number=1,
207
+ )
208
+ email_address: str = proto.Field(
209
+ proto.STRING,
210
+ number=2,
211
+ )
212
+
213
+
214
+ class TransferOwnershipResponse(proto.Message):
215
+ r"""Response from ``TransferOwnership``."""
216
+
217
+
218
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/safety.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta3",
24
+ manifest={
25
+ "HarmCategory",
26
+ "ContentFilter",
27
+ "SafetyFeedback",
28
+ "SafetyRating",
29
+ "SafetySetting",
30
+ },
31
+ )
32
+
33
+
34
+ class HarmCategory(proto.Enum):
35
+ r"""The category of a rating.
36
+
37
+ These categories cover various kinds of harms that developers
38
+ may wish to adjust.
39
+
40
+ Values:
41
+ HARM_CATEGORY_UNSPECIFIED (0):
42
+ Category is unspecified.
43
+ HARM_CATEGORY_DEROGATORY (1):
44
+ Negative or harmful comments targeting
45
+ identity and/or protected attribute.
46
+ HARM_CATEGORY_TOXICITY (2):
47
+ Content that is rude, disrepspectful, or
48
+ profane.
49
+ HARM_CATEGORY_VIOLENCE (3):
50
+ Describes scenarios depictng violence against
51
+ an individual or group, or general descriptions
52
+ of gore.
53
+ HARM_CATEGORY_SEXUAL (4):
54
+ Contains references to sexual acts or other
55
+ lewd content.
56
+ HARM_CATEGORY_MEDICAL (5):
57
+ Promotes unchecked medical advice.
58
+ HARM_CATEGORY_DANGEROUS (6):
59
+ Dangerous content that promotes, facilitates,
60
+ or encourages harmful acts.
61
+ """
62
+ HARM_CATEGORY_UNSPECIFIED = 0
63
+ HARM_CATEGORY_DEROGATORY = 1
64
+ HARM_CATEGORY_TOXICITY = 2
65
+ HARM_CATEGORY_VIOLENCE = 3
66
+ HARM_CATEGORY_SEXUAL = 4
67
+ HARM_CATEGORY_MEDICAL = 5
68
+ HARM_CATEGORY_DANGEROUS = 6
69
+
70
+
71
+ class ContentFilter(proto.Message):
72
+ r"""Content filtering metadata associated with processing a
73
+ single request.
74
+ ContentFilter contains a reason and an optional supporting
75
+ string. The reason may be unspecified.
76
+
77
+
78
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
79
+
80
+ Attributes:
81
+ reason (google.ai.generativelanguage_v1beta3.types.ContentFilter.BlockedReason):
82
+ The reason content was blocked during request
83
+ processing.
84
+ message (str):
85
+ A string that describes the filtering
86
+ behavior in more detail.
87
+
88
+ This field is a member of `oneof`_ ``_message``.
89
+ """
90
+
91
+ class BlockedReason(proto.Enum):
92
+ r"""A list of reasons why content may have been blocked.
93
+
94
+ Values:
95
+ BLOCKED_REASON_UNSPECIFIED (0):
96
+ A blocked reason was not specified.
97
+ SAFETY (1):
98
+ Content was blocked by safety settings.
99
+ OTHER (2):
100
+ Content was blocked, but the reason is
101
+ uncategorized.
102
+ """
103
+ BLOCKED_REASON_UNSPECIFIED = 0
104
+ SAFETY = 1
105
+ OTHER = 2
106
+
107
+ reason: BlockedReason = proto.Field(
108
+ proto.ENUM,
109
+ number=1,
110
+ enum=BlockedReason,
111
+ )
112
+ message: str = proto.Field(
113
+ proto.STRING,
114
+ number=2,
115
+ optional=True,
116
+ )
117
+
118
+
119
+ class SafetyFeedback(proto.Message):
120
+ r"""Safety feedback for an entire request.
121
+
122
+ This field is populated if content in the input and/or response
123
+ is blocked due to safety settings. SafetyFeedback may not exist
124
+ for every HarmCategory. Each SafetyFeedback will return the
125
+ safety settings used by the request as well as the lowest
126
+ HarmProbability that should be allowed in order to return a
127
+ result.
128
+
129
+ Attributes:
130
+ rating (google.ai.generativelanguage_v1beta3.types.SafetyRating):
131
+ Safety rating evaluated from content.
132
+ setting (google.ai.generativelanguage_v1beta3.types.SafetySetting):
133
+ Safety settings applied to the request.
134
+ """
135
+
136
+ rating: "SafetyRating" = proto.Field(
137
+ proto.MESSAGE,
138
+ number=1,
139
+ message="SafetyRating",
140
+ )
141
+ setting: "SafetySetting" = proto.Field(
142
+ proto.MESSAGE,
143
+ number=2,
144
+ message="SafetySetting",
145
+ )
146
+
147
+
148
+ class SafetyRating(proto.Message):
149
+ r"""Safety rating for a piece of content.
150
+
151
+ The safety rating contains the category of harm and the harm
152
+ probability level in that category for a piece of content.
153
+ Content is classified for safety across a number of harm
154
+ categories and the probability of the harm classification is
155
+ included here.
156
+
157
+ Attributes:
158
+ category (google.ai.generativelanguage_v1beta3.types.HarmCategory):
159
+ Required. The category for this rating.
160
+ probability (google.ai.generativelanguage_v1beta3.types.SafetyRating.HarmProbability):
161
+ Required. The probability of harm for this
162
+ content.
163
+ """
164
+
165
+ class HarmProbability(proto.Enum):
166
+ r"""The probability that a piece of content is harmful.
167
+
168
+ The classification system gives the probability of the content
169
+ being unsafe. This does not indicate the severity of harm for a
170
+ piece of content.
171
+
172
+ Values:
173
+ HARM_PROBABILITY_UNSPECIFIED (0):
174
+ Probability is unspecified.
175
+ NEGLIGIBLE (1):
176
+ Content has a negligible chance of being
177
+ unsafe.
178
+ LOW (2):
179
+ Content has a low chance of being unsafe.
180
+ MEDIUM (3):
181
+ Content has a medium chance of being unsafe.
182
+ HIGH (4):
183
+ Content has a high chance of being unsafe.
184
+ """
185
+ HARM_PROBABILITY_UNSPECIFIED = 0
186
+ NEGLIGIBLE = 1
187
+ LOW = 2
188
+ MEDIUM = 3
189
+ HIGH = 4
190
+
191
+ category: "HarmCategory" = proto.Field(
192
+ proto.ENUM,
193
+ number=3,
194
+ enum="HarmCategory",
195
+ )
196
+ probability: HarmProbability = proto.Field(
197
+ proto.ENUM,
198
+ number=4,
199
+ enum=HarmProbability,
200
+ )
201
+
202
+
203
+ class SafetySetting(proto.Message):
204
+ r"""Safety setting, affecting the safety-blocking behavior.
205
+
206
+ Passing a safety setting for a category changes the allowed
207
+ proability that content is blocked.
208
+
209
+ Attributes:
210
+ category (google.ai.generativelanguage_v1beta3.types.HarmCategory):
211
+ Required. The category for this setting.
212
+ threshold (google.ai.generativelanguage_v1beta3.types.SafetySetting.HarmBlockThreshold):
213
+ Required. Controls the probability threshold
214
+ at which harm is blocked.
215
+ """
216
+
217
+ class HarmBlockThreshold(proto.Enum):
218
+ r"""Block at and beyond a specified harm probability.
219
+
220
+ Values:
221
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED (0):
222
+ Threshold is unspecified.
223
+ BLOCK_LOW_AND_ABOVE (1):
224
+ Content with NEGLIGIBLE will be allowed.
225
+ BLOCK_MEDIUM_AND_ABOVE (2):
226
+ Content with NEGLIGIBLE and LOW will be
227
+ allowed.
228
+ BLOCK_ONLY_HIGH (3):
229
+ Content with NEGLIGIBLE, LOW, and MEDIUM will
230
+ be allowed.
231
+ BLOCK_NONE (4):
232
+ All content will be allowed.
233
+ """
234
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0
235
+ BLOCK_LOW_AND_ABOVE = 1
236
+ BLOCK_MEDIUM_AND_ABOVE = 2
237
+ BLOCK_ONLY_HIGH = 3
238
+ BLOCK_NONE = 4
239
+
240
+ category: "HarmCategory" = proto.Field(
241
+ proto.ENUM,
242
+ number=3,
243
+ enum="HarmCategory",
244
+ )
245
+ threshold: HarmBlockThreshold = proto.Field(
246
+ proto.ENUM,
247
+ number=4,
248
+ enum=HarmBlockThreshold,
249
+ )
250
+
251
+
252
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/types/text_service.py ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta3.types import citation, safety
23
+
24
# Declares the proto-plus module for this file: every message class defined
# below must be listed in the manifest to be registered under the
# google.ai.generativelanguage.v1beta3 proto package.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta3",
    manifest={
        "GenerateTextRequest",
        "GenerateTextResponse",
        "TextPrompt",
        "TextCompletion",
        "EmbedTextRequest",
        "EmbedTextResponse",
        "BatchEmbedTextRequest",
        "BatchEmbedTextResponse",
        "Embedding",
        "CountTextTokensRequest",
        "CountTextTokensResponse",
    },
)
40
+
41
+
42
class GenerateTextRequest(proto.Message):
    r"""Request to generate a text completion response from the
    model.


    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        model (str):
            Required. The name of the ``Model`` or ``TunedModel`` to use
            for generating the completion. Examples:
            models/text-bison-001 tunedModels/sentence-translator-u3b7m
        prompt (google.ai.generativelanguage_v1beta3.types.TextPrompt):
            Required. The free-form input text given to the model as a
            prompt. Given a prompt, the model generates a TextCompletion
            response it predicts as the completion of the input text.
        temperature (float):
            Optional. Controls the randomness of the output. The default
            varies by model (see the ``Model.temperature`` attribute
            returned by ``getModel``). Values range over [0.0, 1.0],
            inclusive: closer to 1.0 yields more varied and creative
            responses, closer to 0.0 yields more straightforward ones.

            This field is a member of `oneof`_ ``_temperature``.
        candidate_count (int):
            Optional. Number of generated responses to return. Must be
            in [1, 8], inclusive; defaults to 1 if unset.

            This field is a member of `oneof`_ ``_candidate_count``.
        max_output_tokens (int):
            Optional. The maximum number of tokens to include in a
            candidate. Defaults to the output_token_limit in the
            ``Model`` specification if unset.

            This field is a member of `oneof`_ ``_max_output_tokens``.
        top_p (float):
            Optional. The maximum cumulative probability of tokens to
            consider when sampling. The model uses combined Top-k and
            nucleus sampling: tokens are sorted by assigned probability
            so only the most likely are considered; Top-k limits the
            maximum number of tokens, nucleus sampling limits them by
            cumulative probability. The default varies by model (see
            ``Model.top_p`` returned by ``getModel``).

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            Optional. The maximum number of tokens to consider when
            sampling. The model uses combined Top-k and nucleus
            sampling; Top-k considers the ``top_k`` most probable
            tokens. Defaults to 40. The default varies by model (see
            ``Model.top_k`` returned by ``getModel``).

            This field is a member of `oneof`_ ``_top_k``.
        safety_settings (MutableSequence[google.ai.generativelanguage_v1beta3.types.SafetySetting]):
            A list of unique ``SafetySetting`` instances for blocking
            unsafe content, enforced on ``GenerateTextRequest.prompt``
            and ``GenerateTextResponse.candidates``. At most one setting
            per ``SafetyCategory`` type. Prompts and responses failing
            these thresholds are blocked. This list overrides the
            default settings per ``SafetyCategory``; categories without
            an entry fall back to the API default for that category.
        stop_sequences (MutableSequence[str]):
            The set of character sequences (up to 5) that stop output
            generation. Generation halts at the first appearance of a
            stop sequence; the sequence itself is not included in the
            response.
    """

    # Field numbers are part of the wire format — keep them stable.
    model: str = proto.Field(proto.STRING, number=1)
    prompt: "TextPrompt" = proto.Field(proto.MESSAGE, number=2, message="TextPrompt")
    temperature: float = proto.Field(proto.FLOAT, number=3, optional=True)
    candidate_count: int = proto.Field(proto.INT32, number=4, optional=True)
    max_output_tokens: int = proto.Field(proto.INT32, number=5, optional=True)
    top_p: float = proto.Field(proto.FLOAT, number=6, optional=True)
    top_k: int = proto.Field(proto.INT32, number=7, optional=True)
    safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
        proto.MESSAGE, number=8, message=safety.SafetySetting
    )
    stop_sequences: MutableSequence[str] = proto.RepeatedField(proto.STRING, number=9)
182
+
183
+
184
class GenerateTextResponse(proto.Message):
    r"""The response from the model, including candidate completions.

    Attributes:
        candidates (MutableSequence[google.ai.generativelanguage_v1beta3.types.TextCompletion]):
            Candidate responses from the model.
        filters (MutableSequence[google.ai.generativelanguage_v1beta3.types.ContentFilter]):
            A set of content filtering metadata for the prompt and
            response text. Indicates which ``SafetyCategory``\ (s)
            blocked a candidate from this response, the lowest
            ``HarmProbability`` that triggered a block, and the
            HarmThreshold setting for that category — i.e. the smallest
            change to the ``SafetySettings`` that would unblock at least
            1 response. Blocking is configured by the
            ``SafetySettings`` in the request (or the API defaults).
        safety_feedback (MutableSequence[google.ai.generativelanguage_v1beta3.types.SafetyFeedback]):
            Returns any safety feedback related to content filtering.
    """

    # Field numbers are part of the wire format — keep them stable
    # (note number=2 is intentionally absent).
    candidates: MutableSequence["TextCompletion"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="TextCompletion"
    )
    filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField(
        proto.MESSAGE, number=3, message=safety.ContentFilter
    )
    safety_feedback: MutableSequence[safety.SafetyFeedback] = proto.RepeatedField(
        proto.MESSAGE, number=4, message=safety.SafetyFeedback
    )
223
+
224
+
225
class TextPrompt(proto.Message):
    r"""Text given to the model as a prompt.

    The Model will use this TextPrompt to Generate a text
    completion.

    Attributes:
        text (str):
            Required. The prompt text.
    """

    text: str = proto.Field(proto.STRING, number=1)
240
+
241
+
242
class TextCompletion(proto.Message):
    r"""Output text returned from a model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        output (str):
            Output only. The generated text returned from the model.
        safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta3.types.SafetyRating]):
            Ratings for the safety of a response. At most one rating
            per category.
        citation_metadata (google.ai.generativelanguage_v1beta3.types.CitationMetadata):
            Output only. Citation information for model-generated
            ``output`` in this ``TextCompletion``. May be populated
            with attribution information for any text included in the
            ``output``.

            This field is a member of `oneof`_ ``_citation_metadata``.
    """

    # Field numbers are part of the wire format — keep them stable.
    output: str = proto.Field(proto.STRING, number=1)
    safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
        proto.MESSAGE, number=2, message=safety.SafetyRating
    )
    citation_metadata: citation.CitationMetadata = proto.Field(
        proto.MESSAGE, number=3, optional=True, message=citation.CitationMetadata
    )
280
+
281
+
282
class EmbedTextRequest(proto.Message):
    r"""Request to get a text embedding from the model.

    Attributes:
        model (str):
            Required. The model name to use with the
            format model=models/{model}.
        text (str):
            Required. The free-form input text that the
            model will turn into an embedding.
    """

    model: str = proto.Field(proto.STRING, number=1)
    text: str = proto.Field(proto.STRING, number=2)
302
+
303
+
304
class EmbedTextResponse(proto.Message):
    r"""The response to an EmbedTextRequest.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        embedding (google.ai.generativelanguage_v1beta3.types.Embedding):
            Output only. The embedding generated from the input text.

            This field is a member of `oneof`_ ``_embedding``.
    """

    embedding: "Embedding" = proto.Field(
        proto.MESSAGE, number=1, optional=True, message="Embedding"
    )
323
+
324
+
325
class BatchEmbedTextRequest(proto.Message):
    r"""Batch request to get a text embedding from the model.

    Attributes:
        model (str):
            Required. The name of the ``Model`` to use for generating
            the embedding. Examples: models/embedding-gecko-001
        texts (MutableSequence[str]):
            Required. The free-form input texts that the model will
            turn into an embedding. The current limit is 100 texts,
            over which an error will be thrown.
    """

    model: str = proto.Field(proto.STRING, number=1)
    texts: MutableSequence[str] = proto.RepeatedField(proto.STRING, number=2)
347
+
348
+
349
class BatchEmbedTextResponse(proto.Message):
    r"""The response to a BatchEmbedTextRequest.

    Attributes:
        embeddings (MutableSequence[google.ai.generativelanguage_v1beta3.types.Embedding]):
            Output only. The embeddings generated from
            the input text.
    """

    embeddings: MutableSequence["Embedding"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Embedding"
    )
363
+
364
+
365
class Embedding(proto.Message):
    r"""A list of floats representing the embedding.

    Attributes:
        value (MutableSequence[float]):
            The embedding values.
    """

    value: MutableSequence[float] = proto.RepeatedField(proto.FLOAT, number=1)
377
+
378
+
379
class CountTextTokensRequest(proto.Message):
    r"""Counts the number of tokens in the ``prompt`` sent to a model.

    Models may tokenize text differently, so each model may return a
    different ``token_count``.

    Attributes:
        model (str):
            Required. The model's resource name. This serves as an ID
            for the Model to use. It should match a model name
            returned by the ``ListModels`` method.

            Format: ``models/{model}``
        prompt (google.ai.generativelanguage_v1beta3.types.TextPrompt):
            Required. The free-form input text given to
            the model as a prompt.
    """

    model: str = proto.Field(proto.STRING, number=1)
    prompt: "TextPrompt" = proto.Field(proto.MESSAGE, number=2, message="TextPrompt")
408
+
409
+
410
class CountTextTokensResponse(proto.Message):
    r"""A response from ``CountTextTokens``.

    It returns the model's ``token_count`` for the ``prompt``.

    Attributes:
        token_count (int):
            The number of tokens that the ``model`` tokenizes the
            ``prompt`` into. Always non-negative.
    """

    token_count: int = proto.Field(proto.INT32, number=1)
427
+
428
+
429
# Export every message registered in this module's proto manifest,
# in deterministic (sorted) order.
__all__ = tuple(sorted(__protobuf__.manifest))