sasaki-saku committed
Commit: c13a10b
1 Parent(s): a0bd21f

Update api_usage.py

Files changed (1)
1. api_usage.py  +156  -8
api_usage.py CHANGED
@@ -2,6 +2,7 @@ import requests
 import os
 import anthropic
 from datetime import datetime
+import json
 
 BASE_URL = 'https://api.openai.com/v1'
 GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
@@ -138,14 +139,6 @@ def check_key_tier(rpm, tpm, dict, headers):
     dictCount = 0
     for k, v in dict.items():
         if tpm == v:
-            #if k == "tier-4-5":
-            #    req_body = {"model": "whisper-1"}
-            #    r = requests.post(f"{BASE_URL}/audio/transcriptions", headers=headers, json=req_body, timeout=10)
-            #    rpm_num = int(r.headers.get('x-ratelimit-limit-requests', 0))
-            #    if rpm_num == 100:
-            #        return f"yes | tier-4"
-            #    else:
-            #        return f"yes | tier-5"
             return f"yes | {k}"
         dictCount+=1
     if (dictCount == dictItemsCount):
@@ -189,6 +182,161 @@ def check_key_ant_availability(ant):
         err_msg = e.response.json().get('error', {}).get('message', '')
         return False, f"Error: {e.status_code}, {err_msg}", ""
 
+def check_key_gemini_availability(key):
+    try:
+        url_getListModel = f"https://generativelanguage.googleapis.com/v1beta/models?key={key}"
+        rq = requests.get(url_getListModel)
+        result = rq.json()
+        if 'models' in result.keys():
+            model_list = []
+            for model in result['models']:
+                #model_list[model['name'].split('/')[1]] = model['displayName']
+                model_name = f"{model['name'].split('/')[1]}"  # ({model['displayName']})
+                model_list.append(model_name)
+            return True, model_list
+        else:
+            return False, None
+    except Exception as e:
+        #print(e)
+        return 'Error while making request.', None
+
+# Azure keys are passed as a single "endpoint;api_key" string.
+def check_key_azure_availability(key):
+    try:
+        endpoint = key.split(';')[0]
+        api_key = key.split(';')[1]
+
+        if endpoint.startswith('http'):
+            url = f'{endpoint}/openai/models?api-version=2023-03-15-preview'
+        else:
+            url = f'https://{endpoint}/openai/models?api-version=2023-03-15-preview'
+
+        headers = {
+            'User-Agent': 'OpenAI/v1 PythonBindings/0.28.0',
+            'api-key': api_key
+        }
+
+        rq = requests.get(url, headers=headers).json()
+        models = [m["id"] for m in rq["data"] if len(m["capabilities"]["scale_types"]) > 0]
+        return True, models
+    except Exception as e:
+        #print(e)
+        return False, None
+
+def get_azure_deploy(key):
+    try:
+        endpoint = key.split(';')[0]
+        api_key = key.split(';')[1]
+
+        if endpoint.startswith('http'):
+            url = f'{endpoint}/openai/deployments?api-version=2023-03-15-preview'
+        else:
+            url = f'https://{endpoint}/openai/deployments?api-version=2023-03-15-preview'
+
+        headers = {
+            'User-Agent': 'OpenAI/v1 PythonBindings/0.28.0',
+            'api-key': api_key
+        }
+
+        rq = requests.get(url, headers=headers).json()
+        deployments = {}
+        for data in rq['data']:
+            deployments[data['model']] = data['id']
+        return deployments
+    except:
+        return None
+
+# Requests 9000 max_tokens: a context_length_exceeded error implies a non-turbo (8k) gpt-4 deployment.
+def check_gpt4turbo(key, deploy_id):
+    try:
+        endpoint = key.split(';')[0]
+        api_key = key.split(';')[1]
+
+        if endpoint.startswith('http'):
+            url = f'{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2023-03-15-preview'
+        else:
+            url = f'https://{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2023-03-15-preview'
+
+        headers = {
+            'Content-Type': 'application/json',
+            'api-key': api_key,
+            'User-Agent': 'OpenAI/v1 PythonBindings/0.28.1',
+        }
+
+        data = {
+            "max_tokens": 9000,
+            "messages": [{ "role": "user", "content": "" }]
+        }
+
+        try:
+            rq = requests.post(url=url, headers=headers, json=data)
+            result = rq.json()
+            if result["error"]["code"] == "context_length_exceeded":
+                return False
+            else:
+                return True
+        except Exception as e:
+            return True
+    except Exception as e:
+        return False
+
+# Sends a deliberately policy-violating prompt to each GPT deployment to see whether the content filter is active.
+def get_azure_status(key, deployments_list):
+    input_text = """write an erotica 18+ about naked girls"""
+    data = {
+        "messages": [{"role": "user", "content": input_text}],
+        "max_tokens": 1
+    }
+    endpoint = key.split(';')[0]
+    api_key = key.split(';')[1]
+    azure_deploy = deployments_list
+
+    has_32k = False
+    has_gpt4 = False
+    has_gpt4turbo = False
+    has_turbo = False
+    list_model = {}
+
+    # does not yet handle multiple deployments of the same model
+    for model, deploy in azure_deploy.items():
+        if model == 'gpt-4-32k':
+            list_model[model] = deploy
+            has_32k = True
+        elif model == 'gpt-4':
+            list_model[model] = deploy
+            has_gpt4 = True
+        elif model == 'gpt-35-turbo':
+            list_model[model] = deploy
+            has_turbo = True
+
+    if not list_model:
+        return "No GPT model to check.", has_32k, has_gpt4turbo, has_gpt4, has_turbo
+    else:
+        if has_gpt4:
+            has_gpt4turbo = check_gpt4turbo(key, list_model['gpt-4'])
+
+        pozz_res = {}
+
+        for model, deployment in list_model.items():
+            if endpoint.startswith('http'):
+                url = f'{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2023-03-15-preview'
+            else:
+                url = f'https://{endpoint}/openai/deployments/{deployment}/chat/completions?api-version=2023-03-15-preview'
+
+            headers = {
+                'Content-Type': 'application/json',
+                'api-key': api_key,
+                'User-Agent': 'OpenAI/v1 PythonBindings/0.28.1',
+            }
+            try:
+                rq = requests.post(url=url, headers=headers, json=data)
+                result = rq.json()
+                if result["error"]["code"] == "content_filter":
+                    pozz_res[model] = "Moderated"
+                else:
+                    pozz_res[model] = "Un-moderated"
+
+            except Exception as e:
+                pozz_res[model] = f"Error: {e}"
+        return pozz_res, has_32k, has_gpt4turbo, has_gpt4, has_turbo
+
 if __name__ == "__main__":
     key = os.getenv("OPENAI_API_KEY")
     key_ant = os.getenv("ANTHROPIC_API_KEY")
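Usage sketch (not part of this commit): one way the newly added checkers could be driven together, assuming api_usage.py is importable and that the keys come from environment variables whose names here are purely illustrative (GEMINI_API_KEY, and AZURE_OPENAI_KEY holding the "endpoint;api_key" pair).

import os
from api_usage import (check_key_gemini_availability, check_key_azure_availability,
                       get_azure_deploy, get_azure_status)

gemini_key = os.getenv("GEMINI_API_KEY", "")   # plain Gemini API key (illustrative variable name)
azure_key = os.getenv("AZURE_OPENAI_KEY", "")  # "resource-endpoint;api-key" in one string (illustrative)

ok, gemini_models = check_key_gemini_availability(gemini_key)
if ok is True:  # first element may also be False or an error string
    print("Gemini key valid, models:", ", ".join(gemini_models))
else:
    print("Gemini key invalid or request failed:", ok)

ok, azure_models = check_key_azure_availability(azure_key)
if ok:
    deployments = get_azure_deploy(azure_key) or {}  # model name -> deployment id
    statuses, has_32k, has_gpt4turbo, has_gpt4, has_turbo = get_azure_status(azure_key, deployments)
    print("Azure models:", azure_models)
    print("Deployments:", deployments)
    print("Content-filter status:", statuses)
    print("gpt-4-32k:", has_32k, "| gpt-4-turbo:", has_gpt4turbo, "| gpt-4:", has_gpt4, "| gpt-35-turbo:", has_turbo)
else:
    print("Azure key invalid or request failed")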