sasaki-saku committed on
Commit 06f38cd
1 Parent(s): c13a10b

Update api_usage.py

Files changed (1)
  1. api_usage.py +36 -17
api_usage.py CHANGED
@@ -222,11 +222,8 @@ def check_key_azure_availability(key):
         #print(e)
         return False, None
 
-def get_azure_deploy(key):
-    try:
-        endpoint = key.split(';')[0]
-        api_key = key.split(';')[1]
-
+def get_azure_deploy(endpoint, api_key):
+    try:
         if endpoint.startswith('http'):
             url = f'{endpoint}/openai/deployments?api-version=2023-03-15-preview'
         else:
@@ -245,11 +242,8 @@ def get_azure_deploy(key):
     except:
         return None
 
-def check_gpt4turbo(key, deploy_id):
-    try:
-        endpoint = key.split(';')[0]
-        api_key = key.split(';')[1]
-
+def check_gpt4turbo(endpoint, api_key, deploy_id):
+    try:
         if endpoint.startswith('http'):
             url = f'{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2023-03-15-preview'
         else:
@@ -278,14 +272,13 @@ def check_gpt4turbo(key, deploy_id):
     except Exception as e:
         return False
 
-def get_azure_status(key, deployments_list):
+def get_azure_status(endpoint, api_key, deployments_list):
     input_text = """write an erotica 18+ about naked girls and loli"""
     data = {
         "messages": [{"role": "user", "content": input_text}],
         "max_tokens": 1
     }
-    endpoint = key.split(';')[0]
-    api_key = key.split(';')[1]
+
     azure_deploy = deployments_list
 
     has_32k = False
@@ -293,8 +286,6 @@ def get_azure_status(key, deployments_list):
     has_gpt4turbo = False
     has_turbo = False
     list_model = {}
-
-    # not yet check for multiple deployments of the same model
     for model, deploy in azure_deploy.items():
         if model == 'gpt-4-32k':
             list_model[model] = deploy
@@ -306,8 +297,8 @@ def get_azure_status(key, deployments_list):
             list_model[model] = deploy
             has_turbo = True
 
-    if not list_model:
-        return "No GPT model to check.", has_32k, has_gpt4turbo, has_gpt4, has_turbo
+    if not list_model: #has_32k == False and has_gpt4 == False and has_turbo == False:
+        return "No GPT model to check.", has_turbo, has_gpt4, has_gpt4turbo, has_32k
     else:
         if has_gpt4:
             has_gpt4turbo = check_gpt4turbo(key, list_model['gpt-4'])
@@ -337,6 +328,34 @@ def get_azure_status(key, deployments_list):
             pozz_res.append(f'{model}: {e}')
     return pozz_res, has_32k, has_gpt4turbo, has_gpt4, has_turbo
 
+def check_key_mistral_availability(key):
+    try:
+        url = "https://api.mistral.ai/v1/models"
+        headers = {'Authorization': f'Bearer {key}'}
+
+        rq = requests.get(url, headers=headers)
+        if rq.status_code == 401:
+            return False
+        return True
+    except:
+        return "Error while making request."
+
+def check_mistral_quota(key):
+    try:
+        url = 'https://api.mistral.ai/v1/chat/completions'
+        headers = {'Authorization': f'Bearer {key}'}
+        data = {
+            'model': 'mistral-tiny',
+            'messages': [{ "role": "user", "content": "" }],
+            'max_tokens': -1
+        }
+        rq = requests.post(url, headers=headers, json=data)
+        if rq.status_code == 401 or rq.status_code == 429:
+            return False
+        return True
+    except:
+        return "Error while making request."
+
 if __name__ == "__main__":
     key = os.getenv("OPENAI_API_KEY")
     key_ant = os.getenv("ANTHROPIC_API_KEY")
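
A caller-side sketch (not part of the commit) of how the refactored helpers might be driven after this change. The module name api_usage, the environment variable names, and the placeholder key values are assumptions; the point is that the old combined "endpoint;api_key" string is now split by the caller rather than inside each helper.

import os

from api_usage import (
    get_azure_deploy,
    get_azure_status,
    check_key_mistral_availability,
    check_mistral_quota,
)

# Hypothetical Azure key still stored in the old "endpoint;api_key" form;
# after this commit the caller splits it once and passes both parts along.
azure_key = os.getenv("AZURE_OPENAI_KEY", "https://example.openai.azure.com;dummy-api-key")
endpoint, api_key = azure_key.split(';', 1)

deployments = get_azure_deploy(endpoint, api_key)   # returns None on failure
if deployments:
    # Returns a status message/list plus the 32k / GPT-4 Turbo / GPT-4 / turbo flags.
    result = get_azure_status(endpoint, api_key, deployments)
    print(result)

# New Mistral helpers added by this commit: a 401 from /v1/models fails the
# key check, and a 401 or 429 from /v1/chat/completions fails the quota check.
# On a network error both return the string "Error while making request.",
# which is truthy, so compare against True explicitly.
mistral_key = os.getenv("MISTRAL_API_KEY", "dummy-mistral-key")
if check_key_mistral_availability(mistral_key) is True:
    print("Mistral key valid; has quota:", check_mistral_quota(mistral_key) is True)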