File size: 3,057 Bytes
50389ad
 
 
 
 
 
 
 
 
1609810
50389ad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7cac8a2
50389ad
 
 
 
 
 
 
7cac8a2
50389ad
 
 
 
 
 
 
7cac8a2
50389ad
 
 
 
 
 
 
7cac8a2
 
 
 
50389ad
7cac8a2
50389ad
 
c3f1c6c
50389ad
7cac8a2
50389ad
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import requests
import gradio as gr
from requests.exceptions import HTTPError


# Base URL for all OpenAI REST API calls made by this script.
BaseApi = "https://api.openai.com/v1"

# Rate limits for each model
# Requests-per-minute baselines used to tell trial keys from paid keys:
# a key whose "x-ratelimit-limit-requests" response header is below the
# value for its model is classified as a trial key (see check_key_status).
RateLimitPerModel = {
    "gpt-3.5-turbo": 2000, # New pay-as-go keys start with 2k for 48 hours
    "gpt-4": 200,
    "gpt-4-32k": 1000
}

def get_available_models(api_key):
    """Return the subset of known chat models this API key can access.

    Queries the OpenAI model listing and filters it down to the chat
    models this tool knows how to probe. Any failure (network error,
    bad key, malformed payload) yields an empty list so the caller can
    show a single user-facing error message.

    Args:
        api_key: OpenAI API key ("sk-...").

    Returns:
        List of model id strings; empty on any failure.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    # NOTE(review): /engines is a deprecated endpoint; /v1/models is the
    # current replacement with the same {"data": [{"id": ...}]} shape.
    # Kept as-is to preserve existing behavior — confirm before switching.
    url = f"{BaseApi}/engines"

    wanted = {"gpt-4", "gpt-4-32k", "gpt-3.5-turbo"}
    try:
        # Timeout keeps the Gradio UI responsive if the API stalls;
        # the original request could block forever.
        response = requests.get(url, headers=headers, timeout=15)
        response_data = response.json()
        return [
            model["id"]
            for model in response_data.get("data", [])
            if model.get("id") in wanted
        ]
    except requests.RequestException:
        # Connection/timeout/HTTP-level failure: report "no models".
        return []
    except (ValueError, TypeError, KeyError):
        # .json() decode failure or unexpected payload structure.
        return []

def check_key_status(api_key, model):
    """Probe one model with a deliberately invalid request and classify the key.

    Sends a chat-completion request with max_tokens=-1: a working key is
    rejected with a 400 invalid_request_error (or a 429 rate limit), which
    proves access without spending tokens. Quota/billing problems surface
    as distinct "error.type" values in the response body.

    Args:
        api_key: OpenAI API key to test.
        model: Model id to probe (e.g. "gpt-3.5-turbo").

    Returns:
        A human-readable, single-line status string for this model.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    url = f"{BaseApi}/chat/completions"

    data = {
        "model": model,
        "messages": [{"role": "user", "content": ""}],
        # Invalid on purpose: triggers a cheap 400 instead of a billed completion.
        "max_tokens": -1
    }

    try:
        # Timeout keeps the UI responsive if the API stalls (original had none).
        response = requests.post(url, headers=headers, json=data, timeout=15)
        response_data = response.json()

        if response.status_code == 401:
            return "Error: Invalid API key"

        error_type = response_data.get("error", {}).get("type", "")
        if error_type == "insufficient_quota" and model in {"gpt-4", "gpt-4-32k"}:
            return f"Error: The key for {model} is out of quota, but has gpt4"
        elif error_type in ["insufficient_quota", "billing_not_active", "access_terminated"]:
            return f"Error: The key for {model} is either out of quota, inactive, or access is terminated."

        ratelimited = response.status_code == 429
        if (response.status_code == 400 and error_type == "invalid_request_error") or ratelimited:
            ratelimit = response.headers.get("x-ratelimit-limit-requests", "0")
            org = response.headers.get("openai-organization", "user-xyz")
            # BUG FIX: int(ratelimit) raised ValueError on a non-numeric
            # header and fell through to the generic handler; treat it as 0.
            try:
                limit_value = int(ratelimit)
            except ValueError:
                limit_value = 0
            # A per-model limit below the pay-as-you-go baseline implies a trial key.
            is_trial_key = "Trial Key" if limit_value < RateLimitPerModel.get(model, 0) else "Paid Key"
            return f"Key for {model} is working. Ratelimit: {ratelimit}, Organization: {org}, Key Type: {is_trial_key}"

        # BUG FIX: the original fell off the end and implicitly returned None
        # for any unclassified response (the report then showed "None").
        return f"Error: Unexpected response for {model} (HTTP {response.status_code}, error type: {error_type or 'none'})"

    except HTTPError as http_err:
        return f"HTTP error occurred: {http_err}"
    except Exception as e:
        return f"Error occurred: {e}"

def check_models(api_key):
    """Probe every chat model available to *api_key* and return one status line per model."""
    models = get_available_models(api_key)
    if not models:
        return "Error occurred: Unable to retrieve available models. Please check your API key."

    # One status line per accessible model, joined into a single report.
    status_lines = [check_key_status(api_key, name) for name in models]
    return "\n".join(status_lines)

# Define Gradio interface with a button to trigger model checking
def trigger_model_check(api_key):
    """Gradio callback: forward the submitted key to check_models() and return its report."""
    report = check_models(api_key)
    return report

# Gradio UI: one textbox for the API key in, one textbox for the report out.
# BUG FIX: gr.inputs.Textbox / gr.outputs.Textbox were removed in Gradio 3.x+
# (top-level gr.Textbox is the replacement), and allow_flagging expects the
# string "never", not the boolean False.
iface = gr.Interface(
    fn=trigger_model_check,
    inputs=gr.Textbox(placeholder="Enter your OpenAI API key", type="text"),
    outputs=gr.Textbox(),
    live=False,
    title="OKC",
    allow_flagging="never",  # Disable flagging to prevent unnecessary reporting
)

iface.launch()