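"""Gradio playground for OpenAI chat models: send a prompt, view the response,
and keep a running estimate of tokens used and cost for the session."""
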
import os

import gradio as gr
import openai
import tiktoken

CHAT_MODELS = [
    "gpt-4",
    "gpt-4-32k",
    "gpt-3.5-turbo",
]
DEFAULT_MODEL = "gpt-3.5-turbo"

# USD per token for each model (the per-1K-token rates divided by 1000); the
# same flat rate is applied to both prompt and completion tokens.
TOKEN_PRICE = {
    "gpt-3.5-turbo": 0.002 / 1000,
    "gpt-4": 0.06 / 1000,
    "gpt-4-32k": 0.12 / 1000,
}

# Session state kept at module level: running token count, the currently
# selected model and its tokenizer, and the API key. The key textbox is only
# shown when OPENAI_API_KEY is not already set in the environment.
_tokens: int = 0
_selected_model = DEFAULT_MODEL
_encoding = tiktoken.encoding_for_model(DEFAULT_MODEL)
_api_key = os.environ.get("OPENAI_API_KEY", "")
_show_openai_settings = _api_key == ""


def count_tokens(prompt_text: str) -> int:
    """Return the number of tokens in the prompt text."""
    return len(_encoding.encode(str(prompt_text)))


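# Note: count_tokens() only measures raw text. The chat endpoint also bills a
# few tokens of per-message formatting overhead (roughly 3-4 per message plus
# ~3 to prime the reply, per OpenAI's cookbook), so the running total below is
# a slight undercount. A fuller estimate could look like this sketch; the
# per-message constant is an assumption that varies by model snapshot.
def count_chat_tokens(messages: list[dict], tokens_per_message: int = 4) -> int:
    """Rough estimate of billed tokens for a list of chat messages."""
    total = 3  # the assistant reply is primed with a few extra tokens
    for message in messages:
        total += tokens_per_message
        for value in message.values():
            total += count_tokens(value)
    return total

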
def get_cost(tokens: int, model: str) -> float:
    """Return the estimated cost in USD for the given number of tokens."""
    return TOKEN_PRICE.get(model, 0) * tokens


def prompt(prompt_text: str, api_key: str, model: str) -> tuple[str, int, float]:
    """Send the prompt to the selected model; return the response text, the
    running token count, and the estimated cost."""
    global _tokens
    global _selected_model
    global _encoding

    # Swap the tokenizer when the user picks a different model.
    if model != _selected_model:
        _selected_model = model
        _encoding = tiktoken.encoding_for_model(_selected_model)

    _tokens += count_tokens(prompt_text)

    openai.api_key = api_key
    content = openai.ChatCompletion.create(
        model=_selected_model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt_text},
        ],
        temperature=0,
    )["choices"][0]["message"]["content"]

    _tokens += count_tokens(content)
    cost = get_cost(_tokens, model)

    return (
        content,
        _tokens,
        cost,
    )


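# A possible hardening for prompt(): failures from the openai SDK (invalid key,
# rate limits, missing model access) are raised as openai.error.OpenAIError
# subclasses, so wrapping the ChatCompletion.create call in a try/except and
# returning the error text would be one way to surface it in the UI instead of
# a console traceback.

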
with gr.Blocks() as demo:
    gr.Markdown("## Test your prompts and see how much it costs!")
    with gr.Row():
        with gr.Column(scale=1):
            txt_api_key = gr.Textbox(
                label="OpenAI API Key",
                value=_api_key,
                visible=_show_openai_settings,
                type="password",
                placeholder="sk-...",
            )
            dd_model = gr.Dropdown(
                choices=CHAT_MODELS,
                value=DEFAULT_MODEL,
                label="Model",
            )
            num_tokens = gr.Number(value=0, label="Tokens used")
            num_cost = gr.Number(value=0, label="Estimated cost in $", precision=3)
        with gr.Column(scale=3):
            txt_prompt = gr.Textbox(label="Prompt")
            txt_response = gr.TextArea(label="Model response")
    with gr.Row():
        gr.Button("Prompt").click(
            fn=prompt,
            inputs=[txt_prompt, txt_api_key, dd_model],
            outputs=[txt_response, num_tokens, num_cost],
        )
        gr.Button("Clear").click(
            fn=lambda _: ("", ""),
            inputs=txt_prompt,
            outputs=[txt_prompt, txt_response],
        )

demo.launch()