m-ric HF staff committed on
Commit
8aeda98
1 Parent(s): b9296d6

Add table layout with multiple models

Browse files
Files changed (2) hide show
  1. app.py +59 -27
  2. requirements.txt +2 -1
app.py CHANGED
@@ -3,6 +3,7 @@ import pandas as pd
3
  import requests
4
  import json
5
  import tiktoken
 
6
 
7
  PRICES_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
8
 
@@ -20,9 +21,16 @@ except Exception as e:
20
 
21
  TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
22
  TOKEN_COSTS.columns = ['model'] + list(TOKEN_COSTS.columns[1:])
23
- TOKEN_COSTS = TOKEN_COSTS.loc[~TOKEN_COSTS["model"].str.contains("sample_spec")]
24
- TOKEN_COSTS = TOKEN_COSTS.loc[~TOKEN_COSTS["input_cost_per_token"].isnull()]
 
 
 
 
 
 
25
 
 
26
 
27
  def count_string_tokens(string: str, model: str) -> int:
28
  try:
@@ -50,27 +58,54 @@ def update_model_list(function_calling, litellm_provider, max_price, supports_vi
50
  list_models = filtered_models['model'].tolist()
51
  return gr.Dropdown(choices=list_models, value=list_models[0] if list_models else "No model found for this combination!")
52
 
53
def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, model):
    """Return (prompt, completion, total) cost strings for one model run.

    Token counts are either derived from the raw texts (``Text Input`` mode)
    or taken from the numeric fields, which are entered in thousands of tokens.
    """
    if input_type == "Text Input":
        n_in = count_string_tokens(prompt_text, model)
        n_out = count_string_tokens(completion_text, model)
    else:  # Token Count Input
        # Sliders/fields hold kilo-tokens; scale to absolute token counts.
        n_in, n_out = int(prompt_tokens * 1000), int(completion_tokens * 1000)

    cost_in, cost_out = calculate_total_cost(n_in, n_out, model)

    # Format every figure as a dollar amount with 6 decimal places.
    return (
        f"${cost_in:.6f}",
        f"${cost_out:.6f}",
        f"${cost_in + cost_out:.6f}",
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, secondary_hue=gr.themes.colors.orange)) as demo:
71
  gr.Markdown("""
72
  # Text-to-$$$: Calculate the price of your LLM runs
73
- Based on data from [litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
74
  """)
75
 
76
  with gr.Row():
@@ -91,19 +126,16 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, seconda
91
  with gr.Column():
92
  function_calling = gr.Checkbox(label="Supports Tool Calling", value=False)
93
  supports_vision = gr.Checkbox(label="Supports Vision", value=False)
94
- litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=["Any"] + TOKEN_COSTS['litellm_provider'].unique().tolist(), value="Any")
95
 
96
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
97
 
98
- model = gr.Dropdown(label="Model", choices=TOKEN_COSTS['model'].tolist(), value=TOKEN_COSTS['model'].tolist()[0])
99
 
100
- compute_button = gr.Button("Compute Costs", variant="secondary")
101
 
102
- with gr.Column(scale=1):
103
- with gr.Group():
104
- prompt_cost = gr.Textbox(label="Prompt Cost", interactive=False)
105
- completion_cost = gr.Textbox(label="Completion Cost", interactive=False)
106
- total_cost = gr.Textbox(label="Total Cost", interactive=False)
107
 
108
  def toggle_input_visibility(choice):
109
  return (
@@ -134,7 +166,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, seconda
134
  completion_tokens_input,
135
  model
136
  ],
137
- outputs=[prompt_cost, completion_cost, total_cost]
138
  )
139
 
140
  if __name__ == "__main__":
 
3
  import requests
4
  import json
5
  import tiktoken
6
+ import matplotlib.pyplot as plt
7
 
8
  PRICES_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
9
 
 
21
 
22
TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
TOKEN_COSTS.columns = ['model'] + list(TOKEN_COSTS.columns[1:])

# Drop the "sample_spec" template row and every model without strictly
# positive input/output prices.  A NaN price fails the > 0 comparison in
# pandas, so the null rows are excluded by the same mask.
keep = (
    ~TOKEN_COSTS["model"].str.contains("sample_spec")
    & TOKEN_COSTS["input_cost_per_token"].gt(0)
    & TOKEN_COSTS["output_cost_per_token"].gt(0)
)
TOKEN_COSTS = TOKEN_COSTS.loc[keep]

# Models with no vision flag in the source JSON are treated as non-vision.
TOKEN_COSTS["supports_vision"] = TOKEN_COSTS["supports_vision"].fillna(False)

cmap = plt.get_cmap('RdYlGn_r')  # Red-Yellow-Green colormap, reversed (cheap = green)
34
 
35
  def count_string_tokens(string: str, model: str) -> int:
36
  try:
 
58
  list_models = filtered_models['model'].tolist()
59
  return gr.Dropdown(choices=list_models, value=list_models[0] if list_models else "No model found for this combination!")
60
 
61
def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, models):
    """Compute prompt/completion/total costs for every selected model.

    Parameters
    ----------
    input_type : str
        Either ``"Text Input"`` (count tokens from the raw texts) or the
        token-count mode, where ``prompt_tokens``/``completion_tokens`` are
        given in thousands of tokens.
    models : list[str]
        Models selected in the multiselect dropdown.

    Returns
    -------
    pandas.io.formats.style.Styler
        One row per model with dollar-formatted cost columns; when more than
        one model is compared, ``Total Cost`` cells are shaded on a
        red-yellow-green scale (cheapest = green).
    """
    rows = []
    for model in models:
        if input_type == "Text Input":
            n_prompt = count_string_tokens(prompt_text, model)
            n_completion = count_string_tokens(completion_text, model)
        else:  # Token Count Input — fields hold kilo-tokens
            # BUGFIX: use fresh locals instead of overwriting the function
            # parameters; the previous code multiplied prompt_tokens by 1000
            # again on every loop iteration, inflating costs for every model
            # after the first.
            n_prompt = int(prompt_tokens * 1000)
            n_completion = int(completion_tokens * 1000)

        prompt_cost, completion_cost = calculate_total_cost(n_prompt, n_completion, model)

        # Keep costs numeric; formatting is applied once via Styler.format
        # below (the previous round-trip through "$..." strings relied on
        # str.replace('$', ...), which pandas 1.x interprets as a regex
        # end-of-string anchor and therefore never strips).
        rows.append({
            "Model": model,
            "Prompt Cost": prompt_cost,
            "Completion Cost": completion_cost,
            "Total Cost": prompt_cost + completion_cost,
        })

    df = pd.DataFrame(rows)

    def apply_color(val, lo, hi):
        # Shade a Total Cost cell: normalize into [lo, hi], look the value up
        # in the module-level reversed RdYlGn colormap, render at 50% opacity.
        # (Renamed lo/hi — the old signature shadowed the min/max builtins.)
        norm = plt.Normalize(lo, hi)
        r, g, b = (int(c * 255) for c in cmap(norm(val))[:3])
        return f'background-color: rgba({r}, {g}, {b}, 0.5)'

    # Apply the dollar formatting and font properties unconditionally so a
    # single-model result is presented the same way as a comparison table.
    styled = (
        df.style
        .format({"Prompt Cost": "${:.6f}", "Completion Cost": "${:.6f}", "Total Cost": "${:.6f}"})
        .set_properties(**{
            'font-family': 'Arial, sans-serif',
            'white-space': 'pre-wrap',
        })
        .set_properties(**{'font-weight': 'bold'}, subset=['Total Cost'])
    )

    if len(df) > 1:
        lo, hi = df["Total Cost"].min(), df["Total Cost"].max()
        # Styler.applymap is kept (rather than Styler.map) for compatibility
        # with pandas < 2.1.
        styled = styled.applymap(lambda v: apply_color(v, lo, hi), subset=["Total Cost"])

    return styled
104
 
105
  with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, secondary_hue=gr.themes.colors.orange)) as demo:
106
  gr.Markdown("""
107
  # Text-to-$$$: Calculate the price of your LLM runs
108
+ Based on prices data from [BerriAI's litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
109
  """)
110
 
111
  with gr.Row():
 
126
  with gr.Column():
127
  function_calling = gr.Checkbox(label="Supports Tool Calling", value=False)
128
  supports_vision = gr.Checkbox(label="Supports Vision", value=False)
129
+ litellm_provider = gr.Dropdown(label="Inference Provider", choices=["Any"] + TOKEN_COSTS['litellm_provider'].unique().tolist(), value="Any")
130
 
131
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
132
 
133
+ model = gr.Dropdown(label="Models (can select multiple)", choices=TOKEN_COSTS['model'].tolist(), value=[TOKEN_COSTS['model'].tolist()[0]], multiselect=True)
134
 
135
+ compute_button = gr.Button("Compute Costs ⚙️", variant="secondary")
136
 
137
+ with gr.Column(scale=2):
138
+ results_table = gr.Dataframe(label="Cost Results")
 
 
 
139
 
140
  def toggle_input_visibility(choice):
141
  return (
 
166
  completion_tokens_input,
167
  model
168
  ],
169
+ outputs=[results_table]
170
  )
171
 
172
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  pandas
2
- tiktoken
 
 
1
  pandas
2
+ tiktoken
3
+ matplotlib