jadehardouin committed on
Commit b3b6d77
1 Parent(s): 2b78da8

Update app.py

Files changed (1)
  1. app.py +148 -33
app.py CHANGED
@@ -13,20 +13,10 @@ text4 = "<h1 style='text-align: center; color: blue; font-size: 25px;'>Results"
 diy_value = 0
 saas_value = 0
 
-def calculate_tco(model_choice, vm_rental_choice, out_diy):
-    VM_cost_per_hour=3.6730 #at Azure for the basic pay as you go option
-    maxed_out = 0.8 #percentage of time the VM is maxed out
-    used = 0.5 #percentage of time the VM is used
+def calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy):
     tokens_per_request = 64
-
-    if model_choice == "Llama-2-7B":
-        tokens_per_second=694.38
-
-    elif model_choice == "Llama-2-13B":
-        tokens_per_second=1000
-
-    elif model_choice == "Llama-2-70B":
-        tokens_per_second=10000
+    maxed_out = maxed_out / 100
+    used = used / 100
 
     if vm_rental_choice == "pay as you go":
         reduction = 0
@@ -37,7 +27,7 @@ def calculate_tco(model_choice, vm_rental_choice, out_diy):
     elif vm_rental_choice == "3 years reserved":
         reduction = 0.62
 
-    homemade_cost_per_token = VM_cost_per_hour * (1 - reduction) / (tokens_per_second * 3600 * maxed_out * used)
+    homemade_cost_per_token = vm_cost_per_hour * (1 - reduction) / (tokens_per_second_inp * 3600 * maxed_out * used)
     homemade_cost_per_request = tokens_per_request * homemade_cost_per_token
     out_diy = homemade_cost_per_token
     return out_diy
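As a quick sanity check on the refactored formula (an editorial aside, not part of the commit), plugging in the A100 40GB defaults that appear later in this diff (694.38 tokens/s, $3.6730/h pay as you go, 80% maxed out, 50% used):

# Standalone sketch using only numbers from this commit.
vm_cost_per_hour = 3.6730        # $/h, Azure pay as you go (reduction = 0)
tokens_per_second = 694.38       # A100 40GB throughput at a 233-token input
maxed_out, used = 0.80, 0.50     # slider values after the /100 conversion
tokens_per_request = 64

cost_per_token = vm_cost_per_hour / (tokens_per_second * 3600 * maxed_out * used)
cost_per_request = tokens_per_request * cost_per_token
print(f"{cost_per_token:.2e} $/token, {cost_per_request:.2e} $/request")
# roughly 3.67e-06 $/token and 2.35e-04 $/request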
@@ -55,6 +45,16 @@ def calculate_tco_2(model_provider, context, out_saas):
     out_saas = saas_cost_per_token
     return out_saas
 
+def update_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy):
+    if maxed_out!=None and used!=None and tokens_per_second_inp!=None and vm_cost_per_hour_inp!=None and rental_plan_inp!=None:
+        return calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy)
+    return None
+
+def update_tco2(model_provider_inp, context_inp, out_saas):
+    if model_provider_inp!=None and context_inp!=None:
+        return calculate_tco_2(model_provider_inp, context_inp, out_saas)
+    return None
+
 def extract_cost_from_text(text):
     try:
         cost = float(text)
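The None guards matter because the gr.State holders introduced further down in this commit start out empty until a VM is selected. A minimal illustration (hypothetical calls, not in the app):

# Before a VM is picked, tokens_per_second_inp and vm_cost_per_hour_inp are None,
# so update_tco short-circuits instead of letting calculate_tco divide by None.
assert update_tco(80, 50, None, None, "pay as you go", 0) is None
assert update_tco(80, 50, 694.38, 3.6730, "pay as you go", 0) > 0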
@@ -83,13 +83,38 @@ def compare(cost_text1, cost_text2):
     return f"Error: {str(e)}"
 
 def update_plot(diy_value, saas_value):
-    data = pd.DataFrame(
+    # if maxed_out and used and tokens_per_second_inp and vm_cost_per_hour:
+    #     diy_value = calculate_tco(maxed_out.value, used.value, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy)
+    # else:
+    #     diy_value = 0
+    # if model_provider_inp2 and context_inp2:
+    #     saas_value = calculate_tco_2(model_provider_inp2, context_inp2, out_saas2)
+    # else:
+    #     saas_value = 0
+    data = pd.DataFrame(
         {
             "Solution": ["Open-source", "SaaS"],
             "Cost/token ($)": [diy_value, saas_value],
         }
     )
-    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
+    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
+
+def update_plot2(diy_value, saas_value):
+    # if maxed_out2!=None and used2!=None and tokens_per_second_inp2!=None and vm_cost_per_hour2!=None and vm_rental_choice!=None:
+    #     diy_value = calculate_tco(maxed_out2.value, used2.value, tokens_per_second_inp2, vm_cost_per_hour2, vm_rental_choice, out_diy2)
+    # else:
+    #     diy_value = 0
+    # if model_provider_inp2 and context_inp2:
+    #     saas_value = calculate_tco_2(model_provider_inp, context_inp, out_saas)
+    # else:
+    #     saas_value = 0
+    data = pd.DataFrame(
+        {
+            "Solution": ["Open-source", "SaaS"],
+            "Cost/token ($)": [diy_value, saas_value],
+        }
+    )
+    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
 
 def render_latex(latex_str):
     fig, ax = plt.subplots(figsize=(1, 1))
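For context, this is roughly how a handler that returns gr.BarPlot.update(...) gets attached to the plot: a self-contained sketch under the Gradio 3.x Blocks API this file already uses (illustrative component names; the real wiring is in the .change calls near the end of this diff):

import gradio as gr
import pandas as pd

def refresh(diy_value, saas_value):
    # Same shape as update_plot above: one bar per solution.
    data = pd.DataFrame({"Solution": ["Open-source", "SaaS"],
                         "Cost/token ($)": [diy_value, saas_value]})
    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")

with gr.Blocks() as sketch:
    diy = gr.Number(value=0, label="Open-source cost/token")
    saas = gr.Number(value=0, label="SaaS cost/token")
    plot = gr.BarPlot()
    # Any change to either number re-renders the comparison plot.
    diy.change(fn=refresh, inputs=[diy, saas], outputs=plot)
    saas.change(fn=refresh, inputs=[diy, saas], outputs=plot)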
@@ -103,6 +128,21 @@ def render_latex(latex_str):
     base64_str = base64.b64encode(buf.getvalue()).decode("utf-8")
     return f"<img src='data:image/png;base64,{base64_str}'>"
 
+def update_vm_choice(model_inp):
+    if model_inp in ("Llama-2-7B", "Llama-2-13B", "Llama-2-70B"):
+        new_options = ["A100 40GB"]
+        return gr.Dropdown.update(choices=new_options)
+
+def token_per_s_and_cost(vm_inp):
+    if vm_inp == "A100 40GB":
+        return [694.38, 3.6730, 694.38, 3.6730]
+
+def submit_diy(rental_plan):
+    calculate_tco(maxed_out.value, used.value, tokens_per_second_inp.value, vm_cost_per_hour_inp.value, rental_plan, out_diy)
+
+def submit_saas(context_inp):
+    calculate_tco_2(model_provider_inp, context_inp, out_saas)
+
 description=f"""
 <p>In this demo application, we help you compare different solutions for your AI incorporation plans, such as open-source or SaaS.</p>
 <p>First, you'll have to choose the two solutions you'd like to compare. Then, follow the instructions to select your configuration for each solution, and we will compute the cost/request accordingly. Finally, you can compare both solutions to evaluate which one best suits your needs, in the short or long term.</p>
@@ -111,8 +151,10 @@ description1="This interface provides you with the cost per token you get using
 description2="This interface provides you with the cost per token resulting from the AI model provider you choose and the number of tokens you select for context, which the model will take into account when processing input texts."
 description3="This interface compares the cost per request for the two solutions you selected and gives you insight into which solution is more valuable in the long term."
 
+test_list = []
 models = ["Llama-2-7B", "Llama-2-13B", "Llama-2-70B"]
 vm_rental_choice = ["pay as you go", "1 year reserved", "3 years reserved"]
+vm_choice = ["A100 40GB"]
 model_provider = ["OpenAI"]
 context = ["4K context", "16K context"]
 error_box = gr.Textbox(label="Error", visible=False)
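One way to read token_per_s_and_cost: the four returned values populate, in order, the hidden tokens_per_second_inp and vm_cost_per_hour_inp states and the two read-only Textboxes that display them. Below is a sketch of the same mapping as a lookup table; only the A100 40GB row comes from this commit, and the dict itself is an assumption about how further VM types could be added:

# Assumed benchmark table; the commit hard-codes only the A100 40GB entry.
VM_BENCHMARKS = {
    "A100 40GB": (694.38, 3.6730),  # (tokens/s at a 233-token input, Azure $/h)
}

def token_per_s_and_cost(vm_inp):
    tps, cost = VM_BENCHMARKS[vm_inp]
    # Returned twice: once for the gr.State holders, once for the display Textboxes.
    return [tps, cost, tps, cost]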
@@ -125,11 +167,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     out_saas = gr.State(value=0)
     out_diy2 = gr.State(value=0)
     out_saas2 = gr.State(value=0)
+    tokens_per_second_inp = gr.State()
+    vm_cost_per_hour_inp = gr.State()
+    tokens_per_second_inp2 = gr.State()
+    vm_cost_per_hour_inp2 = gr.State()
 
     with gr.Row():
         with gr.Column():
 
-            solution_selection = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution", value="SaaS")
+            solution_selection = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution")
 
             with gr.Row(visible=False) as title_column:
                 gr.Markdown(value=text1)
@@ -143,10 +189,33 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             )
 
             with gr.Row(visible=False) as input_diy_column:
-                model_inp = gr.Dropdown(models, label="Select an AI Model", value="Llama-2-7B", info="Open-source AI model used for your application")
-                rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", value="pay as you go", info="These options are from Azure's VM rental plans")
-                rental_plan_inp.change(fn=calculate_tco, inputs=[model_inp, rental_plan_inp, out_diy], outputs=out_diy)
-
+                with gr.Column():
+
+                    with gr.Row():
+                        model_inp = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
+
+                    with gr.Row() as vm:
+                        with gr.Column():
+                            with gr.Row():
+                                vm_inp = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
+                            with gr.Row(visible=False) as vm_info:
+                                token_per_seconds = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
+                                vm_cost_per_hour = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
+
+                    with gr.Row() as use_case:
+                        maxed_out = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of time your machine is maxed out")
+                        used = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
+                        rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans. By default, the costs taken into account are from the pay as you go plan.")
+
+                model_inp.change(fn=update_vm_choice, inputs=model_inp, outputs=vm_inp)
+                vm_inp.change(fn=token_per_s_and_cost, inputs=vm_inp, outputs=[tokens_per_second_inp, vm_cost_per_hour_inp, token_per_seconds, vm_cost_per_hour])
+
+                maxed_out.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
+                used.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
+                model_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
+                vm_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
+                rental_plan_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
+
             with gr.Row(visible=False) as text_saas_column:
                 gr.Markdown(description2)
 
@@ -156,10 +225,19 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             )
 
             with gr.Row(visible=False) as input_saas_column:
-                model_provider_inp = gr.Dropdown(model_provider, label="Model Provider", vallue="OpenAI", info="Choose an AI model provider you want to work with")
-                context_inp = gr.Dropdown(context, label="Context", value="4K context", info="Number of tokens the model considers when processing text")
-                context_inp.change(fn=calculate_tco_2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
-
+                model_provider_inp = gr.Dropdown(model_provider, label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
+                context_inp = gr.Dropdown(context, label="Context", info="Number of tokens the model considers when processing text")
+
+                model_provider_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
+                context_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
+
+            def show_vm_info():
+                return {
+                    vm_info: gr.update(visible=True),
+                }
+
+            vm_inp.change(show_vm_info, outputs=vm_info)
+
             def submit(solution_selection):
                 if solution_selection == "Open-source":
                     return {
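The rows above all follow one Gradio pattern: a .change handler recomputes a hidden gr.State value, and other handlers later read that state as an input. A minimal, self-contained sketch of the pattern (illustrative names and values, not from the commit):

import gradio as gr

def recompute(choice, _previous):
    # Stand-in for update_tco / update_tco2: derive a number from the current selection.
    return {"small": 1.0, "large": 2.0}.get(choice)

def show(value):
    return f"current value: {value}"

with gr.Blocks() as sketch:
    state = gr.State(value=0)
    choice = gr.Dropdown(["small", "large"], label="choice")
    result = gr.Textbox(label="result")
    show_btn = gr.Button("Show")
    # The dropdown keeps the hidden state up to date...
    choice.change(fn=recompute, inputs=[choice, state], outputs=state)
    # ...and any later event can read it as an input.
    show_btn.click(fn=show, inputs=state, outputs=result)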
@@ -185,14 +263,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             solution_selection.change(
                 submit,
                 solution_selection,
-                [out_saas, text_diy_column, formula_diy, formula_saas, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
+                [model_inp, vm, vm_info, vm_inp, maxed_out, used, out_saas, text_diy_column, formula_diy, formula_saas, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
             )
 
             # gr.Divider(style="vertical", thickness=2, color="blue")
 
         with gr.Column():
 
-            solution_selection2 = gr.Dropdown(["SaaS", "Open-source"], value="Open-source", label="Select a Solution")
+            solution_selection2 = gr.Dropdown(["SaaS", "Open-source"], label="Select a solution")
 
             with gr.Row(visible=False) as title_column2:
                 gr.Markdown(value=text2)
@@ -206,9 +284,31 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             )
 
             with gr.Row(visible=False) as input_diy_column2:
-                model_inp2 = gr.Dropdown(models, label="Select an AI Model", value="Llama-2-7B", info="Open-source AI model used for your application")
-                rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", value="pay as you go", info="These options are from Azure's VM rental plans")
-                rental_plan_inp2.change(fn=calculate_tco, inputs=[model_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
+                with gr.Column():
+                    with gr.Row():
+                        model_inp2 = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
+
+                    with gr.Row() as vm2:
+                        with gr.Column():
+                            with gr.Row():
+                                vm_inp2 = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
+                            with gr.Row(visible=False) as vm_info2:
+                                tokens_per_second2 = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
+                                vm_cost_per_hour2 = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
+
+                    with gr.Row() as use_case2:
+                        maxed_out2 = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of time your machine is maxed out")
+                        used2 = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
+                        rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans")
+
+                model_inp2.change(fn=update_vm_choice, inputs=model_inp2, outputs=vm_inp2)
+                vm_inp2.change(fn=token_per_s_and_cost, inputs=vm_inp2, outputs=[tokens_per_second_inp2, vm_cost_per_hour_inp2, tokens_per_second2, vm_cost_per_hour2])
+
+                maxed_out2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
+                used2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
+                model_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
+                vm_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
+                rental_plan_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
 
             with gr.Row(visible=False) as text_saas_column2:
                 gr.Markdown(description2)
@@ -220,8 +320,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
             with gr.Row(visible=False) as input_saas_column2:
                 model_provider_inp2 = gr.Dropdown(['OpenAI'], label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
-                context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context", value="4K context", info="Number of tokens the model considers when processing text")
-                context_inp2.change(fn=calculate_tco_2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
+                context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context", info="Number of tokens the model considers when processing text")
+
+                model_provider_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
+                context_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
+
+            def show_vm_info():
+                return {
+                    vm_info2: gr.update(visible=True),
+                }
+
+            vm_inp2.change(show_vm_info, outputs=vm_info2)
 
             def submit2(solution_selection2):
                 if solution_selection2 == "Open-source":
@@ -248,7 +357,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             solution_selection2.change(
                 submit2,
                 solution_selection2,
-                [out_diy2, out_saas2, formula_diy2, formula_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
+                [vm2, vm_info2, vm_inp2, maxed_out2, used2, out_diy2, out_saas2, formula_diy2, formula_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
             )
 
     with gr.Row():
@@ -261,11 +370,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         plot = gr.BarPlot(vertical=False, title="Comparison", y_title="Cost/token ($)", width=500, interactive=True)
 
     context_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+    maxed_out2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+    used2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+    vm_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
     model_provider_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
     rental_plan_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
     model_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
 
     context_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+    vm_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+    maxed_out.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+    used.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
     model_provider_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
     rental_plan_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
     model_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)