dhuynh95 committed on
Commit
c194fcd
1 Parent(s): b3b6d77

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +17 -380
  2. models.py +160 -0
app.py CHANGED
@@ -1,388 +1,25 @@
1
  import gradio as gr
2
- import pandas as pd
3
- import matplotlib.pyplot as plt
4
- import io
5
- import base64
6
 
7
- text = "<h1 style='text-align: center; color: blue; font-size: 30px;'>TCO Comparison Calculator"
8
- text1 = "<h1 style='text-align: center; color: blue; font-size: 20px;'>First solution"
9
- text2 = "<h1 style='text-align: center; color: blue; font-size: 20px;'>Second solution"
10
- text3 = "<h1 style='text-align: center; color: blue; font-size: 25px;'>Comparison"
11
- text4 = "<h1 style='text-align: center; color: blue; font-size: 25px;'>Results"
12
-
13
- diy_value = 0
14
- saas_value = 0
15
-
16
- def calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy):
17
- tokens_per_request = 64
18
- maxed_out = maxed_out / 100
19
- used = used / 100
20
-
21
- if vm_rental_choice == "pay as you go":
22
- reduction = 0
23
-
24
- elif vm_rental_choice == "1 year reserved":
25
- reduction = 0.34
26
-
27
- elif vm_rental_choice == "3 years reserved":
28
- reduction = 0.62
29
-
30
- homemade_cost_per_token = vm_cost_per_hour * (1 - reduction) / (tokens_per_second_inp * 3600 * maxed_out * used)
31
- homemade_cost_per_request = tokens_per_request * homemade_cost_per_token
32
- out_diy = homemade_cost_per_token
33
- return out_diy
34
-
35
- def calculate_tco_2(model_provider, context, out_saas):
36
- tokens_per_request = 64
37
-
38
- if model_provider == "OpenAI":
39
- if context == "4K context":
40
- saas_cost_per_token = 0.00035
41
- saas_cost_per_request = saas_cost_per_token * tokens_per_request
42
- elif context == "16K context" :
43
- saas_cost_per_token = 0.0007
44
- saas_cost_per_request = saas_cost_per_token * tokens_per_request
45
- out_saas = saas_cost_per_token
46
- return out_saas
47
-
48
- def update_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy):
49
- if maxed_out!=None and used!=None and tokens_per_second_inp!=None and vm_cost_per_hour_inp!=None and rental_plan_inp!=None:
50
- return calculate_tco(maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy)
51
- return None
52
-
53
- def update_tco2(model_provider_inp, context_inp, out_saas):
54
- if model_provider_inp!=None and context_inp!=None:
55
- return calculate_tco_2(model_provider_inp, context_inp, out_saas)
56
- return None
57
-
58
- def extract_cost_from_text(text):
59
- try:
60
- cost = float(text)
61
- return cost
62
- except ValueError as e:
63
- raise ValueError("Invalid cost text format")
64
-
65
- def compare(cost_text1, cost_text2):
66
- try:
67
- # Extract the costs from the input strings
68
- cost1 = extract_cost_from_text(cost_text1)
69
- cost2 = extract_cost_from_text(cost_text2)
70
-
71
- r = cost1 / cost2
72
-
73
- if r < 1:
74
- comparison_result = f"First solution is cheaper, with a ratio of {r:.2f}."
75
- elif r > 1:
76
- comparison_result = f"Second solution is cheaper, with a ratio of {r:.2f}."
77
- else:
78
- comparison_result = "Both solutions will cost the same."
79
-
80
- return comparison_result
81
-
82
- except ValueError as e:
83
- return f"Error: {str(e)}"
84
-
85
- def update_plot(diy_value, saas_value):
86
- # if maxed_out and used and tokens_per_second_inp and vm_cost_per_hour:
87
- # diy_value = calculate_tco(maxed_out.value, used.value, tokens_per_second_inp, vm_cost_per_hour, vm_rental_choice, out_diy)
88
- # else :
89
- # diy_value = 0
90
- # if model_provider_inp2 and context_inp2:
91
- # saas_value = calculate_tco_2(model_provider_inp2, context_inp2, out_saas2)
92
- # else:
93
- # saas_value = 0
94
- data = pd.DataFrame(
95
- {
96
- "Solution": ["Open-source", "SaaS"],
97
- "Cost/token ($)": [diy_value, saas_value],
98
- }
99
- )
100
- return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
101
-
102
- def update_plot2(diy_value, saas_value):
103
- # if maxed_out2!=None and used2!=None and tokens_per_second_inp2!=None and vm_cost_per_hour2!=None and vm_rental_choice!=None:
104
- # diy_value = calculate_tco(maxed_out2.value, used2.value, tokens_per_second_inp2, vm_cost_per_hour2, vm_rental_choice, out_diy2)
105
- # else:
106
- # diy_value = 0
107
- # if model_provider_inp2 and context_inp2:
108
- # saas_value = calculate_tco_2(model_provider_inp, context_inp, out_saas)
109
- # else:
110
- # saas_value = 0
111
- data = pd.DataFrame(
112
- {
113
- "Solution": ["Open-source", "SaaS"],
114
- "Cost/token ($)": [diy_value, saas_value],
115
- }
116
- )
117
- return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
118
-
119
- def render_latex(latex_str):
120
- fig, ax = plt.subplots(figsize=(1, 1))
121
- ax.text(0.5, 0.5, f"${latex_str}$", size=12, usetex=True, va="center", ha="center")
122
- ax.axis("off")
123
-
124
- buf = io.BytesIO()
125
- plt.savefig(buf, format="png")
126
- plt.close(fig)
127
-
128
- base64_str = base64.b64encode(buf.getvalue()).decode("utf-8")
129
- return f"<img src='data:image/png;base64,{base64_str}'>"
130
-
131
- def update_vm_choice(model_inp):
132
- if model_inp == "Llama-2-7B" or "Llama-2-13B" or "Llama-2-70B":
133
- new_options = ["A100 40GB"]
134
- return gr.Dropdown.update(choices=new_options)
135
-
136
- def token_per_s_and_cost(vm_inp):
137
- if vm_inp == "A100 40GB":
138
- return [694.38, 3.6730, 694.38, 3.6730]
139
-
140
- def submit_diy(rental_plan):
141
- calculate_tco(maxed_out.value, used.value, tokens_per_second_inp.value, vm_cost_per_hour_inp.value, rental_plan, out_diy)
142
-
143
- def submit_saas(context_inp):
144
- calculate_tco_2(model_provider_inp, context_inp, out_saas)
145
-
146
- description=f"""
147
- <p>In this demo application, we help you compare different solutions for your AI incorporation plans, such as open-source or SaaS.</p>
148
- <p>First, you'll have to choose the two solutions you'd like to compare. Then, follow the instructions to select your configurations for each solution and we will compute the cost/request accordingly to them. Eventually, you can compare both solutions to evaluate which one best suits your needs, in the short or long term.</p>
149
- """
150
- description1="This interface provides you with the cost per token you get using the open-source solution, based on the model you choose to use and how long you're planning to use it. The selected prices for a Virtual Machine rental come from Azure's VM rental plans, which can offer reductions for long-term reserved usage."
151
- description2="This interface provides you with the cost per token resulting from the AI model provider you choose and the number of tokens you select for context, which the model will take into account when processing input texts."
152
- description3="This interface compares the cost per request for the two solutions you selected and gives you an insight of whether a solution is more valuable in the long term."
153
-
154
- test_list = []
155
- models = ["Llama-2-7B", "Llama-2-13B", "Llama-2-70B"]
156
- vm_rental_choice = ["pay as you go", "1 year reserved", "3 years reserved"]
157
- vm_choice = ["A100 40GB"]
158
- model_provider = ["OpenAI"]
159
- context = ["4K context", "16K context"]
160
- error_box = gr.Textbox(label="Error", visible=False)
161
-
162
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
163
- gr.Markdown(value=text)
164
- gr.Markdown(value=description)
165
-
166
- out_diy = gr.State(value=0)
167
- out_saas = gr.State(value=0)
168
- out_diy2 = gr.State(value=0)
169
- out_saas2 = gr.State(value=0)
170
- tokens_per_second_inp = gr.State()
171
- vm_cost_per_hour_inp = gr.State()
172
- tokens_per_second_inp2 = gr.State()
173
- vm_cost_per_hour_inp2 = gr.State()
174
-
175
  with gr.Row():
176
  with gr.Column():
177
-
178
- solution_selection = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution")
179
-
180
- with gr.Row(visible=False) as title_column:
181
- gr.Markdown(value=text1)
182
-
183
- with gr.Row(visible=False) as text_diy_column:
184
- gr.Markdown(description1)
185
-
186
- with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy:
187
- gr.Markdown(
188
- r"$ opensource\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
189
- )
190
-
191
- with gr.Row(visible=False) as input_diy_column:
192
- with gr.Column():
193
-
194
- with gr.Row():
195
- model_inp = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
196
-
197
- with gr.Row() as vm:
198
- with gr.Column():
199
- with gr.Row():
200
- vm_inp = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
201
- with gr.Row(visible=False) as vm_info:
202
- token_per_seconds = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
203
- vm_cost_per_hour = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
204
-
205
- with gr.Row() as use_case:
206
- maxed_out = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of how much your machine is maxed out")
207
- used = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
208
- rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans. By default, the cost taken into account are from the pay as you go plan.")
209
-
210
- model_inp.change(fn=update_vm_choice, inputs=model_inp, outputs=vm_inp)
211
- vm_inp.change(fn=token_per_s_and_cost, inputs=vm_inp, outputs=[tokens_per_second_inp, vm_cost_per_hour_inp, token_per_seconds, vm_cost_per_hour])
212
-
213
- maxed_out.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
214
- used.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
215
- model_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
216
- vm_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
217
- rental_plan_inp.change(fn=update_tco, inputs=[maxed_out, used, tokens_per_second_inp, vm_cost_per_hour_inp, rental_plan_inp, out_diy], outputs=out_diy)
218
-
219
- with gr.Row(visible=False) as text_saas_column:
220
- gr.Markdown(description2)
221
-
222
- with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas:
223
- gr.Markdown(
224
- r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
225
- )
226
-
227
- with gr.Row(visible=False) as input_saas_column:
228
- model_provider_inp = gr.Dropdown(model_provider, label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
229
- context_inp = gr.Dropdown(context, label="Context", info="Number of tokens the model considers when processing text")
230
-
231
- model_provider_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
232
- context_inp.change(fn=update_tco2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
233
-
234
- def show_vm_info():
235
- return {
236
- vm_info: gr.update(visible=True),
237
- }
238
 
239
- vm_inp.change(show_vm_info, outputs=vm_info)
240
-
241
- def submit(solution_selection):
242
- if solution_selection == "Open-source":
243
- return {
244
- formula_diy: gr.update(visible=True),
245
- title_column: gr.update(visible=True),
246
- text_diy_column: gr.update(visible=True),
247
- input_diy_column: gr.update(visible=True),
248
- formula_saas: gr.update(visible=False),
249
- text_saas_column: gr.update(visible=False),
250
- input_saas_column: gr.update(visible=False),
251
- }
252
- else:
253
- return {
254
- formula_saas: gr.update(visible=True),
255
- formula_diy: gr.update(visible=False),
256
- text_diy_column: gr.update(visible=False),
257
- input_diy_column: gr.update(visible=False),
258
- title_column: gr.update(visible=True),
259
- text_saas_column: gr.update(visible=True),
260
- input_saas_column: gr.update(visible=True),
261
- }
262
-
263
- solution_selection.change(
264
- submit,
265
- solution_selection,
266
- [model_inp, vm, vm_info, vm_inp, maxed_out, used, out_saas, text_diy_column, formula_diy, formula_saas, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
267
- )
268
-
269
- # gr.Divider(style="vertical", thickness=2, color="blue")
270
-
271
  with gr.Column():
 
 
 
272
 
273
- solution_selection2 = gr.Dropdown(["SaaS", "Open-source"], label="Select a solution")
274
-
275
- with gr.Row(visible=False) as title_column2:
276
- gr.Markdown(value=text2)
277
-
278
- with gr.Row(visible=False) as text_diy_column2:
279
- gr.Markdown(description1)
280
-
281
- with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy2:
282
- gr.Markdown(
283
- r"$ homemade\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
284
- )
285
-
286
- with gr.Row(visible=False) as input_diy_column2:
287
- with gr.Column():
288
- with gr.Row():
289
- model_inp2 = gr.Dropdown(models, label="Select an AI Model", info="Open-source AI model used for your application")
290
-
291
- with gr.Row() as vm2:
292
- with gr.Column():
293
- with gr.Row():
294
- vm_inp2 = gr.Dropdown(vm_choice, label="Select a Virtual Machine", info="Your options for this choice depend on the model you previously chose")
295
- with gr.Row(visible=False) as vm_info2:
296
- tokens_per_second2 = gr.Textbox(interactive=False, label="Token/s", info="To compute this value based on your model and VM choice, we chose an input length of 233 tokens.")
297
- vm_cost_per_hour2 = gr.Textbox(interactive=False, label="Cost/h ($) for the VM")
298
-
299
- with gr.Row() as use_case2:
300
- maxed_out2 = gr.Slider(minimum=0.01, value=80, label="% maxed out", info="percentage of how much your machine is maxed out")
301
- used2 = gr.Slider(minimum=0.01, value=50, label="% used", info="percentage of time your machine is used")
302
- rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", info="These options are from Azure's VM rental plans")
303
-
304
- model_inp2.change(fn=update_vm_choice, inputs=model_inp2, outputs=vm_inp2)
305
- vm_inp2.change(fn=token_per_s_and_cost, inputs=vm_inp2, outputs=[tokens_per_second_inp2, vm_cost_per_hour_inp2, tokens_per_second2, vm_cost_per_hour2])
306
-
307
- maxed_out2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
308
- used2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
309
- model_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
310
- vm_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
311
- rental_plan_inp2.change(fn=update_tco, inputs=[maxed_out2, used2, tokens_per_second_inp2, vm_cost_per_hour_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
312
-
313
- with gr.Row(visible=False) as text_saas_column2:
314
- gr.Markdown(description2)
315
-
316
- with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas2:
317
- gr.Markdown(
318
- r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
319
- )
320
-
321
- with gr.Row(visible=False) as input_saas_column2:
322
- model_provider_inp2 = gr.Dropdown(['OpenAI'], label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
323
- context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context", info="Number of tokens the model considers when processing text")
324
-
325
- model_provider_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
326
- context_inp2.change(fn=update_tco2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
327
-
328
- def show_vm_info():
329
- return {
330
- vm_info2: gr.update(visible=True),
331
- }
332
 
333
- vm_inp2.change(show_vm_info, outputs=vm_info2)
334
-
335
- def submit2(solution_selection2):
336
- if solution_selection2 == "Open-source":
337
- return {
338
- formula_diy2: gr.update(visible=True),
339
- title_column2: gr.update(visible=True),
340
- text_diy_column2: gr.update(visible=True),
341
- input_diy_column2: gr.update(visible=True),
342
- formula_saas2: gr.update(visible=False),
343
- text_saas_column2: gr.update(visible=False),
344
- input_saas_column2: gr.update(visible=False),
345
- }
346
- else:
347
- return {
348
- formula_diy2: gr.update(visible=False),
349
- text_diy_column2: gr.update(visible=False),
350
- input_diy_column2: gr.update(visible=False),
351
- title_column2: gr.update(visible=True),
352
- formula_saas2: gr.update(visible=True),
353
- text_saas_column2: gr.update(visible=True),
354
- input_saas_column2: gr.update(visible=True),
355
- }
356
-
357
- solution_selection2.change(
358
- submit2,
359
- solution_selection2,
360
- [vm2, vm_info2, vm_inp2, maxed_out2, used2, out_diy2, out_saas2, formula_diy2, formula_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
361
- )
362
-
363
- with gr.Row():
364
- with gr.Column():
365
-
366
- with gr.Row():
367
- gr.Markdown(text3)
368
-
369
- with gr.Row():
370
- plot = gr.BarPlot(vertical=False, title="Comparison", y_title="Cost/token ($)", width=500, interactive=True)
371
-
372
- context_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
373
- maxed_out2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
374
- used2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
375
- vm_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
376
- model_provider_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
377
- rental_plan_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
378
- model_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
379
-
380
- context_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
381
- vm_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
382
- maxed_out.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
383
- used.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
384
- model_provider_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
385
- rental_plan_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
386
- model_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
387
-
388
- demo.launch()
 
1
import gradio as gr

import models

with gr.Blocks() as demo:
    # The page holds model *classes*, not instances: each ModelPage builds its
    # own instances so the two columns get independent gradio components.
    Models: list[type[models.BaseTCOModel]] = [models.OpenAIModel, models.OpenSourceModel]
    # Throwaway instances only to read the display names shown in the dropdowns.
    model_names = [Model().get_name() for Model in Models]

    with gr.Row():
        with gr.Column():
            page1 = models.ModelPage(Models)
            dropdown = gr.Dropdown(model_names, interactive=True)
            page1.render()

        with gr.Column():
            page2 = models.ModelPage(Models)
            dropdown2 = gr.Dropdown(model_names, interactive=True)
            page2.render()

    # Selecting a model name toggles visibility of that model's components only.
    dropdown.change(page1.make_model_visible, inputs=dropdown, outputs=page1.get_all_components())
    dropdown2.change(page2.make_model_visible, inputs=dropdown2, outputs=page2.get_all_components())

    compute_tco_btn = gr.Button("Compute TCO")
    tco_output = gr.Text("Output: ")
    # The selected model name is appended last so ModelPage.compute_cost_per_token
    # can slice the flat component-value list per model.
    compute_tco_btn.click(page1.compute_cost_per_token, inputs=page1.get_all_components() + [dropdown], outputs=tco_output)

demo.launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from abc import ABC, abstractclassmethod, abstractmethod
import uuid

import gradio as gr
from gradio.components import Component
5
+
6
class BaseTCOModel(ABC):
    """Abstract base for a TCO (total cost of ownership) model.

    Subclasses build their gradio inputs in render() and compute a cost in
    compute_cost_per_token(). Every gradio Component assigned as an attribute
    is auto-registered so pages can collect them for visibility toggling and
    event wiring.
    """
    # TO DO: Find way to specify which component should be used for computing cost

    def __setattr__(self, name, value):
        # Intercept attribute assignment: gradio components are recorded, in
        # assignment order, so get_components() mirrors render() order.
        if isinstance(value, Component):
            self._components.append(value)
        self.__dict__[name] = value

    def __init__(self):
        # Bypass our own __setattr__ so _components exists before any
        # interception happens.
        super(BaseTCOModel, self).__setattr__("_components", [])

    def get_components(self) -> list[Component]:
        """Return every registered gradio component, in creation order."""
        return self._components

    def get_name(self) -> str:
        """Human-readable name previously set via set_name()."""
        return self.name

    # BUG FIX: these were decorated with the deprecated abstractclassmethod,
    # although every subclass overrides them as plain instance methods.
    @abstractmethod
    def compute_cost_per_token(self):
        """Compute the cost for this model's current configuration."""

    @abstractmethod
    def render(self):
        """Instantiate this model's gradio components (hidden by default)."""

    def set_name(self, name: str):
        self.name = name
        # Unique id so several pages can host instances of the same class.
        self.id = name + str(uuid.uuid4())
+
34
class OpenAIModel(BaseTCOModel):
    """SaaS TCO model based on OpenAI's published per-1K-token pricing."""

    def __init__(self):
        # Initialize the component registry first, then name the model.
        super().__init__()
        self.set_name("(SaaS) OpenAI")

    def render(self):
        """Create the (initially hidden) gradio inputs for this model."""

        def on_model_change(model):
            # The available context windows depend on the model family.
            if model == "GPT-4":
                return gr.Dropdown.update(choices=["8K", "32K"])
            else:
                return gr.Dropdown.update(choices=["4K", "16K"])

        self.model = gr.Dropdown(["GPT-4", "GPT-3.5 Turbo"], value="GPT-4",
                                 label="OpenAI model",
                                 interactive=True, visible=False)
        self.context_length = gr.Dropdown(["8K", "32K"], value="8K", interactive=True,
                                          label="Context size",
                                          visible=False)
        self.model.change(on_model_change, inputs=self.model, outputs=self.context_length)
        self.input_length = gr.Number(350, label="Average number of input tokens",
                                      interactive=True, visible=False)

    def compute_cost_per_token(self, model, context_length, input_length):
        """Return the input-side cost of one average request, in dollars.

        Arguments arrive in component-registration order (see render()):
        model name, context size, average input token count.
        """
        # BUG FIX: the original did model = model[0] / context_length[0],
        # which takes the first *character* of the dropdown string ("G",
        # "8", ...), so no branch below could ever match. Also matched on
        # "GPT-3.5" although the dropdown value is "GPT-3.5 Turbo".
        if model == "GPT-4" and context_length == "8K":
            cost_per_1k_input_tokens = 0.03
        elif model == "GPT-4" and context_length == "32K":
            cost_per_1k_input_tokens = 0.06
        elif model == "GPT-3.5 Turbo" and context_length == "4K":
            cost_per_1k_input_tokens = 0.0015
        else:
            cost_per_1k_input_tokens = 0.003

        # Pricing is quoted per 1K tokens; scale by the average input length.
        return cost_per_1k_input_tokens * input_length / 1000
77
+
78
class OpenSourceModel(BaseTCOModel):
    """TCO model for self-hosting an open-source LLM on a rented GPU VM."""

    def __init__(self):
        # Initialize the component registry first, then name the model.
        super().__init__()
        self.set_name("(Open source) Deploy yourself")

    def render(self):
        """Create the (initially hidden) gradio inputs for this model."""
        vm_choices = ["1x Nvidia A100 (Azure NC24ads A100 v4)",
                      "2x Nvidia A100 (Azure NC48ads A100 v4)"]

        def on_model_change(model):
            # Only the 7B model fits on a single A100; larger models drop
            # the single-GPU option.
            if model == "Llama 2 7B":
                return gr.Dropdown.update(choices=vm_choices)
            else:
                not_supported_vm = ["1x Nvidia A100 (Azure NC24ads A100 v4)"]
                choices = [x for x in vm_choices if x not in not_supported_vm]
                return gr.Dropdown.update(choices=choices)

        def on_vm_change(model, vm):
            # TO DO: load info from CSV
            if model == "Llama 2 7B" and vm == "1x Nvidia A100 (Azure NC24ads A100 v4)":
                return gr.Number.update(value=900)
            elif model == "Llama 2 7B" and vm == "2x Nvidia A100 (Azure NC48ads A100 v4)":
                return gr.Number.update(value=1800)

        self.model = gr.Dropdown(["Llama 2 7B", "Llama 2 70B"], value="Llama 2 7B", visible=False)
        self.vm = gr.Dropdown(vm_choices,
                              visible=False,
                              label="Instance of VM with GPU"
                              )
        self.tokens_per_second = gr.Number(visible=False,
                                           label="Number of tokens per second for this specific model and VM instance",
                                           interactive=False
                                           )
        self.input_length = gr.Number(350, label="Average number of input tokens",
                                      interactive=True, visible=False)

        self.model.change(on_model_change, inputs=self.model, outputs=self.vm)
        self.vm.change(on_vm_change, inputs=[self.model, self.vm], outputs=self.tokens_per_second)
        self.maxed_out = gr.Slider(minimum=1, value=80, label="% maxed out",
                                   info="How much the GPU is fully used.",
                                   interactive=True,
                                   visible=False)

    def compute_cost_per_token(self, model, vm, tokens_per_second, input_length, maxed_out):
        """Placeholder cost computation.

        BUG FIX: the original signature took only (tokens_per_second,
        maxed_out), but ModelPage.compute_cost_per_token forwards one
        positional argument per registered component — model, vm,
        tokens_per_second, input_length, maxed_out — so every dispatch
        raised TypeError. The signature now matches registration order.
        """
        # TO DO: real formula (VM $/h vs tokens/s); currently echoes maxed_out.
        return maxed_out
123
+
124
class ModelPage:
    """One UI column hosting an instance of every available TCO model."""

    def __init__(self, Models: list[type[BaseTCOModel]]):
        # BUG FIX: the parameter was annotated as a single BaseTCOModel,
        # but callers pass a list of model *classes*.
        # Instantiate each model class once for this page.
        self.models: list[BaseTCOModel] = [Model() for Model in Models]

    def render(self):
        """Render every model's components (all hidden until selected)."""
        for model in self.models:
            model.render()

    def get_all_components(self) -> list[Component]:
        """Flat list of every component of every model, in model order."""
        output = []
        for model in self.models:
            output += model.get_components()
        return output

    def make_model_visible(self, name: str):
        """Return one gr.update visibility flag per component, matching
        the order of get_all_components(): only the named model's
        components become visible."""
        output = []
        for model in self.models:
            visible = model.get_name() == name
            output += [gr.update(visible=visible)] * len(model.get_components())
        return output

    def compute_cost_per_token(self, *args):
        """Dispatch the cost computation to the selected model.

        *args is every component value (same order as
        get_all_components()) followed by the selected model's name.
        Returns a formatted summary string, or None if no model matches.
        """
        begin = 0
        current_model = args[-1]
        for model in self.models:
            # Each model consumes exactly as many args as it has components.
            model_n_args = len(model.get_components())
            model_args = args[begin:begin + model_n_args]
            if current_model == model.get_name():
                model_tco = model.compute_cost_per_token(*model_args)
                return f"Model {current_model} has TCO {model_tco}"
            begin = begin + model_n_args