jadehardouin committed
Commit 0ad933c · 1 Parent(s): 52a72f5

Update models.py

Files changed (1)
  1. models.py +57 -3
models.py CHANGED
@@ -59,10 +59,8 @@ class OpenAIModel(BaseTCOModel):
         def on_model_change(model):
 
             if model == "GPT-4":
-                print("GPT4")
                 return gr.Dropdown.update(choices=["8K", "32K"])
             else:
-                print("GPT3.5")
                 return gr.Dropdown.update(choices=["4K", "16K"])
 
         self.model = gr.Dropdown(["GPT-4", "GPT-3.5 Turbo"], value="GPT-4",
@@ -89,7 +87,7 @@ class OpenAIModel(BaseTCOModel):
         else:
             cost_per_1k_input_tokens = 0.003
 
-        cost_per_output_token = cost_per_1k_input_tokens * 1000 / input_length
+        cost_per_output_token = cost_per_1k_input_tokens * input_length / 1000
 
         return cost_per_output_token
 
@@ -156,7 +154,63 @@ class OpenSourceLlama2Model(BaseTCOModel):
     def compute_cost_per_token(self, vm_cost_per_hour, tokens_per_second, maxed_out, used):
         cost_per_token = vm_cost_per_hour / (tokens_per_second * 3600 * maxed_out * used)
         return cost_per_token
+
+class CohereModel(BaseTCOModel):
+
+    def __init__(self):
+        self.set_name("(SaaS) Cohere")
+        self.set_formula(r"""$CT = \frac{CT\_1M \times L}{1000000}$ <br>
+                         with: <br>
+                         CT = Cost per output Token <br>
+                         CT_1M = Cost per one million Tokens (from Cohere's pricing web page) <br>
+                         L = Input Length
+                         """)
+        super().__init__()
+
+    def render(self):
+        def on_use_case_change(use_case):
+            if use_case == "Summarize":
+                return gr.Dropdown.update(choices=["Default"])
+            else:
+                return gr.Dropdown.update(choices=["Default", "Custom"])
+
+        self.use_case = gr.Dropdown(["Embed", "Generate", "Classify", "Summarize"], value="Generate",
+                                    label="Use case",
+                                    interactive=True, visible=False)
+        self.model = gr.Dropdown(["Default", "Custom"], value="Default",
+                                 label="Model",
+                                 interactive=True, visible=False)
+        self.use_case.change(on_use_case_change, inputs=self.use_case, outputs=self.model)
+        self.input_length = gr.Number(350, label="Average number of input tokens",
+                                      interactive=True, visible=False)
+
+    def compute_cost_per_token(self, use_case, model, input_length):
+        """Cost per output token = cost per 1M tokens * input length / 1,000,000."""
+        use_case = use_case[0]
+        model = model[0]
+
+        if use_case == "Embed":
+            if model == "Default":
+                cost_per_1M_input_tokens = 0.4
+            else:
+                cost_per_1M_input_tokens = 0.8
+        elif use_case == "Generate":
+            if model == "Default":
+                cost_per_1M_input_tokens = 15
+            else:
+                cost_per_1M_input_tokens = 30
+        elif use_case == "Classify":
+            if model == "Default":
+                cost_per_1M_input_tokens = 200
+            else:
+                cost_per_1M_input_tokens = 200
+        else:
+            cost_per_1M_input_tokens = 15
+
+        cost_per_output_token = cost_per_1M_input_tokens * input_length / 1000000
+
+        return cost_per_output_token
+
 class ModelPage:
 
     def __init__(self, Models: BaseTCOModel):
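
Review note: a quick sanity check of the three cost formulas this commit touches — the corrected OpenAI per-output-token cost (price per 1K tokens scaled by input length), the unchanged open-source Llama 2 VM cost per token, and the new Cohere per-million-token cost. This is a standalone sketch, not part of models.py: the helper function names and the example VM price are assumptions for illustration; the $0.003/1K, $15/1M, and 350-token figures come from the diff above.

```python
# Standalone sanity check of the cost formulas in this commit (illustrative values only).

def openai_cost_per_output_token(cost_per_1k_input_tokens: float, input_length: int) -> float:
    # Corrected formula from this commit: scale the per-1K price by the input length.
    return cost_per_1k_input_tokens * input_length / 1000

def llama2_cost_per_token(vm_cost_per_hour: float, tokens_per_second: float,
                          maxed_out: float, used: float) -> float:
    # Unchanged OpenSourceLlama2Model formula: hourly VM cost spread over tokens served.
    return vm_cost_per_hour / (tokens_per_second * 3600 * maxed_out * used)

def cohere_cost_per_output_token(cost_per_1m_input_tokens: float, input_length: int) -> float:
    # New CohereModel formula: scale the per-1M price by the input length.
    return cost_per_1m_input_tokens * input_length / 1_000_000

if __name__ == "__main__":
    # GPT-3.5 Turbo: $0.003 per 1K tokens, 350 input tokens -> $0.00105 per output token.
    print(openai_cost_per_output_token(0.003, 350))
    # Example VM price of $4.42/hour, 30 tokens/s, 80% maxed out, 50% used -> ~$0.000102 per token.
    print(llama2_cost_per_token(4.42, 30, 0.8, 0.5))
    # Cohere Generate (Default): $15 per 1M tokens, 350 input tokens -> $0.00525 per output token.
    print(cohere_cost_per_output_token(15, 350))
```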