VinciDev committed
Commit
136bee8
1 Parent(s): dc62ead

First Commit

Files changed (8)
  1. .env +2 -0
  2. .gitignore +5 -0
  3. app.py +70 -0
  4. data/llm.json +13 -0
  5. models/llm.py +8 -0
  6. requirements.txt +45 -0
  7. services/llm_service.py +28 -0
  8. services/table_service.py +33 -0
.env ADDED
@@ -0,0 +1,2 @@
+ BUILD_VERSION=20231101.1
+ PRICE_UPDATE=2023-11-01
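
For orientation, these two values are consumed at runtime in app.py through python-dotenv; a minimal sketch of that flow:

# Minimal sketch (not part of the commit) of how app.py reads the .env values.
import os
from dotenv import load_dotenv

load_dotenv()                               # loads .env into the process environment
build = os.environ.get("BUILD_VERSION")     # "20231101.1", shown in the About menu
price_update = os.getenv("PRICE_UPDATE")    # "2023-11-01", shown in the price expander
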
.gitignore ADDED
@@ -0,0 +1,5 @@
+ __pycache__
+ venv
+
+ .DS_Store
+ *.pyc
app.py ADDED
@@ -0,0 +1,70 @@
+ # Description: LLM Cost Calculator Web App
+ # Author: Vincent CIBELLI (https://www.linkedin.com/in/vincentcibelli/)
+ # Date: 2023-11-01
+ # Last Modified: 2023-11-01
+ # License: MIT License
+
+ import os
+ import streamlit as st
+ import pandas as pd
+ from dotenv import load_dotenv
+ from babel.numbers import format_currency
+ from services.llm_service import LLMService
+ from services.table_service import TableService
+
+
+ load_dotenv()
+
+ llm_service = LLMService()
+
+ st.set_page_config(
+     page_title="LLM Cost Calculator",
+     page_icon="💸",
+     layout="wide",
+     menu_items={
+         'About': "## LLM Cost Calculator\n#### Build {}\nCreated with ❤️ by [Vincent CIBELLI](https://www.linkedin.com/in/vincentcibelli/)".format(os.environ.get("BUILD_VERSION"))
+     }
+ )
+
+ st.header("💸 LLM Cost Calculator 💸", divider="green")
+
+ with st.expander("Last price update ($ currency): {}.".format(os.getenv("PRICE_UPDATE")), expanded=False):
+     st.table(TableService.format_source_table(llm_service.get_list()))
+
+ st.subheader("Your project information:")
+ with st.expander("#### Average use of your LLM:", expanded=True):
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         request_number = st.number_input("Average request number per hour", min_value=0, value=0, step=100)
+
+     with col2:
+         col2_1, col2_2 = st.columns(2)
+
+         with col2_1:
+             request_size = st.number_input("Average input token size", min_value=0, value=0, step=1000)
+
+         with col2_2:
+             response_size = st.number_input("Average output token size", min_value=0, value=0, step=1000)
+
+     with col3:
+         col3_1, col3_2 = st.columns(2)
+
+         with col3_1:
+             run_hours = st.number_input("Running hours per day", min_value=0, max_value=24, value=0, step=1)
+
+         with col3_2:
+             run_day = st.number_input("Running days per week", min_value=0, max_value=7, value=0, step=1)
+
+ with st.expander("#### LLM models to compare:", expanded=True):
+     llm_chat_list = st.multiselect("Select Chat LLM models", options=[llm.name for llm in llm_service.get_list_by_category("chat")])
+     llm_text_list = st.multiselect("Select Text LLM models", options=[llm.name for llm in llm_service.get_list_by_category("text")])
+
+ result = []
+ for llm in llm_chat_list + llm_text_list:
+     result.append(llm_service.get_result(llm_service.get_by_name(llm), request_number, request_size, response_size, run_hours, run_day))
+
+ if len(result) > 0:
+     st.subheader("Your Cost Result:")
+
+     st.table(TableService.format_result_table(result))
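
Usage note (inferred from the imports rather than stated in the commit): app.py is a standard Streamlit entry point, so it would typically be launched from the repository root with streamlit run app.py, so that the relative path data/llm.json resolves; streamlit itself is not pinned in requirements.txt below and would presumably need to be installed separately.
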
data/llm.json ADDED
@@ -0,0 +1,13 @@
+ [
+     { "id": "gpt-35-turbo-4k", "name": "GPT 3.5 Turbo 4K", "category": "chat", "type": "saas", "input_price": 0.0015, "output_price": 0.002 },
+     { "id": "gpt-35-turbo-16k", "name": "GPT 3.5 Turbo 16K", "category": "chat", "type": "saas", "input_price": 0.003, "output_price": 0.004 },
+     { "id": "gpt4-8k", "name": "GPT4 8K", "category": "chat", "type": "saas", "input_price": 0.03, "output_price": 0.06 },
+     { "id": "gpt4-32k", "name": "GPT4 32K", "category": "chat", "type": "saas", "input_price": 0.06, "output_price": 0.12 },
+     { "id": "claude-instant", "name": "Claude Instant", "category": "text", "type": "saas", "input_price": 0.00163, "output_price": 0.00551 },
+     { "id": "claude-2", "name": "Claude 2", "category": "text", "type": "saas", "input_price": 0.01102, "output_price": 0.03268 },
+     { "id": "palm2-chat-bison", "name": "PaLM 2 for Chat (Bison)", "category": "chat", "type": "saas", "input_price": 0.002, "output_price": 0.002 },
+     { "id": "palm2-text-bison", "name": "PaLM 2 for Text (Bison)", "category": "text", "type": "saas", "input_price": 0.002, "output_price": 0.002 },
+     { "id": "gpt-35-turbo-4k-finetuned", "name": "GPT 3.5 Turbo - Fine-Tuned", "category": "chat", "type": "saas", "input_price": 0.012, "output_price": 0.016 },
+     { "id": "titan-text–lite", "name": "AWS Titan Text – Lite", "category": "text", "type": "saas", "input_price": 0.0003, "output_price": 0.0004 },
+     { "id": "titan-text–express", "name": "AWS Titan Text – Express", "category": "text", "type": "saas", "input_price": 0.0013, "output_price": 0.0017 }
+ ]
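
The input_price and output_price fields are dollars per 1,000 tokens (matching the column headers produced by table_service.py below). A small worked example with assumed token counts, using the same per-request formula as LLMService.get_result:

# Hypothetical worked example (not part of the commit): one GPT 3.5 Turbo 4K call.
input_price, output_price = 0.0015, 0.002   # from data/llm.json, $ per 1,000 tokens
request_size, response_size = 1000, 500     # assumed average token counts
price_per_request = input_price * (request_size / 1000) + output_price * (response_size / 1000)
print(price_per_request)                    # ≈ 0.0025 dollars per call (a quarter of a cent)
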
models/llm.py ADDED
@@ -0,0 +1,8 @@
+ class LLMModel:
+     def __init__(self, id, name, category, type, input_price, output_price):
+         self.id = id
+         self.name = name
+         self.category = category
+         self.type = type
+         self.input_price = input_price
+         self.output_price = output_price
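
Each record in data/llm.json maps directly onto this constructor; llm_service.py builds the model list that way via json.load's object_hook. A short sketch using one of the records above:

# Sketch: a data/llm.json record expanded into an LLMModel via keyword arguments,
# mirroring object_hook=lambda d: LLMModel(**d) in services/llm_service.py.
from models.llm import LLMModel

record = {"id": "gpt4-8k", "name": "GPT4 8K", "category": "chat",
          "type": "saas", "input_price": 0.03, "output_price": 0.06}
llm = LLMModel(**record)
print(llm.name, llm.input_price, llm.output_price)  # GPT4 8K 0.03 0.06
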
requirements.txt ADDED
@@ -0,0 +1,45 @@
+ altair==5.1.2
+ attrs==23.1.0
+ Babel==2.13.1
+ blinker==1.6.3
+ cachetools==5.3.2
+ certifi==2023.7.22
+ charset-normalizer==3.3.2
+ click==8.1.7
+ gitdb==4.0.11
+ GitPython==3.1.40
+ idna==3.4
+ importlib-metadata==6.8.0
+ Jinja2==3.1.2
+ jsonschema==4.19.2
+ jsonschema-specifications==2023.7.1
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ mdurl==0.1.2
+ numpy==1.26.1
+ packaging==23.2
+ pandas==2.1.2
+ Pillow==10.1.0
+ protobuf==4.24.4
+ pyarrow==14.0.0
+ pydeck==0.8.1b0
+ Pygments==2.16.1
+ python-dateutil==2.8.2
+ python-dotenv==1.0.0
+ pytz==2023.3.post1
+ referencing==0.30.2
+ requests==2.31.0
+ rich==13.6.0
+ rpds-py==0.10.6
+ six==1.16.0
+ smmap==5.0.1
+ tenacity==8.2.3
+ toml==0.10.2
+ toolz==0.12.0
+ tornado==6.3.3
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ tzlocal==5.2
+ urllib3==2.0.7
+ validators==0.22.0
+ zipp==3.17.0
services/llm_service.py ADDED
@@ -0,0 +1,28 @@
+ import json
+ from decimal import Decimal
+ from models.llm import LLMModel
+
+ class LLMService:
+     def __init__(self):
+         with open('data/llm.json', 'r') as f:
+             self.llm_list = json.load(f, object_hook=lambda d: LLMModel(**d))
+         self.llm_list = sorted(self.llm_list, key=lambda x: x.name)
+
+     def get_list(self) -> list[LLMModel]:
+         return self.llm_list
+
+     def get_list_by_category(self, category) -> list[LLMModel]:
+         return [llm for llm in self.llm_list if llm.category == category]
+
+     def get_by_name(self, name) -> LLMModel:
+         return next((llm for llm in self.llm_list if llm.name == name), None)
+
+     def get_result(self, llm: LLMModel, request_number: int, request_size: int, response_size: int, run_hours: int, run_day: int) -> dict:
+         return {
+             "model": llm.name,
+             "category": llm.category,
+             "price_per_request": Decimal(llm.input_price * (request_size / 1000) + llm.output_price * (response_size / 1000)),
+             "price_per_day": Decimal((llm.input_price * (request_size / 1000) + llm.output_price * (response_size / 1000)) * request_number * run_hours),
+             "price_per_week": Decimal((llm.input_price * (request_size / 1000) + llm.output_price * (response_size / 1000)) * request_number * run_hours * run_day),
+             "price_per_month": Decimal((llm.input_price * (request_size / 1000) + llm.output_price * (response_size / 1000)) * request_number * run_hours * run_day * 4.3)
+         }
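
A minimal usage sketch (hypothetical averages; it assumes the code runs from the repository root so that data/llm.json resolves), mirroring how app.py calls this service. The daily, weekly, and monthly figures scale the per-request price by requests per hour, running hours, running days, and the 4.3 weeks-per-month factor:

# Sketch only: not part of the commit, numbers are assumed averages.
from services.llm_service import LLMService

service = LLMService()
gpt35 = service.get_by_name("GPT 3.5 Turbo 4K")
result = service.get_result(gpt35,
                            request_number=100,   # requests per hour
                            request_size=1000,    # input tokens per request
                            response_size=500,    # output tokens per request
                            run_hours=8,          # running hours per day
                            run_day=5)            # running days per week
# price_per_request = 0.0015*1 + 0.002*0.5          ≈ $0.0025
# price_per_day     = 0.0025 * 100 * 8              ≈ $2.00
# price_per_week    = 2.00 * 5                      ≈ $10.00
# price_per_month   = 10.00 * 4.3 (weeks per month) ≈ $43.00
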
services/table_service.py ADDED
@@ -0,0 +1,33 @@
+ import pandas as pd
+ from babel.numbers import format_currency
+ from models.llm import LLMModel
+
+ class TableService:
+     def format_source_table(data: list[LLMModel]) -> pd.DataFrame:
+         src = []
+         for llm in data:
+             src.append({
+                 "name": llm.name,
+                 "category": llm.category,
+                 "type": llm.type,
+                 "input_price": llm.input_price,
+                 "output_price": llm.output_price
+             })
+
+         table_source = pd.DataFrame(src)
+         table_source = table_source.set_index("name")
+         table_source = table_source.rename(columns={"name": "Model", "category": "Category", "type": "Type", "input_price": "Input price in $ (1000 tokens)", "output_price": "Output price in $ (1000 tokens)"})
+
+         return table_source
+
+     def format_result_table(data: list[LLMModel]) -> pd.DataFrame:
+         table_result = pd.DataFrame(data)
+         table_result = table_result.set_index("model")
+         table_result = table_result.rename(columns={"model": "Model", "category": "Category", "price_per_request": "Price per request", "price_per_day": "Price per day", "price_per_week": "Price per week", "price_per_month": "Price per month"})
+         table_result["Price per request"] = table_result["Price per request"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
+         table_result["Price per day"] = table_result["Price per day"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
+         table_result["Price per week"] = table_result["Price per week"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
+         table_result["Price per month"] = table_result["Price per month"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
+         table_result = table_result.sort_values(["Category", "Price per request"])
+
+         return table_result
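
Finally, a sketch of how the two services fit together outside Streamlit (hypothetical inputs), producing the currency-formatted DataFrame that app.py renders with st.table:

# Build a small result set and format it, mirroring the wiring in app.py.
from services.llm_service import LLMService
from services.table_service import TableService

service = LLMService()
results = [service.get_result(service.get_by_name(name), 100, 1000, 500, 8, 5)
           for name in ("GPT 3.5 Turbo 4K", "Claude Instant")]
print(TableService.format_result_table(results))   # prices rendered as US-dollar strings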