AkimfromParis commited on
Commit
197aba7
1 Parent(s): aebcea3

Upload 2 files: app.py and requirements.txt

Browse files
Files changed (2)
  1. app.py +249 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,249 @@
+ import os, sys
+ import streamlit as st
+
+ st.set_page_config(page_title="Pricing for scalar and binary embeddings", page_icon=":floppy_disk:", layout="wide", initial_sidebar_state="expanded", menu_items={'Report a bug': "mailto:moakim@protonmail.com"})
+
+ # SIDEBAR
+ st.sidebar.write('***"Exit float32, hello binary!"***')
+
+ cloud_price = st.sidebar.slider("Price of the instance ($ per GB/month): ", 0.0, 20.0, 3.8)
+ st.sidebar.write("*From 0 to 20 (default $3.80 per GB/month, estimated on x2gd instances on AWS)*")
+
+ st.sidebar.divider() ###
+
+ docs = st.sidebar.slider("Number of vector embeddings: ", 100000000, 1000000000, 250000000, step=10000000)  # Default 250M
+ st.sidebar.write("*From 100M to 1 billion (default 250M)*")
+
+ st.sidebar.write(" ")
+
+ st.sidebar.write("***Akim Mousterou*** (April 2024) *[LinkedIn](https://www.linkedin.com/in/akim-mousterou/), [HuggingFace](https://huggingface.co/Akimfromparis), and [GitHub](https://github.com/AkimParis)*")
+ kb2gb = 1024**3  # conversion factor: bytes -> GiB
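+ # Sizing assumption used throughout: float32 embeddings take 4 bytes per dimension,
+ # so raw memory = dimensions * 4 bytes * number_of_vectors, converted to GiB via kb2gb.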
+ # MAIN
+ st.title("***Pricing model for billion-scale vectors with scalar and binary embeddings***")
+
+ st.write("*The real democratization of AI can only be achieved by a powerful open-source ecosystem and low prices for memory/GPU usage.*")
+ st.write("*Compression-friendly embedding models stored as int8 or binary can cut memory, storage, and costs by up to 4x and 32x respectively. Binary quantization reaches roughly 32x compute efficiency while retaining ~96% of retrieval performance by thresholding embedding values to 0 or 1, scoring candidates with the Hamming distance (about two CPU cycles), and applying a rescoring step as in [Yamada et al. (2021)](https://arxiv.org/abs/2106.00882).*")
+ st.write('*Scalar and binary embeddings deliver strong retrieval efficiency with only minimal degradation of performance, making them well suited for NLP downstream tasks, semantic search, recommendation systems, and retrieval-augmented generation. The following financial projections are based on ["Cohere int8 & binary Embeddings - Scale Your Vector Database to Large Datasets"](https://cohere.com/blog/int8-binary-embeddings) by Nils Reimers of [Cohere](https://cohere.com/). The cost of the index and the metadata may not be factored into the calculation.*')
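+
+ # Illustrative sketch (hypothetical helper, not wired into the UI; numpy is not a
+ # declared dependency of this Space): how binary quantization with Hamming-distance
+ # search and a float32 rescoring step works in principle.
+ def _binary_retrieval_sketch(embeddings, query, top_k=10):
+     import numpy as np  # assumption: numpy is installed wherever this helper is called
+     packed = np.packbits((embeddings > 0).astype(np.uint8), axis=1)  # 1 bit per dimension (x32 smaller than float32)
+     q_bits = np.packbits((query > 0).astype(np.uint8))
+     hamming = np.unpackbits(packed ^ q_bits, axis=1).sum(axis=1)     # XOR + popcount = Hamming distance
+     candidates = np.argsort(hamming)[: top_k * 4]                    # oversample binary candidates
+     scores = embeddings[candidates] @ query                          # rescore candidates with float32 dot product
+     return candidates[np.argsort(scores)[::-1][:top_k]]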
+
+ col1, col2, col3, col4, col5, col6, col7, col8 = st.columns([1,1,1,1,1,1,1,1])
+ with col1:
+     st.write("***Embedding dimension***")
+     st.divider() ###
+     st.write("***384***")
+     st.write("***512***")
+     st.write("***768***")
+     st.write("***1024***")
+     st.write("***1536***")
+     st.write("***2048***")
+     st.write("***3072***")
+     st.write("***4096***")
+ with col2:
+     st.write("***Memory usage in GB***")
+     st.divider() ###
+     dim_1 = ((384 * 4) * docs) / kb2gb
+     st.write(str(round(dim_1, 2)) + " GB")
+
+     dim_2 = ((512 * 4) * docs) / kb2gb
+     st.write(str(round(dim_2, 2)) + " GB")
+
+     dim_3 = ((768 * 4) * docs) / kb2gb
+     st.write(str(round(dim_3, 2)) + " GB")
+
+     dim_4 = ((1024 * 4) * docs) / kb2gb
+     st.write(str(round(dim_4, 2)) + " GB")
+
+     dim_5 = ((1536 * 4) * docs) / kb2gb
+     st.write(str(round(dim_5, 2)) + " GB")
+
+     dim_6 = ((2048 * 4) * docs) / kb2gb
+     st.write(str(round(dim_6, 2)) + " GB")
+
+     dim_7 = ((3072 * 4) * docs) / kb2gb
+     st.write(str(round(dim_7, 2)) + " GB")
+
+     dim_8 = ((4096 * 4) * docs) / kb2gb
+     st.write(str(round(dim_8, 2)) + " GB")
+
+ with col3:
+     st.write("***$ Price by month***")
+     st.divider() ###
+     price_month_1 = dim_1 * cloud_price
+     st.write(str(round(price_month_1, 2)) + " $")
+
+     price_month_2 = dim_2 * cloud_price
+     st.write(str(round(price_month_2, 2)) + " $")
+
+     price_month_3 = dim_3 * cloud_price
+     st.write(str(round(price_month_3, 2)) + " $")
+
+     price_month_4 = dim_4 * cloud_price
+     st.write(str(round(price_month_4, 2)) + " $")
+
+     price_month_5 = dim_5 * cloud_price
+     st.write(str(round(price_month_5, 2)) + " $")
+
+     price_month_6 = dim_6 * cloud_price
+     st.write(str(round(price_month_6, 2)) + " $")
+
+     price_month_7 = dim_7 * cloud_price
+     st.write(str(round(price_month_7, 2)) + " $")
+
+     price_month_8 = dim_8 * cloud_price
+     st.write(str(round(price_month_8, 2)) + " $")
+
+ with col4:
+     st.write("***$ Price by year***")
+     st.divider() ###
+     price_year_1 = price_month_1 * 12
+     st.write(str(round(price_year_1, 2)) + " $")
+
+     price_year_2 = price_month_2 * 12
+     st.write(str(round(price_year_2, 2)) + " $")
+
+     price_year_3 = price_month_3 * 12
+     st.write(str(round(price_year_3, 2)) + " $")
+
+     price_year_4 = price_month_4 * 12
+     st.write(str(round(price_year_4, 2)) + " $")
+
+     price_year_5 = price_month_5 * 12
+     st.write(str(round(price_year_5, 2)) + " $")
+
+     price_year_6 = price_month_6 * 12
+     st.write(str(round(price_year_6, 2)) + " $")
+
+     price_year_7 = price_month_7 * 12
+     st.write(str(round(price_year_7, 2)) + " $")
+
+     price_year_8 = price_month_8 * 12
+     st.write(str(round(price_year_8, 2)) + " $")
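+
+ # int8 quantization stores 1 byte per dimension instead of the 4 bytes used by
+ # float32, hence the memory and monthly price below are divided by 4.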
+ with col5:
+     st.write("***Int8 memory*** (div. 4)")
+     st.divider() ###
+     int8_mem_1 = dim_1 / 4
+     st.write(str(round(int8_mem_1, 2)) + " GB")
+
+     int8_mem_2 = dim_2 / 4
+     st.write(str(round(int8_mem_2, 2)) + " GB")
+
+     int8_mem_3 = dim_3 / 4
+     st.write(str(round(int8_mem_3, 2)) + " GB")
+
+     int8_mem_4 = dim_4 / 4
+     st.write(str(round(int8_mem_4, 2)) + " GB")
+
+     int8_mem_5 = dim_5 / 4
+     st.write(str(round(int8_mem_5, 2)) + " GB")
+
+     int8_mem_6 = dim_6 / 4
+     st.write(str(round(int8_mem_6, 2)) + " GB")
+
+     int8_mem_7 = dim_7 / 4
+     st.write(str(round(int8_mem_7, 2)) + " GB")
+
+     int8_mem_8 = dim_8 / 4
+     st.write(str(round(int8_mem_8, 2)) + " GB")
+ with col6:
+     st.write("***$ Int8 price*** (div. 4)")
+     st.divider() ###
+     int8_price_1 = price_month_1 / 4
+     st.write(str(round(int8_price_1, 2)) + " $")
+
+     int8_price_2 = price_month_2 / 4
+     st.write(str(round(int8_price_2, 2)) + " $")
+
+     int8_price_3 = price_month_3 / 4
+     st.write(str(round(int8_price_3, 2)) + " $")
+
+     int8_price_4 = price_month_4 / 4
+     st.write(str(round(int8_price_4, 2)) + " $")
+
+     int8_price_5 = price_month_5 / 4
+     st.write(str(round(int8_price_5, 2)) + " $")
+
+     int8_price_6 = price_month_6 / 4
+     st.write(str(round(int8_price_6, 2)) + " $")
+
+     int8_price_7 = price_month_7 / 4
+     st.write(str(round(int8_price_7, 2)) + " $")
+
+     int8_price_8 = price_month_8 / 4
+     st.write(str(round(int8_price_8, 2)) + " $")
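+
+ # Binary quantization stores 1 bit per dimension instead of the 32 bits used by
+ # float32, hence the memory and monthly price below are divided by 32.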
+ with col7:
+     st.write("***Bin memory*** (div. 32)")
+     st.divider() ###
+     binary_mem_1 = dim_1 / 32
+     st.write(str(round(binary_mem_1, 2)) + " GB")
+
+     binary_mem_2 = dim_2 / 32
+     st.write(str(round(binary_mem_2, 2)) + " GB")
+
+     binary_mem_3 = dim_3 / 32
+     st.write(str(round(binary_mem_3, 2)) + " GB")
+
+     binary_mem_4 = dim_4 / 32
+     st.write(str(round(binary_mem_4, 2)) + " GB")
+
+     binary_mem_5 = dim_5 / 32
+     st.write(str(round(binary_mem_5, 2)) + " GB")
+
+     binary_mem_6 = dim_6 / 32
+     st.write(str(round(binary_mem_6, 2)) + " GB")
+
+     binary_mem_7 = dim_7 / 32
+     st.write(str(round(binary_mem_7, 2)) + " GB")
+
+     binary_mem_8 = dim_8 / 32
+     st.write(str(round(binary_mem_8, 2)) + " GB")
+
+ with col8:
+     st.write("***$ Bin price*** (div. 32)")
+     st.divider() ###
+     binary_price_1 = price_month_1 / 32
+     st.write(str(round(binary_price_1, 2)) + " $")
+
+     binary_price_2 = price_month_2 / 32
+     st.write(str(round(binary_price_2, 2)) + " $")
+
+     binary_price_3 = price_month_3 / 32
+     st.write(str(round(binary_price_3, 2)) + " $")
+
+     binary_price_4 = price_month_4 / 32
+     st.write(str(round(binary_price_4, 2)) + " $")
+
+     binary_price_5 = price_month_5 / 32
+     st.write(str(round(binary_price_5, 2)) + " $")
+
+     binary_price_6 = price_month_6 / 32
+     st.write(str(round(binary_price_6, 2)) + " $")
+
+     binary_price_7 = price_month_7 / 32
+     st.write(str(round(binary_price_7, 2)) + " $")
+
+     binary_price_8 = price_month_8 / 32
+     st.write(str(round(binary_price_8, 2)) + " $")
+ st.divider() ###
+ st.write("***- Open-source vector databases for scalar and binary quantization:***")
+ col9, col10 = st.columns([1,1])
+ with col9:
+     st.write("- [FAISS](https://github.com/facebookresearch/faiss) from :flag-us:")
+     st.write("- [VESPA AI](https://github.com/vespa-engine/vespa) from :flag-no:")
+     st.write("- [Pgvector](https://github.com/pgvector/pgvector) from :flag-us:")
+     st.write("- [Milvus](https://github.com/milvus-io/milvus) from :flag-cn:")
+     st.write("- [Usearch](https://github.com/unum-cloud/usearch) from :flag-us:")
+ with col10:
+     st.write("- [Qdrant](https://github.com/qdrant) from :flag-de:")
+     st.write("- [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) from :flag-cn:")
+     st.write("- [TencentVectorDB](https://github.com/Tencent/vectordatabase-sdk-python) from :flag-cn:")
+     st.write("- [BinaryVectorDB](https://github.com/cohere-ai/BinaryVectorDB) from :flag-ca:")
+     st.write("- [Weaviate](https://github.com/weaviate/weaviate) from :flag-nl:")
+ st.divider() ###
+ st.write("***- For further reading:***")
+
+ st.write("- [Billion-scale similarity search with GPUs](https://arxiv.org/abs/1702.08734)")
+ st.write("- [Efficient Passage Retrieval with Hashing for Open-domain Question Answering](https://arxiv.org/abs/2106.00882)")
+ st.write("- [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147)")
+ st.write("- [Incorporating Relevance Feedback for Information-Seeking Retrieval using Few-Shot Document Re-Ranking](https://arxiv.org/abs/2210.10695)")
+ st.write("- [Binary Embedding-based Retrieval at Tencent](https://arxiv.org/abs/2302.08714)")
requirements.txt ADDED
@@ -0,0 +1 @@
+ streamlit