import streamlit as st

st.set_page_config(page_title="Pricing for scalar and binary embeddings", page_icon=":floppy_disk:", layout="wide", menu_items={'Report a bug': "mailto:moakim@protonmail.com"})

bytes2gb = 1024**3  # bytes -> GB conversion factor (1 GB = 1024**3 bytes)
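# Worked example: 250M vectors x 1,024 dims x 4 bytes (float32) ≈ 953.67 GB,
# i.e. ≈ 238.42 GB in int8 (÷4) and ≈ 29.80 GB in binary (÷32).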

# MAIN
st.title("***Pricing model with scalar and binary embeddings***") 
st.write("***Akim Mousterou*** (April 2024) *[LinkedIn](https://www.linkedin.com/in/akim-mousterou/), [HuggingFace](https://huggingface.co/Akimfromparis), and [GitHub](https://github.com/AkimParis)*")

st.write("*The real democratization of AI can only be achieved through a strong open-source ecosystem and low prices for memory and GPU usage. Thanks to quantization, we can say goodbye to float32 and hello to binary! Compression-friendly embedding models served in int8 and binary can cut memory, storage, and costs by up to 4x and 32x respectively. To reach this 32x efficiency while retaining ~96% of retrieval performance, binary quantization thresholds each embedding value to 0 or 1, ranks candidates by Hamming distance (an XOR followed by a popcount, only two CPU instructions), and then applies the rescoring step of [Yamada et al. (2021)](https://arxiv.org/abs/2106.00882). Scalar and binary embeddings deliver strong retrieval efficiency with only minimal degradation, making them a good fit for NLP downstream tasks, semantic search, recommendation systems, and retrieval-augmented generation.*")
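# --- Illustrative sketch (defined here for reference, not used by the app UI) --------------
# A minimal NumPy version of the pipeline described above, assuming roughly zero-centred
# embeddings: threshold each float32 value to a bit (the x32 memory saving), rank candidates
# by Hamming distance (an XOR followed by a popcount), then rescore the short list against
# the float32 query. Function names below are illustrative, not a library API.
import numpy as np

def to_binary(emb):
    """Quantize float32 embeddings of shape (n, d) to packed bits, 1 bit per dimension."""
    return np.packbits(emb > 0, axis=-1)          # uint8 array of shape (n, d // 8)

def hamming_top_k(query_bits, corpus_bits, k=100):
    """Rank corpus rows by Hamming distance to the query (popcount of the XOR)."""
    dist = np.unpackbits(query_bits ^ corpus_bits, axis=-1).sum(axis=-1)
    return np.argsort(dist)[:k]                   # candidate ids to rescore in float32
# --------------------------------------------------------------------------------------------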
st.divider()  ###
col1, col2 = st.columns([1,1])
with col1:
    cloud_price = st.slider("Memory price: *from $0 to $20 per GB/month (default $3.80, estimated from AWS x2gd instances)*", 0.0, 20.0, 3.8)
with col2:
    docs = st.slider("Number of vector embeddings: *from 100 million to 1 billion (default 250M)*", 100_000_000, 1_000_000_000, 250_000_000, step=10_000_000)  # default 250M
st.divider()  ### 
col3, col4, col5, col6, col7, col8, col9, col10 = st.columns([1,1,1,1,1,1,1,1])
with col3:
    st.write("***Embedding dimension***")
with col4:
    st.write("***Memory usage in GB***")
with col5:
    st.write("***Price on a monthly basis***")
with col6:
    st.write("***Price on a yearly basis***")
with col7:
    st.write("***Int8 memory*** (div. by 4)")
with col8:
    st.write("***Int8 price*** (div. by 4)")
with col9:
    st.write("***Binary memory*** (div. by 32)")
with col10:
    st.write("***Binary price*** (div. by 32)")   
    
col11, col12, col13, col14, col15, col16, col17, col18 = st.columns([1, 1, 1, 1, 1, 1, 1, 1])

# One row per common embedding dimension: float32 memory and price, then the
# int8 (divide by 4) and binary (divide by 32) equivalents shown in the columns above.
dims = [384, 512, 768, 1024, 1536, 2048, 3072, 4096]
mem_gb = [(d * 4 * docs) / bytes2gb for d in dims]   # float32 = 4 bytes per dimension
price_month = [m * cloud_price for m in mem_gb]
price_year = [p * 12 for p in price_month]

with col11:
    for d in dims:
        st.write(f"***{d}***")
with col12:
    for m in mem_gb:
        st.write(f"{round(m, 2)} GB")
with col13:
    for p in price_month:
        st.write(f"{round(p, 2)} $")
with col14:
    for p in price_year:
        st.write(f"{round(p, 2)} $")
with col15:
    for m in mem_gb:
        st.write(f"{round(m / 4, 2)} GB")
with col16:
    for p in price_month:
        st.write(f"{round(p / 4, 2)} $")
with col17:
    for m in mem_gb:
        st.write(f"{round(m / 32, 2)} GB")
with col18:
    for p in price_month:
        st.write(f"{round(p / 32, 2)} $")
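# --- Illustrative sketch of the int8 path (not used by the app UI) -------------------------
# The "div. by 4" columns assume scalar int8 quantization: each float32 dimension is mapped
# to one signed byte using per-dimension min/max taken from a calibration sample. This is a
# common recipe sketched as an assumption; exact formulas differ between libraries.
# Reuses the NumPy import from the sketch above.
def to_int8(emb, calibration):
    """Scalar-quantize float32 embeddings of shape (n, d) to int8 via per-dimension ranges."""
    lo = calibration.min(axis=0)
    hi = calibration.max(axis=0)
    scale = np.where(hi > lo, (hi - lo) / 255.0, 1.0)   # avoid division by zero on flat dims
    return np.clip(np.round((emb - lo) / scale) - 128, -128, 127).astype(np.int8)
# --------------------------------------------------------------------------------------------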

st.write('***Disclaimer:*** *The financial projections above are based on ["Cohere int8 & binary Embeddings - Scale Your Vector Database to Large Datasets"](https://cohere.com/blog/int8-binary-embeddings) by Nils Reimers of [Cohere](https://cohere.com/). The cost of the index and of metadata may not be factored into these estimates.*')

st.divider()  ###
st.write("***- Open-source vector databases for scalar and binary quantization:***")
col19, col20 = st.columns([1,1])
with col19:
    st.write("- [FAISS](https://github.com/facebookresearch/faiss) from :flag-us:")
    st.write("- [VESPA AI](https://github.com/vespa-engine/vespa) from :flag-no:")
    st.write("- [Pgvector](https://github.com/pgvector/pgvector) from :flag-us:")
    st.write("- [Milvus](https://github.com/milvus-io/milvus) from :flag-cn:")
    st.write("- [Usearch](https://github.com/unum-cloud/usearch) from :flag-us:")
with col20:
    st.write("- [Qdrant](https://github.com/qdrant) from :flag-de:")
    st.write("- [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) from :flag-cn:")
    st.write("- [TencentVectorDB](https://github.com/Tencent/vectordatabase-sdk-python) from :flag-cn:")
    st.write("- [BinaryVectorDB](https://github.com/cohere-ai/BinaryVectorDB) from :flag-ca:")
    st.write("- [Weaviate](https://github.com/weaviate/weaviate) from :flag-nl:")
st.divider()  ###
st.write("***- Further reading:***")

st.write("- [Billion-scale similarity search with GPUs](https://arxiv.org/abs/1702.08734)")
st.write("- [Efficient Passage Retrieval with Hashing for Open-domain Question Answering](https://arxiv.org/abs/2106.00882)")
st.write("- [Matryoshka Representation Learning](https://arxiv.org/abs/2205.13147)")
st.write("- [Incorporating Relevance Feedback for Information-Seeking Retrieval using Few-Shot Document Re-Ranking](https://arxiv.org/abs/2210.10695)")
st.write("- [Binary Embedding-based Retrieval at Tencent](https://arxiv.org/abs/2302.08714)")
st.divider()  ###
st.write("***Akim Mousterou*** (April 2024) *[LinkedIn](https://www.linkedin.com/in/akim-mousterou/), [HuggingFace](https://huggingface.co/Akimfromparis), and [GitHub](https://github.com/AkimParis)*")