'''faiss_learn  的k-means PCA PQ编解码算法'''
import faiss
import numpy as np
import matplotlib.pyplot as plt

'''k-means'''
# Synthetic dataset: n_data Gaussian vectors of dimension d.
d = 512          # dimensionality
n_data = 2000    # number of vectors
np.random.seed(0)
mu = 3
sigma = 0.1
# One vectorized draw replaces the original per-row Python loop; under the
# fixed seed the same gaussian stream is consumed in the same order, so the
# resulting matrix is identical.  faiss requires float32 input.
data = np.random.normal(mu, sigma, (n_data, d)).astype('float32')

# Clustering.
# NOTE(review): ncentroids (1024) is large relative to n_data (2000) —
# roughly 2 training points per centroid; fine for a demo, but faiss
# normally wants substantially more points per cluster.
ncentroids = 1024  # number of cluster centroids
niter = 20         # k-means iterations
verbose = True
d = data.shape[1]
kmeans = faiss.Kmeans(d, ncentroids, niter=niter, verbose=verbose)
kmeans.train(data)
# The trained (ncentroids, d) centroid matrix.
print(kmeans.centroids)

# Assign vectors to their nearest centroid: for each query, the index
# returns D (squared L2 distance) and I (centroid id).
D, I = kmeans.index.search(data[:5], 1)
print(D)
print(I)


'''PCA dimensionality reduction'''
# Reduce from 512 to 10 dimensions (the original comment wrongly said 64).
mat = faiss.PCAMatrix(512, 10)
mat.train(data)
assert mat.is_trained  # demo-level sanity check, not input validation
tr = mat.apply_py(data)
print(tr.shape)  # expect (n_data, 10)
# TODO: inspect the PCA eigenvalues to see how much variance is retained.

'''PQ encode/decode'''

d = 512  # vector dimensionality
cs = 4   # code size in bytes (4 sub-quantizers of 8 bits each)
# Reuse the synthetic dataset for both training and encoding.
xt = data  # training vectors
x = data   # vectors to round-trip (may be the same as the training set)
# A ProductQuantizer compresses each d-dim vector into a cs-byte code.
quantizer = faiss.ProductQuantizer(d, cs, 8)
quantizer.train(xt)
compressed = quantizer.compute_codes(x)       # encode
reconstructed = quantizer.decode(compressed)  # decode
# Squared reconstruction error relative to the signal energy.
residual = x - reconstructed
avg_relative_error = (residual ** 2).sum() / (x ** 2).sum()
print(avg_relative_error)


# 标量量化器（scalar quantizer）
# QT_8bit allocates 8 bits per dimension (QT_4bit also works)
sq = faiss.ScalarQuantizer(d, faiss.ScalarQuantizer.QT_8bit)
sq.train(xt)
# encode 编码
codes = sq.compute_codes(x)
# decode 解码
x2 = sq.decode(codes)
# 计算编码-解码后与原始数据的差
avg_relative_error = ((x - x2)**2).sum() / (x ** 2).sum()
print(avg_relative_error)