# Compute the ground-truth (GT) file for a dataset
import numpy as np
import os
from concurrent.futures import ThreadPoolExecutor
import threading
import time

# type = np.float32
# dataset_root_path = "/home/ljl/Code/dataset/vector-ssd/deep"
# dataset_path = dataset_root_path+"/1M.fbin"
# query_path = dataset_root_path+"/queries.fbin"
# gt_path = dataset_root_path+"/1M_gt.bin" # output path

# type = np.float32
# dataset_root_path = "/home/ljl/Code/dataset/vector-ssd/sift1m"
# dataset_path = dataset_root_path+"/sift_1m_base.fbin"
# query_path = dataset_root_path+"/sift_query.fbin"
# gt_path = dataset_root_path+"/gt_test.bin"

# Active configuration: BIGANN 10M subset, uint8 vectors (.bbin files).
type = np.uint8  # element dtype of the vectors; NOTE(review): shadows the builtin `type` — renaming would break keyword callers below
dataset_root_path = "/home/ljl/Code/dataset/vector-ssd/bigann/"
dataset_path = dataset_root_path+"bigann_10M.bbin"
query_path = dataset_root_path+"bigann_query.bbin"
gt_path = dataset_root_path+"bigann_10M_gt.bin" # output path

override = True  # if gt_path already exists, whether to overwrite it
k = 100  # how many top-k neighbours to compute per query
thread_num = 50  # worker threads for the GT computation
n = 0  # compute only the first n queries; 0 means compute all of them

# Pre-flight checks: both input files must exist, and the output GT file
# must not already exist unless overwriting was explicitly allowed.
for _label, _path in (("dataset_path:", dataset_path), ("query_path:", query_path)):
    if not os.path.exists(_path):
        print(_label, _path, "不存在")
        quit()
if os.path.exists(gt_path) and not override:
    print("gt_path:", gt_path, "已存在")
    quit()

# Read a .bin-format vector file; works for the dataset, query set and GT set alike.
def read_bin(file_path, type):
    """Load a .bin file: uint32 line count, uint32 dimension (little-endian),
    followed by the raw vector payload.

    Returns a (lines, dim) ndarray of dtype ``type``, or None when the header
    disagrees with the on-disk file size (a simple corruption check).
    """
    file_size = os.path.getsize(file_path)
    print("Read data from", file_path, "\nfile size:", file_size)
    item_bytes = np.dtype(type).itemsize
    with open(file_path, "rb") as fd:
        # Header: two little-endian uint32 values.
        header = fd.read(8)
        lines = int.from_bytes(header[:4], byteorder='little')
        dim = int.from_bytes(header[4:8], byteorder='little')
        print("lines:", lines, "dim:", dim)

        payload_size = lines * dim * item_bytes
        if payload_size + 8 != file_size:
            # Header and actual file size disagree — refuse to parse.
            print(f"Error! file size {file_size} and argument {payload_size+8} not match!")
            return None

        vectors = np.frombuffer(fd.read(payload_size), dtype=type).reshape(lines, dim)
        print("Returned vector list:", vectors.shape, vectors.dtype)
        return vectors

# Write a vector set to a .bin file (uint32 rows, uint32 cols, raw data); mainly used to dump centroids.
def write_bin(filename, array):
    """Write a 2-D NumPy array to *filename* in the .bin layout read by read_bin.

    Layout: uint32 row count, uint32 column count — both explicitly
    little-endian so the file matches read_bin's
    ``int.from_bytes(..., byteorder='little')`` on any host — followed by the
    raw array bytes in the array's own dtype (the reader must know the dtype
    out of band).

    Raises:
        ValueError: if *array* is not a 2-D NumPy array.
    """
    if not isinstance(array, np.ndarray):
        raise ValueError("Input must be a NumPy array.")
    if array.ndim != 2:
        # Same exception class the old `rows, cols = array.shape` unpack raised,
        # but with a clear message.
        raise ValueError("Input must be a 2-D NumPy array.")

    rows, cols = array.shape
    with open(filename, 'wb') as f:
        # '<u4' forces little-endian; bare np.uint32 would be native-endian
        # and produce unreadable files on big-endian hosts.
        np.array(rows, dtype='<u4').tofile(f)
        np.array(cols, dtype='<u4').tofile(f)
        array.tofile(f)

def compute_gt_multithread(dataset, query_set, gt_file_path, k, num_threads=16, input_n=0):
    """
    Compute exact k-nearest-neighbour ground truth and write it to *gt_file_path*.

    Uses the expansion ||x - q||^2 = ||x||^2 + ||q||^2 - 2*x.q so each query
    needs only one matrix-vector product against the dataset, avoiding the
    memory blow-up of materialising a full pairwise-difference tensor.

    Args:
        dataset: (num_points, dim) array of base vectors.
        query_set: (num_queries, dim) array of query vectors (same dim).
        gt_file_path: output path, written as uint32 [rows][cols][ids...].
        k: number of nearest neighbours to record per query.
        num_threads: thread-pool size (NumPy's dot releases the GIL, so
            threads give real parallelism here).
        input_n: process only the first input_n queries; <= 0 means all.

    Raises:
        ValueError: if k exceeds the number of dataset points.
    """
    print(f"Computing ground truth with OPTIMIZED method on {dataset.shape[0]}x{dataset.shape[1]} data")

    num_points = dataset.shape[0]
    if k > num_points:
        # Fail fast with a clear message instead of an obscure shape error later.
        raise ValueError(f"k={k} exceeds dataset size {num_points}")

    start_time = time.time()
    # Number of queries to process: all unless input_n limits it.
    n = query_set.shape[0] if input_n <= 0 else min(query_set.shape[0], input_n)

    # Work in float32 to avoid overflow/precision issues with integer dtypes
    # such as uint8; copy=False skips the copy when already float32.
    dataset_float = dataset.astype(np.float32, copy=False)

    # Precompute ||x_i||^2 once for the whole dataset.
    dataset_norms = np.sum(dataset_float**2, axis=1)

    gt_array = np.zeros((n, k), dtype=np.uint32)

    # Progress counter shared across worker threads.
    processed_count = 0
    count_lock = threading.Lock()

    # np.argpartition requires kth < array length, so clamp; this also makes
    # the k == num_points case work (the old code raised ValueError there).
    kth = min(k, num_points - 1)

    def process_query(i):
        """Compute the sorted top-k neighbour ids for query i into gt_array[i]."""
        nonlocal processed_count

        q = query_set[i].astype(np.float32)

        # ||x - q||^2 = ||x||^2 + ||q||^2 - 2*x.q. Float cancellation can make
        # tiny distances slightly negative, but ranking is what matters here.
        q_norm = np.dot(q, q)
        dot_products = np.dot(dataset_float, q)
        dists_sq = dataset_norms + q_norm - 2 * dot_products

        # Partial selection of the k smallest, then sort only those k.
        indices = np.argpartition(dists_sq, kth)[:k]
        sorted_local_idx = np.argsort(dists_sq[indices])
        gt_array[i] = indices[sorted_local_idx]

        # Each thread writes a distinct row of gt_array, so only the shared
        # progress counter needs the lock.
        with count_lock:
            processed_count += 1
            if processed_count % max(1, n//100) == 0:  # report roughly every 1%
                print(f"Progress: {processed_count}/{n} ({processed_count/n:.1%})")

    # Fan out one task per query; future.result() re-raises worker exceptions.
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = [executor.submit(process_query, i) for i in range(n)]
        for future in futures:
            future.result()

    # Persist in the same .bin layout that read_bin expects.
    print(f"Saving GT to {gt_file_path}")
    with open(gt_file_path, 'wb') as f:
        np.array(gt_array.shape[0], dtype=np.uint32).tofile(f)
        np.array(gt_array.shape[1], dtype=np.uint32).tofile(f)
        gt_array.tofile(f)

    # Performance summary.
    end_time = time.time()
    total_time = end_time - start_time
    qps = n / total_time
    print(f"Completed! Time: {total_time:.2f}s, QPS: {qps:.2f}")
    print(f"Memory saved: {(dataset.size * 4 / 1e9):.1f} GB per thread")

# Load both vector sets, sanity-check their dimensionality, then compute the GT.
dataset = read_bin(dataset_path, type)
query = read_bin(query_path, type)
print("Dataset shape:", dataset.shape)
print("Query shape:", query.shape)

# Distances are only meaningful when base and query vectors share a dimension.
dims_match = dataset.shape[1] == query.shape[1]
if not dims_match:
    print("Error! Dataset and query have different dimensions.")
    exit()

compute_gt_multithread(dataset, query, gt_path, k, thread_num, n)

quit()  # everything below this point is unreachable scratch code

# NOTE(review): dead notebook-style cells (# %%) kept for manual debugging.
# They compare a freshly computed GT against a reference file from the sift1m
# config commented out above, so they do not match the current bigann paths.
gt1 = read_bin(gt_path,type=np.uint32)
gt2 = read_bin(dataset_root_path+"/sift_groundtruth.fbin",type=np.uint32)

# %%
# Compare gt1 and gt2 element by element; print every position where they differ.
for i in range(len(gt1)):
    for j in range(len(gt1[i])):
        if(gt1[i][j] != gt2[i][j]):
            print(i,j,gt1[i][j],gt2[i][j])

# %%
# Scratch: float32 arithmetic experiment.
a = np.float32(5)
b = np.float32(6)

# %%
a - b

# %%



