import numpy as np
from scipy.spatial.distance import cdist
from concurrent.futures import ThreadPoolExecutor
import time

# 1. 数据生成
def generate_high_dim_vectors(num_vectors=10000, dimensions=50):
    """
    Build a synthetic high-dimensional data set.

    :param num_vectors: number of database vectors to create
    :param dimensions: dimensionality of every vector
    :return: tuple of (database matrix, single query vector)
    """
    # Fixed seed keeps every run reproducible; the database is drawn
    # first, then the query, so the values match earlier runs exactly.
    np.random.seed(42)
    return (
        np.random.rand(num_vectors, dimensions),
        np.random.rand(1, dimensions),
    )

vector_database, query_vector = generate_high_dim_vectors(10000, 50)

# 2. 索引优化 - 分块搜索
def block_search(query, database, block_size, metric="euclidean"):
    """
    Brute-force nearest-neighbour search over the database in blocks.

    :param query: query vector, shape (1, dimensions)
    :param database: vector database, shape (num_vectors, dimensions)
    :param block_size: number of database rows processed per block (> 0)
    :param metric: distance metric name passed to scipy.spatial.distance.cdist
    :return: (index of the nearest vector, its distance);
             (-1, inf) when the database is empty
    """
    min_distance = float("inf")
    min_index = -1

    # Ceil division makes the loop cover the short trailing block too,
    # removing the duplicated "remainder" code path of the original.
    num_blocks = (len(database) + block_size - 1) // block_size
    for i in range(num_blocks):
        start = i * block_size
        block = database[start: start + block_size]
        distances = cdist(query, block, metric=metric).flatten()
        block_min_index = np.argmin(distances)
        block_min_distance = distances[block_min_index]

        if block_min_distance < min_distance:
            min_distance = block_min_distance
            min_index = start + block_min_index

    return min_index, min_distance

# 3. 并行化搜索
def parallel_search(query, database, block_size, metric="euclidean"):
    """
    Parallel brute-force nearest-neighbour search, one task per block.

    cdist does its work in native code, so a thread pool can overlap the
    per-block computations without process-spawn/pickling overhead.

    :param query: query vector, shape (1, dimensions)
    :param database: vector database, shape (num_vectors, dimensions)
    :param block_size: number of database rows per block (> 0)
    :param metric: distance metric name passed to scipy.spatial.distance.cdist
    :return: (index of the nearest vector, its distance);
             (-1, inf) when the database is empty
    """
    # Record each block's global start offset explicitly instead of
    # reconstructing it as i * block_size afterwards; the slice bound is
    # clamped automatically, so the short trailing block needs no special case.
    starts = list(range(0, len(database), block_size))
    blocks = [database[s: s + block_size] for s in starts]

    def search_block(block):
        distances = cdist(query, block, metric=metric).flatten()
        # Single argmin scan, then one index — avoids the redundant
        # second full pass that np.min + np.argmin would make.
        local_index = np.argmin(distances)
        return distances[local_index], local_index

    with ThreadPoolExecutor() as executor:
        results = list(executor.map(search_block, blocks))

    min_distance = float("inf")
    min_index = -1
    for start, (dist, local_index) in zip(starts, results):
        if dist < min_distance:
            min_distance = dist
            min_index = start + local_index

    return min_index, min_distance

# 4. 性能测试  
# 4. Performance test
block_size = 500

# Blocked brute-force search.
# perf_counter is a monotonic, high-resolution clock intended for interval
# timing; time.time can jump when the wall clock is adjusted, skewing results.
start_time = time.perf_counter()
min_index_block, min_distance_block = block_search(query_vector, vector_database, block_size=block_size)
time_block = time.perf_counter() - start_time

# Parallel brute-force search.
start_time = time.perf_counter()
min_index_parallel, min_distance_parallel = parallel_search(query_vector, vector_database, block_size=block_size)
time_parallel = time.perf_counter() - start_time

# 5. Print results
print("分块搜索结果:")
print(f"最小距离索引: {min_index_block}, 最小距离: {min_distance_block:.6f}, 耗时: {time_block:.6f} 秒\n")

print("并行化搜索结果:")
print(f"最小距离索引: {min_index_parallel}, 最小距离: {min_distance_parallel:.6f}, 耗时: {time_parallel:.6f} 秒")