import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity
import heapq

# 1. Data generation
def generate_text_features(documents):
    """Embed documents as L2-normalized vectors via TF-IDF followed by SVD.

    :param documents: list of raw text strings
    :return: (n_documents, 10) array of unit-length embedding vectors
    """
    vectorizer = TfidfVectorizer(max_features=100)
    reducer = TruncatedSVD(n_components=10, random_state=42)
    reduced = reducer.fit_transform(vectorizer.fit_transform(documents))
    return normalize(reduced)

def generate_image_features(num_images=10, dimensions=10, seed=42):
    """Generate reproducible random image embeddings with unit L2 norm.

    Uses a local ``numpy.random.Generator`` instead of reseeding NumPy's
    global RNG, so calling this helper no longer perturbs every other
    random draw in the program (the original ``np.random.seed(42)`` did).

    :param num_images: number of embedding vectors to generate
    :param dimensions: dimensionality of each embedding vector
    :param seed: RNG seed controlling reproducibility (default 42)
    :return: (num_images, dimensions) array whose rows have unit L2 norm
    """
    rng = np.random.default_rng(seed)
    raw = rng.random((num_images, dimensions))
    # Values are drawn from [0, 1), so row norms are strictly positive
    # and the division below is safe.
    return raw / np.linalg.norm(raw, axis=1, keepdims=True)

# Toy corpus: ten tiny "documents", each a themed trio of words.
documents = [
    "apple banana orange",   # fruit
    "cat dog elephant",      # animals
    "car bus train",         # vehicles
    "coffee tea water",      # drinks
    "python java c++",       # programming
    "rose lily tulip",       # flowers
    "sun moon stars",        # sky
    "pen pencil eraser",     # stationery
    "chair table desk",      # furniture
    "lamp fan bulb",         # appliances
]

# Build one feature matrix per modality (both L2-normalized, 10-dimensional).
text_features = generate_text_features(documents)
image_features = generate_image_features(num_images=10, dimensions=10)

# 2. LSH implementation
class LSH:
    """Random-hyperplane locality-sensitive hashing for coarse candidate bucketing.

    Each table hashes a vector to a bit string with one bit per hyperplane,
    set to '1' when the vector lies on the strictly positive side of that
    plane. Vectors sharing a bucket in any table become query candidates.
    """

    def __init__(self, dimensions, num_hashes, num_tables):
        self.dimensions = dimensions
        self.num_hashes = num_hashes
        self.num_tables = num_tables
        # One independent set of random hyperplanes per table (global RNG;
        # seeding, if desired, is the caller's responsibility).
        self.hash_planes = [np.random.randn(num_hashes, dimensions)
                            for _ in range(num_tables)]
        # One bucket dict per table, mapping bit string -> list of indices.
        self.tables = [{} for _ in range(num_tables)]

    def hash_function(self, vector, planes):
        """Return the sign-pattern bit string of *vector* against *planes*."""
        projections = np.dot(planes, vector)
        return ''.join('1' if p > 0 else '0' for p in projections)

    def insert(self, vectors):
        """Index *vectors* (sequence of 1-D arrays) into every hash table."""
        for table, planes in zip(self.tables, self.hash_planes):
            for idx, vector in enumerate(vectors):
                # setdefault avoids the membership-test-then-lookup pattern.
                table.setdefault(self.hash_function(vector, planes), []).append(idx)

    def query(self, query_vector):
        """Return indices of all inserted vectors sharing a bucket with the query."""
        candidates = set()
        for table, planes in zip(self.tables, self.hash_planes):
            candidates.update(table.get(self.hash_function(query_vector, planes), ()))
        return list(candidates)

# 3. HNSW implementation
class HNSW:
    """Simplified HNSW-style index over a fixed set of vectors.

    NOTE(review): ``search`` currently performs an exact brute-force scan of
    all vectors; the layer graphs built below are illustrative and are never
    consulted. Also, ``current[::2]`` re-indexes upper layers from 0, so
    upper-layer neighbor ids do not map back to original vector ids —
    confirm whether that mapping was intended.
    """

    def __init__(self, vectors, max_neighbors=5, max_layers=3):
        # Coerce to an ndarray so vectorized distance math below is valid
        # even when a plain list of arrays is passed in.
        self.vectors = np.asarray(vectors)
        self.max_neighbors = max_neighbors
        self.max_layers = max_layers
        self.layers = self._construct_layers()

    def _construct_layers(self):
        """Build per-layer k-NN adjacency lists, halving the point set per layer."""
        layers = []
        current = self.vectors
        for _ in range(self.max_layers):
            layer = {}
            for i in range(len(current)):
                # Vectorized distances from point i to every point in this layer
                # (replaces the original O(n) inner Python loop per point).
                dists = np.linalg.norm(current - current[i], axis=1)
                ranked = [(dists[j], j) for j in range(len(current)) if j != i]
                # Ties on equal distance break on the smaller index, matching
                # tuple comparison order.
                layer[i] = [j for _, j in heapq.nsmallest(self.max_neighbors, ranked)]
            layers.append(layer)
            current = current[::2]  # halve the point count for the next layer
        return layers

    def search(self, query_vector, top_k=5):
        """Return the top_k (distance, index) pairs closest to *query_vector*."""
        dists = np.linalg.norm(self.vectors - query_vector, axis=1)
        return heapq.nsmallest(top_k, [(dists[i], i) for i in range(len(dists))])

# 4. Build the combined retrieval system
# Build the two-stage index for each modality: LSH for coarse bucketing,
# HNSW for fine-grained neighbor structure.
lsh_text = LSH(dimensions=10, num_hashes=5, num_tables=3)
lsh_image = LSH(dimensions=10, num_hashes=5, num_tables=3)

lsh_text.insert(text_features)
hnsw_text = HNSW(text_features, max_neighbors=5, max_layers=3)

lsh_image.insert(image_features)
hnsw_image = HNSW(image_features, max_neighbors=5, max_layers=3)

# 5. Multimodal retrieval
def multimodal_query(query_vector, lsh, hnsw, features, labels, top_k=5):
    """Two-stage retrieval: LSH bucket lookup, then exact cosine re-ranking.

    NOTE(review): the ``hnsw`` index is accepted but never consulted —
    re-ranking is done directly with cosine similarity over the LSH
    candidates. The parameter is kept for interface stability; confirm
    whether HNSW refinement was intended here.

    :param query_vector: 1-D query embedding
    :param lsh: LSH index used for coarse candidate selection
    :param hnsw: HNSW index (currently unused)
    :param features: full feature matrix the candidate indices refer to
    :param labels: human-readable label for each feature row
    :param top_k: number of results to print (default 5, as before)
    """
    lsh_candidates = lsh.query(query_vector)
    print(f"LSH初筛候选数量: {len(lsh_candidates)}")

    # Exact cosine similarity via a plain dot product; cheaper than a
    # pairwise-matrix call for single pairs. Assumes non-zero vectors
    # (true for the normalized features built above) — TODO confirm for
    # other callers.
    q_norm = np.linalg.norm(query_vector)
    reranked = []
    for idx in lsh_candidates:
        cand = features[idx]
        similarity = float(np.dot(query_vector, cand) / (q_norm * np.linalg.norm(cand)))
        reranked.append((idx, similarity))

    reranked.sort(key=lambda pair: -pair[1])
    for rank, (candidate_idx, similarity) in enumerate(reranked[:top_k]):
        print(f"推荐 {rank + 1}: 索引 {candidate_idx}, 相似度: {similarity:.4f}, 标签: {labels[candidate_idx]}")

# Demo: query document #2 against the text index and image #4 against the
# image index, printing the top recommendations for each.
image_labels = [f"Image {i}" for i in range(10)]

print("文本检索结果:")
multimodal_query(text_features[2], lsh_text, hnsw_text, text_features, documents)
print("\n图像检索结果:")
multimodal_query(image_features[4], lsh_image, hnsw_image, image_features, image_labels)