from pinecone import Pinecone, ServerlessSpec
import numpy as np
from sklearn.datasets import load_digits
from collections import Counter
import matplotlib.pyplot as plt

# Initialize the Pinecone client.
# NOTE(security): an API key used to be hard-coded here. A secret committed to
# source control is compromised and must be rotated; read it from the
# environment instead so it never lives in the repo.
pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])

index_name = "mnist-index"

# =================== Delete the old index (optional) ===================
# Recreating from scratch guarantees a clean, empty index for this demo run.
if index_name in pc.list_indexes().names():
    print(f"索引 '{index_name}' 已存在，正在删除...")
    pc.delete_index(index_name)
    print(f"索引 '{index_name}' 已删除。")

# =================== Create a new index ===================
# dimension=64 matches sklearn's 8x8 digit images flattened to 64 values;
# euclidean distance suits raw pixel-intensity vectors.
print(f"正在创建索引 '{index_name}'...")
pc.create_index(
    name=index_name,
    dimension=64,
    metric="euclidean",
    spec=ServerlessSpec(cloud="aws", region="us-east-1")
)
print(f"索引 '{index_name}' 创建成功。")

# =================== Connect to the index ===================
index = pc.Index(index_name)
print(f"已连接到索引 '{index_name}'")

# =================== Load the digits dataset (8x8 images) ===================
digits = load_digits(n_class=10)
X = digits.data    # (1797, 64): each row is a flattened 8x8 image, pixel values 0-16
y = digits.target  # (1797,): ground-truth digit labels 0-9

# =================== Batch-insert the vectors ===================
# Each record is (id, values, metadata); the label travels as metadata so a
# query can recover it without a second lookup. Using enumerate/zip instead of
# the index-based range(len(X)) loop — same records, idiomatic construction.
vectors = [
    (str(i), row.tolist(), {"label": int(label)})
    for i, (row, label) in enumerate(zip(X, y))
]

batch_size = 1000
for start in range(0, len(vectors), batch_size):
    batch = vectors[start:start + batch_size]
    index.upsert(vectors=batch)
    print(f"✅ 已插入 {min(start + batch_size, len(vectors))}/{len(vectors)} 个向量")

print("全部数据插入完成！")

# =================== Build the query image (digit 3) ===================
# NOTE: load_digits() pixels run 0..16, NOT 0..255, so the query must use the
# same scale. '#' marks a full-intensity (16) pixel, '.' an empty (0) one.
_GLYPH_3 = [
    "..####..",
    ".....#..",
    ".....#..",
    "...###..",
    ".....#..",
    ".....#..",
    ".....#..",
    "..####..",
]
digit_3 = np.array([[16 if ch == "#" else 0 for ch in row] for row in _GLYPH_3])

# Flatten the 8x8 grid to a 64-element list to match the index dimension.
query_vector = digit_3.ravel().tolist()

# =================== Query the 11 most similar vectors ===================
results = index.query(
    vector=query_vector,
    top_k=11,
    include_metadata=True
)

# =================== Extract labels and predict ===================
# Collect the neighbour labels returned by the query; guard each lookup since
# a match could in principle arrive without metadata.
labels = []
for match in results['matches']:
    if 'metadata' in match and 'label' in match['metadata']:
        labels.append(match['metadata']['label'])

# Handle an empty result set explicitly rather than crashing.
if not labels:
    print("❌ 未找到任何匹配项，无法预测")
    final_prediction = -1
else:
    # BUG FIX: the previous code dropped labels[0] on the assumption that the
    # top match was the query vector "itself". The query image here is
    # hand-drawn and was never upserted, so the nearest neighbour is a real
    # match — discarding it threw away the strongest evidence. Do a plain
    # k-NN majority vote over all returned neighbours instead.
    final_prediction = Counter(labels).most_common(1)[0][0]

    print("🔍 查询结果（id, distance, label）:")
    for match in results['matches']:
        _id = match['id']
        dist = match['score']
        lbl = match['metadata']['label'] if 'metadata' in match else "N/A"
        print(f"  id: {_id}, distance: {dist:.3f}, label: {lbl}")

# =================== Visualise the result ===================
# Show the query image with the predicted label as the title, using the
# explicit figure/axes API rather than the implicit pyplot state machine.
fig, ax = plt.subplots()
ax.imshow(digit_3, cmap='gray')
ax.set_title(f"🔍 预测结果: {final_prediction}", fontsize=16)
ax.axis('off')
plt.show()

print(f"✅ 最终预测结果: {final_prediction}")