import logging
import numpy as np
import pinecone
from tensorflow.keras.datasets import mnist
from tqdm import tqdm
import time
from pinecone import Pinecone, ServerlessSpec

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')

# Initialize the Pinecone v3+ client.
# NOTE(review): the original mixed the removed module-level API
# (pinecone.init / pinecone.list_indexes / pinecone.create_index /
# pinecone.Index) with v3-only classes (Pinecone, ServerlessSpec), so it
# could not run against any client version. All calls now go through the
# client instance `pc`.
# SECURITY: this API key is hard-coded (and now exposed) — rotate it and
# load it from an environment variable or secret store instead.
pc = Pinecone(api_key="c64273d1-3f4a-4ae3-99a1-4a0a0b7a5f87")

# Drop any pre-existing index with the same name so we start clean.
index_name = "mnist-index"
if index_name in pc.list_indexes().names():
    pc.delete_index(index_name)
    logging.info(f"索引 '{index_name}' 已存在并已删除。")

# Create a fresh serverless index sized for flattened 28x28 MNIST images.
pc.create_index(
    name=index_name,
    dimension=784,
    metric="euclidean",
    spec=ServerlessSpec(cloud="aws", region="us-east-1")
)
# Serverless index creation is asynchronous; block until it is ready,
# otherwise the first upsert/query may fail.
while not pc.describe_index(index_name).status["ready"]:
    time.sleep(1)
logging.info(f"索引 '{index_name}' 创建成功。")

# Connect to the index
index = pc.Index(index_name)
logging.info(f"已成功连接到索引 '{index_name}'。")

# Load MNIST and flatten each 28x28 image into a 784-d float vector in [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
epsilon = 1e-8  # tiny offset so no vector is exactly all-zero
x_train = (x_train.reshape(-1, 28*28).astype('float32') / 255.0 + epsilon)[::5]  # subsample to cut upload volume
y_train = y_train[::5]
# BUG FIX: the original overwrote x_test/y_test with slices of the TRAINING
# data, so every query vector was already in the index and the nearest match
# was the sample itself, inflating accuracy. Evaluate on the real test set,
# preprocessed identically to the training vectors.
x_test = (x_test.reshape(-1, 28*28).astype('float32') / 255.0 + epsilon)[:360]
y_test = y_test[:360]

# Batched upload helper with exponential-backoff retries.
def upload_data(index, vectors, batch_size=100, max_retries=3):
    """Upsert `vectors` into `index` in batches, retrying transient failures.

    Args:
        index: Pinecone index object exposing `upsert`.
        vectors: sequence of (id, values, metadata) tuples.
        batch_size: number of vectors sent per upsert call.
        max_retries: attempts per batch before giving up.

    Raises:
        Exception: re-raises the last upsert error once retries are exhausted.
    """
    for start in tqdm(range(0, len(vectors), batch_size), desc="上传数据", unit="it"):
        batch = vectors[start:start + batch_size]
        for attempt in range(max_retries):
            try:
                index.upsert(batch)
                break
            except Exception as e:
                # Fail fast on the final attempt: the original logged a
                # warning, slept the full backoff, and only then raised —
                # a wasted sleep and a duplicate log line.
                if attempt == max_retries - 1:
                    logging.error(f"上传失败，已达到最大重试次数: {str(e)}")
                    raise
                wait_time = 2 ** attempt  # exponential backoff: 1s, 2s, 4s, ...
                logging.warning(f"上传失败，等待{wait_time}秒后重试（第{attempt+1}次）: {str(e)}")
                time.sleep(wait_time)

# Build the (id, vector, metadata) tuples Pinecone expects, pairing each
# training vector with its label, then push everything to the index.
vectors = [
    (str(idx), vec.tolist(), {"label": int(lbl)})
    for idx, (vec, lbl) in enumerate(zip(x_train, y_train))
]

upload_data(index, vectors)

# Measure top-k retrieval accuracy against the Pinecone index.
def test_accuracy(index, x_test, y_test, k):
    """Return the fraction of test samples whose true label appears among
    the labels of their k nearest neighbours in `index`.

    Args:
        index: Pinecone index object exposing `query`.
        x_test: array of query vectors (one row per sample).
        y_test: true labels aligned with `x_test`.
        k: number of neighbours to retrieve per query.

    Returns:
        Accuracy in [0, 1]; 0.0 for an empty test set. Failed queries are
        logged and counted as incorrect (best-effort behaviour).
    """
    if len(x_test) == 0:  # avoid ZeroDivisionError on an empty test set
        return 0.0
    correct_predictions = 0
    for i in tqdm(range(len(x_test)), desc="测试准确率", unit="项"):
        try:
            # The v3 client requires the query vector as a keyword argument
            # (positional form was removed); `vector=` also works on older
            # clients, so this fix is backward-compatible.
            query_result = index.query(
                vector=x_test[i].tolist(), top_k=k, include_metadata=True
            )
            nearest_labels = [int(match['metadata']['label'])
                              for match in query_result['matches']]
            if y_test[i] in nearest_labels:
                correct_predictions += 1
        except Exception as e:
            logging.error(f"查询失败: {str(e)}")
    return correct_predictions / len(x_test)

# Evaluate retrieval accuracy with k = 11 nearest neighbours and report it.
k_value = 11
accuracy = test_accuracy(index, x_test, y_test, k=k_value)
logging.info(f"使用Pinecone的准确率 (k={k_value}): {accuracy:.4f}")

# Cleanup (optional): delete the index when finished to avoid charges.
# pinecone.delete_index(index_name)