import logging
import os
import time
from collections import Counter
from datetime import datetime

import numpy as np
from pinecone import Pinecone, ServerlessSpec
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Configure logging so every message carries a date/time prefix.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

# SECURITY: an API key was hard-coded here. Prefer the PINECONE_API_KEY
# environment variable; the literal is kept only as a backward-compatible
# fallback — rotate this key and delete the fallback once the env var is set.
pinecone = Pinecone(
    api_key=os.environ.get("PINECONE_API_KEY", "f83eac6c-0929-44a6-bd90-6f48dba6ea56")
)
index_name = "mnist-index"

# (Re)create the index: drop any existing index with the same name so each
# run starts from an empty collection.
existing_indexes = pinecone.list_indexes()
if any(index['name'] == index_name for index in existing_indexes):
    logging.info(f"索引 '{index_name}' 已存在，正在删除...")
    pinecone.delete_index(index_name)
    # Index deletion is asynchronous on the server side; creating an index
    # with the same name immediately afterwards can fail, so poll until the
    # old index no longer appears in the listing.
    while any(ix['name'] == index_name for ix in pinecone.list_indexes()):
        time.sleep(1)
    logging.info(f"索引 '{index_name}' 已成功删除。")
else:
    logging.info(f"索引 '{index_name}' 不存在，将创建新索引。")

logging.info(f"正在创建新索引 '{index_name}'...")
# dimension=64 matches the 8x8 sklearn "digits" images flattened to vectors;
# euclidean distance drives the nearest-neighbour queries below.
pinecone.create_index(
    name=index_name,
    dimension=64,
    metric="euclidean",
    spec=ServerlessSpec(
        cloud="aws",
        region="us-east-1"
    )
)
logging.info(f"索引 '{index_name}' 创建成功。")

# Connect to the freshly created index.
index = pinecone.Index(index_name)
logging.info(f"已成功连接到索引 '{index_name}'。")

# Load the sklearn "digits" dataset (8x8 grayscale images, 10 classes).
# NOTE: despite the index name this is NOT the full 28x28 MNIST set.
digits = load_digits(n_class=10)
X = digits.data
y = digits.target

# 80/20 train/test split. The original code sliced the arrays in order —
# an unshuffled, unstratified split — while train_test_split was imported
# but never used. Use it with a fixed seed for a reproducible, shuffled,
# class-balanced split.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# Build (id, values, metadata) tuples for every training vector; the true
# class label rides along as metadata so queries can vote on it later.
vectors = [
    (str(i), features.tolist(), {"label": int(label)})
    for i, (features, label) in enumerate(zip(X_train, y_train))
]

# Upsert in batches (Pinecone caps the number of vectors per request).
# The progress bar advances by vectors, so its unit is "vector" — the
# original "batch" label mislabeled the counter.
batch_size = 1000
with tqdm(total=len(vectors), desc="Uploading data", unit="vector") as pbar:
    for start in range(0, len(vectors), batch_size):
        batch = vectors[start:start + batch_size]
        index.upsert(batch)
        pbar.update(len(batch))

# Evaluate k-NN accuracy on the test set. NOTE: the original comment said
# k=11 while the query actually used top_k=5; K is now a single named
# constant so the documentation and the query cannot drift apart.
K = 5
correct = 0
total = len(X_test)

print("开始测试...")
for i in tqdm(range(total), desc="Testing"):
    query_vector = X_test[i].tolist()
    query_results = index.query(vector=query_vector, top_k=K, include_metadata=True)

    # Collect the labels of the K nearest neighbours; warn on any match
    # that comes back without the expected metadata.
    labels = []
    for match in query_results['matches']:
        if 'metadata' in match and 'label' in match['metadata']:
            labels.append(match['metadata']['label'])
        else:
            print(f"Warning: Missing metadata or label for match in query {i+1}")

    # Majority vote; -1 marks queries that returned no usable labels so
    # they count as misclassified rather than crashing.
    if labels:
        predicted_label = Counter(labels).most_common(1)[0][0]
    else:
        print(f"Warning: No labels found for query {i+1}")
        predicted_label = -1

    if predicted_label == y_test[i]:
        correct += 1

    # Per-query summary. The raw query_results debug dump was removed —
    # it flooded stdout with the full match payload on every query.
    print(f"Query {i+1}: Predicted {predicted_label}, Actual {y_test[i]}")

# Guard against an empty test set to avoid ZeroDivisionError.
accuracy = correct / total if total else 0.0
print(f"准确率: {accuracy:.2f}")
