import logging
import os
import time
from collections import Counter

import numpy as np
import pinecone
from pinecone import Pinecone, ServerlessSpec
from sklearn.datasets import load_digits
from tqdm import tqdm

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)

# Ë Pinecone ¢7ïòke` API Æ¥	
pc = Pinecone(
    api_key="pcsk_4tZGoM_HLqw5naiHZaXJWLh4WY3W3MEEcozbMT5Ci3cUkYpTGSJXNdLZZWzUQyZcyp9MVp"
)

index_name = "mnist-index"

# Start from a clean slate: drop any previous copy of this index.
existing_indexes = pc.list_indexes().names()
if index_name in existing_indexes:
    logging.info(f"Deleting existing index: {index_name}")
    pc.delete_index(index_name)

logging.info(f"Creating new index: {index_name}")
pc.create_index(
    name=index_name,
    dimension=64,  # load_digits images are 8x8 = 64 flattened features
    metric="euclidean",
    spec=ServerlessSpec(
        cloud="aws",
        region="us-east-1"
    )
)

# A freshly created serverless index is not immediately ready to accept
# upserts or queries; poll Pinecone's status flag until it is, otherwise
# the first upsert batch below can fail or be silently dropped.
while not pc.describe_index(index_name).status["ready"]:
    time.sleep(1)

index = pc.Index(index_name)
logging.info(f"Successfully connected to index: {index_name}")

# NOTE(review): this is scikit-learn's 8x8 digits dataset, not the real
# 28x28 MNIST, despite what the log message says.
digits = load_digits(n_class=10)
X = digits.data
y = digits.target
total_data = len(X)
logging.info(f"Loaded MNIST dataset with {total_data} samples")

# Pinecone upsert tuples: (string id, dense vector, metadata payload).
vectors = [
    (str(idx), features.tolist(), {"label": int(label)})
    for idx, (features, label) in enumerate(zip(X, y))
]

batch_size = 100
logging.info("Starting data upload...")
# Upsert in fixed-size batches; the short pause keeps request volume
# comfortably under the service's rate limits.
for start in tqdm(range(0, total_data, batch_size), desc="Uploading data"):
    index.upsert(vectors[start:start + batch_size])
    time.sleep(0.1)

logging.info(f"Successfully created index and uploaded {total_data} data entries")

k = 11
correct = 0
total_tests = len(X)

logging.info(f"Starting accuracy test with k={k}...")
for i in tqdm(range(total_tests), desc="Testing accuracy"):
    query_vector = X[i].tolist()
    true_label = y[i]

    # Every query point is itself stored in the index, so its own
    # zero-distance match would always be among the neighbors and inflate
    # accuracy (train/test leakage).  Fetch one extra match and drop the
    # self-match by id so exactly k genuine neighbors vote.
    results = index.query(
        vector=query_vector,
        top_k=k + 1,
        include_metadata=True
    )

    neighbors = [m for m in results['matches'] if m['id'] != str(i)][:k]
    if neighbors:
        # Metadata numbers come back as floats; float == int equality is
        # exact for these small label values.
        labels = [m['metadata']['label'] for m in neighbors]
        predicted_label = Counter(labels).most_common(1)[0][0]
        if predicted_label == true_label:
            correct += 1
    time.sleep(0.01)

accuracy = (correct / total_tests) * 100
logging.info(f"Accuracy when k={k}: {accuracy:.2f}%")