import os
import logging
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import pinecone
from tqdm.auto import tqdm

# Configure application-wide logging: timestamped INFO-level messages.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)
# Module-level logger shared by every function in this script.
logger = logging.getLogger(__name__)

def load_mnist(n_train=1437, test_size=0.2, random_state=42):
    """Fetch MNIST from OpenML and return a train/test split as numpy arrays.

    Args:
        n_train: number of training samples to keep (default 1437, matching
            the original script's hard-coded subset).
        test_size: fraction of the full dataset held out for testing.
        random_state: seed for the train/test split, for reproducibility.

    Returns:
        (X_train, y_train, X_test, y_test) — pixel values normalized to
        [0, 1] as float arrays; labels as an integer array.
    """
    # as_frame=False guarantees numpy ndarrays regardless of sklearn version,
    # avoiding the fragile DataFrame-or-ndarray ambiguity that required
    # conditional .to_numpy() calls before.
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
    # Normalize pixel values from [0, 255] to [0, 1].
    X = X / 255.0
    # OpenML delivers labels as strings; convert to integers.
    y = y.astype(int)
    # Split off the test set (test_size fraction of the data).
    X_train_full, X_test, y_train_full, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )
    # Keep only the first n_train training samples to limit upload volume.
    X_train = X_train_full[:n_train]
    y_train = y_train_full[:n_train]
    return X_train, y_train, X_test, y_test

from pinecone import Pinecone, ServerlessSpec

def init_pinecone(index_name="mnist-index", dimension=784, api_key=None):
    """Create a fresh Pinecone serverless index and return a handle to it.

    Any existing index with the same name is deleted first, so the index
    always starts empty.

    Args:
        index_name: name of the index to (re)create.
        dimension: vector dimensionality (MNIST images are 28x28 = 784).
        api_key: Pinecone API key; if None, read from the PINECONE_API_KEY
            environment variable.

    Returns:
        A connected Pinecone Index object.

    Raises:
        RuntimeError: if no API key is provided and PINECONE_API_KEY is unset.
    """
    # SECURITY: never hard-code API keys in source control — the previous
    # committed key must be considered leaked and should be rotated.
    api_key = api_key or os.environ.get("PINECONE_API_KEY")
    if not api_key:
        raise RuntimeError(
            "Pinecone API key missing: pass api_key or set the "
            "PINECONE_API_KEY environment variable."
        )
    pc = Pinecone(api_key=api_key)

    # Delete a pre-existing index of the same name so we start clean.
    if index_name in pc.list_indexes().names():
        logger.info(f"索引 '{index_name}' 已存在，正在删除...")
        pc.delete_index(index_name)

    # Create the new serverless index.
    logger.info(f"正在创建新索引 '{index_name}'...")
    pc.create_index(
        name=index_name,
        dimension=dimension,  # MNIST images are 28x28 = 784 dimensions
        metric="euclidean",   # Euclidean distance for raw-pixel kNN
        spec=ServerlessSpec(cloud="aws", region="us-east-1")  # hosting config
    )
    logger.info(f"索引 '{index_name}' 创建成功.")

    # Connect to the freshly created index.
    index = pc.Index(index_name)
    logger.info(f"已成功连接到索引 '{index_name}'.")
    return index

def upload_data(index, X_train, y_train, batch_size=100):
    """Upsert training vectors into a Pinecone index in batches.

    Each vector gets id "vec-<i>" and metadata {"label": <digit>} so the
    label can be recovered at query time.

    Args:
        index: connected Pinecone Index object.
        X_train: 2-D array of training vectors.
        y_train: 1-D array of integer labels, parallel to X_train.
        batch_size: number of vectors per upsert call (Pinecone limits
            request size, so uploads must be chunked).
    """
    n = len(X_train)
    # Iterate over batch start offsets; one upsert request per slice.
    for start in tqdm(range(0, n, batch_size), desc="上传数据到Pinecone"):
        end = min(start + batch_size, n)
        batch = [
            (f"vec-{i}", X_train[i].tolist(), {"label": int(y_train[i])})
            for i in range(start, end)
        ]
        index.upsert(vectors=batch)
    # Fixed log message: this function only uploads; the index is created
    # elsewhere (the old message wrongly claimed index creation).
    logger.info(f"成功上传了 {n} 条数据")

def evaluate_accuracy(index, X_test, y_test, k=11):
    """Classify test samples by k-nearest-neighbor majority vote via Pinecone.

    For each test vector, query the index for the k nearest stored vectors
    and predict the most common label among them.

    Args:
        index: connected Pinecone Index object populated with labeled vectors.
        X_test: 2-D array of test vectors.
        y_test: 1-D array of true integer labels, parallel to X_test.
        k: number of neighbors to vote with.

    Returns:
        Classification accuracy as a float in [0, 1].
    """
    correct = 0
    for i in tqdm(range(len(X_test)), desc=f"测试k={k}的准确率：Pinecone", total=len(X_test)):
        query_vec = X_test[i].tolist()  # Pinecone expects a plain list
        # Retrieve the k most similar stored vectors with their metadata.
        results = index.query(vector=query_vec, top_k=k, include_metadata=True)
        # Cast labels to int: Pinecone returns numeric metadata as floats.
        labels = [int(res["metadata"]["label"]) for res in results["matches"]]
        # Majority vote among the neighbors decides the prediction.
        pred = max(set(labels), key=labels.count)
        if pred == y_test[i]:
            correct += 1
    accuracy = correct / len(X_test)
    # Fixed: report the actual k (old message hard-coded "k=11" and had a
    # typo), so logs stay correct when called with a different k.
    logger.info(f"当k={k} 时，使用Pinecone的准确率：{accuracy:.4f}")
    return accuracy

def main():
    """End-to-end pipeline: load MNIST, build the index, upload, evaluate."""
    # Step 1: fetch and split the dataset.
    X_train, y_train, X_test, y_test = load_mnist()
    # Step 2: (re)create the Pinecone index and connect to it.
    index = init_pinecone()
    # Step 3: push the training vectors with their labels.
    upload_data(index, X_train, y_train)
    # Step 4: measure kNN classification accuracy on the test set.
    evaluate_accuracy(index, X_test, y_test, k=11)


if __name__ == "__main__":
    main()