import logging
import os
import time
from collections import Counter

import numpy as np
from pinecone import Pinecone, ServerlessSpec
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Configure root logging: INFO level, with a human-readable timestamp
# (date included) on every record.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)

def _load_mnist(train_limit=1437, test_ratio=0.2, seed=42):
    """Fetch MNIST from OpenML, scale pixels to [0, 1], split, and truncate.

    Args:
        train_limit: keep only the first ``train_limit`` training rows
            (the original requirement was exactly 1437 vectors).
        test_ratio: fraction of the data held out as the test set.
        seed: ``random_state`` for the stratified split.

    Returns:
        Tuple ``(X_train, X_test, y_train, y_test)`` of numpy arrays.
    """
    logging.info("正在加载MNIST数据集...")
    mnist = fetch_openml('mnist_784', version=1, as_frame=False)
    X = mnist.data.astype(np.float32) / 255.0  # normalize pixels to [0, 1]
    y = mnist.target.astype(int)

    # Stratified split keeps the per-digit class balance in both halves.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_ratio, random_state=seed, stratify=y
    )

    X_train = X_train[:train_limit]
    y_train = y_train[:train_limit]

    logging.info(f"数据加载完成: 训练集 {len(X_train)} 条, 测试集 {len(X_test)} 条")
    return X_train, X_test, y_train, y_test


def _recreate_index(pc, index_name, dimension, ready_timeout=120):
    """Drop ``index_name`` if it exists, create a fresh serverless index,
    wait until it is ready, and return a connected Index handle.

    Args:
        pc: an initialized ``Pinecone`` client.
        index_name: name of the index to (re)create.
        dimension: vector dimension for the new index.
        ready_timeout: max seconds to wait for the index to become ready.

    Returns:
        A ``pinecone.Index`` handle, or ``None`` on failure.
    """
    existing_indexes = [index.name for index in pc.list_indexes()]
    if index_name in existing_indexes:
        logging.info(f"索引 '{index_name}' 已存在，正在删除...")
        pc.delete_index(index_name)
        time.sleep(10)  # give the service time to finish the deletion
        logging.info(f"索引 '{index_name}' 已成功删除...")
    else:
        logging.info(f"索引 '{index_name}' 不存在，将创建新索引。")

    logging.info(f"正在创建新索引 '{index_name}'...")
    try:
        pc.create_index(
            name=index_name,
            dimension=dimension,
            metric="cosine",
            spec=ServerlessSpec(cloud="aws", region="us-east-1")
        )
        # Poll the index status instead of a fixed 30 s sleep: returns as
        # soon as the index is ready, and fails loudly on timeout.
        deadline = time.monotonic() + ready_timeout
        while not pc.describe_index(index_name).status["ready"]:
            if time.monotonic() > deadline:
                raise TimeoutError(
                    f"index '{index_name}' not ready after {ready_timeout}s"
                )
            time.sleep(2)
        logging.info(f"索引 '{index_name}' 创建成功。")
    except Exception as e:
        logging.error(f"创建索引失败: {e}")
        return None

    try:
        index = pc.Index(index_name)
        logging.info(f"已成功连接到索引 '{index_name}'。")
        return index
    except Exception as e:
        logging.error(f"连接到索引失败: {e}")
        return None


def _upload_vectors(index, X_train, y_train, batch_size=100):
    """Upsert the training vectors in batches, with the digit label stored
    as metadata (used later for k-NN voting).

    Returns:
        True on success, False if any upsert failed.
    """
    logging.info("开始上传训练数据...")
    try:
        total_vectors = len(X_train)

        with tqdm(total=total_vectors, desc="上传数据到 Pinecone") as pbar:
            for start in range(0, total_vectors, batch_size):
                end_idx = min(start + batch_size, total_vectors)

                # Each record: deterministic id, raw pixel values, and the
                # true label as metadata.
                vectors = [
                    {
                        'id': f"train_{j}",
                        'values': X_train[j].tolist(),
                        'metadata': {'label': int(y_train[j])}
                    }
                    for j in range(start, end_idx)
                ]

                index.upsert(vectors=vectors)
                pbar.update(len(vectors))

                time.sleep(0.05)  # throttle to stay under rate limits

        logging.info(f"成功创建索引，并上传了 {total_vectors} 条数据")
        return True

    except Exception as e:
        logging.error(f"插入数据失败: {e}")
        return False


def _evaluate(index, X_test, y_test, k=11, sample_size=100):
    """k-NN classification via Pinecone: query each test vector's ``k``
    nearest neighbours and predict by majority vote over their labels.

    Args:
        index: connected Pinecone index holding the training vectors.
        X_test, y_test: test features and labels.
        k: number of neighbours to query.
        sample_size: cap on test samples evaluated (for speed).

    Returns:
        Accuracy as a float in [0, 1], or ``None`` on failure.
    """
    logging.info("开始测试准确率...")
    try:
        correct = 0
        total = min(sample_size, len(X_test))

        predictions = []

        with tqdm(total=total, desc=f"测试 k={k} 的准确率") as pbar:
            for i in range(total):
                query_result = index.query(
                    vector=X_test[i].tolist(),
                    top_k=k,
                    include_metadata=True
                )

                # Pinecone returns numeric metadata as floats — cast back
                # to int so votes and predictions are integer labels.
                neighbor_labels = [
                    int(match['metadata']['label'])
                    for match in query_result['matches']
                    if 'metadata' in match and 'label' in match['metadata']
                ]

                # Majority vote; Counter.most_common is O(k) vs the O(k^2)
                # max(set(...), key=list.count) idiom.
                if neighbor_labels:
                    predicted_label = Counter(neighbor_labels).most_common(1)[0][0]
                    predictions.append(predicted_label)

                    if predicted_label == y_test[i]:
                        correct += 1
                else:
                    predictions.append(-1)  # sentinel: no neighbours found

                pbar.update(1)

        accuracy = correct / total
        logging.info(f"当 k={k} 时，使用 Pinecone 的准确率为: {accuracy:.4f}")
        return accuracy

    except Exception as e:
        logging.error(f"测试准确率失败: {e}")
        return None


def main():
    """End-to-end demo: load MNIST, rebuild a Pinecone index, upsert the
    training vectors, and measure k-NN accuracy on a test sample.
    """
    # SECURITY: the API key must come from the environment — never commit
    # secrets to source control (the previous hard-coded key is compromised
    # and should be rotated).
    api_key = os.environ.get("PINECONE_API_KEY")
    if not api_key:
        logging.error("PINECONE_API_KEY environment variable is not set")
        return
    pc = Pinecone(api_key=api_key)

    logging.info("Looking for plugins in pinecone_plugins.inference")
    logging.info("Installing plugin inference into Pinecone")

    X_train, X_test, y_train, y_test = _load_mnist()

    index = _recreate_index(pc, "mnist-index", X_train.shape[1])
    if index is None:
        return

    if not _upload_vectors(index, X_train, y_train):
        return

    _evaluate(index, X_test, y_test, k=11)

# Entry point: run when executed as a script, or when this code is pasted
# directly into a Jupyter/IPython session.
# NOTE(review): "get_ipython" in globals() only sees this module's own
# namespace — it fires when the code is pasted into a notebook cell (where
# get_ipython lives in the user namespace), but NOT when the file is
# imported as a module under IPython. Confirm the intended usage.
if __name__ == "__main__" or "get_ipython" in globals():
    main()