import logging
import os
import time
from collections import Counter

import numpy as np
from pinecone import Pinecone, ServerlessSpec
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Configure logging (date, time, level, message).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)

# Initialize the Pinecone client.
# SECURITY NOTE(review): an API key was committed in source control — it
# should be rotated. Prefer the PINECONE_API_KEY environment variable;
# the literal below is kept only as a backward-compatible fallback.
PINECONE_API_KEY = os.environ.get(
    "PINECONE_API_KEY",
    "pcsk_tgaTm_3tJUEJPS3jAQfBsU6c2z1BVuRJKrFfsz2bnkCSf348BS6XMAbDMQUFakb6JDoRJ",
)
INDEX_NAME = "mnist-index"
pinecone = Pinecone(api_key=PINECONE_API_KEY)

def load_and_split_data():
    """Load the sklearn digits dataset (simplified MNIST) and split it 80/20.

    Returns:
        Tuple ``(X_train, X_test, y_train, y_test)`` — features are the
        flattened 8x8 images, targets are the digit labels 0-9.
    """
    digits = load_digits()
    # Stratified split with a fixed seed so every run sees the same folds:
    # 80% for populating the index, 20% held out for accuracy evaluation.
    return train_test_split(
        digits.data,
        digits.target,
        test_size=0.2,
        random_state=42,
        stratify=digits.target,
    )


def create_and_populate_index(X_train, y_train):
    """Create the Pinecone index and upload the training vectors.

    Deletes any pre-existing index with the same name, creates a fresh
    serverless index whose dimension matches the feature vectors, waits
    until the index reports ready, then upserts the training data in
    batches with a progress bar.

    Args:
        X_train: 2-D array of training feature vectors (one row per image).
        y_train: 1-D array of integer labels aligned with X_train.

    Returns:
        A Pinecone ``Index`` handle pointing at the populated index.
    """
    # Recreate from scratch so stale vectors from earlier runs never leak in.
    if INDEX_NAME in pinecone.list_indexes().names():
        pinecone.delete_index(INDEX_NAME)

    # Dimension follows the data (64 for the sklearn digits set);
    # cosine similarity as the distance metric.
    pinecone.create_index(
        name=INDEX_NAME,
        dimension=X_train.shape[1],
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1")  # adjust cloud/region as needed
    )

    # Serverless index creation is asynchronous: poll until the index is
    # ready, otherwise the first upsert/query may fail. (This is why the
    # file imports `time`.)
    while not pinecone.describe_index(INDEX_NAME).status["ready"]:
        time.sleep(1)
    index = pinecone.Index(INDEX_NAME)

    # Build the upsert payload: id, raw vector values, label as metadata.
    vectors = [
        {
            "id": f"train-{idx}",
            "values": vec.tolist(),
            "metadata": {"label": int(label)},
        }
        for idx, (vec, label) in enumerate(zip(X_train, y_train))
    ]

    # Upsert in batches of 100, advancing the progress bar per batch.
    batch_size = 100
    with tqdm(total=len(vectors), desc="上传训练数据") as pbar:
        for start in range(0, len(vectors), batch_size):
            batch = vectors[start:start + batch_size]
            index.upsert(vectors=batch)
            pbar.update(len(batch))

    uploaded_count = len(vectors)
    logging.info(f"成功创建索引，并上传了 {uploaded_count} 条数据")
    return index


def test_accuracy(index, X_test, y_test, k=11):
    """Evaluate k-NN classification accuracy via Pinecone similarity search.

    For each test vector, queries the index for the top-k most similar
    training vectors and predicts the majority label among them.

    Args:
        index: Pinecone ``Index`` handle populated with labeled vectors.
        X_test: 2-D array of test feature vectors.
        y_test: 1-D array of ground-truth integer labels.
        k: number of neighbors in the vote (default 11).

    Returns:
        Classification accuracy as a float in [0, 1].
    """
    predictions = []
    with tqdm(total=len(X_test), desc="测试准确率") as pbar:
        for vec in X_test:
            # Top-k similarity search with metadata so labels come back.
            results = index.query(
                vector=vec.tolist(),
                top_k=k,
                include_metadata=True
            )
            # Pinecone returns numeric metadata as floats — cast back to int.
            labels = [int(match["metadata"]["label"]) for match in results["matches"]]
            # Majority vote. Counter.most_common breaks ties in favor of the
            # label encountered first (i.e. among the most similar matches),
            # which is deterministic — unlike max() over an unordered set.
            pred = Counter(labels).most_common(1)[0][0]
            predictions.append(pred)
            pbar.update(1)

    accuracy = accuracy_score(y_test, predictions)
    logging.info(f"当k={k}时，使用 Pinecone 的准确率为 {accuracy:.4f}")
    return accuracy


if __name__ == "__main__":
    # 1. 加载并分割数据
    logging.info("开始加载 MNIST 数据集并分割...")
    X_train, X_test, y_train, y_test = load_and_split_data()

    # 2. 创建索引并上传训练数据
    logging.info("开始创建索引并上传训练数据...")
    index = create_and_populate_index(X_train, y_train)

    # 3. 测试准确率
    logging.info("开始测试 k=11 时的准确率...")
    test_accuracy(index, X_test, y_test, k=11)

    logging.info("所有任务执行完成！")