import logging
import os
import random
import time
from collections import Counter

import numpy as np
import pinecone
from pinecone import ServerlessSpec
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Pin every random seed up front so the train/test split (and any other
# randomness) is identical on every run.
random.seed(42)
np.random.seed(42)


# -------------------------- 1. Logging setup (suppress "no match found" warnings) --------------------------
class NoMatchFilter(logging.Filter):
    """Drop WARNING records whose message contains the no-match marker text."""

    def filter(self, record):
        is_no_match_warning = (
            record.levelno == logging.WARNING
            and "未找到匹配结果" in record.getMessage()
        )
        return not is_no_match_warning


# Configure the root logger once, then attach the no-match filter to this
# module's logger so skipped-sample warnings stay out of the output.
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=LOG_DATEFMT)

logger = logging.getLogger(__name__)
logger.addFilter(NoMatchFilter())

# -------------------------- 2. Initialize Pinecone and manage the index (hardened delete logic) --------------------------
# SECURITY NOTE(review): this API key is committed in source and should be
# treated as compromised — rotate it and load it from the PINECONE_API_KEY
# environment variable instead of hard-coding it here.
pinecone_api_key = "pcsk_4Nwk78_Q2Pt2WuX6bWXkBUkmxUqGGsMeqDGEjqmiJbjCyTMVUXWVa2f7kooNFbUnJW1W8R"
pinecone_env = "us-east-1"  # AWS region used by the ServerlessSpec below
pc = pinecone.Pinecone(api_key=pinecone_api_key)
index_name = "mnist-index"

# Hardened delete: make sure any stale index is fully gone before recreating
# it (poll up to `max_checks` times, 10 s apart, to ride out propagation lag).
existing_indexes = pc.list_indexes().names()
if index_name in existing_indexes:
    logger.info(f"索引 {index_name} 已存在，开始删除...")
    pc.delete_index(index_name)

    # Re-check several times so a slow control plane isn't misread as failure.
    delete_check_count = 0
    max_checks = 5
    while index_name in pc.list_indexes().names() and delete_check_count < max_checks:
        logger.info(f"等待索引删除（第 {delete_check_count + 1}/{max_checks} 次检查）...")
        time.sleep(10)  # 10 s between checks gives the delete time to land
        delete_check_count += 1

    # Final confirmation of the delete result.
    if index_name not in pc.list_indexes().names():
        logger.info(f"索引 {index_name} 已彻底删除")
    else:
        logger.error("索引删除失败！请手动登录Pinecone控制台删除后重试（https://app.pinecone.io/）")
        # `exit()` is injected by the `site` module and is not guaranteed to
        # exist in every runtime; raise SystemExit explicitly with a non-zero
        # code so callers/CI can detect the failure.
        raise SystemExit(1)

# Create a fresh serverless index sized for the digit vectors.
logger.info(f"创建新索引 {index_name}...")
index_spec = ServerlessSpec(cloud="aws", region=pinecone_env)
pc.create_index(
    name=index_name,
    dimension=64,  # must match the 8x8 sklearn digits vectors (64 features)
    metric="euclidean",
    spec=index_spec,
)

# Wait for the index to finish initializing. Bounded poll instead of an
# unbounded `while True`, so a provisioning failure cannot hang the script
# forever; ~5 minutes total (60 polls x 5 s) before giving up.
logger.info("等待索引初始化...")
max_ready_checks = 60
for _ in range(max_ready_checks):
    index_desc = pc.describe_index(index_name)
    if index_desc.status.state == "Ready":
        logger.info("索引已完全就绪，可执行操作！")
        break
    time.sleep(5)  # 5 s between polls to avoid hammering the control plane
else:
    # `for/else`: reached only if the loop exhausted without breaking.
    logger.error(f"索引 {index_name} 初始化超时，请检查Pinecone控制台")
    raise SystemExit(1)
index = pc.Index(index_name)
logger.info(f"已连接到索引 {index_name}")

# -------------------------- 3. Load the digits data and split it (seeded, so the split never varies) --------------------------
digits = load_digits(n_class=10)
X, y = digits.data, digits.target

# random_state=42 keeps the train/test partition identical across runs;
# stratify preserves per-digit class proportions in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)
logger.info(f"数据划分完成：训练集 {len(X_train)} 条，测试集 {len(X_test)} 条（划分固定，每次一致）")

# -------------------------- 4. Upload training data (with explicit format validation) --------------------------
def _validate_batch(batch):
    """Raise ValueError if any (id, vector, metadata) triple deviates from the expected upload format."""
    for vid, vec, meta in batch:
        if not isinstance(vid, str):
            raise ValueError("ID必须为字符串")
        if not (isinstance(vec, list) and len(vec) == 64):
            raise ValueError("向量必须是64维列表")
        if not isinstance(meta["label"], int):
            raise ValueError("标签必须是整数")


# (id, values, metadata) triples in the tuple form accepted by Index.upsert;
# values are plain lists so they match the query format used later.
vectors = [
    (f"train_{i}", X_train[i].tolist(), {"label": int(y_train[i])})
    for i in range(len(X_train))
]

batch_size = 1000
upload_count = 0
with tqdm(total=len(vectors), desc="上传训练数据") as pbar:
    for i in range(0, len(vectors), batch_size):
        batch = vectors[i:i + batch_size]
        # `assert` statements are stripped under `python -O`; validate
        # explicitly so malformed data always fails fast.
        _validate_batch(batch)
        index.upsert(vectors=batch)
        upload_count += len(batch)
        pbar.update(len(batch))
logger.info(f"训练数据上传完成，共 {upload_count} 条（与训练集数量一致）")

# -------------------------- 5. Measure k=11 accuracy (query format matches the uploaded vectors) --------------------------
correct = 0
with tqdm(total=len(X_test), desc="测试准确率") as pbar:
    for i, (sample, expected) in enumerate(zip(X_test, y_test)):
        # Query with a plain list, same representation as the training upload.
        results = index.query(
            vector=sample.tolist(),
            top_k=11,
            include_metadata=True,
        )

        if not results.matches:
            logger.warning(f"测试样本 {i} 未找到匹配结果，跳过")
            pbar.update(1)
            continue

        # Majority vote over the 11 nearest neighbours' labels.
        neighbour_labels = [m.metadata["label"] for m in results.matches]
        majority_label = Counter(neighbour_labels).most_common(1)[0][0]
        if majority_label == expected:
            correct += 1
        pbar.update(1)

accuracy = correct / len(X_test) if len(X_test) > 0 else 0
logger.info(f"k=11时准确率：{accuracy:.4f}（每次运行环境一致，结果应稳定）")