import logging
import os
import time
from collections import Counter

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tqdm import tqdm

# Pinecone client classes
from pinecone import Pinecone, ServerlessSpec

# ---------------------- Logging setup ----------------------
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',  # timestamp format for log records
    level=logging.INFO,
)

# ---------------------- Configuration ----------------------
# SECURITY: a live-looking API key was committed to source. Prefer the
# PINECONE_API_KEY environment variable; the hard-coded fallback is kept
# only for backward compatibility — rotate this key and remove it.
PINECONE_API_KEY = os.environ.get(
    "PINECONE_API_KEY",
    "pcsk_7MfWGD_UtVfYr1M4i3amLN3DWBtULNNnHL4RPedFgLpy1A1zTCDxnSQgerfd1D7iMWDL9i",
)
INDEX_NAME = "mnist-index"
DIMENSION = 784   # flattened MNIST image size (28*28)
TRAIN_SIZE = 0.8  # fraction of the merged data used for training
TEST_K = 11       # k for k-NN voting at test time
BATCH_SIZE = 100  # vectors per upsert batch

# ---------------------- 初始化Pinecone ----------------------
def init_pinecone():
    """Initialize the Pinecone client and (re)create the MNIST index.

    Deletes any pre-existing index of the same name, creates a fresh
    serverless index, waits until it is ready to serve requests, and
    returns a handle to it.

    Returns:
        A Pinecone ``Index`` handle for INDEX_NAME.
    """
    pc = Pinecone(api_key=PINECONE_API_KEY)

    # Drop any existing index with the same name to avoid stale vectors.
    if INDEX_NAME in pc.list_indexes().names():
        pc.delete_index(INDEX_NAME)
        logging.info(f"已删除原有索引：{INDEX_NAME}")

    pc.create_index(
        name=INDEX_NAME,
        dimension=DIMENSION,
        metric="euclidean",  # Euclidean distance suits raw-pixel features
        spec=ServerlessSpec(
            cloud="aws",
            region="us-east-1",  # adjust to your Pinecone project's region
        ),
    )

    # create_index returns before the index is actually ready to serve;
    # poll its status so the first upsert does not fail with a 404/timeout.
    while not pc.describe_index(INDEX_NAME).status["ready"]:
        time.sleep(1)

    logging.info(f"成功创建索引：{INDEX_NAME}")
    return pc.Index(INDEX_NAME)

# ---------------------- 加载并预处理MNIST数据 ----------------------
def load_mnist_data(max_train_size=None, max_test_size=None):
    """Load MNIST, merge the official splits, and re-split by TRAIN_SIZE.

    Args:
        max_train_size: optional cap on the training-set size.
        max_test_size: optional cap on the test-set size.

    Returns:
        (x_train, y_train, x_test, y_test): images flattened to 784
        float32 values in [0, 1], plus their integer labels.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten 28x28 -> 784 and normalize pixels from 0-255 to 0-1.
    x_train = x_train.reshape(-1, DIMENSION).astype(np.float32) / 255.0
    x_test = x_test.reshape(-1, DIMENSION).astype(np.float32) / 255.0

    # Merge the official splits so the TRAIN_SIZE ratio applies globally.
    x_all = np.vstack((x_train, x_test))
    y_all = np.hstack((y_train, y_test))

    # Pre-cap the merged data when both limits are given, so the stratified
    # split below operates on roughly the final amount of data.
    if max_train_size and max_test_size:
        total_max = max_train_size + max_test_size
        if len(x_all) > total_max:
            # Use a local Generator instead of np.random.seed(42): same
            # reproducibility, but without mutating the global RNG state
            # that other code (e.g. sklearn defaults) may rely on.
            rng = np.random.default_rng(42)
            indices = rng.choice(len(x_all), total_max, replace=False)
            x_all = x_all[indices]
            y_all = y_all[indices]
            logging.info(f"已将总数据量限制为{total_max}条")

    x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(
        x_all, y_all, train_size=TRAIN_SIZE, random_state=42, stratify=y_all
    )

    # Enforce the per-split caps (the split above only fixes the ratio).
    if max_train_size and len(x_train_split) > max_train_size:
        x_train_split = x_train_split[:max_train_size]
        y_train_split = y_train_split[:max_train_size]
        logging.info(f"已将训练集限制为{max_train_size}条")

    if max_test_size and len(x_test_split) > max_test_size:
        x_test_split = x_test_split[:max_test_size]
        y_test_split = y_test_split[:max_test_size]
        logging.info(f"已将测试集限制为{max_test_size}条")

    logging.info(f"数据拆分完成：训练集{len(x_train_split)}条，测试集{len(x_test_split)}条")
    return x_train_split, y_train_split, x_test_split, y_test_split

# ---------------------- 批量上传训练数据到Pinecone ----------------------
def upload_train_data(index, x_train, y_train):
    """Batch-upsert the training vectors (with labels) into Pinecone.

    Args:
        index: Pinecone index handle.
        x_train: (n, DIMENSION) array of training vectors.
        y_train: integer labels, stored as metadata for later k-NN voting.

    Returns:
        The number of vectors uploaded.
    """
    total_data = len(x_train)
    uploaded_count = 0

    # Deterministic unique IDs of the form "mnist-<row index>".
    ids = [f"mnist-{i}" for i in range(total_data)]

    # Upload in batches to keep each request small, with a progress bar.
    with tqdm(total=total_data, desc="上传训练数据") as pbar:
        for i in range(0, total_data, BATCH_SIZE):
            end_idx = min(i + BATCH_SIZE, total_data)
            batch_ids = ids[i:end_idx]
            batch_vectors = x_train[i:end_idx].tolist()
            batch_metadata = [{"label": int(y)} for y in y_train[i:end_idx]]

            # Each element is an (id, vector, metadata) tuple.
            vectors = list(zip(batch_ids, batch_vectors, batch_metadata))
            index.upsert(vectors=vectors)

            uploaded_count += len(batch_ids)
            pbar.update(len(batch_ids))
            time.sleep(0.1)  # throttle request rate

    # Fixed: the original message claimed the index was created here, but
    # creation happens in init_pinecone() — this function only uploads.
    logging.info(f"成功上传了{uploaded_count}条训练数据")
    return uploaded_count

# ---------------------- 测试准确率（k=11）----------------------
def test_accuracy(index, x_test, y_test, k=TEST_K):
    """Evaluate k-NN classification accuracy against the Pinecone index.

    Queries the index once per test sample and predicts by majority vote
    over the k nearest neighbors' stored labels.

    Args:
        index: Pinecone index handle populated with labeled vectors.
        x_test: (n, DIMENSION) array of query vectors.
        y_test: ground-truth labels for x_test.
        k: number of neighbors to retrieve per query.

    Returns:
        The accuracy as a float in [0, 1].
    """
    total_test = len(x_test)
    y_pred = []

    with tqdm(total=total_test, desc=f"测试k={k}准确率") as pbar:
        for i in range(total_test):
            results = index.query(
                vector=x_test[i].tolist(),
                top_k=k,
                include_metadata=True,  # we need the stored labels back
            )

            # Majority vote over neighbor labels. Counter.most_common
            # preserves first-seen order, so ties are broken in favor of
            # the label seen earliest in the distance-ranked match list
            # (the original max(set(...), key=...) broke ties arbitrarily
            # by set iteration order).
            neighbor_labels = [int(m["metadata"]["label"]) for m in results["matches"]]
            y_pred.append(Counter(neighbor_labels).most_common(1)[0][0])

            pbar.update(1)
            time.sleep(0.05)  # throttle request rate

    accuracy = accuracy_score(y_test, y_pred)
    logging.info(f"当k={k}时，使用Pinecone的准确率：{accuracy:.4f}")
    return accuracy

# ---------------------- Entry point ----------------------
if __name__ == "__main__":
    try:
        # Set up the Pinecone client and a fresh, empty index.
        mnist_index = init_pinecone()

        # Load a capped subset of MNIST (10k train / 2k test).
        x_train, y_train, x_test, y_test = load_mnist_data(
            max_train_size=10000, max_test_size=2000
        )

        # Push the labeled training vectors into the index.
        upload_train_data(mnist_index, x_train, y_train)

        # Evaluate k-NN accuracy on the held-out test set.
        test_accuracy(mnist_index, x_test, y_test, k=TEST_K)

    except Exception as e:
        logging.error(f"程序执行出错：{str(e)}", exc_info=True)
    finally:
        # The modern pinecone client needs no explicit deinit() call.
        logging.info("程序执行完毕")