import os
import pickle
import time
from collections import Counter

import numpy as np
from pinecone import Pinecone, ServerlessSpec
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler


# 1. Load a previously saved KNN model from disk
def load_knn_model(file_path):
    """Deserialize and return the KNN model pickled at *file_path*.

    NOTE(review): ``pickle.load`` can execute arbitrary code embedded in
    the file — only load model files from trusted sources.
    """
    with open(file_path, 'rb') as fh:
        return pickle.load(fh)


# 2. Pinecone initialization and index management
def setup_pinecone_index(api_key, index_name="mnist-index2", dimension=64):
    """Create a fresh serverless Pinecone index and return (index, client).

    Any pre-existing index with the same name is deleted first so the
    caller always starts from an empty index.
    """
    # New-style Pinecone client initialization.
    pc = Pinecone(api_key=api_key)

    # Drop the index if it already exists so we start from a clean slate.
    if index_name in pc.list_indexes().names():
        print(f"索引 {index_name} 已存在，正在删除...")
        pc.delete_index(index_name)
        time.sleep(10)  # give the deletion time to propagate

    print(f"正在创建索引 {index_name}...")
    pc.create_index(
        name=index_name,
        dimension=dimension,
        metric="euclidean",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )

    time.sleep(10)  # wait for the new index to become ready

    # Hand back both the index handle and the client.
    return pc.Index(index_name), pc


# 3. Load the handwritten digits data and make a train/test split
def prepare_digits_data(test_size=0.2, random_state=42):
    """Return (X_train, X_test, y_train, y_test, scaler) for sklearn digits.

    The split is stratified by label. The StandardScaler is fitted on the
    training portion only, so no test-set statistics leak into scaling.
    """
    digits = load_digits()
    features, targets = digits.data, digits.target

    X_train, X_test, y_train, y_test = train_test_split(
        features,
        targets,
        test_size=test_size,
        random_state=random_state,
        stratify=targets,
    )

    # Fit the scaler on training data only, then transform both splits.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    return X_train, X_test, y_train, y_test, scaler


# 4. Upsert the training data into the Pinecone index in batches
def insert_data_to_pinecone(index, vectors, labels, batch_size=1000):
    """Upsert (id, vector, metadata) triples into *index* batch by batch.

    Vector i gets the id ``digit_<i>`` and metadata carrying its integer
    label plus its original position in *vectors*. Sleeps between batches
    to throttle requests, then waits for Pinecone to finish indexing.
    """
    print(f"正在将训练数据分批次插入Pinecone索引，每批次 {batch_size} 个向量...")

    total_vectors = len(vectors)
    # Ceiling division: the final batch may hold fewer than batch_size.
    num_batches = (total_vectors + batch_size - 1) // batch_size

    for batch_idx in range(num_batches):
        start_idx = batch_idx * batch_size
        end_idx = min(start_idx + batch_size, total_vectors)

        # Assemble the current batch as (id, values, metadata) tuples.
        payload = [
            (
                f"digit_{i}",
                vectors[i].tolist(),
                {"label": int(labels[i]), "original_index": i},
            )
            for i in range(start_idx, end_idx)
        ]

        print(f"正在插入第 {batch_idx + 1}/{num_batches} 批次 (向量 {start_idx} 到 {end_idx - 1})...")
        index.upsert(vectors=payload)

        # Throttle between batches to avoid hammering the API;
        # skip the pause after the last batch.
        if batch_idx < num_batches - 1:
            time.sleep(2)

    print(f"完成! 总共插入了 {total_vectors} 个训练向量")

    # Give Pinecone time to finish indexing the new vectors.
    print("等待数据完全索引...")
    time.sleep(10)


# 5. Predict a label via nearest-neighbour majority vote backed by Pinecone
def hybrid_prediction(knn_model, pinecone_index, query_vector, k=5):
    """Query Pinecone for the k nearest neighbours and majority-vote a label.

    Parameters
    ----------
    knn_model : unused here; kept for interface compatibility with callers.
    pinecone_index : Pinecone index holding the training vectors.
    query_vector : array-like exposing .tolist(); the sample to classify.
    k : number of neighbours to retrieve.

    Returns ``(predicted_label, neighbor_labels, neighbor_distances)``,
    or ``(None, [], [])`` when the query fails or returns no matches.
    """
    query_vector_list = query_vector.tolist()

    try:
        results = pinecone_index.query(
            vector=query_vector_list,
            top_k=k,
            include_metadata=True
        )

        # Bail out early when nothing matched.
        if not results.matches:
            print("警告: 没有找到匹配的向量")
            return None, [], []

        # Single pass over the matches: collect index, distance and label.
        # NOTE: the index is built with the "euclidean" metric, so
        # match.score is a distance (smaller is closer), not a similarity.
        neighbor_indices = []
        neighbor_distances = []
        labels = []
        for match in results.matches:
            neighbor_indices.append(match.metadata["original_index"])
            neighbor_distances.append(match.score)
            labels.append(match.metadata["label"])

        # Majority vote. Counter.most_common breaks ties deterministically
        # in favour of the label seen first, i.e. the nearest neighbour
        # (the original max(set(...)) tie-break was hash-order dependent).
        predicted_label = Counter(labels).most_common(1)[0][0]

        return predicted_label, labels, neighbor_distances

    except Exception as e:
        print(f"查询过程中发生错误: {e}")
        return None, [], []


# 6. Evaluate prediction accuracy over the test set
def evaluate_model(knn_model, index, X_test, y_test, num_samples=None):
    """Run hybrid_prediction over the test samples and report accuracy.

    When *num_samples* is given, only the first N samples are evaluated.
    Returns the final accuracy as a float in [0, 1].
    """
    print("\n开始模型评估...")

    # Optionally truncate the test set to speed evaluation up.
    if num_samples is not None:
        X_test = X_test[:num_samples]
        y_test = y_test[:num_samples]

    total_predictions = len(X_test)
    correct_predictions = 0

    for i, (sample, true_label) in enumerate(zip(X_test, y_test)):
        predicted_label, _, _ = hybrid_prediction(knn_model, index, sample, k=5)

        if predicted_label is not None and predicted_label == true_label:
            correct_predictions += 1

        # Progress report every 100 samples.
        if (i + 1) % 100 == 0:
            running = correct_predictions / (i + 1)
            print(f"已评估 {i + 1}/{total_predictions} 个样本，当前准确率: {running:.4f}")

    accuracy = correct_predictions / total_predictions
    print(f"\n最终评估结果:")
    print(f"总样本数: {total_predictions}")
    print(f"正确预测: {correct_predictions}")
    print(f"准确率: {accuracy:.4f}")

    return accuracy


# Main entry point: rebuild the index, upsert training data, evaluate
def main():
    """End-to-end pipeline: load model, rebuild index, upsert, evaluate."""
    # SECURITY: prefer the PINECONE_API_KEY environment variable. The
    # hardcoded fallback below is kept only for backward compatibility —
    # this key is committed to source control, so rotate it and remove
    # the fallback.
    PINECONE_API_KEY = os.environ.get(
        "PINECONE_API_KEY",
        "pcsk_4Nwk78_Q2Pt2WuX6bWXkBUkmxUqGGsMeqDGEjqmiJbjCyTMVUXWVa2f7kooNFbUnJW1W8R",
    )

    try:
        # 1. Load the persisted KNN model
        print("正在加载KNN模型...")
        knn_model = load_knn_model("best_knn_model.pkl")
        print("KNN模型加载成功!")

        # 2. (Re)create the Pinecone index
        print("正在设置Pinecone索引...")
        index, pc = setup_pinecone_index(PINECONE_API_KEY, "mnist-index2", dimension=64)
        print("Pinecone索引设置完成!")

        # 3. Prepare the digits data and split into train/test
        print("正在准备手写数字数据...")
        X_train, X_test, y_train, y_test, scaler = prepare_digits_data(test_size=0.2)
        print(f"训练集: {X_train.shape[0]} 个样本")
        print(f"测试集: {X_test.shape[0]} 个样本")
        print(f"特征维度: {X_train.shape[1]}")

        # 4. Upsert only the training data into Pinecone
        insert_data_to_pinecone(index, X_train, y_train, batch_size=1000)

        # 5. Show index statistics
        index_stats = index.describe_index_stats()
        print(f"\n索引统计: {index_stats}")

        # 6. Evaluate on only the first 100 test samples to save time
        accuracy = evaluate_model(knn_model, index, X_test, y_test, num_samples=100)

        # 7. Single-sample sanity check
        print("\n单个样本测试示例:")
        test_sample_idx = 0
        test_sample = X_test[test_sample_idx]
        true_label = y_test[test_sample_idx]

        predicted_label, neighbor_labels, distances = hybrid_prediction(
            knn_model, index, test_sample, k=5
        )

        if predicted_label is not None:
            print(f"真实标签: {true_label}")
            print(f"预测标签: {predicted_label}")
            print(f"邻居标签: {neighbor_labels}")
            print(f"距离: {distances}")
            print(f"预测{'正确' if predicted_label == true_label else '错误'}")
        else:
            print("无法进行预测")

    except Exception as e:
        # Top-level boundary: report and exit gracefully.
        print(f"发生错误: {e}")


# Recreate the index only, without running the full pipeline
def recreate_index_only():
    """Recreate the Pinecone index and return it, or None on failure."""
    # SECURITY: prefer the PINECONE_API_KEY environment variable. The
    # hardcoded fallback below is committed to source control — rotate
    # the key and remove the fallback.
    PINECONE_API_KEY = os.environ.get(
        "PINECONE_API_KEY",
        "pcsk_4Nwk78_Q2Pt2WuX6bWXkBUkmxUqGGsMeqDGEjqmiJbjCyTMVUXWVa2f7kooNFbUnJW1W8R",
    )

    try:
        index, pc = setup_pinecone_index(PINECONE_API_KEY, "mnist-index2", dimension=64)
        print("索引重新创建完成!")
        return index
    except Exception as e:
        print(f"重新创建索引时发生错误: {e}")
        return None


if __name__ == "__main__":
    main()

    # To only recreate the index (skipping the full pipeline),
    # uncomment the line below:
    # recreate_index_only()