import logging
import os
from collections import Counter
from datetime import datetime

import numpy as np
from pinecone import Pinecone
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Configure root logging with a level, message format, and date-time stamps.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

# Connect to Pinecone and return the MNIST index, creating it if absent.
def get_pinecone_index():
    """Return a handle to the Pinecone index 'mnist-index', creating it if needed.

    The API key is read from the ``PINECONE_API_KEY`` environment variable.

    Returns:
        A Pinecone index handle (``pc.Index``) ready for upsert/query.

    Raises:
        RuntimeError: if ``PINECONE_API_KEY`` is not set.
    """
    # SECURITY FIX: the key used to be hard-coded in source control; treat the
    # old key as leaked and rotate it. Credentials come from the environment.
    api_key = os.environ.get("PINECONE_API_KEY")
    if not api_key:
        raise RuntimeError("PINECONE_API_KEY environment variable is not set")
    pc = Pinecone(api_key=api_key)
    index_name = "mnist-index"

    # Create the index only on first run; reuse it afterwards.
    if index_name not in pc.list_indexes().names():
        # dimension=64 matches sklearn's 8x8 digit images flattened to vectors;
        # euclidean distance matches the classic KNN formulation.
        logging.info(f"创建新索引 '{index_name}'...")
        pc.create_index(
            name=index_name,
            dimension=64,
            metric="euclidean",
            spec={"serverless": {"cloud": "aws", "region": "us-east-1"}},
        )
        logging.info(f"索引 '{index_name}' 创建成功")
    else:
        logging.info(f"索引 '{index_name}' 已存在，直接使用")

    return pc.Index(index_name)

# Upload the training split into the Pinecone index.
def upload_training_data(index, X_train, y_train):
    """Upsert training vectors with integer labels as metadata.

    Args:
        index: Pinecone index handle exposing ``upsert``.
        X_train: array-like of shape (n_samples, 64); rows convertible
            via ``.tolist()``.
        y_train: array-like of class labels, parallel to ``X_train``.

    Returns:
        int: the number of vectors uploaded.
    """
    # Build (id, values, metadata) tuples; the "train_" prefix distinguishes
    # training records from anything else stored in the same index.
    vectors = [
        (f"train_{i}", x.tolist(), {"label": int(label)})
        for i, (x, label) in enumerate(zip(X_train, y_train))
    ]

    # Upsert in batches to stay within Pinecone's per-request limits.
    batch_size = 1000
    total_uploaded = 0
    for start in tqdm(range(0, len(vectors), batch_size), desc="上传训练数据"):
        batch = vectors[start:start + batch_size]
        index.upsert(batch)
        total_uploaded += len(batch)

    # BUG FIX: the old message claimed the index was created here; this
    # function only uploads data to an existing index.
    logging.info(f"成功上传了{total_uploaded}条训练数据")
    return total_uploaded

# KNN prediction via nearest-neighbour search against the Pinecone index.
def pinecone_knn_predict(index, X_test, k=5):
    """Predict a label for each test sample by majority vote of its k neighbours.

    Args:
        index: Pinecone index handle exposing ``query``.
        X_test: iterable of feature vectors (each convertible via ``.tolist()``).
        k: number of nearest neighbours to retrieve per sample.

    Returns:
        np.ndarray of predicted integer labels, one per test sample.
    """
    predictions = []
    for sample in tqdm(X_test, desc="测试预测进度"):
        # Retrieve the k nearest stored vectors together with their metadata.
        results = index.query(
            vector=sample.tolist(),
            top_k=k,
            include_metadata=True,
        )

        # BUG FIX: Pinecone returns numeric metadata as floats; cast back to
        # int so predictions are integers like the ground-truth labels.
        labels = [int(match['metadata']['label']) for match in results['matches']]
        # Counter.most_common breaks ties by first insertion order, so among
        # tied labels the one seen first (the nearer neighbour) wins.
        predictions.append(Counter(labels).most_common(1)[0][0])

    return np.array(predictions)

# Main training and evaluation pipeline.
def main():
    """Load the digits dataset, index the training split in Pinecone, and
    report KNN accuracy on the held-out test split."""
    # 8x8 digit images flatten to 64-dim vectors, matching the index dimension.
    digits = load_digits(n_class=10)
    X, y = digits.data, digits.target
    # 80/20 stratified split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )

    # Connect to (or create) the Pinecone index.
    logging.info("连接到Pinecone索引...")
    index = get_pinecone_index()

    # Upload the training split before querying.
    upload_training_data(index, X_train, y_train)

    # Evaluate accuracy for a single choice of k.
    specified_k = 11
    logging.info(f"开始测试k={specified_k}时的准确率...")
    y_pred = pinecone_knn_predict(index, X_test, k=specified_k)
    accuracy = accuracy_score(y_test, y_pred)
    # BUG FIX: interpolate the actual k instead of the hard-coded "k=11",
    # which would silently lie if specified_k were changed.
    logging.info(f"当k={specified_k}时，使用Pinecone的准确率: {accuracy:.4f}")

if __name__ == "__main__":
    main()