import numpy as np
import logging
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from collections import Counter
from pinecone import Pinecone, ServerlessSpec
from tqdm import tqdm
import time

# Configure root-logger output: timestamped, INFO-level messages.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)

class PineconeMNIST:
    """KNN digit classification backed by a Pinecone serverless vector index.

    Workflow: connect to Pinecone, (re)create an index, upload the training
    split of the scikit-learn digits dataset, then classify the test split
    by majority vote over the k nearest stored vectors.
    """

    def __init__(self, api_key, index_name="mnist-index", dimension=64):
        """Store connection settings; no network calls happen here.

        Args:
            api_key: Pinecone API key.
            index_name: Name of the index to (re)create and query.
            dimension: Vector dimension of the index. Defaults to 64, the
                length of a flattened 8x8 digits image.
        """
        self.api_key = api_key
        self.index_name = index_name
        self.dimension = dimension
        self.pc = None      # Pinecone client, set by initialize_pinecone()
        self.index = None   # Index handle, set by create_index()

    def initialize_pinecone(self):
        """Create the Pinecone client.

        Returns:
            True on success, False if client construction raised.
        """
        try:
            self.pc = Pinecone(api_key=self.api_key)
            logging.info("Pinecone初始化成功")
            return True
        except Exception as e:
            logging.error(f"Pinecone初始化失败: {e}")
            return False

    def create_index(self):
        """Drop any existing index with this name, create a fresh one, connect.

        Blocks until the new index reports ready, then stores the index
        handle on self.index.
        """
        # Delete a pre-existing index so every run starts from a clean slate.
        existing_indexes = self.pc.list_indexes()

        if any(index_info.name == self.index_name for index_info in existing_indexes):
            logging.info(f"索引 '{self.index_name}' 已存在，正在删除...")
            self.pc.delete_index(self.index_name)
            logging.info(f"索引 '{self.index_name}' 已成功删除...")
        else:
            logging.info(f"索引 '{self.index_name}' 不存在，将创建新索引...")

        # Create the new serverless index.
        logging.info(f"正在创建新索引 '{self.index_name}'...")
        self.pc.create_index(
            name=self.index_name,
            dimension=self.dimension,  # 64 for flattened 8x8 digit images
            metric="euclidean",  # Euclidean distance for raw pixel vectors
            spec=ServerlessSpec(
                cloud="aws",
                region="us-east-1"
            )
        )
        logging.info(f"索引 '{self.index_name}' 创建成功。")

        # Poll the readiness flag instead of sleeping a fixed 10 seconds:
        # faster when the index comes up quickly, safe when it is slower.
        while not self.pc.describe_index(self.index_name).status["ready"]:
            time.sleep(1)

        # Connect to the freshly created index.
        self.index = self.pc.Index(self.index_name)
        logging.info(f"已成功连接到索引 '{self.index_name}'")

    def load_and_split_data(self):
        """Load the sklearn digits dataset and split it 80/20.

        Returns:
            (X_train, X_test, y_train, y_test, digits) where digits is the
            raw sklearn Bunch.
        """
        digits = load_digits(n_class=10)  # all digits 0-9

        X = digits.data    # flattened 8x8 images, one row per sample
        y = digits.target  # integer labels 0-9

        # Stratified split keeps the per-digit class balance in both sets;
        # fixed random_state makes runs reproducible.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, stratify=y
        )

        logging.info(f"数据集总大小: {len(X)} 个样本")
        logging.info(f"训练集大小: {len(X_train)} 个样本 (80%)")
        logging.info(f"测试集大小: {len(X_test)} 个样本 (20%)")

        return X_train, X_test, y_train, y_test, digits

    def upload_training_data(self, X_train, y_train):
        """Upsert the training vectors into the Pinecone index in batches.

        Args:
            X_train: 2-D array of training vectors.
            y_train: matching integer labels, stored as metadata.

        Returns:
            Number of vectors uploaded.
        """
        # (id, values, metadata) tuples; the label rides along as metadata
        # so queries can recover it for voting.
        vectors = [
            (f"train_{i}", X_train[i].tolist(), {'label': int(y_train[i])})
            for i in range(len(X_train))
        ]

        # Upsert in batches to stay under Pinecone's per-request limits.
        batch_size = 100
        total_vectors = len(vectors)

        logging.info(f"开始上传数据到Pinecone，共 {total_vectors} 条数据")

        for i in tqdm(range(0, total_vectors, batch_size), desc="上传数据到Pinecone"):
            batch = vectors[i:i + batch_size]
            self.index.upsert(vectors=batch)

        logging.info(f"成功上传了 {total_vectors} 条数据到Pinecone")
        return total_vectors

    def test_accuracy(self, X_test, y_test, k=11):
        """Classify each test vector by majority vote over its k neighbors.

        Args:
            X_test: 2-D array of query vectors.
            y_test: true integer labels for X_test.
            k: number of nearest neighbors to retrieve per query.

        Returns:
            Fraction of correctly classified samples (0.0 for an empty set).
        """
        correct_predictions = 0
        total_samples = len(X_test)

        # Guard the empty case so the final division cannot raise.
        if total_samples == 0:
            return 0.0

        logging.info(f"开始测试 k={k} 的准确率，共 {total_samples} 个测试样本")

        for i in tqdm(range(total_samples), desc=f"测试 k={k} 的准确率"):
            query_vector = X_test[i].tolist()
            true_label = y_test[i]

            # Nearest-neighbor search against the uploaded training vectors.
            results = self.index.query(
                vector=query_vector,
                top_k=k,
                include_metadata=True
            )

            # Recover the neighbors' labels from metadata.
            neighbor_labels = [match['metadata']['label'] for match in results['matches']]

            # Majority vote; skip if the query returned no matches.
            if neighbor_labels:
                predicted_label = Counter(neighbor_labels).most_common(1)[0][0]
                if predicted_label == true_label:
                    correct_predictions += 1

        accuracy = correct_predictions / total_samples
        logging.info(f"k={k} 时的准确率为: {accuracy:.4f}")

        return accuracy

    def run_complete_pipeline(self):
        """Run the full flow: init, index, load, upload, evaluate.

        Returns:
            (accuracy, total_uploaded) on success, or None if Pinecone
            initialization failed.
        """
        # 1. Initialize the Pinecone client; bail out early on failure.
        if not self.initialize_pinecone():
            return

        # 2. (Re)create and connect to the index.
        self.create_index()

        # 3. Load and split the dataset.
        X_train, X_test, y_train, y_test, digits = self.load_and_split_data()

        # 4. Upload the training vectors.
        total_uploaded = self.upload_training_data(X_train, y_train)
        logging.info(f"成功创建索引，并上传了 {total_uploaded} 条数据")

        # 5. Evaluate KNN accuracy on the held-out test set.
        accuracy = self.test_accuracy(X_test, y_test, k=11)

        return accuracy, total_uploaded

def main():
    """Entry point: run the full Pinecone MNIST pipeline and log a summary."""
    # SECURITY NOTE(review): the API key is hard-coded in source; load it from
    # an environment variable or a secrets store before committing/sharing.
    API_KEY = "pcsk_5XCTf3_7o7fbVgtePSb8P89EiDJAUNpX2Dc1Dan2GnuML5WQoWTcznFvE5dVhFTmd6A6MC"
    INDEX_NAME = "mnist-index"

    # Build the pipeline object and run it end to end.
    pinecone_mnist = PineconeMNIST(API_KEY, INDEX_NAME)
    result = pinecone_mnist.run_complete_pipeline()

    # run_complete_pipeline returns None when Pinecone initialization fails;
    # unpacking that directly would raise TypeError.
    if result is None:
        logging.error("Pipeline aborted: Pinecone initialization failed")
        return

    accuracy, total_data = result

    # Final result summary.
    logging.info("=" * 50)
    logging.info("项目完成摘要:")
    logging.info(f"- 上传数据量: {total_data} 条")
    logging.info(f"- 测试准确率 (k=11): {accuracy:.4f}")
    logging.info("=" * 50)

if __name__ == "__main__":
    main()