# 导入必要的库
import numpy as np
import faiss
from util import createXY
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import argparse
import logging
from tqdm import tqdm
from FaissKNeighbors import FaissKNeighbors

# Configure root logging once at import time: INFO level with timestamped lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def get_args():
    """Parse and return the command-line arguments.

    Three required options select the compute mode (cpu/gpu), the feature
    extraction method (flat/vgg), and the KNN backend (sklearn/faiss).
    """
    arg_parser = argparse.ArgumentParser(description='使用CPU或GPU训练模型。')
    arg_parser.add_argument(
        '-m', '--mode',
        type=str, required=True, choices=['cpu', 'gpu'],
        help='选择训练模式：CPU或GPU。',
    )
    arg_parser.add_argument(
        '-f', '--feature',
        type=str, required=True, choices=['flat', 'vgg'],
        help='选择特征提取方法：flat或vgg。',
    )
    arg_parser.add_argument(
        '-l', '--library',
        type=str, required=True, choices=['sklearn', 'faiss'],
        help='选择使用的库：sklearn或faiss。',
    )
    return arg_parser.parse_args()


def normalize_l2(X):
    """Return a copy of *X* with each row scaled to unit L2 norm.

    Rows whose norm is exactly zero are divided by 1 instead, so they stay
    all-zero rather than producing NaNs from a division by zero.
    """
    row_norms = np.linalg.norm(X, axis=1, keepdims=True)
    # Substitute 1 for zero norms to keep the division well-defined.
    safe_norms = np.where(row_norms == 0, 1, row_norms)
    return X / safe_norms


def main():
    """Entry point: load features, split the dataset, and sweep k in 1..5
    for a K-nearest-neighbors classifier, reporting the best accuracy.

    The KNN backend (sklearn or FAISS) and feature method come from the CLI;
    GPU mode silently falls back to CPU when a FAISS GPU build is missing.
    """
    args = get_args()

    # GPU availability check: only the GPU build of faiss exposes
    # StandardGpuResources, so probe for it before allocating resources.
    if args.mode == 'gpu':
        try:
            # hasattr guards against a CPU-only faiss install.
            if hasattr(faiss, 'StandardGpuResources'):
                res = faiss.StandardGpuResources()
                logging.info("GPU模式已启用")
            else:
                logging.warning("FAISS GPU不可用，回退到CPU模式")
                res = None
                args.mode = 'cpu'
        except Exception as e:
            # Resource allocation itself can fail (e.g. no CUDA device).
            logging.warning(f"GPU初始化失败: {e}，回退到CPU模式")
            res = None
            args.mode = 'cpu'
    else:
        res = None

    logging.info(f"选择模式是 {args.mode.upper()}")
    logging.info(f"选择特征提取方法是 {args.feature.upper()}")
    logging.info(f"选择使用的库是 {args.library.upper()}")

    # Load and preprocess the data from the current directory.
    # NOTE(review): createXY's contract (returned shapes/dtypes) is defined
    # in util.py — presumably X is (n_samples, n_features); verify there.
    X, y = createXY(train_folder=".", dest_folder=".", method=args.feature)

    # Bail out early if the features did not come back as a 2-D matrix.
    if len(X.shape) == 1:
        logging.error(f"X的形状不正确: {X.shape}，应该是二维数组")
        return

    # FAISS requires float32 input vectors.
    X = np.array(X).astype('float32')

    # L2-normalize each sample so distance comparisons are scale-invariant.
    X = normalize_l2(X)
    y = np.array(y)
    logging.info(f"数据加载和预处理完成。X.shape: {X.shape}, y.shape: {y.shape}")

    # Fixed random_state keeps the train/test split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)
    logging.info(f"数据集划分为训练集和测试集。训练集: {X_train.shape}, 测试集: {X_test.shape}")

    # Track the best k and its accuracy; -1 means "no model trained yet".
    best_k = -1
    best_accuracy = 0.0

    # Candidate neighborhood sizes: k = 1..5 inclusive.
    k_values = range(1, 6)

    # Pick the KNN implementation class based on the chosen backend.
    if args.library == 'faiss':
        KNNClass = FaissKNeighbors
    else:
        KNNClass = KNeighborsClassifier

    logging.info(f"使用的库为: {args.library.upper()}")

    # Train and evaluate one model per k, keeping the best test accuracy.
    for k in tqdm(k_values, desc='寻找最佳k值'):
        try:
            # The two backends take different constructor arguments:
            # FaissKNeighbors also needs the (possibly None) GPU resources.
            if args.library == 'faiss':
                knn = KNNClass(k=k, res=res)
            else:
                knn = KNNClass(n_neighbors=k)

            knn.fit(X_train, y_train)
            accuracy = knn.score(X_test, y_test)

            # Keep the running best (strictly greater, so ties favor smaller k).
            if accuracy > best_accuracy:
                best_k = k
                best_accuracy = accuracy

            logging.info(f"k={k}, 准确率: {accuracy:.4f}")

        except Exception as e:
            # A failure at one k should not abort the whole sweep.
            logging.error(f"k={k} 时训练失败: {e}")
            continue

    # Report the overall result (or an error if every k failed).
    if best_k != -1:
        logging.info(f'最佳k值: {best_k}, 最高准确率: {best_accuracy:.4f}')
    else:
        logging.error("未能成功训练任何模型")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()