import cv2
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import os
import concurrent.futures  ## 新增：导入并发库

## Worker-thread count: "cores - 2", clamped to at least 1.
# os.cpu_count() may return None on platforms where the count is
# undeterminable; fall back to 1 so the subtraction never raises TypeError.
workers = max(1, (os.cpu_count() or 1) - 2)

# Training image paths: 101 cats (label 0) and 101 dogs (label 1).
cat_paths = [f"./dataset/Cat/{i}.jpg" for i in range(0, 101)]
dog_paths = [f"./dataset/Dog/{i}.jpg" for i in range(0, 101)]
# Test-set directories, scanned at runtime by get_test_data().
TEST_CAT_DIR = r"./dataset/test/Cat"
TEST_DOG_DIR = r"./dataset/test/Dog"
# Extensions accepted when scanning test directories (matched case-insensitively).
IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png", ".bmp", ".gif")

def extract_manual_features(img_path):
    """Extract a 3-element feature vector [brightness, edge density, saturation].

    Features:
      1. mean brightness  — mean of a manually computed BT.601 luma
                            (0.299*R + 0.587*G + 0.114*B)
      2. edge density     — fraction of pixels whose Sobel gradient magnitude
                            exceeds 50 (image borders excluded from the gradient)
      3. mean saturation  — HSV-style saturation (1 - min/max), averaged over
                            all pixels and scaled by 100

    Returns None (instead of raising) when the image cannot be read, so
    parallel executor.map() callers can simply filter failures out.
    """
    img = cv2.imread(img_path)
    if img is None:
        # In a thread pool, printing a warning is friendlier than raising.
        print(f"警告：无法读取图像：{img_path}，将跳过此文件。")
        return None
    H, W, _ = img.shape

    B, G, R = cv2.split(img)

    # Manual luma; promotes the uint8 channels to float64.
    gray_manual = 0.299 * R + 0.587 * G + 0.114 * B
    mean_brightness = np.mean(gray_manual)

    # Sobel gradients, vectorized. The original version ran a Python double
    # loop over every interior pixel (O(H*W) interpreted iterations); here we
    # accumulate the 9 weighted shifts of the interior region instead, which
    # is numerically the same convolution but runs at NumPy/C speed.
    # Border rows/columns stay zero, exactly as in the loop version.
    sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    edges_x, edges_y = np.zeros_like(gray_manual), np.zeros_like(gray_manual)
    for di in range(3):
        for dj in range(3):
            # Pixels at offset (di-1, dj-1) from each interior pixel.
            shifted = gray_manual[di:H - 2 + di, dj:W - 2 + dj]
            edges_x[1:H-1, 1:W-1] += sobel_x[di, dj] * shifted
            edges_y[1:H-1, 1:W-1] += sobel_y[di, dj] * shifted
    edge_strength = np.sqrt(edges_x**2 + edges_y**2)
    edge_density = np.sum(edge_strength > 50) / (H * W)

    # Saturation per pixel: 1 - min/max of the normalized channels,
    # defined as 0 where the pixel is pure black (max == 0).
    R_norm, G_norm, B_norm = R / 255.0, G / 255.0, B / 255.0
    min_rgb = np.minimum(np.minimum(R_norm, G_norm), B_norm)
    max_rgb = np.maximum(np.maximum(R_norm, G_norm), B_norm)
    saturation_pixel = np.where(max_rgb == 0, 0, 1 - (min_rgb / max_rgb))
    mean_saturation = np.mean(saturation_pixel) * 100

    return [mean_brightness, edge_density, mean_saturation]

## --- Parallel extraction of training-set features via a thread pool ---
print(f"检测到 {os.cpu_count()} 个CPU核心，将使用 {workers} 个线程进行并行处理。")

# Training paths and labels: cats first (label 0), then dogs (label 1).
train_paths = cat_paths + dog_paths
train_labels_list = [0] * len(cat_paths) + [1] * len(dog_paths)

# Fan the per-image feature extraction out over the pool, then keep only
# the (features, label) pairs where the image could actually be read
# (extract_manual_features returns None on read failure).
print("正在并行提取训练集特征...")
X_train_list = []
y_train_list = []
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
    feature_iter = executor.map(extract_manual_features, train_paths)
    for label, features in zip(train_labels_list, feature_iter):
        if features is not None:
            X_train_list.append(features)
            y_train_list.append(label)

X_train = np.array(X_train_list)
y_train = np.array(y_train_list)
print(f"训练集特征提取完成，共处理 {len(X_train)} 张有效图片。")

# Standardize the three features; the fitted scaler is reused on the test set.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)

def get_test_data(test_cat_dir, test_dog_dir):
    """Scan the two test directories and return a list of (image_path, label)
    tuples, where label 0 = cat and 1 = dog. Only files with a recognised
    image extension (matched case-insensitively) are kept."""
    # Local copy of the module-level IMAGE_EXTENSIONS whitelist.
    valid_exts = (".jpg", ".jpeg", ".png", ".bmp", ".gif")
    test_data = []
    # Single loop over (directory, label) pairs instead of two copied loops.
    for directory, label in ((test_cat_dir, 0), (test_dog_dir, 1)):
        for filename in os.listdir(directory):
            if filename.lower().endswith(valid_exts):
                test_data.append((os.path.join(directory, filename), label))
    return test_data

test_data_original = get_test_data(TEST_CAT_DIR, TEST_DOG_DIR)

## --- One-shot parallel feature extraction for every test image ---
print("\n正在并行提取测试集特征...")
test_paths = [path for path, _ in test_data_original]
test_labels_list = [label for _, label in test_data_original]

X_test_list = []
y_test_list = []
# Rebuilt list holding only the (path, label) entries whose image loaded OK.
test_data = []

with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
    feature_iter = executor.map(extract_manual_features, test_paths)
    for entry, label, features in zip(test_data_original, test_labels_list,
                                      feature_iter):
        if features is not None:
            X_test_list.append(features)
            y_test_list.append(label)
            test_data.append(entry)

X_test = np.array(X_test_list)
y_test = np.array(y_test_list)
print(f"测试集特征提取完成，共处理 {len(X_test)} 张有效图片。")

print(f"\n=== KNN批量测试结果（共{len(test_data)}张测试图）===")
# Normalize the whole test matrix ONCE instead of calling
# scaler.transform([test_feat]) per image inside the loop: the transform is
# independent of both the loop index and k, so the per-row calls were pure
# repeated overhead. Guarded so an empty test set does not make transform()
# raise on an empty array (the original never transformed when total == 0).
X_test_norm_all = scaler.transform(X_test) if len(X_test) > 0 else X_test
for k in [5, 7, 9]:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)

    correct = 0
    total = len(test_data)
    print(f"\n--- K={k} 详细结果 ---")
    print("-" * 80)
    print(f"{'测试文件名':<30} {'预测结果':<10} {'真实结果':<10} {'是否正确'}")
    print("-" * 80)

    # Predict all rows in one call; the per-image loop below only reports.
    pred_labels_all = knn.predict(X_test_norm_all) if total > 0 else []
    for i in range(total):
        img_path, true_label = test_data[i]
        pred_label = pred_labels_all[i]

        pred_str = "猫" if pred_label == 0 else "狗"
        true_str = "猫" if true_label == 0 else "狗"
        is_correct = "是" if pred_label == true_label else "否"

        if is_correct == "是":
            correct += 1

        filename = os.path.basename(img_path)
        print(f"{filename:<30} {pred_str:<10} {true_str:<10} {is_correct}")

    accuracy = correct / total * 100 if total > 0 else 0
    print("-" * 80)
    print(f"K={k} 总结：共测试{total}张图，正确{correct}张，准确率：{accuracy:.2f}%")

print("\n=== 测试总结 ===")
accuracies = []
# The scaler transform does not depend on k — compute it once here rather
# than identically inside every iteration of the k loop. None signals an
# empty test set (transform would raise on an empty array).
X_test_norm = scaler.transform(X_test) if len(X_test) > 0 else None
for k in [5, 7, 9]:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)

    if X_test_norm is not None:
        # Vectorized accuracy over the pre-extracted test features.
        pred_labels = knn.predict(X_test_norm)
        correct = np.sum(pred_labels == y_test)
        acc = correct / len(y_test) * 100
        accuracies.append((k, acc))
    else:
        # No readable test images at all — record 0% so the summary still runs.
        accuracies.append((k, 0))

if accuracies:
    best_k, best_acc = max(accuracies, key=lambda x: x[1])
    print(f"在当前测试集下，最优K值为 {best_k}，对应准确率：{best_acc:.2f}%")
else:
    print("没有有效的测试图片，无法进行总结。")
