# -*- coding=utf-8 -*-
import glob
import platform
import time
import concurrent.futures
import gc
from PIL import Image
from skimage.feature import hog
import numpy as np
import os
import joblib
from sklearn.svm import LinearSVC
import shutil
import logging
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib import gridspec

# Configure module-wide logging (timestamped INFO-level messages)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Class-id -> class-name mapping (English names)
label_map = {
    1: 'cat',
    2: 'chick',
    3: 'snake',
    4: 'unknown'  # fallback class for ids outside the known set
}

# Base directory; every path in DEFAULT_PARAMS is an absolute path under it
base_dir = r'D:\DatacanvasWork\分类'
DEFAULT_PARAMS = {
    'train_image_path': os.path.join(base_dir, 'image128'),
    'test_image_path': os.path.join(base_dir, 'image128'),
    'train_label_path': os.path.join(base_dir, 'image128', 'train.txt'),
    # NOTE(review): the test split reuses the training label file and image
    # directory — confirm this is intentional and not a copy-paste slip.
    'test_label_path': os.path.join(base_dir, 'image128', 'train.txt'),
    'image_height': 128,
    'image_width': 100,
    'train_feat_path': os.path.join(base_dir, 'train'),  # absolute path
    'test_feat_path': os.path.join(base_dir, 'test'),    # absolute path
    'model_path': os.path.join(base_dir, 'model'),       # absolute path
    'new_test_image_path': os.path.join(base_dir, 'test_images'),  # new-test image dir
    'batch_size': 32,
    # BUG FIX: the original `os.cpu_count() or 4 - 1` parsed as
    # `os.cpu_count() or (4 - 1)`, so "minus one core" was never applied.
    # Intent is "all cores minus one, at least 1".
    'n_workers': max(1, (os.cpu_count() or 4) - 1),
}

# HOG descriptor parameters (passed straight through to skimage.feature.hog)
HOG_PARAMS = {
    'orientations': 12,
    'pixels_per_cell': [8, 8],
    'cells_per_block': [4, 4],
    'block_norm': 'L1',
    'transform_sqrt': True,
    'visualize': True,  # True so hog() also returns a visualization image
}

# Yield images in batches
def get_image_batches(file_path, name_list, batch_size=32):
    """Lazily yield (names, images) batches to keep peak memory low."""
    target_size = (DEFAULT_PARAMS['image_width'], DEFAULT_PARAMS['image_height'])

    for start in range(0, len(name_list), batch_size):
        batch_names = name_list[start:start + batch_size]
        batch_images = []

        for name in batch_names:
            try:
                with Image.open(os.path.join(file_path, name)) as img:
                    # Convert to grayscale (if needed) and resize at load time
                    gray = img if img.mode == 'L' else img.convert('L')
                    batch_images.append(np.array(gray.resize(target_size)))
            except Exception as e:
                logger.error(f"加载图片 {name} 时出错: {e}")
                continue

        yield batch_names, batch_images

        # Force garbage collection between batches
        gc.collect()

# Extract the HOG feature of a single image
def extract_hog_feature(image, visualization=False):
    """Compute the HOG descriptor of one grayscale image.

    Returns the feature vector, or (features, hog_image) when
    visualization is True; on error returns None / (None, None).
    """
    try:
        # Normalize pixel values into [0, 1]
        image = image.astype(np.float32) / 255.0

        # Single parameter dict; only the visualize flag differs per call
        params = dict(HOG_PARAMS)
        params['visualize'] = visualization
        # hog() returns (features, hog_image) when visualize=True,
        # otherwise just the feature vector
        return hog(image, **params)
    except Exception as e:
        logger.error(f"提取HOG特征时出错: {e}")
        return (None, None) if visualization else None

# Extract features for a batch of images in parallel
def batch_extract_features(batch_images, batch_names, labels, save_path):
    """Extract HOG features for one batch on a thread pool and save each
    (features + label) vector to its own .feat file under save_path."""
    with concurrent.futures.ThreadPoolExecutor(max_workers=DEFAULT_PARAMS['n_workers']) as pool:
        # One extraction task per image, keyed back to its batch index
        pending = {pool.submit(extract_hog_feature, image): idx
                   for idx, image in enumerate(batch_images)}

        for done in concurrent.futures.as_completed(pending):
            idx = pending[done]
            name = batch_names[idx]
            label = labels[idx] if idx < len(labels) else None

            try:
                feature = done.result()
                if feature is None or label is None:
                    continue
                # Append the numeric label as the last element of the vector
                labeled = np.concatenate((feature, [int(label)]))
                # Persist one .feat file per image
                joblib.dump(labeled, os.path.join(save_path, name + '.feat'))
            except Exception as e:
                logger.error(f"处理 {name} 的特征时出错: {e}")

# Read image names and their class labels
def get_name_label(file_path):
    """Read (image name, label) pairs from a whitespace-separated label file.

    Each usable line is "<name> <label>" where label is a non-negative
    integer; blank or malformed lines are logged and skipped.
    Returns two parallel lists: names and labels (labels kept as strings,
    as downstream code converts them with int()).
    """
    logger.info(f"从 {file_path} 读取标签")
    name_list = []
    label_list = []

    try:
        # Explicit encoding so parsing is identical across platforms
        with open(file_path, encoding='utf-8') as f:
            for line in f:  # iterate lazily instead of readlines()
                if len(line) < 3:  # skip blank / too-short lines
                    continue
                # FIX: split() with no argument tolerates tabs and runs of
                # spaces; the original split(' ') produced empty fields and
                # silently dropped such lines.
                parts = line.strip().split()
                if len(parts) < 2:
                    continue
                name, label = parts[0], parts[1]

                if not label.isdigit():
                    logger.error(f"标签必须为数字，得到的是: {label}")
                    continue

                name_list.append(name)
                label_list.append(label)
    except Exception as e:
        logger.error(f"读取标签文件时出错: {e}")

    logger.info(f"读取了 {len(name_list)} 个样本标签")
    return name_list, label_list

# Extract features
def extract_features():
    """Extract HOG features for the training and test splits and save them.

    Returns True on success, False when either split has no valid samples.
    """
    # Image names and labels for both splits
    train_name, train_label = get_name_label(DEFAULT_PARAMS['train_label_path'])
    test_name, test_label = get_name_label(DEFAULT_PARAMS['test_label_path'])

    # Warn about label ids that are not declared in label_map
    unique_labels = set(int(label) for label in test_label)
    for label in unique_labels:
        if label not in label_map:
            logger.warning(f"Discovered label not defined in label_map: {label}")

    if not train_name or not test_name:
        logger.error("No valid training or test data found")
        return False

    # PERF FIX: build name->label maps once instead of calling
    # list.index(name) per image (the original did an O(n) scan per image,
    # O(n^2) overall). Assumes names are unique within a split.
    train_lookup = dict(zip(train_name, train_label))
    test_lookup = dict(zip(test_name, test_label))

    logger.info("Starting to extract training set features...")
    for batch_names, batch_images in tqdm(
            get_image_batches(DEFAULT_PARAMS['train_image_path'], train_name, DEFAULT_PARAMS['batch_size'])):
        batch_labels = [train_lookup.get(name) for name in batch_names]
        batch_extract_features(batch_images, batch_names, batch_labels, DEFAULT_PARAMS['train_feat_path'])

    logger.info("Starting to extract test set features...")
    for batch_names, batch_images in tqdm(
            get_image_batches(DEFAULT_PARAMS['test_image_path'], test_name, DEFAULT_PARAMS['batch_size'])):
        batch_labels = [test_lookup.get(name) for name in batch_names]
        batch_extract_features(batch_images, batch_names, batch_labels, DEFAULT_PARAMS['test_feat_path'])

    logger.info("Feature extraction completed")
    return True

# Create the folders that hold features, the model, and test images
def create_directories():
    """Create every output directory, ignoring ones that already exist."""
    for key in ('train_feat_path', 'test_feat_path', 'model_path', 'new_test_image_path'):
        os.makedirs(DEFAULT_PARAMS[key], exist_ok=True)

# Train the model
def train_model():
    """Train a LinearSVC on all saved training features.

    Returns the fitted classifier, or None when no valid features exist.
    """
    logger.info("加载训练特征...")
    features = []
    labels = []

    # Each .feat file stores one vector: [hog features..., numeric label]
    for feat_path in tqdm(glob.glob(os.path.join(DEFAULT_PARAMS['train_feat_path'], '*.feat'))):
        try:
            data = joblib.load(feat_path)
            features.append(data[:-1])
            labels.append(data[-1])
        except Exception as e:
            logger.error(f"加载特征文件 {feat_path} 时出错: {e}")

    if not features:
        logger.error("没有找到有效的训练特征")
        return None

    X = np.array(features)
    y = np.array(labels)

    logger.info("训练SVM分类器...")
    t0 = time.time()

    # Fit the linear SVM classifier
    clf = LinearSVC(random_state=42, max_iter=5000)
    clf.fit(X, y)

    t1 = time.time()
    logger.info(f"训练完成，耗时: {t1 - t0:.2f}秒")

    # Persist the trained model
    model_file = os.path.join(DEFAULT_PARAMS['model_path'], 'model')
    joblib.dump(clf, model_file)
    logger.info(f"模型保存在: {model_file}")

    return clf

# Predict a single image
def predict_single_image(image_path, clf):
    """Classify one image file with clf.

    Returns (class name, (gray image array, HOG visualization)), or
    (None, (None, None)) when loading or feature extraction fails.
    """
    try:
        with Image.open(image_path) as img:
            # Grayscale + resize to the model's expected input size
            gray = img.convert('L') if img.mode != 'L' else img
            resized = gray.resize((DEFAULT_PARAMS['image_width'], DEFAULT_PARAMS['image_height']))
            image = np.array(resized)

        # HOG features plus their visualization image
        feature, hog_image = extract_hog_feature(image, visualization=True)
        if feature is None:
            return None, (None, None)

        # Predict the class id and map it to its name
        predicted_id = int(clf.predict(feature.reshape(1, -1))[0])
        return label_map[predicted_id], (image, hog_image)

    except Exception as e:
        logger.error(f"预测图片 {image_path} 时出错: {e}")
        return None, (None, None)

# Show images next to their HOG features
def show_images(images, hog_images, labels, n_cols=4):
    """Plot each image beside its HOG visualization in an n_cols-wide grid."""
    n_items = len(images)
    n_rows = (n_items + n_cols - 1) // n_cols

    # Use a font without CJK glyphs so titles render consistently
    plt.rcParams['font.sans-serif'] = ['Arial']
    plt.rcParams['axes.unicode_minus'] = False

    plt.figure(figsize=(4 * n_cols, 4 * n_rows))
    grid = gridspec.GridSpec(n_rows, n_cols * 2)

    for i in range(n_items):
        row, col = divmod(i, n_cols)

        # Original image on the left
        ax_img = plt.subplot(grid[row, col * 2])
        ax_img.imshow(images[i], cmap='gray')
        ax_img.set_title(f'Class: {labels[i]}')
        ax_img.axis('off')

        # HOG visualization on the right
        ax_hog = plt.subplot(grid[row, col * 2 + 1])
        ax_hog.imshow(hog_images[i], cmap='gray')
        ax_hog.set_title('HOG Features')
        ax_hog.axis('off')

    plt.tight_layout()
    plt.show()

# Predict and visualize images
def predict_and_visualize(n_train=3, n_test=3, force_retrain=False):
    """
    Predict and visualize images from the labeled set and the new test set.

    Parameters:
    - n_train: number of images to sample from the labeled set
    - n_test: number of images to sample from the new test set
    - force_retrain: retrain the model even if a saved model file exists

    Side effects: may trigger feature extraction and model training, and
    opens matplotlib windows; returns None.
    """
    # Load an existing model, or (re)train one if missing or forced
    model_file = os.path.join(DEFAULT_PARAMS['model_path'], 'model')
    
    if os.path.exists(model_file) and not force_retrain:
        clf = joblib.load(model_file)
        logger.info("Model loaded successfully")
    else:
        # Ensure the feature directory exists and contains .feat files
        if not os.path.exists(DEFAULT_PARAMS['train_feat_path']) or \
           len(glob.glob(os.path.join(DEFAULT_PARAMS['train_feat_path'], '*.feat'))) == 0:
            logger.info("Starting feature extraction...")
            extract_features()
        
        # Train the model
        logger.info("Starting model training...")
        clf = train_model()
        if clf is None:
            logger.error("Model training failed")
            return
    
    # 1. Labeled-set images.
    # NOTE(review): despite the "training set" log message this reads the
    # *test* label/image paths (which DEFAULT_PARAMS currently points at the
    # training data) — confirm which split is intended.
    logger.info("Processing training set images...")
    test_name, test_label = get_name_label(DEFAULT_PARAMS['test_label_path'])
    if test_name:
        # Randomly pick up to n_train images to predict and visualize
        selected_indices = np.random.choice(len(test_name), min(n_train, len(test_name)), replace=False)
        selected_names = [test_name[i] for i in selected_indices]
        
        # Map label ids to names, falling back to 'unknown' for unmapped ids
        selected_labels = []
        for i in selected_indices:
            label_id = int(test_label[i])
            if label_id in label_map:
                selected_labels.append(label_map[label_id])
            else:
                logger.warning(f"遇到未知标签ID: {label_id}，使用'unknown'替代")
                selected_labels.append('unknown')
        
        # Predict and collect the results
        train_images = []
        train_hog_images = []
        train_labels = []
        
        for name, true_label in zip(selected_names, selected_labels):
            image_path = os.path.join(DEFAULT_PARAMS['test_image_path'], name)
            pred_class, (image, hog_image) = predict_single_image(image_path, clf)
            
            if pred_class is not None:
                train_images.append(image)
                train_hog_images.append(hog_image)
                train_labels.append(true_label)  # display the ground-truth label
        
        # Visualize results for the labeled set
        if train_images:
            logger.info("Training set images and their HOG features:")
            show_images(train_images, train_hog_images, train_labels, n_cols=min(4, len(train_images)))
        else:
            logger.info("No training set prediction results to display")
    
    # 2. New (unlabeled) test-set images
    logger.info("Processing new test set images...")
    # Collect every image file in the new-test directory
    test_images_path = DEFAULT_PARAMS['new_test_image_path']
    image_extensions = ['.jpg', '.jpeg', '.png', '.bmp', '.gif']
    test_image_files = []
    
    for ext in image_extensions:
        test_image_files.extend(glob.glob(os.path.join(test_images_path, f'*{ext}')))
    
    if test_image_files:
        # If there are too many images, randomly sample n_test of them
        if len(test_image_files) > n_test:
            test_image_files = np.random.choice(test_image_files, n_test, replace=False)
        
        # Predict and collect the results
        new_test_images = []
        new_test_hog_images = []
        new_test_labels = []
        
        for image_path in test_image_files:
            pred_class, (image, hog_image) = predict_single_image(image_path, clf)
            
            if pred_class is not None:
                new_test_images.append(image)
                new_test_hog_images.append(hog_image)
                new_test_labels.append(pred_class)  # display the predicted label
        
        # Visualize results for the new test set
        if new_test_images:
            logger.info("New test set images prediction results and their HOG features:")
            show_images(new_test_images, new_test_hog_images, new_test_labels, n_cols=min(4, len(new_test_images)))
        else:
            logger.info("No new test set prediction results to display")
    else:
        logger.info(f"No image files found in {test_images_path}")

# Main function
def main():
    """Run the full pipeline: clean old features, extract, train, visualize."""
    create_directories()

    # Remove stale features from previous runs, then recreate the directories
    logger.info("Clearing old features...")
    for key in ('train_feat_path', 'test_feat_path'):
        shutil.rmtree(DEFAULT_PARAMS[key], ignore_errors=True)
    create_directories()

    # Extract features; abort if nothing usable was produced
    if not extract_features():
        logger.error("Feature extraction failed, program terminated")
        return

    # Train the classifier
    logger.info("Starting model training...")
    clf = train_model()
    if clf is None:
        logger.error("Model training failed, program terminated")
        return

    # Predict and visualize sample images
    predict_and_visualize(n_train=3, n_test=3)

    logger.info("Program execution completed")

# Program entry point
if __name__ == '__main__':
    main()