import os

import joblib
import numpy as np
import pandas as pd
import shap
import lightgbm as lgb

from PIL import Image
from flask import Blueprint, jsonify
from openpyxl import load_workbook
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from tensorflow.keras import applications, models, backend as k, layers

from utils.config_utils import config

preprocess_input = applications.resnet50.preprocess_input
ResNet50 = applications.resnet50.ResNet50
Model = models.Model
GlobalAveragePooling2D = layers.GlobalAveragePooling2D

# Resolve the Homebrew libomp path so the OpenMP runtime required by
# XGBoost/LightGBM native libraries can be found (macOS-specific setup).
brew_path = os.popen('brew --prefix libomp').read().strip()
os.environ['DYLD_LIBRARY_PATH'] = f"{brew_path}/lib" + os.pathsep + os.environ.get('DYLD_LIBRARY_PATH', '')

# Avoid crashes from duplicate OpenMP runtimes being loaded
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# NOTE: xgboost is deliberately imported *after* the env vars above are set,
# so the OpenMP settings are in effect when its native library loads.
import xgboost as xgb

# Disable GPU usage for TensorFlow
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Flask blueprint exposing this module's prediction endpoint
bp_sem_resnet50_multi_predict_electret = Blueprint('bp_sem_resnet50_multi_predict_electret', __name__)

# ====================== Configuration ======================
BASE_DATA_PATH = config.get("path").get("sem_data")
EXCEL_PATH = os.path.join(BASE_DATA_PATH, "melt_cnn_data_dev.xlsx")
MODEL_SAVE_PATH = os.path.join(BASE_DATA_PATH, "fusion_model_dev.pkl")
SCALER_SAVE_PATH = os.path.join(BASE_DATA_PATH, "feature_scaler_dev.pkl")
IMAGE_DEBUG_PATH = os.path.join(BASE_DATA_PATH, "img_debug")
IMAGE_SIZE = (224, 224)  # standard ResNet50 input size
# ====================== Multi-model switching configuration ======================
MODEL_TYPE = "lgb"  # one of: "xgb" | "lgb" | "rf"
USE_GPU = False  # whether to enable GPU acceleration
SHAP_SAMPLES = 100  # SHAP analysis sample count (accuracy/speed trade-off)
FEATURE_NAMES_PATH = os.path.join(BASE_DATA_PATH, "feature_names.txt")
SHAP_SAVE_PATH = os.path.join(BASE_DATA_PATH, "shap_values.npy")
FEATURE_IMPORTANCE_PATH = os.path.join(BASE_DATA_PATH, "feature_importance.csv")

# Required columns of each sheet's data table (below the parameter block)
DATA_COLUMNS = [
    "序号", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
]

# Process-parameter names (matching the Excel sheet structure)
PROCESS_PARAMS = [
    "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）",
    "挤出速度（g/min）", "驻极水压（MPa）", "驻极速度（m/min）",
    "烘干温度（℃）", "混合物配比（辅料/主料）"
]

# Target columns (values to predict)
TARGET_COLUMNS = ["表面电势（V）", "电荷量（pC）"]

# Separator used inside multi-value cells (e.g. several image paths)
SPLIT_FLAG = "|,|"

# Drive-column mapping — maps a drive column's header name to the standard
# column name it is stored under (currently an identity mapping).
DRIVE_COL_MAPPING = {
    "驻极水压（MPa）": "驻极水压（MPa）",
    "驻极速度（m/min）": "驻极速度（m/min）",
    "混合物配比（辅料/主料）": "混合物配比（辅料/主料）"
}

# Per-model hyperparameter configuration, keyed by MODEL_TYPE
MODEL_PARAMS = {
    "xgb": {
        "objective": "reg:squarederror",
        "n_estimators": 300,
        "max_depth": 8,
        "subsample": 0.8,
        "colsample_bytree": 0.8,
        "random_state": 42,
        "n_jobs": -1,
        "tree_method": "gpu_hist" if USE_GPU else "hist"
    },
    "lgb": {
        "objective": "regression",
        "num_leaves": 63,
        "learning_rate": 0.05,
        "n_estimators": 500,
        "random_state": 42,
        "n_jobs": -1,
        "device": "gpu" if USE_GPU else "cpu"
    },
    "rf": {
        "n_estimators": 200,
        "max_depth": 15,
        "min_samples_split": 5,
        "random_state": 42,
        "n_jobs": -1
    }
}


def get_feature_extractor():
    """Return the ResNet50-based feature extractor, building it lazily.

    The model (ResNet50 without its top, followed by global average
    pooling) is constructed once and cached as an attribute on this
    function object; later calls return the cached instance.
    """
    try:
        return get_feature_extractor.model
    except AttributeError:
        print("正在加载ResNet50特征提取器...")
        backbone = ResNet50(weights='imagenet', include_top=False)
        pooled = GlobalAveragePooling2D()(backbone.output)
        get_feature_extractor.model = Model(inputs=backbone.input, outputs=pooled)
        print("ResNet50特征提取器加载完成")
        return get_feature_extractor.model


# HTTP entry point for this module
@bp_sem_resnet50_multi_predict_electret.route('/dhu-ai/api/sem-resnet50-multi-predict', methods=['POST'])
def predict_endpoint():
    """
    Microservice endpoint: trigger the data-processing pipeline.

    Runs process_data() and wraps its status/message plus selected
    counters into a JSON response; any unexpected error is reported
    as a 500. The TensorFlow session is cleared in all cases.
    """
    try:
        print(f"获取数据路径: {BASE_DATA_PATH}")
        # Run the processing pipeline
        outcome = process_data()

        # Shape the response payload
        payload = {
            "status": outcome["status"],
            "message": outcome["message"],
            "details": {
                key: outcome.get(key, 0)
                for key in ("processed_sheets", "processed_samples", "feature_dimension")
            }
        }
        return jsonify(payload)
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": f"接口错误: {str(e)}"
        }), 500
    finally:
        clear_tensorflow_session()


# Main data-processing pipeline
def _parse_process_value(idx, param, value):
    """Coerce one process-parameter cell to a float.

    Handles, in order: NaN (-> 0.0 with a warning), SPLIT_FLAG-joined
    multi-value strings (-> mean of the parts), fraction strings such as
    "1/99" for the mixture-ratio column, and plain numeric values. Any
    conversion failure falls back to 0.0 with a warning.
    """
    if pd.isna(value):
        print(f"警告: Row {idx + 1} 缺少工艺参数 {param}，已用0填充")
        return 0.0
    # Multi-value parameter (e.g. screw temperatures): average the parts
    if isinstance(value, str) and SPLIT_FLAG in value:
        try:
            return np.mean([float(v.strip()) for v in value.split(SPLIT_FLAG)])
        except ValueError:
            print(f"警告: Row {idx + 1} 无法转换多值参数 {param}，已用0填充")
            return 0.0
    # Mixture ratio given as a fraction string, e.g. "1/99"
    if param == "混合物配比（辅料/主料）" and isinstance(value, str) and '/' in value:
        try:
            num, denom = value.split('/')
            return float(num) / float(denom)
        except (ValueError, ZeroDivisionError):
            # Bug fix: previously the already-filled 0.0 was printed here
            # instead of the offending raw cell value.
            print(f"警告: Row {idx + 1} 无法转换混合物配比 {value}，已用0填充")
            return 0.0
    try:
        return float(value)
    except ValueError:
        print(f"警告: Row {idx + 1} 无法转换工艺参数 {param}，已用0填充")
        return 0.0


def process_data():
    """Run the full pipeline and return a status dict for the endpoint.

    Steps: merge all material sheets into 'merge_data', read it back,
    build a fused (image CNN features + process parameters) vector per
    row, then train the fusion model on rows with complete targets.
    """
    try:
        # Step 1: consolidate all material sheets into the merge_data sheet
        merge_sheets_to_merge_data()

        # Step 2: read the consolidated data back
        xl = pd.ExcelFile(EXCEL_PATH)

        if 'merge_data' not in xl.sheet_names:
            print("未找到merge_data sheet")
            return {
                "status": "error",
                "message": "未找到merge_data sheet"
            }

        print("\n处理表：merge_data")
        df_merge = pd.read_excel(EXCEL_PATH, sheet_name='merge_data')

        # Validate that every expected column is present
        required_columns = [
            "序号", "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）", "挤出速度（g/min）",
            "驻极水压（MPa）", "驻极速度（m/min）", "烘干温度（℃）", "混合物配比（辅料/主料）",
            "主料", "辅料", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
        ]

        if not all(col in df_merge.columns for col in required_columns):
            print(f"merge_data sheet缺少必要的列：{df_merge.columns}")
            return {
                "status": "error",
                "message": "merge_data sheet缺少必要的列"
            }

        all_results = []

        # Build one fused-feature record per usable row
        for idx, row in df_merge.iterrows():
            if pd.isna(row['SEM图片路径']):
                print(f"Row {idx + 1}: 缺少图片路径")
                continue

            # Parse the SPLIT_FLAG-joined image list; keep .tif files only
            img_files = [f.strip() for f in str(row['SEM图片路径']).split(SPLIT_FLAG)]
            img_files = [f for f in img_files if f.endswith('.tif')]

            if not img_files:
                print(f"Row {idx + 1}: 无有效的图片文件")
                continue

            # Images live in a folder named after the main material
            img_folder = row['主料']
            full_paths = [os.path.join(BASE_DATA_PATH, img_folder, f) for f in img_files]

            # Extract CNN features for every existing image of this row
            img_features_list = []
            valid_paths = []
            for path in full_paths:
                if os.path.exists(path):
                    features = extract_image_features(path)
                    if features is not None:
                        img_features_list.append(features)
                        valid_paths.append(path)
                else:
                    print(f"图片不存在: {path}")

            if not img_features_list:
                print(f"Row {idx + 1}: 无有效的图像特征")
                continue

            # Average multiple images into one feature vector
            avg_img_features = np.mean(img_features_list, axis=0)

            # Numeric process-parameter features, in PROCESS_PARAMS order
            process_features = [
                _parse_process_value(idx, param, row[param]) for param in PROCESS_PARAMS
            ]

            # Fuse image features with process parameters
            fused_features = np.concatenate([
                avg_img_features,
                np.array(process_features)
            ])

            # Collect the regression targets (may be partially missing)
            target_values = {}
            missing_targets = []
            for target_col in TARGET_COLUMNS:
                if target_col in row and not pd.isna(row[target_col]):
                    try:
                        target_values[target_col] = float(row[target_col])
                    except ValueError:
                        print(f"Row {idx + 1}: 无法转换目标列'{target_col}'的值")
                        target_values[target_col] = None
                else:
                    missing_targets.append(target_col)

            if missing_targets:
                print(f"Row {idx + 1}: 缺失目标值 {missing_targets}")

            all_results.append({
                "sheet": row['主料'],
                "row_index": int(row['序号']),
                "image_paths": valid_paths,
                "process_params": {param: row[param] for param in PROCESS_PARAMS},
                "fused_features": fused_features.tolist(),
                "targets": target_values
            })

        # Train the fusion model on the collected samples
        if all_results:
            model, scaler, feature_importance = train_fusion_model(all_results)
            # Bug fix: feature_importance is a pandas DataFrame, whose truth
            # value is ambiguous — `if feature_importance:` raises ValueError.
            # Compare each returned object against None explicitly instead.
            if model is not None and scaler is not None and feature_importance is not None:
                return {
                    "status": "success",
                    "message": "数据处理和模型训练完成",
                    "processed_sheets": 1,  # only the merge_data sheet is processed
                    "processed_samples": len(all_results),
                    "feature_dimension": len(all_results[0]['fused_features']),
                    "feature_analysis": feature_importance.to_dict(orient='records')
                }
            return {
                "status": "warning",
                "message": "处理完成但未训练模型（缺少目标值）",
                "processed_sheets": 1,
                "processed_samples": len(all_results),
                "feature_analysis": None
            }
        else:
            return {
                "status": "warning",
                "message": "未处理任何有效数据",
                "processed_sheets": 1,
                "processed_samples": 0
            }

    except Exception as e:
        import traceback
        traceback.print_exc()
        return {
            "status": "error",
            "message": f"处理失败: {str(e)}"
        }
    finally:
        clear_tensorflow_session()


# Merge every material sheet into the merge_data sheet
def merge_sheets_to_merge_data():
    """Consolidate all sheets whose name starts with "材料" (material)
    into a single 'merge_data' sheet of the same workbook.

    For each material sheet, the process-parameter block at the top is
    parsed once and its values are copied onto every data row; drive
    columns (marked "/" in the parameter block) take their value from
    the data row itself. All errors are printed and swallowed
    (best-effort operation with no return value).
    """
    try:
        # Open the workbook for sheet discovery
        xl = pd.ExcelFile(EXCEL_PATH)

        # Collect all sheets whose name starts with "材料"
        sheet_names = [name for name in xl.sheet_names if name.startswith("材料")]
        print(f"找到: {len(sheet_names)}个材料表: {sheet_names}")

        if not sheet_names:
            print("没有需要合并的材料表")
            return

        # Accumulates one dict per merged output row
        all_merged_data = []

        # Canonical column order of the merge_data sheet
        merge_columns = [
            "序号", "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）", "挤出速度（g/min）",
            "驻极水压（MPa）", "驻极速度（m/min）", "烘干温度（℃）", "混合物配比（辅料/主料）",
            "主料", "辅料", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
        ]

        # Process each material sheet
        for sheet_name in sheet_names:
            print(f"处理表：{sheet_name}")

            # Raw, headerless read so the parameter block can be parsed
            df_raw = pd.read_excel(EXCEL_PATH, sheet_name=sheet_name, header=None)

            # Parse the process parameters and locate the table header
            try:
                # Returns the parameter dict, drive-column indices, header row index
                process_params_dict, drive_col_indices, header_row_idx = parse_process_params(df_raw)
                print(f"Sheet: {sheet_name}工艺参数：{process_params_dict}")
                print(f"驱动列索引: {drive_col_indices}")
            except ValueError as e:
                print(f"Sheet: {sheet_name}解析失败：{str(e)}")
                continue

            # Re-read just the data-table part using the detected header row
            df_data = pd.read_excel(EXCEL_PATH, sheet_name=sheet_name, header=header_row_idx)

            # Validate the required data-table columns
            if not all(col in df_data.columns for col in DATA_COLUMNS):
                print(f"Sheet：{sheet_name}缺少必要的列：{df_data.columns}")
                continue

            # Process every data row
            for _, row in df_data.iterrows():
                # Skip rows without a serial number
                if pd.isna(row['序号']):
                    print(f"跳过行（缺少序号）: {row}")
                    continue

                # Base fields of the merged row
                new_row = {
                    "序号": row['序号'],
                    "主料": sheet_name,
                    "辅料": row['辅料'] if '辅料' in row else '',
                    "表面电势（V）": row['表面电势（V）'],
                    "电荷量（pC）": row['电荷量（pC）'],
                    "SEM图片路径": row['SEM图片路径']
                }

                # Copy sheet-level process parameters (non-drive columns)
                for param, value in process_params_dict.items():
                    # Multi-valued parameters are stored as SPLIT_FLAG-joined strings
                    if isinstance(value, list):
                        new_row[param] = SPLIT_FLAG.join(map(str, value))
                    else:
                        new_row[param] = value

                # Drive columns: take the per-row value from the data table
                for col_idx in drive_col_indices:
                    if col_idx < len(row):
                        # Header name of the drive column
                        drive_col_name = df_data.columns[col_idx]

                        # Map to the standard column name. NOTE: the mapping is
                        # currently an identity map, so row[std_col_name] is the
                        # same cell as row[drive_col_name].
                        if drive_col_name in DRIVE_COL_MAPPING:
                            std_col_name = DRIVE_COL_MAPPING[drive_col_name]
                            new_row[std_col_name] = row[std_col_name]

                all_merged_data.append(new_row)

        # Build the merged DataFrame in the canonical column order
        df_merge = pd.DataFrame(all_merged_data, columns=merge_columns)

        # Write to the merge_data sheet — replace an existing one if present
        if os.path.exists(EXCEL_PATH):
            # Load the existing workbook. NOTE(review): this removal is never
            # saved; the actual replacement is done by ExcelWriter below via
            # if_sheet_exists='replace'.
            book = load_workbook(EXCEL_PATH)
            # Drop a pre-existing merge_data sheet first
            if 'merge_data' in book.sheetnames:
                std = book['merge_data']
                book.remove(std)

            # Append-mode writer that replaces the merge_data sheet in place
            with pd.ExcelWriter(
                    EXCEL_PATH,
                    engine='openpyxl',
                    mode='a',
                    if_sheet_exists='replace'  # make sure the sheet can be replaced
            ) as writer:
                # Write the data directly; no manual book/sheets plumbing
                df_merge.to_excel(writer, sheet_name='merge_data', index=False)
        else:
            # File does not exist yet — create it fresh
            df_merge.to_excel(EXCEL_PATH, sheet_name='merge_data', index=False)

        print(f"成功合并数据到merge_data sheet，共{len(df_merge)}行")

    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f"合并数据失败: {str(e)}")


# Train the fusion model
def train_fusion_model(all_data):
    """Train a multi-output regressor on fused image + process features.

    all_data: list of per-row dicts produced by process_data(); only rows
    where every TARGET_COLUMNS value is present are used for training.

    Returns (model, scaler, feature_importance_df), or (None, None, None)
    when there is nothing valid to train on or dimensions are inconsistent.
    Side effects: persists the model, scaler, feature names and the
    importance table to the configured paths.
    """

    if not all_data:
        print("无有效数据用于训练")
        return None, None, None

    # Guard against mixed feature dimensions (e.g. failed image extraction)
    feature_lengths = [len(item['fused_features']) for item in all_data]
    if len(set(feature_lengths)) > 1:
        print(f"错误: 特征维度不一致! 发现维度: {set(feature_lengths)}")
        # Report which samples disagree
        for i, item in enumerate(all_data):
            print(f"样本 {i} (Sheet: {item['sheet']}, Row: {item['row_index']}): 维度={len(item['fused_features'])}")
        return None, None, None

    # Assemble the training matrix
    features = []
    # One target list per output column
    targets = {col: [] for col in TARGET_COLUMNS}

    # Keep only rows with a complete set of targets
    for item in all_data:
        if all(item['targets'].get(col) is not None for col in TARGET_COLUMNS):
            features.append(item['fused_features'])
            for col in TARGET_COLUMNS:
                targets[col].append(item['targets'][col])

    if not features:
        print("无有效的训练样本")
        return None, None, None

    # To numpy: X is (n_samples, n_features), y is (n_samples, n_targets)
    X = np.array(features)
    y = np.column_stack([targets[col] for col in TARGET_COLUMNS])

    # Standardize features
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Build feature names — assumes the image part is exactly the 2048-dim
    # ResNet50 pooled vector; TODO confirm if the extractor ever changes
    img_feature_names = [f"img_feat_{i}" for i in range(2048)]  # ResNet50 feature dimension
    feature_names = img_feature_names + PROCESS_PARAMS

    # Persist the feature names, one per line
    with open(FEATURE_NAMES_PATH, "w") as f:
        f.write("\n".join(feature_names))

    print(f"特征名称已保存至：{FEATURE_NAMES_PATH}")

    # Select the model implementation per configuration
    if MODEL_TYPE == "xgb":
        print("训练XGBoost模型...")
        model = xgb.XGBRegressor(**MODEL_PARAMS[MODEL_TYPE])
    elif MODEL_TYPE == "lgb":
        print("训练lightGBM模型...")
        model = lgb.LGBMRegressor(**MODEL_PARAMS[MODEL_TYPE])
    else:
        print("训练随机森林模型...")
        model = RandomForestRegressor(**MODEL_PARAMS[MODEL_TYPE])

    model.fit(X_scaled, y)

    # Feature-importance analysis on the trained model
    feature_importance = analyze_feature_importance(model, feature_names, X_scaled)

    # Persist the model, scaler and the importance table
    joblib.dump(model, MODEL_SAVE_PATH)
    joblib.dump(scaler, SCALER_SAVE_PATH)
    feature_importance.to_csv(FEATURE_IMPORTANCE_PATH, index=False)

    print(f"模型训练完成，保存至：{MODEL_SAVE_PATH}")
    print(f"特征重要性分析，保存至：{FEATURE_IMPORTANCE_PATH}")

    return model, scaler, feature_importance


def analyze_feature_importance(model, feature_names, X):
    """
    Combined feature-importance analysis:
    1. Built-in (tree) feature importances
    2. SHAP value analysis (sub-sampled for efficiency)
    3. Grouped report by feature type (image vs. process)

    model: fitted regressor (XGBoost / LightGBM / sklearn).
    feature_names: one name per column of X.
    X: scaled training matrix, shape (n_samples, n_features).

    Returns a DataFrame with per-feature importance columns; also saves
    raw SHAP values to SHAP_SAVE_PATH when SHAP analysis runs.
    """
    # First 2048 columns come from the ResNet50 pooled vector, the rest
    # are the process parameters (matches train_fusion_model's layout)
    results = pd.DataFrame({
        "feature": feature_names,
        "type": ["image"] * 2048 + ["process"] * len(PROCESS_PARAMS)
    })

    # Built-in importance; multi-output wrappers expose per-target
    # estimators whose importances are averaged
    if hasattr(model, "feature_importances_"):
        results["builtin_importance"] = model.feature_importances_
    elif hasattr(model, "estimators_"):
        importances = np.mean([est.feature_importances_ for est in model.estimators_], axis=0)
        results["builtin_importance"] = importances
    else:
        results["builtin_importance"] = 0

    # SHAP analysis on a random sub-sample for efficiency.
    # Bug fixes: the gate previously used `== SHAP_SAMPLES`, so SHAP only ran
    # for datasets of exactly SHAP_SAMPLES rows, and the sample-size expression
    # read `X.shapep[0]` (AttributeError).
    if MODEL_TYPE in ["xgb", "lgb"] and X.shape[0] >= SHAP_SAMPLES:
        print("计算SHAP值...")
        try:
            # Tree explainer; LightGBM needs its underlying booster
            if MODEL_TYPE == "xgb":
                explainer = shap.TreeExplainer(model)
            else:
                explainer = shap.TreeExplainer(model.booster_)
            # Random sub-sample without replacement
            sample_idx = np.random.choice(X.shape[0], size=min(SHAP_SAMPLES, X.shape[0]), replace=False)
            X_sample = X[sample_idx]

            shap_values = explainer.shap_values(X_sample)

            # Multi-target output arrives as a list of per-target arrays
            if isinstance(shap_values, list):
                # Mean over targets of the per-feature mean |SHAP|
                shap_abs = np.mean([np.abs(sv).mean(axis=0) for sv in shap_values], axis=0)
            else:
                # Single target
                shap_abs = np.abs(shap_values).mean(axis=0)

            results["shap_importance"] = shap_abs
            np.save(SHAP_SAVE_PATH, shap_values)
            print(f"SHAP值已保存至：{SHAP_SAVE_PATH}")

        except Exception as e:
            print(f"SHAP计算失败: {str(e)}")
            results["shap_importance"] = 0
    else:
        # Fall back to the built-in importance when SHAP does not run
        results["shap_importance"] = results["builtin_importance"]

    # Weighted blend of the two importance measures
    results["combined_importance"] = (
            0.7 * results["shap_importance"] + 0.3 * results["builtin_importance"]
    )

    # Normalize to percent
    total = results["combined_importance"].sum()
    if total > 0:
        results["importance_percent"] = results["combined_importance"] / total * 100
    else:
        results["importance_percent"] = 0

    # Grouped report
    print("\n=== 关键特征重要性 ===")
    print("图像特征TOP5:")
    img_top = results[results["type"] == "image"].nlargest(5, "importance_percent")
    print(img_top[["feature", "importance_percent"]])

    print("\n工艺参数重要性:")
    process_imp = results[results["type"] == "process"].sort_values(by="importance_percent", ascending=False)
    print(process_imp[["feature", "importance_percent"]])

    return results


# Parse the process-parameter block above the data table
def parse_process_params(df_raw):
    """
    Parse the process-parameter section at the top of a material sheet.

    Layout assumed by the fixed indices below: row 0 holds parameter
    names, row 1 holds their values, and row 2 is the data-table header
    row — TODO confirm against the actual workbook layout.

    Returns:
    process_params_dict: {param name: float or list[float]} for non-drive params
    drive_col_indices: header-row column indices of drive columns, i.e.
        parameters whose value cell is the sentinel "/"
    header_row_idx: index of the data-table header row (always 2)
    """
    params_dict = {}
    # Column indices of drive columns
    drive_col_indices = []
    # Header row is fixed at the 3rd row (0-based index 2)
    header_row_idx = 2

    # Data-table header row
    header_row = df_raw.iloc[header_row_idx]  # row 3 serves as the table header

    # NOTE(review): range(0, 1) visits only row 0 (the parameter-name row);
    # values are read from row_idx + 1. The original comment claimed "first
    # two rows" — the code processes one.
    for row_idx in range(0, 1):  # row 0 only: parameter names
        # Scan every column of the row
        for col_idx in range(len(df_raw.columns)):
            cell_value = df_raw.iloc[row_idx, col_idx]

            # Skip empty cells
            if pd.isna(cell_value) or cell_value == "":
                continue

            # Does this cell contain one of the known parameter names?
            for param in PROCESS_PARAMS:
                if param in str(cell_value):
                    # Parameter value sits one row below, same column
                    value_cell = df_raw.iloc[row_idx + 1, col_idx]

                    # "/" marks a drive column: its value varies per data row
                    if value_cell == "/":
                        # Find the header column exactly matching the param name
                        for header_col_idx, header_value in enumerate(header_row):
                            # Exact match of the parameter name
                            if not pd.isna(header_value) and header_value == param:
                                # Record as a drive column
                                drive_col_indices.append(header_col_idx)
                                print(f"发现驱动列: {param} (列索引: {header_col_idx})")
                                break  # stop scanning header columns
                        break  # parameter handled; move to the next cell

                    # Multi-value cell (SPLIT_FLAG-joined string)
                    values = []
                    if isinstance(value_cell, str) and SPLIT_FLAG in value_cell:
                        values = [v.strip() for v in value_cell.split(SPLIT_FLAG)]
                    # Single value (the "/" drive case was handled above)
                    elif value_cell != "/":
                        values = [value_cell]

                    # Convert to floats
                    float_values = []
                    for v in values:
                        # Try float first, then a fraction form such as "1/99"
                        try:
                            float_values.append(float(v))
                        except ValueError:
                            # NOTE(review): assumes v is a string here; a non-str
                            # value failing float() would raise TypeError on the
                            # membership test below — confirm cell types.
                            if '/' in v:
                                num, denom = v.split('/')
                                float_values.append(float(num) / float(denom))
                            else:
                                print(f"警告：无法转换参数值 '{v}' 为浮点数")

                    # Store scalar for single values, list for multi-values
                    if float_values:
                        if len(float_values) == 1:
                            params_dict[param] = float_values[0]
                        else:
                            # Multi-valued parameter kept as a list
                            params_dict[param] = float_values

                    # Done with this cell
                    break

    return params_dict, drive_col_indices, header_row_idx


def isolate_fiber_region(image):
    """
    Crop away the bottom 15% of the image (which contains the text
    banner) and keep the top 85% (the fiber-network region).

    Returns the cropped image, or the original image unchanged if
    cropping fails for any reason.
    """
    try:
        full_width, full_height = image.size
        # Keep the top 85% of the height
        keep_height = int(full_height * 0.85)
        # Crop box is (left, top, right, bottom): from the top edge down
        return image.crop((0, 0, full_width, keep_height))
    except Exception as e:
        print(f"底部裁剪失败: {str(e)}")
        return image


def extract_image_features(img_path):
    """
    Extract a flattened ResNet50 feature vector from a single image file.

    Pipeline: open the .tif, crop off the bottom text banner, normalize
    16-bit grayscale to 8-bit, save a debug copy, convert to RGB at
    IMAGE_SIZE and run the cached feature extractor.

    Returns a 1-D numpy array of pooled features, or None on any error.
    """
    try:
        # Open the tif directly; do not convert to 'L' yet so 16-bit
        # depth can still be detected below
        img = Image.open(img_path)

        # Original mode, used to special-case 16-bit images
        img_mode = img.mode

        # Crop away the bottom banner, keeping the fiber region
        fiber_img = isolate_fiber_region(img)

        # Save the processed image for visual debugging
        debug_path = os.path.join(IMAGE_DEBUG_PATH, os.path.basename(img_path))
        os.makedirs(os.path.dirname(debug_path), exist_ok=True)

        # Special handling for 16-bit grayscale images
        if img_mode == 'I;16':  # 16-bit grayscale
            # To numpy for the range conversion
            img_array = np.array(fiber_img)
            # Normalize to 0-255 and cast down to 8 bits
            if img_array.max() > 0:
                img_array = (img_array / img_array.max() * 255).astype(np.uint8)
            else:
                img_array = img_array.astype(np.uint8)
            # Back to an 8-bit grayscale PIL image
            fiber_img = Image.fromarray(img_array, mode='L')
        else:
            # Other modes: plain grayscale conversion before saving
            fiber_img = fiber_img.convert('L')
        fiber_img.save(debug_path)

        # ResNet50 requires 3-channel RGB input at a fixed size
        fiber_img = fiber_img.convert('RGB')
        fiber_img = fiber_img.resize(IMAGE_SIZE)

        img_array = np.array(fiber_img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = preprocess_input(img_array)

        # Run the cached extractor on the single-image batch
        feature_model = get_feature_extractor()
        features = feature_model.predict(img_array, verbose=0)
        return features.flatten()
    except Exception as e:
        print(f"Error processing image {img_path}: {str(e)}")
        return None


# Clear the TensorFlow session to release memory
def clear_tensorflow_session():
    """Release graph/session memory held by the Keras backend."""
    k.clear_session()
    print("Tensorflow session cleared")
