import os

import joblib
import numpy as np
import pandas as pd
from PIL import Image
from flask import Blueprint, jsonify
from openpyxl import load_workbook
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from tensorflow.keras import applications, models, backend as k, layers

from utils.config_utils import config

# Convenience aliases for the Keras pieces used below.
preprocess_input = applications.resnet50.preprocess_input
ResNet50 = applications.resnet50.ResNet50
Model = models.Model
GlobalAveragePooling2D = layers.GlobalAveragePooling2D

# Disable GPU usage (feature extraction runs on CPU only).
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Flask blueprint exposing the predict/train endpoint.
bp_sem_resnet50_rf_predict_electret = Blueprint('bp_sem_resnet50_rf_predict_electret', __name__)

# ====================== Configuration ======================
BASE_DATA_PATH = config.get("path").get("sem_data")  # root folder for SEM data
EXCEL_PATH = os.path.join(BASE_DATA_PATH, "melt_cnn_data_dev.xlsx")  # source workbook
MODEL_SAVE_PATH = os.path.join(BASE_DATA_PATH, "fusion_model_dev.pkl")  # trained model output
SCALER_SAVE_PATH = os.path.join(BASE_DATA_PATH, "feature_scaler_dev.pkl")  # fitted scaler output
IMAGE_DEBUG_PATH = os.path.join(BASE_DATA_PATH, "img_debug")  # dump dir for preprocessed images
IMAGE_SIZE = (224, 224)  # standard ResNet50 input size

# Columns required in every material data table.
DATA_COLUMNS = [
    "序号", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
]

# Process-parameter column names (matching the Excel sheet layout).
PROCESS_PARAMS = [
    "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）",
    "挤出速度（g/min）", "驻极水压（MPa）", "驻极速度（m/min）",
    "烘干温度（℃）", "混合物配比（辅料/主料）"
]

# Target columns (the values the model predicts).
TARGET_COLUMNS = ["表面电势（V）", "电荷量（pC）"]

# Separator used inside multi-valued cells.
SPLIT_FLAG = "|,|"

# Drive-column mapping: drive column name -> standardized column name.
DRIVE_COL_MAPPING = {
    "驻极水压（MPa）": "驻极水压（MPa）",
    "驻极速度（m/min）": "驻极速度（m/min）",
    "混合物配比（辅料/主料）": "混合物配比（辅料/主料）"
}


def get_feature_extractor():
    """Return a lazily-built, cached ResNet50 feature extractor.

    On first call, builds ResNet50 (ImageNet weights, no classification
    head) with a global-average-pooling output and stores it as a function
    attribute; later calls reuse the same model instance.
    """
    cached = getattr(get_feature_extractor, "model", None)
    if cached is None:
        print("正在加载ResNet50特征提取器...")
        backbone = ResNet50(weights='imagenet', include_top=False)
        pooled = GlobalAveragePooling2D()(backbone.output)
        cached = Model(inputs=backbone.input, outputs=pooled)
        get_feature_extractor.model = cached
        print("ResNet50特征提取器加载完成")
    return cached


# API entry point
@bp_sem_resnet50_rf_predict_electret.route('/dhu-ai/api/sem-resnet50-rf-predict', methods=['POST'])
def predict_endpoint():
    """Microservice endpoint: trigger the data-processing/training pipeline.

    Runs process_data() and relays its status; the TensorFlow session is
    cleared afterwards regardless of the outcome. Unexpected errors are
    reported as HTTP 500 with an error payload.
    """
    try:
        print(f"获取数据路径: {BASE_DATA_PATH}")
        # Run the full pipeline and shape its result for the response.
        outcome = process_data()
        details = {
            "processed_sheets": outcome.get("processed_sheets", 0),
            "processed_samples": outcome.get("processed_samples", 0),
            "feature_dimension": outcome.get("feature_dimension", 0)
        }
        return jsonify({
            "status": outcome["status"],
            "message": outcome["message"],
            "details": details
        })
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": f"接口错误: {str(e)}"
        }), 500
    finally:
        clear_tensorflow_session()


# Main data-processing entry point
def process_data():
    """Merge material sheets, build fused features per sample, train the model.

    Pipeline:
      1. Consolidate all "材料*" sheets into a single merge_data sheet.
      2. For each merged row, average ResNet50 features over its SEM images
         and concatenate them with the numeric process parameters.
      3. Train the fusion RandomForest on samples with complete targets.

    Returns:
        A status dict with keys "status" and "message", plus
        "processed_sheets" / "processed_samples" / "feature_dimension"
        when data was processed. Never raises: errors are reported in the
        returned dict.
    """
    try:
        # Step 1: merge all material sheets into merge_data.
        merge_sheets_to_merge_data()

        # Step 2: read back the consolidated sheet.
        xl = pd.ExcelFile(EXCEL_PATH)
        if 'merge_data' not in xl.sheet_names:
            print("未找到merge_data sheet")
            return {
                "status": "error",
                "message": "未找到merge_data sheet"
            }

        print("\n处理表：merge_data")
        df_merge = pd.read_excel(EXCEL_PATH, sheet_name='merge_data')

        # Validate the expected column layout before touching any row.
        required_columns = [
            "序号", "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）", "挤出速度（g/min）",
            "驻极水压（MPa）", "驻极速度（m/min）", "烘干温度（℃）", "混合物配比（辅料/主料）",
            "主料", "辅料", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
        ]
        if not all(col in df_merge.columns for col in required_columns):
            print(f"merge_data sheet缺少必要的列：{df_merge.columns}")
            return {
                "status": "error",
                "message": "merge_data sheet缺少必要的列"
            }

        all_results = []

        # Process each merged row into one training sample.
        for idx, row in df_merge.iterrows():
            # A row without image paths cannot contribute image features.
            if pd.isna(row['SEM图片路径']):
                print(f"Row {idx + 1}: 缺少图片路径")
                continue

            # Split the multi-image cell and keep only .tif files.
            img_files = [f.strip() for f in str(row['SEM图片路径']).split(SPLIT_FLAG)]
            img_files = [f for f in img_files if f.endswith('.tif')]
            if not img_files:
                print(f"Row {idx + 1}: 无有效的图片文件")
                continue

            # Images live in a folder named after the main material.
            img_folder = row['主料']
            full_paths = [os.path.join(BASE_DATA_PATH, img_folder, f) for f in img_files]

            # Extract features from every existing image of this sample.
            img_features_list = []
            valid_paths = []
            for path in full_paths:
                if os.path.exists(path):
                    features = extract_image_features(path)
                    if features is not None:
                        img_features_list.append(features)
                        valid_paths.append(path)
                else:
                    print(f"图片不存在: {path}")

            if not img_features_list:
                print(f"Row {idx + 1}: 无有效的图像特征")
                continue

            # Average the features across this sample's images.
            avg_img_features = np.mean(img_features_list, axis=0)

            # Convert every process parameter to a single float.
            process_features = [
                _parse_process_value(param, row[param], idx + 1)
                for param in PROCESS_PARAMS
            ]

            # Fuse image and process features into one flat vector.
            fused_features = np.concatenate([
                avg_img_features,
                np.array(process_features)
            ])

            # Collect the target values (may be partially missing).
            target_values = {}
            missing_targets = []
            for target_col in TARGET_COLUMNS:
                if target_col in row and not pd.isna(row[target_col]):
                    try:
                        target_values[target_col] = float(row[target_col])
                    except (ValueError, TypeError):
                        print(f"Row {idx + 1}: 无法转换目标列'{target_col}'的值")
                        target_values[target_col] = None
                else:
                    missing_targets.append(target_col)

            if missing_targets:
                print(f"Row {idx + 1}: 缺失目标值 {missing_targets}")

            all_results.append({
                "sheet": row['主料'],
                "row_index": int(row['序号']),
                "image_paths": valid_paths,
                "process_params": {param: row[param] for param in PROCESS_PARAMS},
                "fused_features": fused_features.tolist(),
                "targets": target_values
            })

        # Step 3: train the fusion model on the collected samples.
        if not all_results:
            return {
                "status": "warning",
                "message": "未处理任何有效数据",
                "processed_sheets": 1,
                "processed_samples": 0
            }

        model, scaler = train_fusion_model(all_results)
        if model and scaler:
            return {
                "status": "success",
                "message": "数据处理和模型训练完成",
                "processed_sheets": 1,  # only the merge_data sheet is processed
                "processed_samples": len(all_results),
                "feature_dimension": len(all_results[0]['fused_features'])
            }
        return {
            "status": "warning",
            "message": "处理完成但未训练模型（缺少目标值）",
            "processed_sheets": 1,
            "processed_samples": len(all_results)
        }

    except Exception as e:
        import traceback
        traceback.print_exc()
        return {
            "status": "error",
            "message": f"处理失败: {str(e)}"
        }
    finally:
        clear_tensorflow_session()


def _parse_process_value(param, raw, row_num):
    """Convert one process-parameter cell to a float.

    Handles:
      * missing cells (NaN) -> 0.0 with a warning;
      * multi-valued cells ("a|,|b") -> mean of the parts;
      * fraction strings for the mixture-ratio column ("1/99") -> quotient;
      * plain numeric values -> float.

    Unparseable values fall back to 0.0 with a warning naming the offender.
    """
    if pd.isna(raw):
        print(f"警告: Row {row_num} 缺少工艺参数 {param}，已用0填充")
        return 0.0
    # Multi-valued parameter (e.g. screw temperature): average the parts.
    if isinstance(raw, str) and SPLIT_FLAG in raw:
        try:
            return float(np.mean([float(v.strip()) for v in raw.split(SPLIT_FLAG)]))
        except ValueError:
            print(f"警告: Row {row_num} 无法转换多值参数 {param}，已用0填充")
            return 0.0
    # Mixture ratio written as a fraction (auxiliary/main material).
    if param == "混合物配比（辅料/主料）" and isinstance(raw, str) and '/' in raw:
        try:
            num, denom = raw.split('/')
            return float(num) / float(denom)
        except (ValueError, ZeroDivisionError):
            # BUGFIX: log the original cell content — the old code reset the
            # variable to 0.0 first and then printed 0.0, hiding the offender.
            print(f"警告: Row {row_num} 无法转换混合物配比 {raw}，已用0填充")
            return 0.0
    try:
        return float(raw)
    except (ValueError, TypeError):
        print(f"警告: Row {row_num} 无法转换工艺参数 {param}，已用0填充")
        return 0.0


# Consolidate every material sheet into the merge_data sheet
def merge_sheets_to_merge_data():
    """Merge all sheets named "材料*" into one normalized merge_data sheet.

    For each material sheet the process-parameter header area is parsed via
    parse_process_params(); constant parameters are copied onto every data
    row, while drive columns (marked "/" in the header area) take their
    per-row values from the data table. The result is written back into
    EXCEL_PATH as the merge_data sheet, replacing any previous one.
    Failures are logged, not raised.
    """
    try:
        xl = pd.ExcelFile(EXCEL_PATH)

        # Sheets named "材料*" hold the raw per-material tables.
        sheet_names = [name for name in xl.sheet_names if name.startswith("材料")]
        print(f"找到: {len(sheet_names)}个材料表: {sheet_names}")
        if not sheet_names:
            print("没有需要合并的材料表")
            return

        all_merged_data = []

        # Column order of the resulting merge_data sheet.
        merge_columns = [
            "序号", "螺杆温度（℃）", "模头温度（℃）", "风压（MPa）", "挤出速度（g/min）",
            "驻极水压（MPa）", "驻极速度（m/min）", "烘干温度（℃）", "混合物配比（辅料/主料）",
            "主料", "辅料", "表面电势（V）", "电荷量（pC）", "SEM图片路径"
        ]

        for sheet_name in sheet_names:
            print(f"处理表：{sheet_name}")

            # Header-less read so the parameter area above the table is visible.
            df_raw = pd.read_excel(EXCEL_PATH, sheet_name=sheet_name, header=None)

            try:
                # Parameter dict, drive-column indices, and header row index.
                process_params_dict, drive_col_indices, header_row_idx = parse_process_params(df_raw)
                print(f"Sheet: {sheet_name}工艺参数：{process_params_dict}")
                print(f"驱动列索引: {drive_col_indices}")
            except ValueError as e:
                print(f"Sheet: {sheet_name}解析失败：{str(e)}")
                continue

            # Re-read the data-table portion with the detected header row.
            df_data = pd.read_excel(EXCEL_PATH, sheet_name=sheet_name, header=header_row_idx)

            if not all(col in df_data.columns for col in DATA_COLUMNS):
                print(f"Sheet：{sheet_name}缺少必要的列：{df_data.columns}")
                continue

            for _, row in df_data.iterrows():
                # Rows without a serial number are padding/footer rows.
                if pd.isna(row['序号']):
                    print(f"跳过行（缺少序号）: {row}")
                    continue

                new_row = {
                    "序号": row['序号'],
                    "主料": sheet_name,
                    "辅料": row['辅料'] if '辅料' in row else '',
                    "表面电势（V）": row['表面电势（V）'],
                    "电荷量（pC）": row['电荷量（pC）'],
                    "SEM图片路径": row['SEM图片路径']
                }

                # Constant (non-drive) parameters from the header area.
                for param, value in process_params_dict.items():
                    if isinstance(value, list):
                        # Multi-valued parameters are stored as a joined string.
                        new_row[param] = SPLIT_FLAG.join(map(str, value))
                    else:
                        new_row[param] = value

                # Drive columns: per-row values come from the data table.
                for col_idx in drive_col_indices:
                    if col_idx < len(row):
                        drive_col_name = df_data.columns[col_idx]
                        if drive_col_name in DRIVE_COL_MAPPING:
                            std_col_name = DRIVE_COL_MAPPING[drive_col_name]
                            # Read via the sheet's own column name, store
                            # under the standardized name.
                            new_row[std_col_name] = row[drive_col_name]

                all_merged_data.append(new_row)

        df_merge = pd.DataFrame(all_merged_data, columns=merge_columns)

        # Write merge_data back. With pandas>=1.3, if_sheet_exists='replace'
        # already swaps out an existing sheet, so the previous manual
        # load_workbook()/remove() step was dead code: the modified workbook
        # was never saved, making it a no-op.
        if os.path.exists(EXCEL_PATH):
            with pd.ExcelWriter(
                    EXCEL_PATH,
                    engine='openpyxl',
                    mode='a',
                    if_sheet_exists='replace'
            ) as writer:
                df_merge.to_excel(writer, sheet_name='merge_data', index=False)
        else:
            # No workbook yet: create a fresh file with the single sheet.
            df_merge.to_excel(EXCEL_PATH, sheet_name='merge_data', index=False)

        print(f"成功合并数据到merge_data sheet，共{len(df_merge)}行")

    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f"合并数据失败: {str(e)}")


# Train the fused image + process-parameter model
def train_fusion_model(all_data):
    """Fit a multi-output RandomForest on the fused feature vectors.

    Only samples with every target value present are used. The fitted model
    and the StandardScaler are persisted to MODEL_SAVE_PATH / SCALER_SAVE_PATH.

    Returns:
        (model, scaler) on success, or (None, None) when no usable data
        exists or feature dimensions are inconsistent.
    """
    if not all_data:
        print("无有效数据用于训练")
        return None, None

    # Every sample must share a single feature dimension.
    dims = {len(item['fused_features']) for item in all_data}
    if len(dims) > 1:
        print(f"错误: 特征维度不一致! 发现维度: {dims}")
        # Dump per-sample dimensions to help locate the offender.
        for i, item in enumerate(all_data):
            print(f"样本 {i} (Sheet: {item['sheet']}, Row: {item['row_index']}): 维度={len(item['fused_features'])}")
        return None, None

    # Keep only samples where every target value is available.
    features = []
    targets = {col: [] for col in TARGET_COLUMNS}
    for item in all_data:
        sample_targets = item['targets']
        if any(sample_targets.get(col) is None for col in TARGET_COLUMNS):
            continue
        features.append(item['fused_features'])
        for col in TARGET_COLUMNS:
            targets[col].append(sample_targets[col])

    if not features:
        print("无有效的训练样本")
        return None, None

    # Stack targets into an (n_samples, n_targets) array.
    target_array = np.column_stack([targets[col] for col in TARGET_COLUMNS])

    # Standardize features before fitting.
    scaler = StandardScaler()
    x_scaled = scaler.fit_transform(features)

    # RandomForest handles multi-output regression natively.
    model = RandomForestRegressor(
        n_estimators=200,
        max_depth=15,
        min_samples_split=5,
        random_state=42,
        n_jobs=-1
    )
    model.fit(x_scaled, target_array)

    # Persist the model and its scaler for later prediction.
    joblib.dump(model, MODEL_SAVE_PATH)
    joblib.dump(scaler, SCALER_SAVE_PATH)

    print(f"模型训练完成，保存到: {MODEL_SAVE_PATH}")
    print(f"特征标准化器保存到: {SCALER_SAVE_PATH}")
    print(f"模型训练完成，支持{len(TARGET_COLUMNS)}个目标变量：{TARGET_COLUMNS}")
    return model, scaler


# Parse the process-parameter header area of a raw sheet
def parse_process_params(df_raw):
    """
    Parse the process-parameter area above the data table of a sheet.

    Layout assumption: row 0 holds parameter names, row 1 the corresponding
    values, and row 2 (0-based) is the data-table header row. A value cell
    of "/" marks a drive column whose per-row values live in the data table.

    Returns:
        params_dict: parameter name -> float, or list[float] for
            multi-valued cells
        drive_col_indices: header-row column indices of drive columns
        header_row_idx: index of the data-table header row (always 2)
    """
    params_dict = {}
    drive_col_indices = []
    # The data-table header is fixed at the third row (0-based index 2).
    header_row_idx = 2
    header_row = df_raw.iloc[header_row_idx]

    # Only row 0 carries parameter names (row 1 carries their values).
    for row_idx in range(0, 1):
        for col_idx in range(len(df_raw.columns)):
            cell_value = df_raw.iloc[row_idx, col_idx]

            # Skip empty cells.
            if pd.isna(cell_value) or cell_value == "":
                continue

            for param in PROCESS_PARAMS:
                if param not in str(cell_value):
                    continue
                # The value sits directly below the parameter name.
                value_cell = df_raw.iloc[row_idx + 1, col_idx]

                # "/" marks a drive column: locate its header-row index.
                if value_cell == "/":
                    for header_col_idx, header_value in enumerate(header_row):
                        # Exact match against the parameter name.
                        if not pd.isna(header_value) and header_value == param:
                            drive_col_indices.append(header_col_idx)
                            print(f"发现驱动列: {param} (列索引: {header_col_idx})")
                            break
                    break

                # Split multi-valued cells on the configured separator.
                if isinstance(value_cell, str) and SPLIT_FLAG in value_cell:
                    values = [v.strip() for v in value_cell.split(SPLIT_FLAG)]
                else:
                    values = [value_cell]

                # Convert to floats, accepting fraction strings like "1/99".
                float_values = []
                for v in values:
                    try:
                        float_values.append(float(v))
                    except (ValueError, TypeError):
                        # BUGFIX: guard the fraction fallback — "1/0" raised
                        # ZeroDivisionError and non-str cells raised TypeError,
                        # both escaping past the caller's ValueError handler.
                        if isinstance(v, str) and '/' in v:
                            try:
                                num, denom = v.split('/')
                                float_values.append(float(num) / float(denom))
                            except (ValueError, ZeroDivisionError):
                                print(f"警告：无法转换参数值 '{v}' 为浮点数")
                        else:
                            print(f"警告：无法转换参数值 '{v}' 为浮点数")

                # Store a scalar for single values, a list otherwise.
                if float_values:
                    if len(float_values) == 1:
                        params_dict[param] = float_values[0]
                    else:
                        params_dict[param] = float_values

                # Stop scanning parameter names once this cell matched.
                break

    return params_dict, drive_col_indices, header_row_idx


def isolate_fiber_region(image, keep_fraction=0.85):
    """
    Crop away the bottom strip of a SEM image (which carries a text banner),
    keeping the top portion that shows the fiber web.

    Args:
        image: a PIL-style image exposing ``.size`` and ``.crop()``.
        keep_fraction: fraction of the image height to keep from the top.
            Defaults to 0.85 (i.e. drop the bottom 15%), matching the
            original hard-coded behavior.

    Returns:
        The cropped image, or the original image unchanged if cropping
        fails for any reason (best-effort).
    """
    try:
        width, height = image.size
        kept_height = int(height * keep_fraction)
        # Crop box is (left, top, right, bottom): keep the top slice.
        return image.crop((0, 0, width, kept_height))
    except Exception as e:
        # Best-effort: fall back to the uncropped image on any failure.
        print(f"底部裁剪失败: {str(e)}")
        return image


def extract_image_features(img_path):
    """
    Extract a ResNet50 feature vector from a single image file.

    Steps: crop off the bottom text banner, normalize to 8-bit grayscale
    (16-bit inputs are rescaled to the full 0-255 range), save the
    preprocessed image for debugging, then convert to RGB at IMAGE_SIZE
    and run it through the cached ResNet50 extractor.

    Returns:
        A flat numpy array of features, or None on any failure.
    """
    try:
        # BUGFIX: open the image in a context manager so the file handle is
        # released even on failure (the original Image.open was never closed).
        with Image.open(img_path) as img:
            # Remember the original mode before cropping.
            img_mode = img.mode

            # Isolate the fiber region (drop the bottom banner).
            fiber_img = isolate_fiber_region(img)

            # Save the preprocessed image for visual debugging.
            debug_path = os.path.join(IMAGE_DEBUG_PATH, os.path.basename(img_path))
            os.makedirs(os.path.dirname(debug_path), exist_ok=True)

            if img_mode == 'I;16':  # 16-bit grayscale: rescale to 8-bit
                img_array = np.array(fiber_img)
                # Normalize to the 0-255 range, guarding the all-zero case.
                if img_array.max() > 0:
                    img_array = (img_array / img_array.max() * 255).astype(np.uint8)
                else:
                    img_array = img_array.astype(np.uint8)
                fiber_img = Image.fromarray(img_array, mode='L')
            else:
                # Other modes: plain grayscale conversion before saving.
                fiber_img = fiber_img.convert('L')
            fiber_img.save(debug_path)

            # ResNet50 expects three-channel RGB input at a fixed size.
            fiber_img = fiber_img.convert('RGB')
            fiber_img = fiber_img.resize(IMAGE_SIZE)

            batch = np.expand_dims(np.array(fiber_img), axis=0)
            batch = preprocess_input(batch)

            # Run the cached extractor on the single-image batch.
            feature_model = get_feature_extractor()
            features = feature_model.predict(batch, verbose=0)
            return features.flatten()
    except Exception as e:
        print(f"Error processing image {img_path}: {str(e)}")
        return None


# Clear the TensorFlow session to release memory between requests.
def clear_tensorflow_session():
    """Release Keras/TensorFlow graph state and the memory it holds."""
    k.clear_session()
    print("Tensorflow session cleared")
