import os
import json
import threading
from io import BytesIO
import zipfile
from datetime import datetime
import glob
import shutil
import platform
import signal
import atexit

# 注意：macOS 的 multiprocessing 设置将在 __main__ 中进行

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms

import requests
from PIL import Image
import numpy as np

from flask import Flask, render_template, request, jsonify, send_file
from flask_socketio import SocketIO

# 导入数据库连接库
try:
    import mysql.connector
    from mysql.connector import Error
    MYSQL_AVAILABLE = True
except ImportError:
    MYSQL_AVAILABLE = False
    print("Warning: mysql-connector-python not installed. JSON generation feature will be disabled.")

# Load app configuration (Config class + env-name -> config-object mapping).
from config import Config, config

# Resolve the runtime environment; anything unknown falls back to production.
env = os.environ.get('FLASK_ENV', 'production')  # default: production
app_config = config.get(env, config['production'])

print(f"🌍 运行环境: {env}")
print(f"🔧 调试模式: {app_config.DEBUG}")

# Volcengine TOS client library (optional dependency).
try:
    import tos
    TOS_AVAILABLE = True  # checked before taking the TOS download path
except ImportError:
    TOS_AVAILABLE = False
    print("Warning: tos library not installed. Will use HTTP requests for image download.")

# ----------------- Flask and SocketIO setup -----------------
app = Flask(__name__)
# Load settings for the environment resolved above.
app.config.from_object(app_config)
app.config['UPLOAD_FOLDER'] = app_config.UPLOAD_FOLDER
app.config['MODEL_FOLDER'] = app_config.MODEL_FOLDER

# On macOS use threading mode to avoid eventlet's multiprocessing problems.
if platform.system() == 'Darwin':
    socketio = SocketIO(app, async_mode='threading', cors_allowed_origins="*")
else:
    socketio = SocketIO(app, async_mode='eventlet', cors_allowed_origins="*")

os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['MODEL_FOLDER'], exist_ok=True)

# --- Module-level state tracking the active model ---
current_model_name = None  # basename of the active trained_model_*.pth
generated_json_filename = None  # name of the last JSON produced by the SQL export

def cleanup_resources():
    """Best-effort release of cached accelerator memory held by PyTorch.

    Never raises; failures are printed and swallowed so shutdown paths
    can call this unconditionally.
    """
    try:
        if torch.cuda.is_available():
            # CUDA: drop the caching allocator's unused blocks.
            torch.cuda.empty_cache()
        elif torch.backends.mps.is_available():
            # Older torch builds expose MPS without empty_cache; guard for it.
            mps_module = getattr(torch, 'mps', None)
            if mps_module is not None and hasattr(mps_module, 'empty_cache'):
                mps_module.empty_cache()

        print("🧹 资源清理完成")
    except Exception as e:
        print(f"⚠️ 清理资源时出错: {e}")

def signal_handler(signum, frame):
    """Handle SIGINT/SIGTERM: release resources, then terminate immediately.

    Uses os._exit (not sys.exit) so the process dies without re-running
    atexit hooks or waiting on background training threads.

    Args:
        signum: Signal number delivered by the OS.
        frame: Current stack frame (unused).
    """
    print(f"\n📡 接收到信号 {signum}")
    try:
        cleanup_resources()
    except Exception:
        # Best-effort cleanup during shutdown. The previous bare `except:`
        # also swallowed SystemExit/KeyboardInterrupt; Exception is enough.
        pass
    os._exit(0)

# 注册清理函数和信号处理（仅在主进程中）
# 将注册逻辑移至 __main__ 块中，以确保它在主进程中运行
# atexit.register(cleanup_resources)
# signal.signal(signal.SIGINT, signal_handler)
# signal.signal(signal.SIGTERM, signal_handler)

def get_device():
    """Return the torch device used for all training and inference.

    CUDA/MPS selection was deliberately disabled in this codebase; the
    app currently pins every computation to the CPU.
    """
    return torch.device("cpu")

def get_optimal_workers():
    """Return the DataLoader worker count.

    Always 0: in-process data loading sidesteps multiprocessing problems
    with Flask-SocketIO, particularly on macOS.
    """
    return 0

def get_model_transform(is_training=True):
    """Build the image preprocessing pipeline (ImageNet normalization).

    Training uses Resize(256) + CenterCrop(224); inference resizes
    straight to 224x224. Both paths end with ToTensor + Normalize using
    the standard ImageNet statistics.
    """
    imagenet_norm = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    if is_training:
        resize_steps = [transforms.Resize(256), transforms.CenterCrop(224)]
    else:
        resize_steps = [transforms.Resize((224, 224))]
    return transforms.Compose(resize_steps + [transforms.ToTensor(), imagenet_norm])

def create_resnext_model(num_classes, device):
    """Build a pretrained ResNeXt-50 (32x4d) with a fresh classifier head.

    Tries the modern torchvision `weights=` API first and falls back to
    the legacy `pretrained=True` flag on older versions.

    Args:
        num_classes: Output dimension of the replacement fc layer.
        device: torch.device the model is moved to.

    Returns:
        The model on `device`, fc layer replaced for `num_classes`.
    """
    try:
        backbone = models.resnext50_32x4d(
            weights=models.ResNeXt50_32X4D_Weights.DEFAULT
        )
    except Exception as e:
        # Older torchvision: no Weights enum; use the legacy flag.
        print(f"Warning: {e}. Falling back to legacy weight loading.")
        backbone = models.resnext50_32x4d(pretrained=True)
    backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
    return backbone.to(device)

def get_latest_model():
    """Point `current_model_name` at the newest trained_model_*.pth on disk.

    Leaves it as None when no model exists or the directory scan fails.
    """
    global current_model_name
    try:
        pattern = os.path.join(app.config['MODEL_FOLDER'], 'trained_model_*.pth')
        candidates = glob.glob(pattern)
        if candidates:
            # Newest by creation/metadata time.
            newest = max(candidates, key=os.path.getctime)
            current_model_name = os.path.basename(newest)
        else:
            current_model_name = None
    except Exception as e:
        print(f"Error getting latest model: {e}")
        current_model_name = None

# ----------------- PyTorch 自定义数据集 -----------------
class ImageFromURLDataset(Dataset):
    """Dataset yielding (image_tensor, label_index) pairs from URL records.

    The JSON file is a list of ``{"image": {...}}`` records; each record
    must carry a ``url`` and a ``point_code`` (the class label). Images
    are downloaded lazily in ``__getitem__``.
    """

    def __init__(self, json_path, transform=None):
        with open(json_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

        self.transform = transform

        # Deterministic (sorted) label order -> stable label/index mapping.
        unique_codes = {entry['image']['point_code'] for entry in self.data}
        self.labels = sorted(unique_codes)
        self.label_to_idx = {code: i for i, code in enumerate(self.labels)}

        # Images could be pre-downloaded and cached here for performance;
        # for simplicity they are fetched on demand in __getitem__.

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]['image']
        image_url = record['url']
        target = self.label_to_idx[record['point_code']]

        try:
            # Dispatches to HTTP or TOS depending on the URL shape.
            image = download_image_from_url(image_url, max_retries=3, timeout=15)

            # Purge temp .jpg files older than five minutes.
            cleanup_temp_files('./temp/', 5)

        except Exception as e:
            print(f"警告: 无法加载图片 {image_url}. 错误: {e}. 将使用一张占位图.")
            # Black placeholder keeps training running past a bad URL.
            image = Image.new('RGB', (224, 224), color='black')

        if self.transform:
            try:
                image = self.transform(image)
            except Exception as e:
                print(f"警告: 图像预处理失败. 错误: {e}. 使用原始图像.")
                # Minimal fallback so a tensor is still returned.
                image = transforms.ToTensor()(image)

        return image, target

    def get_num_classes(self):
        return len(self.labels)

    def get_labels(self):
        return self.labels

# ----------------- 模型训练逻辑 -----------------
def run_training(json_path):
    """Train a ResNeXt classifier on the dataset described by `json_path`.

    Intended to run as a SocketIO background task. Progress is streamed
    to clients via 'status_update' / 'update_stats' events; on success a
    'training_complete' event is emitted, a timestamped model + labels
    file pair is written to MODEL_FOLDER, and the module-level
    `current_model_name` is updated.
    """
    socketio.emit('status_update', {'msg': '训练准备中：正在初始化数据集...'})
    
    # 1. Data preparation
    transform = get_model_transform(is_training=True)

    try:
        dataset = ImageFromURLDataset(json_path=json_path, transform=transform)
        num_classes = dataset.get_num_classes()
        if num_classes == 0:
            socketio.emit('status_update', {'msg': '错误：JSON文件中未找到有效的图片或标签。'})
            return

        # A small subset could be used for demos to limit download time:
        # subset_indices = list(range(min(len(dataset), 200))) # adjust as needed
        # subset = torch.utils.data.Subset(dataset, subset_indices)
        # dataloader = DataLoader(subset, batch_size=16, shuffle=True, num_workers=0)

        # num_workers=0 (see get_optimal_workers) avoids multiprocessing
        # resource leaks, notably on macOS under Flask-SocketIO.
        num_workers = get_optimal_workers()
        dataloader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=num_workers)
        
    except Exception as e:
        socketio.emit('status_update', {'msg': f'错误：无法加载数据集。 {e}'})
        return

    # 2. Model setup
    socketio.emit('status_update', {'msg': f'训练准备中：正在加载ResNeXt模型，共 {num_classes} 个分类...'})
    
    device = get_device()
    model = create_resnext_model(num_classes, device)

    if device.type == 'mps':
        print("MPS is available:", torch.backends.mps.is_available())
        print("MPS is built:", torch.backends.mps.is_built())
        print("Torch version:", torch.__version__)

    # 3. Loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    # 4. Training loop
    num_epochs = 10  # demo epoch count
    socketio.emit('status_update', {'msg': f'训练开始！设备: {device}, 总轮次: {num_epochs}'})

    total_steps = len(dataloader)
    print("##START###") # debug marker printed before the training loop, per user request
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        running_corrects = 0
        total_samples = 0

        for i, (inputs, labels) in enumerate(dataloader):
            try:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                with torch.set_grad_enabled(True):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    loss.backward()
                    optimizer.step()

                # Accumulate sample-weighted loss and correct predictions.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                total_samples += inputs.size(0)
                
                # Push live stats to the front end.
                current_loss = running_loss / total_samples
                current_acc = running_corrects.double() / total_samples * 100
                
                socketio.emit('update_stats', {
                    'epoch': epoch + 1,
                    'step': i + 1,
                    'total_steps': total_steps,
                    'loss': current_loss,
                    'accuracy': current_acc.item()
                })
                socketio.sleep(0.01) # yield so the server can handle other requests
                
            except Exception as e:
                socketio.emit('status_update', {'msg': f'训练步骤出错: {e}'})
                continue

        # Release accelerator memory between epochs.
        if device.type == 'cuda':
            torch.cuda.empty_cache()
        elif device.type == 'mps':
            # Older torch builds expose MPS without empty_cache; guard for it.
            if hasattr(torch, 'mps') and hasattr(torch.mps, 'empty_cache'):
                torch.mps.empty_cache()
            
        epoch_loss = running_loss / len(dataloader.dataset)
        epoch_acc = running_corrects.double() / len(dataloader.dataset) * 100
        
        socketio.emit('status_update', {
            'msg': f' 设备：{device} | 轮次 {epoch + 1}/{num_epochs} 完成 | 损失: {epoch_loss:.4f} | 准确率: {epoch_acc:.2f}%'
        })

    # --- Save the model and labels under a shared timestamp ---
    try:
        global current_model_name
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        model_filename = f"trained_model_{timestamp}.pth"
        labels_filename = f"labels_{timestamp}.json"
        
        model_path = os.path.join(app.config['MODEL_FOLDER'], model_filename)
        labels_path = os.path.join(app.config['MODEL_FOLDER'], labels_filename)
        
        torch.save(model.state_dict(), model_path)
        with open(labels_path, 'w', encoding='utf-8') as f:
            json.dump(dataset.get_labels(), f, ensure_ascii=False, indent=4)

        # Make the freshly trained model the active one.
        current_model_name = model_filename
        socketio.emit('training_complete', {
            'msg': f'训练已成功完成！模型已保存为 {model_filename}',
            'model_name': current_model_name
        })
    except Exception as e:
        socketio.emit('status_update', {'msg': f'保存模型时出错: {e}'})

# ----------------- 模型验证逻辑 -----------------
def run_validation(json_path):
    """Evaluate the current model on the dataset described by `json_path`.

    Intended to run as a SocketIO background task. Streams progress via
    'status_update' / 'update_validation_stats' events and finishes with
    a 'validation_complete' event. Requires `current_model_name` and its
    matching labels_*.json to exist in MODEL_FOLDER.
    """
    global current_model_name
    if not current_model_name:
        socketio.emit('status_update', {'msg': '错误：当前没有可用的模型。请先训练或上传一个模型。'})
        return

    socketio.emit('status_update', {'msg': '验证准备中：正在加载模型和数据...'})
    
    model_path = os.path.join(app.config['MODEL_FOLDER'], current_model_name)
    # The labels file shares the model's timestamp: labels_<ts>.json
    labels_filename = "labels_" + current_model_name.split('trained_model_')[1].replace('.pth', '.json')
    labels_path = os.path.join(app.config['MODEL_FOLDER'], labels_filename)

    if not os.path.exists(model_path) or not os.path.exists(labels_path):
        socketio.emit('status_update', {'msg': f'错误：找不到模型文件 {current_model_name} 或其对应的标签文件。'})
        get_latest_model() # try to reset to the newest valid model
        return

    try:
        with open(labels_path, 'r', encoding='utf-8') as f:
            labels_list = json.load(f)
        num_classes = len(labels_list)
    except Exception as e:
        socketio.emit('status_update', {'msg': f'错误：无法读取标签文件。 {e}'})
        return

    # 1. Data preparation
    # NOTE(review): reuses the *training* transform (Resize+CenterCrop). It is
    # deterministic, but is_training=False was presumably intended — confirm.
    transform = get_model_transform(is_training=True)

    try:
        dataset = ImageFromURLDataset(json_path=json_path, transform=transform)
        if num_classes != dataset.get_num_classes():
             socketio.emit('status_update', {'msg': '错误：验证数据的类别数量与已训练模型的类别数量不匹配。'})
             return
        num_workers = get_optimal_workers()
        dataloader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=num_workers)
    except Exception as e:
        socketio.emit('status_update', {'msg': f'错误：无法加载验证数据集。 {e}'})
        return

    # 2. Model loading
    device = get_device()

    try:
        model = create_resnext_model(num_classes, device)
        model.load_state_dict(torch.load(model_path, map_location=device))
        model.eval()
    except Exception as e:
        socketio.emit('status_update', {'msg': f'错误：无法加载模型。 {e}'})
        return

    criterion = nn.CrossEntropyLoss()
    
    socketio.emit('status_update', {'msg': f'验证开始！设备: {device}'})
    # Tell the front end which model is being evaluated.
    socketio.emit('current_model_info', {'model_name': current_model_name})
    
    running_loss = 0.0
    running_corrects = 0
    total_samples = 0
    total_steps = len(dataloader)

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloader):
            try:
                inputs = inputs.to(device)
                labels = labels.to(device)

                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

                # Accumulate sample-weighted loss and correct predictions.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                total_samples += inputs.size(0)

                current_loss = running_loss / total_samples
                current_acc = running_corrects.double() / total_samples * 100
                
                socketio.emit('update_validation_stats', {
                    'step': i + 1,
                    'total_steps': total_steps,
                    'loss': current_loss,
                    'accuracy': current_acc.item()
                })
                socketio.sleep(0.01)
                
            except Exception as e:
                socketio.emit('status_update', {'msg': f'验证步骤出错: {e}'})
                continue

    # Release accelerator memory.
    if device.type == 'cuda':
        torch.cuda.empty_cache()
    elif device.type == 'mps':
        # Older torch builds expose MPS without empty_cache; guard for it.
        if hasattr(torch, 'mps') and hasattr(torch.mps, 'empty_cache'):
            torch.mps.empty_cache()

    final_loss = running_loss / len(dataset)
    final_acc = running_corrects.double() / len(dataset) * 100
    
    socketio.emit('validation_complete', {
        'msg': f'验证完成 | 总损失: {final_loss:.4f} | 总准确率: {final_acc:.2f}%'
    })


# ----------------- 数据库操作和JSON生成功能 -----------------
def get_database_connection():
    """Open a MySQL connection using the settings on ``Config``.

    Returns:
        (connection, None) on success, or (None, error_message) when the
        connector library is missing or the connection attempt fails.
    """
    if not MYSQL_AVAILABLE:
        return None, "MySQL连接库未安装，请运行: pip install mysql-connector-python"

    try:
        conn = mysql.connector.connect(
            host=Config.MYSQL_HOST,
            port=Config.MYSQL_PORT,
            user=Config.MYSQL_USERNAME,
            password=Config.MYSQL_PASSWORD,
            database=Config.MYSQL_DATABASE,
        )
    except Error as e:
        return None, f"数据库连接失败: {e}"
    return conn, None

def execute_sql_and_generate_json(sql_query):
    """Run `sql_query` and write the rows out as a trans-style JSON file.

    Each row becomes a ``{"image": {...}}`` record matching the format
    consumed by ImageFromURLDataset. On success the module-level
    ``generated_json_filename`` is updated for the download endpoint.

    NOTE(security): the query string is executed verbatim (admin tooling
    by design); never expose this to untrusted callers.

    Args:
        sql_query: SQL text to execute against the configured database.

    Returns:
        (formatted_data, None) on success, else (None, error_message).
    """
    global generated_json_filename
    
    connection, error = get_database_connection()
    if error:
        return None, error
    
    # Fix: `cursor` was previously referenced in `finally` even when
    # connection.cursor() itself raised, producing a NameError that
    # masked the original exception.
    cursor = None
    try:
        cursor = connection.cursor(dictionary=True)
        cursor.execute(sql_query)
        results = cursor.fetchall()
        
        if not results:
            return None, "查询结果为空"
        
        # Convert rows into the trans.json record layout.
        formatted_data = []
        for row in results:
            # Build the image URL; material_file_key is assumed to be a
            # path/identifier under the image host -- TODO confirm.
            image_url = f"https://your-domain.com/images/{row.get('material_file_key', '')}"
            
            formatted_item = {
                "image": {
                    "id": str(row.get('id', '')),
                    "url": image_url,
                    "point_type": str(row.get('point_type', '')),
                    "point_type_name": str(row.get('point_type_name', '')),
                    "point_code": str(row.get('point_code', '')),
                    "point_name": str(row.get('point_name', '')),
                    "material_file_key": str(row.get('material_file_key', '')),
                    "material_file_name": str(row.get('material_file_name', ''))
                }
            }
            formatted_data.append(formatted_item)
        
        # Timestamped output name, e.g. trans-20240101_120000.json
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"trans-{timestamp}.json"
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(formatted_data, f, ensure_ascii=False, indent=2)
        
        generated_json_filename = filename
        return formatted_data, None
        
    except Error as e:
        return None, f"SQL执行失败: {e}"
    except Exception as e:
        return None, f"处理数据时出错: {e}"
    finally:
        # Close the cursor only if it was actually created.
        if cursor is not None:
            try:
                cursor.close()
            except Error:
                pass
        if connection and connection.is_connected():
            connection.close()

# ----------------- 图片下载工具函数 -----------------
def download_image_from_url(image_url, max_retries=3, timeout=15):
    """Fetch an image, dispatching to the TOS client or plain HTTP.

    URLs that look like Volcengine TOS objects (contain 'tos-cn-',
    'ivolces.com', or '2025' -- a date-prefixed object key) go through
    the TOS client; everything else is fetched over HTTP.

    Args:
        image_url: Source URL of the image.
        max_retries: Maximum download attempts.
        timeout: Per-request timeout in seconds (HTTP path only).

    Returns:
        PIL.Image: the downloaded image.

    Raises:
        Exception: when the download ultimately fails.
    """
    tos_markers = ('tos-cn-', 'ivolces.com', '2025')
    if any(marker in image_url for marker in tos_markers):
        return download_image_from_tos(image_url, max_retries)
    return download_image_from_http(image_url, max_retries, timeout)

def download_image_from_http(image_url, max_retries=3, timeout=15):
    """Download an image over HTTP(S) with simple retry.

    Args:
        image_url: URL to fetch.
        max_retries: Number of attempts. Network errors are retried;
            non-network errors (e.g. corrupt image bytes) propagate
            immediately, matching the previous behavior.
        timeout: Per-request timeout in seconds.

    Returns:
        PIL.Image in RGB mode.

    Raises:
        ValueError: if max_retries < 1 (the old code silently returned None).
        requests.exceptions.RequestException: after the last failed attempt.
    """
    if max_retries < 1:
        raise ValueError("max_retries must be >= 1")
    last_error = None
    for attempt in range(max_retries):
        try:
            response = requests.get(image_url, timeout=timeout)
            response.raise_for_status()
            return Image.open(BytesIO(response.content)).convert('RGB')
        except requests.exceptions.RequestException as e:
            last_error = e
    # All attempts exhausted: surface the final network error.
    raise last_error

def download_image_from_tos(image_url, max_retries=3):
    """Download an image through the Volcengine TOS client.

    Supports ``tos://bucket/object_key`` and
    ``https://tos-cn-<region>.ivolces.com/bucket/object_key`` URLs.
    Falls back to a plain HTTP download whenever the TOS library is
    missing, the URL cannot be parsed, or the final TOS attempt fails
    with an unknown (non-TOS) error.

    Args:
        image_url: TOS-style URL of the image.
        max_retries: Attempts against the TOS API.

    Returns:
        PIL.Image in RGB mode.
    """
    if not TOS_AVAILABLE:
        # TOS SDK not installed: degrade to HTTP.
        return download_image_from_http(image_url, max_retries, 15)
    
    try:
        # Endpoint/region inferred from the URL when possible; None means
        # "use the configured defaults" further below.
        endpoint = None
        region = None

        if image_url.startswith('tos://'):
            # tos://dcp-upload-pro/2025-03-22/filename.jpg
            url_parts = image_url.replace('tos://', '').split('/', 1)
            bucket_name = url_parts[0]
            object_key = url_parts[1] if len(url_parts) > 1 else ''
        elif 'ivolces.com' in image_url:
            # https://tos-cn-guangzhou.ivolces.com/bucket/object_key
            from urllib.parse import urlparse
            parsed = urlparse(image_url)
            path_parts = parsed.path.lstrip('/').split('/', 1)
            bucket_name = path_parts[0] if path_parts else ''
            object_key = path_parts[1] if len(path_parts) > 1 else ''
            
            # Infer the region from the host name; unknown hosts fall
            # through to the configured defaults below.
            if 'tos-cn-guangzhou' in parsed.hostname:
                region = 'cn-guangzhou'
                endpoint = 'tos-cn-guangzhou.ivolces.com'
            elif 'tos-cn-beijing' in parsed.hostname:
                region = 'cn-beijing'
                endpoint = 'tos-cn-beijing.ivolces.com'
        else:
            # Not a TOS URL after all: degrade to HTTP.
            return download_image_from_http(image_url, max_retries, 15)
        
        # Credentials from config. SECURITY: the hard-coded fallback keys
        # below should be removed and injected via config/environment.
        ak = getattr(Config, 'TOS_ACCESS_KEY', 'AKLTY2FmZmI5Y2JhYmZmNDc3MmJmOTU3Yjg5Mzk5Y2U2YWM')
        sk = getattr(Config, 'TOS_SECRET_KEY', 'WVRVMVpEVmtOV1V6TWpRME5HWmxZVGxoTURCbU9ISXpNemRtTURNd1lUUQ==')
        # Fix: the endpoint/region inferred from the URL above were
        # previously dead code — unconditionally overwritten here. Now the
        # config values act only as a fallback.
        if endpoint is None:
            endpoint = getattr(Config, 'TOS_ENDPOINT', 'tos-cn-guangzhou.ivolces.com')
        if region is None:
            region = getattr(Config, 'TOS_REGION', 'cn-guangzhou')
        
        # Create the TOS client.
        client = tos.TosClientV2(ak, sk, endpoint, region)
        
        # Download the object into memory, with retries.
        for attempt in range(max_retries):
            try:
                result = client.get_object(bucket_name, object_key)
                image_data = result.read()
                
                # Decode into a PIL image.
                return Image.open(BytesIO(image_data)).convert('RGB')
                
            except tos.exceptions.TosClientError as e:
                print(f'TOS客户端错误 (尝试 {attempt + 1}/{max_retries}): {e.message}')
                if attempt == max_retries - 1:
                    raise Exception(f'TOS下载失败: {e.message}')
                    
            except tos.exceptions.TosServerError as e:
                print(f'TOS服务器错误 (尝试 {attempt + 1}/{max_retries}): {e.code} - {e.message}')
                if attempt == max_retries - 1:
                    raise Exception(f'TOS服务器错误: {e.code} - {e.message}')
                    
            except Exception as e:
                print(f'TOS未知错误 (尝试 {attempt + 1}/{max_retries}): {e}')
                if attempt == max_retries - 1:
                    # Final fallback: one plain HTTP attempt.
                    return download_image_from_http(image_url, 1, 15)
                    
    except Exception as e:
        print(f'TOS URL解析失败: {e}，降级到HTTP下载')
        return download_image_from_http(image_url, max_retries, 15)

def cleanup_temp_files(temp_dir='./temp/', max_age_minutes=5):
    """Delete *.jpg files in `temp_dir` older than `max_age_minutes`.

    Tolerates a missing directory and per-file deletion failures; all
    errors are printed rather than raised.
    """
    try:
        import glob
        import time

        if not os.path.exists(temp_dir):
            return

        # Files modified before this instant are considered stale.
        cutoff = time.time() - max_age_minutes * 60

        for candidate in glob.glob(os.path.join(temp_dir, '*.jpg')):
            try:
                if os.path.getmtime(candidate) < cutoff:
                    os.remove(candidate)
                    print(f"已删除过期临时文件: {candidate}")
            except OSError as e:
                print(f"删除临时文件失败: {candidate}, 错误: {e}")

    except Exception as e:
        print(f"清理临时文件时出错: {e}")

# ----------------- Flask 路由和 SocketIO 事件处理 -----------------
@app.route('/')
def index():
    """Serve the single-page UI."""
    return render_template('index.html')

@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept a JSON dataset upload and store it under a purpose-based name.

    Validates presence, the .json extension, size (max 10MB) and JSON
    syntax, then saves the payload as uploaded_training_data.json,
    uploaded_validation_data.json, or uploaded_data.json.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    # Extension check.
    if not file.filename.endswith('.json'):
        return jsonify({'error': 'Invalid file type'}), 400

    # Size check (10MB cap) via seek/tell so nothing is read into memory yet.
    max_file_size = 10 * 1024 * 1024
    file.seek(0, os.SEEK_END)
    file_size = file.tell()
    file.seek(0)

    if file_size > max_file_size:
        return jsonify({'error': 'JSON文件过大，请确保文件小于10MB。'}), 400

    try:
        # Parse once to reject malformed JSON early.
        content = file.read()
        json.loads(content.decode('utf-8'))
        file.seek(0)  # rewind so save() writes the full payload

        # The target filename depends on which feature page uploaded it.
        purpose_to_name = {
            'training': "uploaded_training_data.json",
            'validation': "uploaded_validation_data.json",
        }
        file_purpose = request.form.get('purpose', 'training')
        filename = purpose_to_name.get(file_purpose, "uploaded_data.json")

        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(filepath)
        return jsonify({'success': 'File uploaded successfully', 'filename': filename})

    except json.JSONDecodeError:
        return jsonify({'error': '无效的JSON文件格式。'}), 400
    except Exception as e:
        return jsonify({'error': f'上传文件时出错: {e}'}), 500

@app.route('/upload_model', methods=['POST'])
def upload_model():
    """Accept a ZIP containing trained_model_*.pth + labels_*.json.

    Validates size, path safety and archive contents BEFORE extracting
    (the previous version extracted first, littering MODEL_FOLDER on
    invalid uploads), then extracts and makes the uploaded model current.
    """
    global current_model_name
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400
    if file and file.filename.endswith('.zip'):
        try:
            # Cap the ZIP size to keep memory usage bounded.
            max_file_size = 100 * 1024 * 1024  # 100MB
            file.seek(0, os.SEEK_END)
            file_size = file.tell()
            file.seek(0)
            
            if file_size > max_file_size:
                return jsonify({'error': 'ZIP文件过大，请确保模型文件小于100MB。'}), 400
                
            with zipfile.ZipFile(file, 'r') as zip_ref:
                model_filename = None
                labels_filename = None
                for name in zip_ref.namelist():
                    # Reject absolute paths and traversal attempts.
                    if os.path.isabs(name) or ".." in name:
                        return jsonify({'error': 'ZIP文件包含不安全的路径。'}), 400
                    if name.startswith('trained_model_') and name.endswith('.pth'):
                        model_filename = name
                    elif name.startswith('labels_') and name.endswith('.json'):
                        labels_filename = name
                
                if not (model_filename and labels_filename):
                    return jsonify({'error': 'ZIP文件中未找到有效的模型文件或标签文件。'}), 400
                
                # Contents validated: extract and activate the model.
                zip_ref.extractall(app.config['MODEL_FOLDER'])
                current_model_name = model_filename
                return jsonify({'success': '模型上传并设置成功', 'model_name': current_model_name})
        except zipfile.BadZipFile:
            return jsonify({'error': '无效的ZIP文件格式。'}), 400
        except Exception as e:
            return jsonify({'error': f'处理ZIP文件时出错: {e}'}), 500
    return jsonify({'error': '无效的文件类型，请上传ZIP文件。'}), 400

@app.route('/download_model')
def download_model():
    """Return the current model + labels bundled into an in-memory ZIP."""
    global current_model_name
    if not current_model_name:
        return "没有可供下载的模型。", 404

    model_path = os.path.join(app.config['MODEL_FOLDER'], current_model_name)
    # The labels file shares the model's timestamp: labels_<ts>.json
    labels_filename = "labels_" + current_model_name.split('trained_model_')[1].replace('.pth', '.json')
    labels_path = os.path.join(app.config['MODEL_FOLDER'], labels_filename)

    if not os.path.exists(model_path) or not os.path.exists(labels_path):
        return "模型或标签文件丢失。", 404

    try:
        # Build the archive entirely in memory.
        zip_buffer = BytesIO()
        with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as bundle:
            for member in (model_path, labels_path):
                bundle.write(member, os.path.basename(member))

        zip_buffer.seek(0)
        suffix = current_model_name.split('trained_model_')[1].replace('.pth', '.zip')
        zip_filename = f"model_package_{suffix}"

        headers = {
            'Content-Type': 'application/zip',
            'Content-Disposition': f'attachment; filename={zip_filename}'
        }
        return zip_buffer.getvalue(), 200, headers
    except Exception as e:
        return f"打包模型时出错: {e}", 500


@app.route('/predict', methods=['POST'])
def predict():
    """Classify one uploaded image with the current model.

    Loads the model and its label list from disk on every request, runs
    a single forward pass using the inference transform, and returns
    {'prediction': <label>}.
    """
    global current_model_name
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    if not current_model_name:
        return jsonify({'error': '未找到已训练的模型，请先训练或上传。'}), 404

    model_path = os.path.join(app.config['MODEL_FOLDER'], current_model_name)
    # The labels file shares the model's timestamp: labels_<ts>.json
    labels_filename = "labels_" + current_model_name.split('trained_model_')[1].replace('.pth', '.json')
    labels_path = os.path.join(app.config['MODEL_FOLDER'], labels_filename)

    if not os.path.exists(model_path) or not os.path.exists(labels_path):
        return jsonify({'error': '当前模型文件或标签文件丢失。'}), 404

    try:
        with open(labels_path, 'r', encoding='utf-8') as f:
            labels_list = json.load(f)

        device = get_device()

        model = create_resnext_model(len(labels_list), device)
        model.load_state_dict(torch.load(model_path, map_location=device))
        model.eval()

        # Inference-time preprocessing (straight 224x224 resize).
        transform = get_model_transform(is_training=False)
        image = Image.open(file.stream).convert('RGB')
        batch = transform(image).unsqueeze(0).to(device)

        with torch.no_grad():
            logits = model(batch)
            prediction_idx = torch.max(logits, 1)[1].item()

        # Release accelerator memory, if any.
        if device.type == 'cuda':
            torch.cuda.empty_cache()
        elif device.type == 'mps':
            # Older torch builds expose MPS without empty_cache; guard for it.
            if hasattr(torch, 'mps') and hasattr(torch.mps, 'empty_cache'):
                torch.mps.empty_cache()

        return jsonify({'prediction': labels_list[prediction_idx]})

    except Exception as e:
        return jsonify({'error': f'推理时发生错误: {e}'}), 500

@app.route('/execute_sql', methods=['POST'])
def execute_sql():
    """Execute a user-supplied SQL query and materialize it as JSON.

    Expects a JSON body {"sql": "..."}. Returns the generated filename,
    the record count and a preview of up to 3 records.
    """
    try:
        # silent=True fix: a missing/invalid JSON body used to raise an
        # AttributeError and come back as a generic 500; now it falls
        # through to the empty-SQL 400 below.
        data = request.get_json(silent=True) or {}
        sql_query = data.get('sql', '').strip()
        
        if not sql_query:
            return jsonify({'error': 'SQL查询语句不能为空'}), 400
        
        result_data, error = execute_sql_and_generate_json(sql_query)
        
        if error:
            return jsonify({'error': error}), 500
        
        # Preview only the first few records.
        preview_data = result_data[:3]
        
        return jsonify({
            'success': True,
            'filename': generated_json_filename,
            'total_records': len(result_data),
            'preview': preview_data
        })
        
    except Exception as e:
        return jsonify({'error': f'执行SQL时出错: {e}'}), 500

@app.route('/download_generated_json')
def download_generated_json():
    """Send the most recently generated trans-*.json as a file download."""
    global generated_json_filename

    filename = generated_json_filename
    if not filename:
        return "没有可下载的JSON文件", 404

    target = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    if not os.path.exists(target):
        return "JSON文件不存在", 404

    try:
        return send_file(target,
                         as_attachment=True,
                         download_name=filename,
                         mimetype='application/json')
    except Exception as e:
        return f"下载文件时出错: {e}", 500

@socketio.on('connect')
def handle_connect():
    """Log each new Socket.IO client connection."""
    print('客户端已连接')

@socketio.on('start_training')
def handle_start_training(data):
    """Socket.IO entry point: start training in a background task.

    Expects data = {'filename': <name under UPLOAD_FOLDER>}.
    """
    filename = data.get('filename')
    if not filename:
        socketio.emit('status_update', {'msg': '错误：未提供文件名。'})
        return
        
    json_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    if not os.path.exists(json_path):
        # Fix: the old f-string had no placeholder and emitted the literal
        # text "(unknown)" instead of the offending filename.
        socketio.emit('status_update', {'msg': f'错误：文件 {filename} 未找到。'})
        return
        
    # Run training off the request thread.
    socketio.start_background_task(run_training, json_path)

@socketio.on('start_validation')
def handle_start_validation(data):
    """Socket.IO entry point: start validation in a background task.

    Expects data = {'filename': <name under UPLOAD_FOLDER>}.
    """
    filename = data.get('filename')
    if not filename:
        socketio.emit('status_update', {'msg': '错误：未提供用于验证的文件名。'})
        return
        
    json_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    if not os.path.exists(json_path):
        # Fix: the old f-string had no placeholder and emitted the literal
        # text "(unknown)" instead of the offending filename.
        socketio.emit('status_update', {'msg': f'错误：文件 {filename} 未找到。'})
        return
        
    # Run validation off the request thread.
    socketio.start_background_task(run_validation, json_path)


if __name__ == '__main__':
    # --- macOS-specific setup; must run before other libraries spin up ---
    if platform.system() == 'Darwin':  # macOS
        import multiprocessing
        # Required to avoid Metal-related crashes after fork().
        os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
        
        # Force the 'spawn' start method — the safest option when mixing
        # PyTorch and multiprocessing on macOS.
        try:
            if multiprocessing.get_start_method() != 'spawn':
                multiprocessing.set_start_method('spawn', force=True)
            print("✅ macOS: Multiprocessing start method set to 'spawn'.")
        except RuntimeError:
            # Already set elsewhere — safe to ignore.
            print("ℹ️ macOS: Multiprocessing start method was already set.")
            pass

    # --- Register the atexit cleanup hook and signal handlers ---
    atexit.register(cleanup_resources)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    # --- Pick up the most recent model on startup ---
    get_latest_model()
    
    # --- Print the startup banner ---
    device = get_device()
    print(f"🚀 ResNeXt 训练平台启动")
    print(f"💻 运行平台: {platform.system()} {platform.machine()}")
    print(f"🔧 计算设备: {device}")
    print(f"👥 Worker 数量: {get_optimal_workers()}")
    
    if device.type == 'mps':
        print("🍎 检测到 Apple Silicon，使用 MPS 加速")
    elif device.type == 'cuda':
        print(f"🎮 检测到 CUDA，GPU: {torch.cuda.get_device_name()}")
    else:
        print("💻 使用 CPU 计算")
    
    try:
        # --- Start the server ---
        # Production defaults: debug and the reloader are disabled.
        # Set FLASK_ENV=development to enable debug mode temporarily.
        is_development = os.environ.get('FLASK_ENV', '').lower() == 'development'
        
        if is_development:
            print("🔧 开发模式：启用debug模式")
            debug_flag = True
            # The reloader forks; keep it off on macOS to avoid fork issues.
            use_reloader_flag = False if platform.system() == 'Darwin' else True
        else:
            print("🔒 生产模式：禁用debug和reloader")
            debug_flag = False
            use_reloader_flag = False
        
        print(f"🔧 服务器设置: debug={debug_flag}, use_reloader={use_reloader_flag}")
        
        socketio.run(app, host='0.0.0.0', port=5001, debug=debug_flag, use_reloader=use_reloader_flag)

    except KeyboardInterrupt:
        print('\n🛑 ResNeXt 训练平台服务器已退出。')
    finally:
        cleanup_resources()

