from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import uuid
from werkzeug.utils import secure_filename
from datetime import datetime
import re
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
import warnings
import threading
import torch
import clip
import numpy as np
import faiss
from PIL import Image
import json
import sys
import base64
from openai import OpenAI
# 补充缺失的import
import subprocess
import time

# ================== Flask application setup ==================
app = Flask(__name__)
# CORS: only the local Vite dev frontend may call the /api/* routes.
CORS(app, resources={r"/api/*": {"origins": "http://localhost:5173"}})

# Upload configuration.
# NOTE(review): UPLOAD_FOLDER is a relative path, so it resolves against the
# process's current working directory — confirm the CWD never changes after
# startup (see the subprocess launch in __main__).
UPLOAD_FOLDER = 'uploads'
# Directory for images submitted via the /api/learn endpoint.
LEARN_PIC_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LearnPic")
# Persistent auto-increment counter file used by get_next_id().
COUNT_ID_FILE = os.path.join(LEARN_PIC_DIR, "CountID.txt")
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['LEARN_PIC_DIR'] = LEARN_PIC_DIR
app.config['MAX_CONTENT_LENGTH'] = 20 * 1024 * 1024  # 20 MB request size limit

# Make sure the upload directories exist before the first request arrives.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(LEARN_PIC_DIR, exist_ok=True)

# ================== Configuration ==================
# Vector database locations.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
VECTORDB_DIR = os.path.join(BASE_DIR, "VectorDB")
# Local path of the text embedding model (machine-specific; no network download).
MODEL_PATH = "E:/BAAIbge-small-zh-v1.5"
POWERLINE_VECTORDB_DIR = os.path.join(VECTORDB_DIR, "PowerLineVectorDB")

# ================== Global state ==================
# Resources pre-loaded once by initialize_resources().
text_vector_dbs = {}  # name -> Chroma instance, one per text vector DB
image_index = None  # FAISS index over image embeddings
image_metadata = None  # list of metadata dicts, parallel to the FAISS index
clip_model = None  # CLIP model used for image embeddings
preprocess = None  # CLIP image preprocessing transform
embeddings = None  # HuggingFace text embedding model

# Lock serializing incremental learning (/api/learn) against queries (/api/message).
db_lock = threading.Lock()

# ================== 初始化函数 ==================
def initialize_resources():
    """Load all models and vector databases at application startup.

    Populates the module-level globals (text_vector_dbs, image_index,
    image_metadata, clip_model, preprocess, embeddings).

    Returns:
        bool: True when every resource loaded successfully, False otherwise.
        A failure of an individual text vector DB is logged and skipped;
        any other failure aborts with False.
    """
    global text_vector_dbs, image_index, image_metadata, clip_model, preprocess, embeddings

    print("🚀🚀🚀🚀 开始加载资源...")

    # 1. Text embedding model (loaded from the local MODEL_PATH, offline).
    try:
        embeddings = HuggingFaceEmbeddings(
            model_name=MODEL_PATH,
            model_kwargs={"local_files_only": True}
        )
        print("✅ 文本嵌入模型加载成功")
    except Exception as e:
        print(f"❌❌❌❌ 文本嵌入模型加载失败: {str(e)}")
        return False

    # 2. Text vector databases: every "map_*" subdirectory of VECTORDB_DIR
    #    is treated as one Chroma collection of the same name.
    try:
        vector_dbs = []
        for item in os.listdir(VECTORDB_DIR):
            item_path = os.path.join(VECTORDB_DIR, item)
            if os.path.isdir(item_path) and item.startswith("map_"):
                vector_dbs.append(item)

        print(f"📚📚📚📚 发现 {len(vector_dbs)} 个文本向量数据库")

        # Open one Chroma instance per collection; a single failure is
        # logged and skipped rather than aborting startup.
        for collection_name in vector_dbs:
            try:
                vector_db_path = os.path.join(VECTORDB_DIR, collection_name)
                text_vector_dbs[collection_name] = Chroma(
                    persist_directory=vector_db_path,
                    embedding_function=embeddings,
                    collection_name=collection_name
                )
                print(f"  已加载: {collection_name}")
            except Exception as e:
                print(f"❌❌❌❌ 加载文本向量库 {collection_name} 时出错: {str(e)}")

        print(f"✅ 成功加载 {len(text_vector_dbs)}/{len(vector_dbs)} 个文本向量数据库")
    except Exception as e:
        print(f"❌❌❌❌ 加载文本向量数据库时出错: {str(e)}")
        return False

    # 3. Image vector database: a FAISS index plus a parallel metadata list.
    try:
        index_path = os.path.join(POWERLINE_VECTORDB_DIR, "image_index.faiss")
        if os.path.exists(index_path):
            image_index = faiss.read_index(index_path)
            print("✅ 图片向量索引加载成功")
        else:
            print(f"❌❌❌❌ 图片向量索引文件不存在: {index_path}")
            return False

        # metadata.json holds one record per indexed vector, in index order.
        metadata_path = os.path.join(POWERLINE_VECTORDB_DIR, "metadata.json")
        if os.path.exists(metadata_path):
            with open(metadata_path, "r", encoding="utf-8") as f:
                image_metadata = json.load(f)
            print(f"✅ 图片元数据加载成功 ({len(image_metadata)} 条记录)")
        else:
            print(f"❌❌❌❌ 图片元数据文件不存在: {metadata_path}")
            return False
    except Exception as e:
        print(f"❌❌❌❌ 加载图片向量数据库时出错: {str(e)}")
        return False

    # 4. CLIP model for image embedding (GPU when available, else CPU).
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        clip_model, preprocess = clip.load("ViT-B/32", device=device)
        print(f"✅ CLIP模型加载成功 (设备: {device})")
    except Exception as e:
        print(f"❌❌❌❌ CLIP模型加载失败: {str(e)}")
        return False

    print("🎉🎉🎉🎉 所有资源加载完成")
    return True

# ================== 辅助函数 ==================
def allowed_file(filename):
    """Return True if *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS

def query_text_vector_dbs(question, k=3):
    """Run a similarity search for *question* against every loaded text vector DB.

    Args:
        question: Natural-language query string.
        k: Number of nearest documents to fetch from each database.

    Returns:
        Dict mapping database name to a list of result dicts with keys
        "内容" (content, whitespace-collapsed and truncated to 500 chars)
        and "来源" (source file basename). A database whose query fails
        maps to a single [{"error": ...}] entry instead.
    """
    print(f"🔍🔍🔍🔍 开始文本查询: {question}")
    results = {}

    # Query each pre-loaded vector database in turn.
    for db_name, vector_db in text_vector_dbs.items():
        try:
            print(f"  正在查询: {db_name}")

            docs = vector_db.similarity_search(question, k=k)

            # Format results for display / downstream prompting.
            formatted_results = []
            for doc in docs:  # fixed: enumerate() index was computed but never used
                if isinstance(doc.page_content, str):
                    # Collapse runs of whitespace so snippets print on one line.
                    clean_content = re.sub(r'\s+', ' ', doc.page_content).strip()
                else:
                    clean_content = str(doc.page_content)

                # Keep prompt/log size bounded.
                if len(clean_content) > 500:
                    clean_content = clean_content[:500] + "..."

                source = doc.metadata.get("source", "未知来源")
                if isinstance(source, str):
                    source = os.path.basename(source)

                formatted_results.append({
                    "内容": clean_content,
                    "来源": source
                })

            results[db_name] = formatted_results

        except Exception as e:
            print(f"❌❌❌❌ 查询文本向量库 {db_name} 时出错: {str(e)}")
            results[db_name] = [{"error": f"查询错误: {str(e)}"}]

    return results

def search_similar_images(query_image_path, top_k=3):
    """Find the *top_k* images in the FAISS index most similar to the given image.

    Args:
        query_image_path: Path of the image to embed and search with.
        top_k: Maximum number of neighbours to return.

    Returns:
        List of dicts with "rank", "distance" (inner-product similarity),
        "id", "image_path" and "description"; empty list when the database
        is not loaded or the search fails.
    """
    # Identity check: a FAISS index object's truthiness is not a reliable
    # "loaded" signal, and an empty metadata list must also block the search.
    if image_index is None or not image_metadata:
        print("⚠️ 图片向量数据库未加载，无法查询")
        return []

    try:
        # Embed the query image with CLIP.
        img = Image.open(query_image_path)
        img = preprocess(img).unsqueeze(0)

        # Run on whichever device the CLIP model lives on.
        device = next(clip_model.parameters()).device
        img = img.to(device)

        with torch.no_grad():
            query_features = clip_model.encode_image(img).cpu().numpy().astype('float32')

        # L2-normalize so inner-product search behaves like cosine similarity.
        faiss.normalize_L2(query_features)

        # Never ask FAISS for more neighbours than the index holds.
        top_k = min(top_k, image_index.ntotal)
        distances, indices = image_index.search(query_features, top_k)

        results = []
        for i in range(top_k):
            idx = int(indices[0][i])
            if idx < 0:
                # FAISS pads missing neighbours with -1; skip them.
                continue
            item = image_metadata[idx]
            results.append({
                "rank": len(results) + 1,
                # Cast numpy scalar to a plain float for safe formatting/serialization.
                "distance": float(distances[0][i]),
                "id": item["id"],
                "image_path": item["image_path"],
                "description": item["description"]
            })

        return results

    except Exception as e:
        print(f"❌❌❌❌ 图片查询失败: {str(e)}")
        return []

def format_results_for_print(text_results, image_results):
    """Render the text and image retrieval results as one human-readable summary string."""
    divider = "=" * 80
    thin_divider = "-" * 60
    parts = ["\n" + divider, "\n📊📊📊📊 查询结果汇总\n", divider + "\n"]

    # Text retrieval section.
    if text_results:
        parts.append("\n📝📝📝📝 文本查询结果:\n")
        parts.append(thin_divider + "\n")

        for db_name, hits in text_results.items():
            parts.append(f"\n🔹🔹🔹🔹 数据库: {db_name}\n")

            for num, hit in enumerate(hits, 1):
                if "error" in hit:
                    parts.append(f"   {num}. ❌❌❌❌ 错误: {hit['error']}\n")
                else:
                    parts.append(f"   {num}. {hit['内容']}\n")
                    parts.append(f"      📍📍📍 来源: {hit['来源']}\n")
    else:
        parts.append("\n📝📝📝📝 无文本查询结果\n")

    # Image retrieval section.
    if image_results:
        parts.append("\n🖼🖼🖼🖼🖼🖼🖼🖼🖼️ 图片查询结果:\n")
        parts.append(thin_divider + "\n")

        for hit in image_results:
            parts.append(f"\n🔸🔸🔸🔸 排名 #{hit['rank']} (相似度: {hit['distance']:.4f})\n")
            parts.append(f"   🆔🆔🆔🆔🆔🆔🆔🆔🆔 ID: {hit['id']}\n")
            parts.append(f"   📋📋📋📋 描述: {hit['description']}\n")
            parts.append(f"   📁📁📁📁 路径: {hit['image_path']}\n")
    else:
        parts.append("\n🖼🖼🖼🖼🖼🖼🖼🖼🖼️ 无图片查询结果\n")

    parts.append("\n" + divider + "\n")
    return "".join(parts)

def encode_image(image_path):
    """Read the file at *image_path* and return its contents as a base64 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")

def prepare_vlm_input(user_question, text_results, image_results):
    """Build the plain-text prompt for the VLM.

    Combines the user question with text-retrieval snippets (content only,
    sources omitted) and image-retrieval descriptions (condensed to the
    foreign-object summary when the description matches the known pattern).
    """
    prompt = f"用户提问: {user_question}\n\n"

    # Text-retrieval section: numbered content lines, sources dropped.
    if text_results:
        text_lines = ["📚📚 文本检索结果:\n"]
        for hits in text_results.values():
            for num, hit in enumerate(hits, 1):
                if "error" in hit:
                    continue  # error entries are skipped but keep their number
                text_lines.append(f"{num}. {hit['内容']}\n")
        prompt += "".join(text_lines) + "\n"

    # Image-retrieval section: condensed foreign-object info when parseable.
    if image_results:
        image_lines = ["🖼🖼🖼️ 图片检索结果:\n"]
        for num, hit in enumerate(image_results, 1):
            found = re.search(r"异物检测:\s*(是|否)[\s.,]*类型:\s*([\w]+)", hit["description"])
            if found:
                image_lines.append(f"{num}. 异物检测: {found.group(1)}, 类型: {found.group(2)}\n")
            else:
                image_lines.append(f"{num}. {hit['description']}\n")
        prompt += "".join(image_lines)

    return prompt

def _image_mime_type(path):
    """Return a best-effort MIME type for *path* from its extension (JPEG default)."""
    lowered = path.lower()
    if lowered.endswith('.png'):
        return "image/png"
    if lowered.endswith('.gif'):
        return "image/gif"
    return "image/jpeg"

def call_vlm(user_question, text_results, image_results, query_image_path=None):
    """Send the question, retrieval context and images to the VLM and return its answer.

    The model's reasoning stream and final answer are echoed to stdout and
    appended to VLMResponse.txt (located next to this script) for auditing.

    Args:
        user_question: Question text shown to the model.
        text_results: Output of query_text_vector_dbs(), or None.
        image_results: Output of search_similar_images(), or None.
        query_image_path: Path of the user-uploaded image, or None.

    Returns:
        The model's answer text, or an error-description string on failure.
    """
    # SECURITY(review): prefer the DASHSCOPE_API_KEY environment variable;
    # the hard-coded key is kept only as a backward-compatible fallback and
    # should be rotated out of source control.
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-7884aef2857c4cd8bf8c51cc604eac0f"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    # Resolve the transcript file once so the success and failure paths below
    # write to the same absolute location (the failure path previously opened
    # a cwd-relative "VLMResponse.txt").
    script_dir = os.path.dirname(os.path.abspath(__file__))
    response_file = os.path.join(script_dir, "VLMResponse.txt")

    # Single user message whose content mixes text and image parts.
    messages = [{"role": "user", "content": []}]

    # Fixed answering instructions appended to every prompt.
    Tips = "(注意：当分析图片时,你可以说说1.是否有异物2.异物类型['kite','trash','nest','balloon','other']或者部件类型['均压环','复合绝缘子','挂点金具','玻璃绝缘子','连接金具','防震锤','other']3.位置，自然语言描述一下)"
    # Combine role instructions, the user question, the tips and retrieval context.
    vlm_input = prepare_vlm_input("你是一个电力检测AI,当发送图片要你识别时,你先做出判断是否存在异物，第一行回复必须为“无异物”或者“存在异物”,我会给你提供相似图片作为参考（相似图片不用分析说明）,识别结果以你的判断为准\n"+user_question+"\n"+Tips, text_results, image_results)
    messages[0]["content"].append({"type": "text", "text": vlm_input})

    # Attach images, each preceded by a caption stating its origin.
    images_added = 0

    # 1. The user's uploaded image, if any.
    if query_image_path:
        base64_image = encode_image(query_image_path)
        mime_type = _image_mime_type(query_image_path)

        messages[0]["content"].append({
            "type": "text",
            "text": f"📸📸 这是用户上传的原始图片"
        })
        messages[0]["content"].append({
            "type": "image_url",
            "image_url": {"url": f"data:{mime_type};base64,{base64_image}"}
        })
        images_added += 1

    # 2. Up to 3 retrieved similar images (skipping any whose file is missing).
    if image_results:
        for i, res in enumerate(image_results[:3]):
            image_path = res["image_path"]
            if os.path.exists(image_path):
                base64_image = encode_image(image_path)
                mime_type = _image_mime_type(image_path)

                messages[0]["content"].append({
                    "type": "text",
                    "text": f"🖼🖼🖼️ 这是检索到的相似图片 #{i + 1} (相似度: {res['distance']:.4f})\n"
                            f"描述: {res['description']}"
                })
                messages[0]["content"].append({
                    "type": "image_url",
                    "image_url": {"url": f"data:{mime_type};base64,{base64_image}"}
                })
                images_added += 1

    # Tell the model how many images it was given and from where.
    if images_added > 0:
        messages[0]["content"].append({
            "type": "text",
            "text": f"🔢🔢 总共提供了 {images_added} 张图片："
                    f"{'1张用户上传图片' if query_image_path else ''}"
                    f"{'和' if query_image_path and image_results else ''}"
                    f"{f'{min(3, len(image_results))}张相似图片' if image_results else ''}"
        })

    # Call the model, streaming reasoning and answer tokens as they arrive.
    try:
        print("\n" + "=" * 80)
        print("🤖🤖🤖🤖 调用VLM大模型...")

        # Append mode creates the file when missing, so no mode switch is needed.
        with open(response_file, "a", encoding="utf-8") as f:
            # Header: timestamp, question and (optionally) the uploaded image.
            f.write("\n" + "=" * 80 + "\n")
            f.write(f"📅 查询时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"📝 用户提问: {user_question}\n")
            if query_image_path:
                f.write(f"🖼️ 上传图片: {query_image_path}\n")
            f.write("-" * 80 + "\n")

            # Record the retrieval context, when present.
            if text_results:
                f.write("\n📚 文本检索结果:\n")
                for db_name, results in text_results.items():
                    f.write(f"  🔹 {db_name}:\n")
                    for i, res in enumerate(results, 1):
                        if "error" in res:
                            f.write(f"    ❌ 错误: {res['error']}\n")
                        else:
                            f.write(f"    {i}. {res['内容']}\n")
                            f.write(f"      来源: {res['来源']}\n")

            if image_results:
                f.write("\n🖼️ 图片检索结果:\n")
                for res in image_results:
                    f.write(f"  🔸 排名 #{res['rank']} (相似度: {res['distance']:.4f})\n")
                    f.write(f"    ID: {res['id']}\n")
                    f.write(f"    描述: {res['description']}\n")
                    f.write(f"    路径: {res['image_path']}\n")

            f.write("\n" + "=" * 80 + "\n")
            f.write("🤖 VLM思考过程:\n")

            completion = client.chat.completions.create(
                model="qvq-max",
                messages=messages,
                stream=True
            )

            reasoning_content = ""  # accumulated "thinking" tokens (logged only)
            answer_content = ""  # accumulated answer tokens (returned)
            is_answering = False

            print("\n" + "=" * 20 + "思考过程" + "=" * 20 + "\n")
            f.write("=" * 20 + "思考过程" + "=" * 20 + "\n")
            f.flush()  # make the section header visible immediately

            for chunk in completion:
                if not chunk.choices:
                    continue

                delta = chunk.choices[0].delta

                # Reasoning stream: echo to stdout and log to the transcript.
                if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
                    print(delta.reasoning_content, end='', flush=True)
                    reasoning_content += delta.reasoning_content
                    f.write(delta.reasoning_content)
                    f.flush()  # keep the transcript current while streaming
                elif delta.content:
                    # First answer token: emit the section separator exactly once.
                    if not is_answering:
                        print("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
                        f.write("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
                        is_answering = True

                    print(delta.content, end='', flush=True)
                    answer_content += delta.content
                    f.write(delta.content)
                    f.flush()

            print("\n" + "=" * 80 + "\n")
            f.write("\n" + "=" * 80 + "\n")
            f.write("✅ VLM响应记录完成\n")

        print(f"📝 VLM响应已记录到文件: {response_file}")
        return answer_content

    except Exception as e:
        print(f"❌❌❌❌ 调用VLM失败: {str(e)}")
        # Fixed: log to the absolute transcript path, not a cwd-relative file.
        with open(response_file, "a", encoding="utf-8") as f:
            f.write(f"\n❌ VLM调用失败: {str(e)}\n")
        return f"VLM处理失败: {str(e)}"

# ================== 增量学习功能函数 ==================
def get_next_id():
    """Return the next auto-increment image ID, persisted in COUNT_ID_FILE.

    The very first call seeds the counter file with 2000 and returns 2000;
    every subsequent call increments the stored value by one and returns it.
    """
    # First use: seed the counter file and hand out the base ID.
    if not os.path.exists(COUNT_ID_FILE):
        with open(COUNT_ID_FILE, "w") as fh:
            fh.write("2000")
        return 2000

    # Read the last allocated ID (an empty file falls back to the base value).
    with open(COUNT_ID_FILE, "r") as fh:
        raw = fh.read().strip()
    current = int(raw) if raw else 2000

    # Allocate and persist the next ID.
    allocated = current + 1
    with open(COUNT_ID_FILE, "w") as fh:
        fh.write(str(allocated))

    return allocated

def extract_qa_pairs(text_data):
    """Split *text_data* into (question, answer) tuples.

    Expects alternating question/answer lines.

    Raises:
        ValueError: When the number of lines is odd (unpaired question).
    """
    lines = text_data.strip().split('\n')

    # Questions and answers must come in pairs.
    if len(lines) % 2:
        raise ValueError("文本格式错误：问题与回答应该成对出现")

    stripped = [line.strip() for line in lines]
    # Even positions are questions, odd positions are their answers.
    return list(zip(stripped[0::2], stripped[1::2]))

def build_metadata_entry(image_path, qa_pairs, image_id):
    """Build the metadata record stored for one learned image.

    Args:
        image_path: Path of the saved image file.
        qa_pairs: List of (question, answer) tuples; at least 3 required
            (foreign-object presence, object/part type, location description).
        image_id: Numeric ID allocated by get_next_id().

    Returns:
        Dict with "id", "image_path", "description" and "conversations" keys,
        matching the schema of metadata.json.

    Raises:
        ValueError: When fewer than 3 QA pairs are supplied.
    """
    # Validate before touching the pairs.
    if len(qa_pairs) < 3:
        raise ValueError("至少需要3对QA数据")

    # QA #1: foreign-object presence. Stays None (rendered as "None" in the
    # description) when the question doesn't use the expected wording.
    has_foreign = None
    q1, a1 = qa_pairs[0]
    if "是否存在异物" in q1:
        has_foreign = a1

    # QA #2: foreign-object or component type (same fallback behavior).
    obj_type = None
    q2, a2 = qa_pairs[1]
    if "异物是什么" in q2 or "部件是什么" in q2:
        obj_type = a2

    # QA #3 (location description) is kept verbatim in the conversation log
    # below; it is not parsed separately. (Removed a previously unused
    # unpacking of its answer.)

    description = f"异物检测: {has_foreign}. 类型: {obj_type}."

    # Build the conversation transcript used for fine-tuning-style records.
    conversations = []
    for i, (q, a) in enumerate(qa_pairs):
        if i == 0:
            # The first user turn embeds the image reference, using a
            # forward-slash relative path for cross-platform consistency.
            rel_path = os.path.relpath(image_path, BASE_DIR)
            rel_path = rel_path.replace("\\", "/")
            q = f"Picture 1:{rel_path}\n{q}"

        conversations.append({"from": "user", "value": q})
        conversations.append({"from": "assistant", "value": a})

    return {
        "id": f"identity_{image_id}",
        "image_path": image_path,
        "description": description,
        "conversations": conversations
    }

def add_to_vector_db(new_metadata, image_path):
    """Embed *image_path* with CLIP and append it (plus *new_metadata*) to the
    on-disk FAISS index and metadata.json, then refresh the in-memory globals.

    Raises:
        Exception: Re-raises any image-processing or persistence failure so
        the caller can report it to the client.
    """
    global clip_model, preprocess, image_index, image_metadata

    # 1. Embed the new image with CLIP.
    try:
        img = Image.open(image_path)
        img = preprocess(img).unsqueeze(0)

        # Run on whichever device the CLIP model lives on.
        device = next(clip_model.parameters()).device

        # Move the input tensor to the model's device.
        img = img.to(device)

        # Extract the feature vector (no gradients needed).
        with torch.no_grad():
            image_features = clip_model.encode_image(img).cpu().numpy().astype('float32')

        # L2-normalize so inner-product search behaves like cosine similarity.
        faiss.normalize_L2(image_features)

    except Exception as e:
        print(f"❌❌❌❌ 处理新图片失败: {str(e)}")
        raise

    # 2. Load the existing metadata and index from disk.
    metadata_path = os.path.join(POWERLINE_VECTORDB_DIR, "metadata.json")
    index_path = os.path.join(POWERLINE_VECTORDB_DIR, "image_index.faiss")

    # NOTE(review): if metadata.json fails to parse but the index loads, a
    # fresh metadata list gets paired with the old index and the two go out
    # of sync — confirm whether this best-effort reset is intended.
    existing_metadata = []
    if os.path.exists(metadata_path):
        try:
            with open(metadata_path, "r", encoding="utf-8") as f:
                existing_metadata = json.load(f)
        except Exception as e:
            print(f"⚠️⚠️ 加载元数据失败，创建新文件: {str(e)}")

    # Load (or create) the FAISS index; IndexFlatIP = exact inner-product search.
    dim = image_features.shape[1]
    if os.path.exists(index_path):
        try:
            existing_index = faiss.read_index(index_path)
        except Exception as e:
            print(f"⚠️⚠️ 加载索引失败，创建新索引: {str(e)}")
            existing_index = faiss.IndexFlatIP(dim)
    else:
        existing_index = faiss.IndexFlatIP(dim)

    # 3. Append the new entry to both parallel structures.
    existing_metadata.append(new_metadata)

    existing_index.add(image_features)

    # 4. Persist to disk, then swap the refreshed structures into the globals
    #    so in-flight queries see the update.
    try:
        with open(metadata_path, "w", encoding="utf-8") as f:
            json.dump(existing_metadata, f, ensure_ascii=False, indent=2)

        faiss.write_index(existing_index, index_path)

        image_metadata = existing_metadata
        image_index = existing_index

        print(f"✅✅ 成功更新向量数据库，添加条目: {new_metadata['id']}")
        print(f"  元数据位置: {metadata_path}")
        print(f"  索引位置: {index_path}")

    except Exception as e:
        print(f"❌❌❌❌ 保存更新失败: {str(e)}")
        raise

# ================== Flask路由 ==================
@app.route('/api/message', methods=['POST'])
def handle_message():
    """Chat endpoint: accepts text and/or an image, runs retrieval, calls the VLM.

    Form fields:
        text: optional question; may start with "快速模式：" (skip retrieval)
            or "思考模式：" (full retrieval).
        image: optional image file (png/jpg/jpeg/gif).

    Returns:
        JSON with the VLM answer at the top level, or an error payload
        with HTTP 400/500.
    """
    # Serialize against incremental learning (/api/learn).
    with db_lock:
        try:
            user_question = request.form.get('text', default=None)
            image_file = request.files.get('image')
            image_path = None
            text_results = None
            image_results = None

            # Detect and strip an optional mode prefix.
            mode_prefix = None
            if user_question:
                if user_question.startswith("快速模式：") or user_question.startswith("思考模式："):
                    # Both prefixes are exactly 5 characters (4 + full-width colon).
                    mode_prefix = user_question[:5]
                    user_question = user_question[5:].strip()
                    print(f"检测到模式前缀: [{mode_prefix}]，处理后问题: [{user_question}]")

            # Text retrieval — only in "thinking" mode (or when no prefix given).
            if user_question:
                print(f"[{datetime.now()}] 收到文本: {user_question}")

                if mode_prefix == "思考模式：" or mode_prefix is None:
                    text_results = query_text_vector_dbs(user_question, k=3)
                else:
                    print("快速模式：跳过文本检索")

            # Image upload handling.
            if image_file and image_file.filename != '':
                if allowed_file(image_file.filename):
                    # Build a unique, sanitized filename. Fixed: the sanitized
                    # original name was computed but never used, so every saved
                    # file lost its real name and extension.
                    filename = secure_filename(image_file.filename)
                    unique_filename = f"{uuid.uuid4().hex}_{filename}"
                    file_path = os.path.join(app.config['UPLOAD_FOLDER'], unique_filename)

                    image_file.save(file_path)
                    image_path = file_path
                    print(f"[{datetime.now()}] 保存图片到: {file_path}")

                    # Image retrieval — only in "thinking" mode.
                    if mode_prefix == "思考模式：" or mode_prefix is None:
                        image_results = search_similar_images(file_path, top_k=3)
                    else:
                        print("快速模式：跳过图片检索")
                else:
                    return jsonify({
                        "status": "error",
                        "message": "不允许的文件类型"
                    }), 400

            # Require at least one of text / image.
            if not user_question and image_file is None:
                return jsonify({
                    "status": "error",
                    "message": "消息必须包含文本或图片至少一种"
                }), 400

            # Log the retrieval results when any were produced.
            if text_results or image_results:
                results_str = format_results_for_print(text_results, image_results)
                print(results_str)

            # Ask the VLM for the final answer.
            vlm_response = ""
            if user_question or image_path:
                vlm_response = call_vlm(
                    user_question if user_question else "分析图片中的电力设备",
                    text_results,
                    image_results,
                    image_path
                )
            else:
                vlm_response = "⚠️ 没有足够的信息调用VLM"

            response = {
                "status": "success",
                "message": "消息已接收并处理",
                "vlm_response": vlm_response,  # top-level for easy client access
                "details": {
                    "text_received": user_question is not None,
                    "image_received": image_path is not None,
                    "mode": mode_prefix[:-1] if mode_prefix else None,  # drop the colon
                    "timestamp": datetime.now().isoformat()
                }
            }

            return jsonify(response)

        except Exception as e:
            print(f"❌❌ 处理消息时出错: {str(e)}")
            return jsonify({
                "status": "error",
                "message": "服务器处理失败",
                "error": str(e)
            }), 500

@app.route('/api/learn', methods=['POST'])
def learn_new_data():
    """Incremental-learning endpoint: add a labelled image to the image vector DB.

    Form fields:
        image: required image file.
        text: required QA text — alternating question/answer lines, at
            least 3 pairs (presence, type, location description).

    Returns:
        JSON with the newly assigned ID, or an error payload with HTTP 4xx/5xx.
    """
    # Serialize against chat queries (/api/message).
    with db_lock:
        try:
            print("\n" + "=" * 80)
            print("📚📚📚📚 开始学习新数据...")

            # 1. Receive and persist the image.
            image_file = request.files.get('image')
            if not image_file or image_file.filename == '':
                return jsonify({
                    "status": "error",
                    "message": "必须提供图片文件"
                }), 400

            # Build a unique, sanitized filename. Fixed: the sanitized original
            # name was computed but never used, so saved files lost their real
            # name and extension.
            filename = secure_filename(image_file.filename)
            unique_filename = f"{uuid.uuid4().hex}_{filename}"
            file_path = os.path.join(app.config['LEARN_PIC_DIR'], unique_filename)

            # Make sure the destination directory exists.
            os.makedirs(app.config['LEARN_PIC_DIR'], exist_ok=True)

            image_file.save(file_path)
            print(f"✅ 图片保存到: {file_path}")

            # 2. Receive and parse the QA text.
            text_data = request.form.get('text', '')
            if not text_data:
                return jsonify({
                    "status": "error",
                    "message": "必须提供文本数据"
                }), 400

            print(f"📝📝📝📝 收到文本数据:\n{text_data}")

            try:
                qa_pairs = extract_qa_pairs(text_data)
                print(f"✅ 解析出 {len(qa_pairs)} 对QA数据")
            except Exception as e:
                return jsonify({
                    "status": "error",
                    "message": f"文本格式错误: {str(e)}"
                }), 400

            # 3. Allocate the next persistent ID.
            try:
                image_id = get_next_id()
                print(f"🆔🆔🆔🆔🆔🆔 分配ID: identity_{image_id}")
            except Exception as e:
                return jsonify({
                    "status": "error",
                    "message": f"ID分配失败: {str(e)}"
                }), 500

            # 4. Build the metadata record.
            try:
                metadata_entry = build_metadata_entry(file_path, qa_pairs, image_id)
                print(f"📄📄📄📄 构建元数据成功")
                print(json.dumps(metadata_entry, indent=2, ensure_ascii=False))
            except Exception as e:
                return jsonify({
                    "status": "error",
                    "message": f"元数据构建失败: {str(e)}"
                }), 400

            # 5. Embed and persist into the FAISS index + metadata.json.
            try:
                add_to_vector_db(metadata_entry, file_path)
            except Exception as e:
                return jsonify({
                    "status": "error",
                    "message": f"添加到向量数据库失败: {str(e)}"
                }), 500

            # 6. Success payload.
            response = {
                "status": "success",
                "message": "新数据已成功添加到知识图谱",
                "new_id": metadata_entry["id"],
                "image_path": file_path,
                "timestamp": datetime.now().isoformat()
            }

            return jsonify(response)

        except Exception as e:
            import traceback
            print(f"❌❌❌❌ 学习新数据时出错: {str(e)}")
            traceback.print_exc()
            return jsonify({
                "status": "error",
                "message": "服务器处理失败",
                "error": str(e)
            }), 500


# ================== 应用启动 ==================
if __name__ == '__main__':
    # Silence LangChain deprecation warnings.
    warnings.filterwarnings("ignore", category=DeprecationWarning, module="langchain_core")

    # Step 1: launch the companion FastAPI backend as a child process.
    try:
        print("🚀 正在启动另一个应用...")

        target_dir = r"E:\2025summerJINLIUelectirc\ElecSolutionBB\fastapi_backend"
        command = "python -m app.main"
        print(f"⚡ 执行命令: {command}")

        # Fixed: run the child with cwd=target_dir instead of os.chdir().
        # chdir changed THIS process's working directory too, which broke the
        # relative UPLOAD_FOLDER ('uploads') used by the Flask routes — the
        # directory was created in the original CWD before the switch.
        subprocess.Popen(command, shell=True, cwd=target_dir)

        print("✅ 另一个应用已启动")
        time.sleep(1)  # give the child process a moment to start
    except Exception as e:
        print(f"❌ 启动另一个应用失败: {str(e)}")
        sys.exit(1)

    # Step 2: load models and vector databases for this app.
    if not initialize_resources():
        print("❌❌❌❌ 资源初始化失败，应用无法启动")
        sys.exit(1)

    # Step 3: start the Flask server.
    print("🚀 正在启动当前应用...")
    app.run(host='0.0.0.0', port=5000, debug=False)