#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
SD-LoRA校园图片生成系统 - 后端主应用
"""

import os
import json
import requests
from flask import Flask, request, jsonify, Response
from flask_socketio import SocketIO, emit
from datetime import datetime
from openai import OpenAI
import base64

# --- Flask application setup ---
app = Flask(__name__)
# NOTE(review): hard-coded SECRET_KEY checked into source — should come from the
# environment in production.
app.config['SECRET_KEY'] = 'sd-lora-campus-secret'
# Directory where generated images are stored/served from.
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(__file__), 'static/images')

# DeepSeek API configuration.
app.config['DEEPSEEK_API_KEY'] = os.environ.get('DEEPSEEK_API_KEY', '***********') # put your API key in the env var (or replace the placeholder)
app.config['DEEPSEEK_BASE_URL'] = 'https://api.deepseek.com'

# Stable Diffusion WebUI API endpoint.
app.config['SD_API_URL'] = os.environ.get('SD_API_URL', 'http://127.0.0.1:7860')

# Global store for the most recently generated SD parameters; used as a
# fallback when a request arrives without a valid session id.
app.config['LATEST_GENERATION_PARAMS'] = None

# Make sure the image upload directory exists.
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# SocketIO for realtime communication.
# NOTE(review): cors_allowed_origins="*" accepts any origin — tighten for production.
socketio = SocketIO(app, cors_allowed_origins="*")

# DeepSeek client (OpenAI-compatible SDK pointed at the DeepSeek endpoint).
deepseek_client = OpenAI(
    api_key=app.config['DEEPSEEK_API_KEY'],
    base_url=app.config['DEEPSEEK_BASE_URL']
)

# In-memory per-session chat history, keyed by session id.
# NOTE(review): unbounded and process-local — grows without limit and is lost on restart.
sessions = {}

# Shared system-prompt builder so that /api/chat and /api/deepseek/stream use
# the exact same extraction prompt.
def get_image_generation_system_prompt():
    """Return the system prompt that instructs DeepSeek to extract Stable
    Diffusion generation parameters as plain "key:value" lines.

    The returned text is sent verbatim to the DeepSeek API, so it is kept in
    Chinese and must not be modified casually: both routes parse the model's
    reply line-by-line on ':' into the parameter dict.
    """
    return """
    你是一个图像生成参数提取专家。请分析用户的绘画需求，提取关键参数并以纯文本形式返回参数键值对，每行一个参数，格式为"参数名:参数值"。
    必须返回以下参数:
    prompt:详细的绘画描述（英文，包含风格、场景、内容等）
    negative_prompt:不希望出现的元素（英文）
    batch_size:2
    width:512
    height:512
    steps:30
    guidance_scale:7
    sampler_index:Euler a
    enable_hr:true
    hr_scale:2
    hr_upscaler:ESRGAN_4x
    hr_second_pass_steps:15
    denoising_strength:0.3
    sd_model_checkpoint:anything-v5.safetensors（动漫类）/majicMIX realistic 麦橘写实_v7.safetensors（真实类）
    

    注意：
    1. 前置条件加在lora模型前边，人物之类的要加权重如(1girl:1)不加的话脸会糊。
    2. 根据用户需求灵活调整拍摄角度，不要总是生成半身照，可以是全身照(full body)、特写(close-up)等,灵活变通。
    3. 当用户提到DTS或产业学院时必须在提示词中包含<lora:DTS:0.7>标签。
    4. 我是在调用你的api，不要说："目前我无法直接生成图片，但我可以为你提供一个详细的文字描述，你可以根据这个描述使用AI绘画工具（如DALL·E、MidJourney等）来生成图像"。
    正向提示词示例真实类示例："(1girl:1),smile,upper_body,sparkling eyes,black hair,long hair,holding_basketball,(mid_shot:1),front view,looking_at_viewer,<lora:DTS:0.7>,playground,basketball hoop,"
    正向提示词示例动漫类示例："((masterpiece)),best quality,illustration,1 girl, small breast,white_hair, beautiful detailed eyes,beautiful detailed sky, beautifu detailed watercinematic lighting dramatic anglesailor dresswet clothes bikini,sideboob,white shirt long sleeves hoodie, midriff, jacket midriff,white bloomers,"
    负面提示词真实类的模型用："(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark"，动漫类的用"lowres,bad anatomy bad hands, texterror missing fingers, extra digit,fewer digits,croppedworst quality, low qualitynormal qualityjpeg artifacts,signature,watermark, username, blurry, bad feet"
    
    我的Lora模型名为DTS，当用户提到DTS或产业学院时使用这个lora模型，使用的是真实模型，通常权重为<lora:DTS:0.7>，使用时在正向提示词内加<lora:DTS:0.7>
    使用lora模型时的正向提示词示例："(1girl:1),smile,upper_body,sparkling eyes,black hair,long hair,holding_basketball,(mid_shot:1),front view,looking_at_viewer,<lora:DTS:0.7>,playground,basketball hoop,"
    lora模型训练时的标签内有汉字如："
    10号楼：10号楼, building, scenery, tree, outdoors, day, sky, window, real world location, city, stairs, plant，
    9号楼：9号楼, tree, scenery, outdoors, sky, building, day, grass, window, bush, lamppost，
    教室：教室, indoors, ceiling light, chair, scenery, door, ceiling, folding chair, window（实践教室）/ 教室, chair, indoors, scenery, ceiling light, monitor, computer, window, office chair, cable, keyboard \(computer\), table, desk（机房教室）/ ，
    食堂：食堂, scenery, ceiling light, restaurant, shop, window, chair, table, indoors, sign, building，
    宿舍楼：宿舍楼, scenery, window, building, outdoors, sky, day, stairs, railing, house，
    操场： 操场, tree, outdoors, scenery, lamppost, sky, cloud, building, day, fence, basketball hoop（室外运动场）/ 操场, indoors, scenery（室内运动场），
    教学楼：教学楼, outdoors, building, scenery, day, sky, tree, window, city, blue sky, plant，
    下沉广场：下沉广场, outdoors, tree, scenery, sky, flower, building, day, cloud, grey sky，
    学校正门：学校正门, building, scenery, outdoors, sky, lamppost, day, road, real world location, city, motor vehicle, blue sky, window"
    当我提到这些汉字标签时将对应的提示词加入正向提示词后。
    
    我会将你生成的参数放入sd接口中训练。
    注意！不要说你无法生成图片！！！我是只是调用的你的api让你生成参数而已。如果你说出来用户会在对话内看到！
    """

@app.route('/')
def index():
    """Health-check endpoint: report that the API is up, with version info."""
    status_payload = {
        "status": "running",
        "api_version": "1.0",
        "system": "SD-LoRA校园图片生成系统",
    }
    return jsonify(status_payload)

def _parse_param_text(param_content):
    """Parse "key:value" lines returned by DeepSeek into typed SD parameters.

    Args:
        param_content: Raw text from the LLM, one "key:value" pair per line.

    Returns:
        Tuple ``(generation_params, override_settings)``. The
        ``sd_model_checkpoint`` key is routed into ``override_settings`` (the
        SD WebUI API expects it there); integer/float/boolean keys are coerced
        to native types, keeping the raw string when coercion fails.
    """
    generation_params = {}
    override_settings = {}
    int_keys = ('batch_size', 'width', 'height', 'steps', 'hr_scale', 'hr_second_pass_steps')
    float_keys = ('guidance_scale', 'denoising_strength')
    for line in param_content.strip().split('\n'):
        if ':' not in line:
            continue
        key, value = line.split(':', 1)
        key = key.strip()
        value = value.strip()
        if key in int_keys:
            try:
                value = int(value)
            except ValueError:
                pass  # keep the raw string if the model returned a non-integer
        elif key in float_keys:
            try:
                value = float(value)
            except ValueError:
                pass
        elif key == 'enable_hr':
            value = value.lower() == 'true'
        elif key == 'sd_model_checkpoint':
            # Checkpoint selection belongs in override_settings for the SD API.
            override_settings[key] = value
            continue
        generation_params[key] = value
    return generation_params, override_settings


def _merge_advanced_settings(generation_params, override_settings, advanced_settings):
    """Overlay explicitly-set front-end settings onto the parsed parameters.

    Only keys with a non-None value are applied. ``sd_model_checkpoint`` goes
    into ``override_settings``; every other key into ``generation_params``.
    Both dicts are mutated in place.
    """
    if not advanced_settings:
        return
    for key, value in advanced_settings.items():
        if key and value is not None:
            if key == 'sd_model_checkpoint':
                override_settings[key] = value
            else:
                generation_params[key] = value


def _default_generation_params(prompt_text):
    """Return fallback SD generation parameters used when LLM extraction fails."""
    return {
        "prompt": prompt_text,
        "negative_prompt": "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark",
        "batch_size": 2,
        "width": 512,
        "height": 512,
        "steps": 30,
        "guidance_scale": 7.5,
        "sampler_index": "Euler a",
        "enable_hr": True,
        "hr_scale": 2,
        "hr_upscaler": "ESRGAN_4x",
        "hr_second_pass_steps": 15,
        "denoising_strength": 0.3,
        "override_settings": {
            "sd_model_checkpoint": "anything-v5.safetensors"
        }
    }


@app.route('/api/chat', methods=['POST'])
def handle_chat():
    """Handle a non-streaming chat request and return the AI reply as JSON.

    In 'draw' mode, additionally asks DeepSeek to extract SD generation
    parameters from the user's request; the parameters are attached to the
    assistant message in the session history and returned to the client.
    """
    data = request.json
    user_message = data.get('message', '')
    session_id = data.get('session_id', None)
    mode = data.get('mode', 'chat')  # 'chat' (plain conversation) or 'draw' (image request)
    model = data.get('model', 'deepseek-reasoner')  # 'deepseek-chat' or 'deepseek-reasoner'

    # Optional advanced settings supplied by the front end.
    advanced_settings = data.get('advanced_settings', None)
    if advanced_settings:
        print(f"收到前端高级设置参数: {json.dumps(advanced_settings, ensure_ascii=False)}")

    # Generate a session id when the client did not provide one.
    if not session_id or session_id == "None":
        session_id = f"session_{datetime.now().strftime('%Y%m%d%H%M%S')}"

    # Fetch or create the session history.
    if session_id not in sessions:
        sessions[session_id] = {
            'messages': [],
            'mode': mode,
            'model': model,
            'created_at': datetime.now().isoformat()
        }

    # Keep the session's mode/model in sync with the latest request.
    sessions[session_id]['mode'] = mode
    sessions[session_id]['model'] = model

    # Record the user message.
    sessions[session_id]['messages'].append({
        'role': 'user',
        'content': user_message,
        'timestamp': datetime.now().isoformat()
    })

    # Build the message list sent to DeepSeek.
    messages = [
        {"role": "system", "content": "你是一个有用的助手，擅长处理各种问题和生成创意内容"},
    ]

    # Include recent history (at most 10 turns / 20 messages).
    for msg in sessions[session_id]['messages'][-20:]:
        if msg['role'] in ['user', 'assistant']:
            messages.append({"role": msg['role'], "content": msg['content']})

    try:
        # Call the DeepSeek API.
        if model == 'deepseek-reasoner':
            # Reasoning model: also returns a separate reasoning_content field.
            response = deepseek_client.chat.completions.create(
                model="deepseek-reasoner",
                messages=messages,
                stream=False,
                max_tokens=8000
            )
            ai_response = response.choices[0].message.content
            reasoning_content = response.choices[0].message.reasoning_content
        else:
            # Plain chat model.
            response = deepseek_client.chat.completions.create(
                model="deepseek-chat",
                messages=messages,
                stream=False
            )
            ai_response = response.choices[0].message.content
            reasoning_content = None

        # In draw mode, extract SD generation parameters BEFORE building the
        # assistant message. BUG FIX: the previous code referenced
        # generation_params here before it was assigned, so every draw-mode
        # request raised NameError (swallowed by the outer try) and returned
        # the generic error response instead of the reply.
        generation_params = None
        if mode == 'draw':
            try:
                print("绘画模式：正在生成参数...")

                # Build the extraction prompt, including advanced settings if any.
                user_prompt = f"请分析这段绘画需求并提取参数：{user_message}"
                if advanced_settings:
                    user_prompt += f"\n\n请考虑以下高级设置参数：{json.dumps(advanced_settings, ensure_ascii=False)}"

                param_response = deepseek_client.chat.completions.create(
                    model="deepseek-chat",  # deepseek-chat (not reasoner) for parameter extraction
                    messages=[
                        {"role": "system", "content": get_image_generation_system_prompt()},
                        {"role": "user", "content": user_prompt}
                    ]
                )
                param_content = param_response.choices[0].message.content
                print(f"DeepSeek API返回的文本参数: {param_content}")

                # Convert the "key:value" text into a typed dict, then overlay
                # any front-end advanced settings.
                generation_params, override_settings = _parse_param_text(param_content)
                _merge_advanced_settings(generation_params, override_settings, advanced_settings)

                if override_settings:
                    generation_params['override_settings'] = override_settings

                print(f"解析后的参数: {json.dumps(generation_params, ensure_ascii=False, indent=2)}")

                # Persist the parameters on the session for later /api/generate calls.
                sessions[session_id]['generation_params'] = generation_params
                print(f"保存生成参数到会话: {session_id}")
            except Exception as e:
                print(f"解析绘画参数时出错: {str(e)}")
                # Fall back to sane defaults, still honouring advanced settings.
                generation_params = _default_generation_params(user_message)
                _merge_advanced_settings(
                    generation_params,
                    generation_params['override_settings'],
                    advanced_settings
                )
                # Even on failure, keep the defaults on the session.
                sessions[session_id]['generation_params'] = generation_params

        # Record the assistant reply (with parameters attached in draw mode).
        assistant_message = {
            'role': 'assistant',
            'content': ai_response,
            'timestamp': datetime.now().isoformat(),
            'reasoning_content': reasoning_content
        }
        if mode == 'draw' and generation_params:
            assistant_message['generation_params'] = generation_params

        sessions[session_id]['messages'].append(assistant_message)

        return jsonify({
            "response": ai_response,
            "session_id": session_id,
            "generation_params": generation_params,
            "reasoning_content": reasoning_content
        })

    except Exception as e:
        print(f"API调用错误: {str(e)}")
        return jsonify({
            "response": f"抱歉，我在处理您的请求时遇到了问题: {str(e)}",
            "session_id": session_id
        })

@app.route('/api/deepseek/stream', methods=['GET'])
def stream_response():
    """Stream a DeepSeek chat completion to the client via Server-Sent Events.

    Each SSE ``data:`` payload is a JSON object with a ``type`` field:
    'params_ready' (draw-mode SD parameters generated up front),
    'reasoning_start' / 'reasoning_chunk' / 'content_start' (reasoner model),
    'chunk' (answer text), 'complete' (final full text + params), or 'error'.
    """
    message = request.args.get('message', '')
    session_id = request.args.get('session_id', None)
    mode = request.args.get('mode', 'chat')
    model = request.args.get('model', 'deepseek-chat')
    
    # Advanced settings forwarded by the front end as a JSON-encoded query param.
    advanced_settings_json = request.args.get('advanced_settings', None)
    advanced_settings = {}
    if advanced_settings_json:
        try:
            advanced_settings = json.loads(advanced_settings_json)
            print(f"收到前端高级设置参数: {json.dumps(advanced_settings, ensure_ascii=False)}")
        except Exception as e:
            print(f"解析高级设置参数出错: {str(e)}")
    
    # Generate a session id when the client did not provide one.
    if not session_id or session_id == "None":
        session_id = f"session_{datetime.now().strftime('%Y%m%d%H%M%S')}"
        print(f"生成新的会话ID: {session_id}")
    
    # Fetch or create the session history.
    if session_id not in sessions:
        sessions[session_id] = {
            'messages': [],
            'mode': mode,
            'model': model,
            'created_at': datetime.now().isoformat()
        }
    
    # Record the user message.
    sessions[session_id]['messages'].append({
        'role': 'user',
        'content': message,
        'timestamp': datetime.now().isoformat()
    })
    
    # SD generation parameters produced up front in draw mode (None otherwise).
    generation_params = None
    
    # In draw mode, extract the SD parameters immediately (before streaming
    # the conversational reply) so the front end can start generation early.
    if mode == 'draw':
        try:
            print(f"绘画模式：立即生成JSON参数，session_id={session_id}")
            # Uses the dedicated extraction system prompt; plain-text key:value
            # output is parsed below (no json_object response format).
            
            # Build the extraction prompt, including advanced settings if any.
            user_prompt = f"请分析这段绘画需求并提取参数：{message}"
            if advanced_settings:
                user_prompt += f"\n\n请考虑以下高级设置参数：{json.dumps(advanced_settings, ensure_ascii=False)}"
                
            param_response = deepseek_client.chat.completions.create(
                model="deepseek-chat",  # deepseek-chat (not deepseek-reasoner) for extraction
                messages=[
                    {"role": "system", "content": get_image_generation_system_prompt()},
                    {"role": "user", "content": user_prompt}
                ]
            )
            
            # Parse the plain-text parameter list.
            param_content = param_response.choices[0].message.content
            print(f"DeepSeek API返回的文本参数: {param_content}")
            
            # Convert the "key:value" lines into a typed dict.
            generation_params = {}
            override_settings = {}
            for line in param_content.strip().split('\n'):
                if ':' in line:
                    key, value = line.split(':', 1)
                    key = key.strip()
                    value = value.strip()
                    
                    # Coerce known numeric/boolean keys; keep the raw string on failure.
                    if key == 'batch_size' or key == 'width' or key == 'height' or key == 'steps' or key == 'hr_scale' or key == 'hr_second_pass_steps':
                        try:
                            value = int(value)
                        except:
                            pass
                    elif key == 'guidance_scale' or key == 'denoising_strength':
                        try:
                            value = float(value)
                        except:
                            pass
                    elif key == 'enable_hr':
                        value = value.lower() == 'true'
                    elif key == 'sd_model_checkpoint':
                        # Checkpoint selection goes into override_settings for the SD API.
                        override_settings[key] = value
                        continue
                    
                    generation_params[key] = value
            
            # Overlay the front-end advanced settings (if any).
            if advanced_settings:
                for key, value in advanced_settings.items():
                    # Only apply keys the front end explicitly set.
                    if key and value is not None:
                        if key == 'sd_model_checkpoint':
                            override_settings[key] = value
                        else:
                            generation_params[key] = value
            
            # Attach override_settings to the parameter dict.
            if override_settings:
                generation_params['override_settings'] = override_settings
            
            print(f"解析后的参数: {json.dumps(generation_params, ensure_ascii=False, indent=2)}")
            
            # Persist the parameters on the session for later use.
            sessions[session_id]['generation_params'] = generation_params
            
            # Also store them globally so they remain reachable even when a
            # later request arrives with session_id None.
            app.config['LATEST_GENERATION_PARAMS'] = {
                'params': generation_params,
                'timestamp': datetime.now().isoformat(),
                'session_id': session_id
            }
            
            print(f"已将生成参数保存到会话: {session_id}")
            print(f"同时保存到全局配置中，以便任何会话可访问")
            
        except Exception as e:
            print(f"解析绘画参数时出错: {str(e)}")
            # Extraction failed — fall back to default parameters.
            generation_params = {
                "prompt": message,
                "negative_prompt": "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark",
                "batch_size": 2,
                "width": 512,
                "height": 512,
                "steps": 30,
                "guidance_scale": 7.5,
                "sampler_index": "Euler a",
                "enable_hr": True,
                "hr_scale": 2,
                "hr_upscaler": "ESRGAN_4x",
                "hr_second_pass_steps": 15,
                "denoising_strength": 0.3,
                "override_settings": {
                    "sd_model_checkpoint": "anything-v5.safetensors"
                }
            }
            
            # Overlay the front-end advanced settings (if any).
            if advanced_settings:
                for key, value in advanced_settings.items():
                    # Only apply keys the front end explicitly set.
                    if key and value is not None:
                        if key == 'sd_model_checkpoint':
                            generation_params['override_settings'][key] = value
                        else:
                            generation_params[key] = value
            
            # Even on failure, keep the defaults on the session.
            sessions[session_id]['generation_params'] = generation_params
            
            # And mirror them into the global fallback store.
            app.config['LATEST_GENERATION_PARAMS'] = {
                'params': generation_params,
                'timestamp': datetime.now().isoformat(),
                'session_id': session_id
            }
            
            print(f"已将默认生成参数保存到会话: {session_id}")
            print(f"同时保存到全局配置中，以便任何会话可访问")
    
    # Build the message list sent to DeepSeek.
    messages = [
        {"role": "system", "content": "你是一个有用的助手，擅长处理各种问题和生成创意内容"},
    ]
    
    # Include recent history (at most 10 turns / 20 messages).
    for msg in sessions[session_id]['messages'][-20:]:
        if msg['role'] in ['user', 'assistant']:
            messages.append({"role": msg['role'], "content": msg['content']})
    
    def generate():
        """Generator yielding the SSE event stream for this request."""
        full_response = ""
        reasoning_content = ""
        
        try:
            # In draw mode, announce the prepared parameters first so the
            # front end can kick off image generation immediately.
            if mode == 'draw' and generation_params:
                params_ready_data = json.dumps({
                    'type': 'params_ready',
                    'generation_params': generation_params,
                    'session_id': session_id
                })
                yield f"data: {params_ready_data}\n\n"
            
            if model == 'deepseek-reasoner':
                # Streaming can fail in some situations: try streaming first,
                # fall back to a non-streaming call on error.
                try:
                    # Signal the start of the reasoner response.
                    start_data = json.dumps({
                        'type': 'reasoning_start',
                        'session_id': session_id
                    })
                    yield f"data: {start_data}\n\n"
                    
                    # Streaming API call.
                    stream = deepseek_client.chat.completions.create(
                        model="deepseek-reasoner",
                        messages=messages,
                        stream=True,
                        max_tokens=8000
                    )
                    
                    # Buffers and state flags for interleaving reasoning vs. answer text.
                    has_started_normal_content = False
                    reasoning_buffer = ""
                    content_buffer = ""
                    normal_content_started = False
                    
                    for chunk in stream:
                        # Debug output.
                        print(f"收到数据块: {chunk}")
                        
                        # Reasoning ("thinking") deltas.
                        if hasattr(chunk.choices[0].delta, 'reasoning_content') and chunk.choices[0].delta.reasoning_content:
                            reasoning_chunk = chunk.choices[0].delta.reasoning_content
                            reasoning_content += reasoning_chunk
                            
                            # Stream the reasoning text out first.
                            for char in reasoning_chunk:
                                reasoning_buffer += char
                                if len(reasoning_buffer) >= 4:  # small buffer to avoid overly frequent sends
                                    data = json.dumps({
                                        'type': 'reasoning_chunk',
                                        'content': reasoning_buffer,
                                        'session_id': session_id
                                    })
                                    yield f"data: {data}\n\n"
                                    reasoning_buffer = ""
                        
                        # Answer ("normal content") deltas.
                        if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
                            content_chunk = chunk.choices[0].delta.content
                            
                            # On the first answer chunk, emit a separator event.
                            if not normal_content_started:
                                normal_content_started = True
                                # Flush any remaining reasoning text first.
                                if reasoning_buffer:
                                    data = json.dumps({
                                        'type': 'reasoning_chunk',
                                        'content': reasoning_buffer,
                                        'session_id': session_id
                                    })
                                    yield f"data: {data}\n\n"
                                    reasoning_buffer = ""
                                
                                # Then send the reasoning/answer separator.
                                separator_data = json.dumps({
                                    'type': 'content_start',
                                    'session_id': session_id
                                })
                                yield f"data: {separator_data}\n\n"
                            
                            full_response += content_chunk
                            
                            # Stream the answer text.
                            for char in content_chunk:
                                content_buffer += char
                                if len(content_buffer) >= 4:  # small buffer to avoid overly frequent sends
                                    data = json.dumps({
                                        'type': 'chunk',
                                        'content': content_buffer,
                                        'session_id': session_id
                                    })
                                    yield f"data: {data}\n\n"
                                    content_buffer = ""
                    
                    # Flush any remaining buffered text.
                    # Remaining reasoning buffer:
                    if reasoning_buffer:
                        data = json.dumps({
                            'type': 'reasoning_chunk',
                            'content': reasoning_buffer,
                            'session_id': session_id
                        })
                        yield f"data: {data}\n\n"
                    
                    # If content_start was never sent but we do have answer text, send it now.
                    if not normal_content_started and full_response:
                        separator_data = json.dumps({
                            'type': 'content_start',
                            'session_id': session_id
                        })
                        yield f"data: {separator_data}\n\n"
                    
                    # Remaining answer buffer:
                    if content_buffer:
                        data = json.dumps({
                            'type': 'chunk',
                            'content': content_buffer,
                            'session_id': session_id
                        })
                        yield f"data: {data}\n\n"
                
                except Exception as stream_error:
                    print(f"流式处理失败，尝试非流式: {stream_error}")
                    
                    # Streaming failed — fall back to a non-streaming call.
                    response = deepseek_client.chat.completions.create(
                        model="deepseek-reasoner",
                        messages=messages,
                        stream=False,
                        max_tokens=4000
                    )
                    
                    reasoning_content = response.choices[0].message.reasoning_content or ""
                    full_response = response.choices[0].message.content or ""
                    
                    # Emit the reasoning text first.
                    if reasoning_content:
                        start_data = json.dumps({
                            'type': 'reasoning_start',
                            'session_id': session_id
                        })
                        yield f"data: {start_data}\n\n"
                        
                        # Send reasoning text in small batches to mimic streaming.
                        for i in range(0, len(reasoning_content), 10):
                            chunk = reasoning_content[i:i+10]
                            data = json.dumps({
                                'type': 'reasoning_chunk',
                                'content': chunk,
                                'session_id': session_id
                            })
                            yield f"data: {data}\n\n"
                    
                    # Signal the start of the answer text.
                    separator_data = json.dumps({
                        'type': 'content_start',
                        'session_id': session_id
                    })
                    yield f"data: {separator_data}\n\n"
                    
                    # Send the answer text in small batches to mimic streaming.
                    for i in range(0, len(full_response), 10):
                        chunk = full_response[i:i+10]
                        data = json.dumps({
                            'type': 'chunk',
                            'content': chunk,
                            'session_id': session_id
                        })
                        yield f"data: {data}\n\n"
            else:
                # Plain chat model: streaming API call.
                stream = deepseek_client.chat.completions.create(
                    model="deepseek-chat",
                    messages=messages,
                    stream=True
                )
                
                buffer = ""
                for chunk in stream:
                    if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
                        content = chunk.choices[0].delta.content
                        full_response += content
                        
                        # Re-chunk the received text character by character so
                        # the front end can render a smoother typewriter effect.
                        for char in content:
                            buffer += char
                            
                            # Send every few characters to avoid overly frequent events.
                            if len(buffer) >= 2:
                                data = json.dumps({
                                    'type': 'chunk',
                                    'content': buffer,
                                    'session_id': session_id
                                })
                                yield f"data: {data}\n\n"
                                buffer = ""
                
                # Flush whatever is left in the buffer.
                if buffer:
                    data = json.dumps({
                        'type': 'chunk',
                        'content': buffer,
                        'session_id': session_id
                    })
                    yield f"data: {data}\n\n"
            
            # Persist the full reply in the session history.
            assistant_message = {
                'role': 'assistant',
                'content': full_response,
                'timestamp': datetime.now().isoformat(),
                'reasoning_content': reasoning_content
            }

            # In draw mode, attach the generated parameters to the stored message.
            if mode == 'draw' and sessions[session_id].get('generation_params'):
                assistant_message['generation_params'] = sessions[session_id].get('generation_params')
            
            sessions[session_id]['messages'].append(assistant_message)

            # Final 'complete' event with the whole response and parameters.
            complete_data = {
                'type': 'complete',
                'content': full_response,
                'session_id': session_id,
                'generation_params': sessions[session_id].get('generation_params'),
                'reasoning_content': reasoning_content
            }
            yield f"data: {json.dumps(complete_data)}\n\n"
            
        except Exception as e:
            print(f"流式API调用错误: {str(e)}")
            error_data = {
                'type': 'error',
                'content': f"抱歉，发生错误: {str(e)}",
                'session_id': session_id
            }
            yield f"data: {json.dumps(error_data)}\n\n"
    
    # Return the SSE streaming response.
    return Response(generate(), mimetype='text/event-stream')

def _to_int(value, fallback):
    """Best-effort int coercion.

    Parameters extracted from DeepSeek's text reply (and JSON bodies from the
    frontend) may arrive as strings such as "512"; return *fallback* when the
    value is missing or not numeric.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return fallback


@app.route('/api/generate', methods=['POST'])
def generate_image():
    """Generate images through the Stable Diffusion txt2img API.

    Request JSON:
        params            -- base generation parameters from the frontend
        session_id        -- chat session id (the frontend may send the
                             literal string "None")
        message_id        -- id of the triggering message (currently unused)
        advanced_settings -- optional user overrides (steps, checkpoint, ...)

    Returns JSON with the saved image URLs and the merged parameter set,
    or a 500 error payload when the SD API call fails.
    """
    data = request.json
    params = data.get('params', {})
    session_id = data.get('session_id')
    message_id = data.get('message_id')  # kept for API compatibility; unused
    advanced_settings = data.get('advanced_settings', None)

    # Recover a usable session id when the frontend sent none.
    if session_id == "None" or session_id is None:
        # NOTE: LATEST_GENERATION_PARAMS is initialised to None at startup,
        # so it must be truth-checked before calling .get() on it (the old
        # `'LATEST_GENERATION_PARAMS' in app.config` check always passed and
        # then crashed on None).
        latest_params = app.config.get('LATEST_GENERATION_PARAMS')
        if latest_params and latest_params.get('session_id') and latest_params['session_id'] != "None":
            session_id = latest_params['session_id']
            print(f"从全局配置中获取会话ID: {session_id}")
        else:
            session_id = f"session_{datetime.now().strftime('%Y%m%d%H%M%S')}"
            print(f"全局参数中没有有效会话ID，生成新的: {session_id}")

        # Create the session record if it does not exist yet.
        if session_id not in sessions:
            sessions[session_id] = {
                'messages': [],
                'mode': 'draw',  # image requests default to draw mode
                'model': 'deepseek-chat',
                'created_at': datetime.now().isoformat(),
                'generation_params': {}
            }

    # Look up the parameters DeepSeek previously extracted for this session.
    deepseek_params = {}
    if session_id and session_id in sessions and 'generation_params' in sessions[session_id]:
        deepseek_params = sessions[session_id]['generation_params']
        print(f"找到DeepSeek生成的参数: {json.dumps(deepseek_params, ensure_ascii=False, indent=2)}")
    else:
        print(f"未找到DeepSeek生成的参数，session_id: {session_id}")
        # Fall back to parameters from any other session (insertion order).
        for s_id, session_data in sessions.items():
            if 'generation_params' in session_data:
                deepseek_params = session_data['generation_params']
                print(f"使用其他会话({s_id})的参数: {json.dumps(deepseek_params, ensure_ascii=False, indent=2)}")
                break

        # Last resort: the globally stored latest parameters.
        # Guard against the None initial value before subscripting.
        if not deepseek_params:
            latest_params = app.config.get('LATEST_GENERATION_PARAMS')
            if latest_params:
                deepseek_params = latest_params['params']
                print(f"使用全局最新参数(来自会话{latest_params['session_id']}): {json.dumps(deepseek_params, ensure_ascii=False, indent=2)}")

        if session_id in sessions:
            print(f"会话存在但没有generation_params键，会话内容: {list(sessions[session_id].keys())}")

    # Merge priority: frontend params < DeepSeek params < advanced settings.
    merged_params = {**params}
    if deepseek_params:
        for key, value in deepseek_params.items():
            # Only take keys DeepSeek actually provided.
            if key and value is not None:
                merged_params[key] = value
        print(f"合并后的参数: {json.dumps(merged_params, ensure_ascii=False, indent=2)}")

    if advanced_settings:
        for key, value in advanced_settings.items():
            # Only apply settings the frontend explicitly set.
            if key and value is not None:
                if key == 'sd_model_checkpoint':
                    # FIX: previously the checkpoint was written to the top
                    # level when 'override_settings' was absent, so the model
                    # override silently never reached the SD API.
                    merged_params.setdefault('override_settings', {})[key] = value
                else:
                    merged_params[key] = value
        print(f"合并高级设置后的参数: {json.dumps(merged_params, ensure_ascii=False, indent=2)}")

    # The prompt may arrive under 'message' from chat-style callers.
    prompt = merged_params.get('prompt', '') or params.get('message', '')

    # Cap batch size at 4. Defined before the try block so the failure path
    # never hits an undefined name; coerced because LLM-parsed values may be
    # strings (min("2", 4) would raise TypeError).
    batch_size = min(_to_int(merged_params.get('batch_size'), 2), 4)

    # Parameters for the Stable Diffusion txt2img endpoint.
    sd_params = {
        "prompt": prompt,
        "negative_prompt": merged_params.get('negative_prompt', '(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark'),
        "batch_size": batch_size,
        "steps": _to_int(merged_params.get('steps'), 30),
        "cfg_scale": merged_params.get('guidance_scale', 7),
        "width": _to_int(merged_params.get('width'), 512),
        "height": _to_int(merged_params.get('height'), 512),
        "sampler_name": merged_params.get('sampler_index', "Euler a")
    }

    # Optional hi-res fix parameters.
    if merged_params.get('enable_hr'):
        sd_params["enable_hr"] = merged_params.get('enable_hr', True)
        sd_params["hr_scale"] = merged_params.get('hr_scale', 2)
        sd_params["hr_upscaler"] = merged_params.get('hr_upscaler', "ESRGAN_4x")
        sd_params["hr_second_pass_steps"] = _to_int(merged_params.get('hr_second_pass_steps'), 15)
        sd_params["denoising_strength"] = merged_params.get('denoising_strength', 0.3)

    # Optional model checkpoint override.
    override = merged_params.get('override_settings') or {}
    if override.get('sd_model_checkpoint'):
        sd_params["override_settings"] = {
            "sd_model_checkpoint": override['sd_model_checkpoint']
        }

    print(f"正在调用SD API生成图像，参数: {json.dumps(sd_params, ensure_ascii=False, indent=2)}")

    image_urls = []
    try:
        sd_url = app.config['SD_API_URL'] + "/sdapi/v1/txt2img"
        print(f"发送请求到SD API: {sd_url}")
        print(f"请求参数: {json.dumps(sd_params, ensure_ascii=False, indent=2)}")

        # Image generation can be slow, hence the long timeout.
        response = requests.post(sd_url, json=sd_params, timeout=180)

        if response.status_code != 200:
            print(f"SD API返回错误状态码: {response.status_code}")
            print(f"响应内容: {response.text[:500]}")  # truncate noisy bodies
            raise Exception(f"SD API返回错误: {response.status_code}")

        print("SD API返回成功，正在解析结果...")
        result = response.json()
        images = result.get('images', [])

        if not images:
            print("SD API未返回任何图片")
            raise Exception("未能生成图片")

        print(f"成功获取 {len(images)} 张图片")

        # Decode each base64 image, persist it, and record its public URL.
        for i, img_base64 in enumerate(images):
            img_data = base64.b64decode(img_base64)

            timestamp = int(datetime.now().timestamp())
            filename = f"{timestamp}_{session_id}_{i}.png"
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)

            os.makedirs(os.path.dirname(filepath), exist_ok=True)

            with open(filepath, "wb") as f:
                f.write(img_data)

            # FIX: the URL previously embedded a hard-coded placeholder
            # instead of the saved file's name; point at the actual file.
            image_urls.append(f"/static/images/{filename}")

        print(f"成功生成 {len(image_urls)} 张图片，保存路径: {app.config['UPLOAD_FOLDER']}")

    except Exception as e:
        # Best-effort: log the failure and fall through to the 500 below.
        print(f"调用SD API出错: {str(e)}")

    if not image_urls:
        return jsonify({
            "error": "生成图片失败",
            "params": merged_params
        }), 500

    # Attach the image URLs (and parameters used) to the latest assistant
    # message so the chat history can render them later.
    if session_id in sessions and sessions[session_id]['messages']:
        last_message = sessions[session_id]['messages'][-1]
        if last_message.get('role') == 'assistant':
            last_message['images'] = image_urls
            if 'generation_params' not in last_message and merged_params:
                last_message['generation_params'] = merged_params

    # Return the merged parameter set alongside the URLs so the frontend
    # can display exactly what was used.
    return jsonify({
        "images": image_urls,
        "params": merged_params
    })

@app.route('/api/history', methods=['GET'])
def get_history():
    """Return a summary list of all chat sessions, newest first.

    Each entry carries the session id, a truncated preview of the first
    message, the creation timestamp, and the session's mode/model.
    """
    summaries = []
    for sid, record in sessions.items():
        messages = record['messages']
        # Preview = first message content, clipped to 50 characters.
        snippet = messages[0]['content'] if messages else ""
        if len(snippet) > 50:
            snippet = snippet[:50] + "..."

        summaries.append({
            "id": sid,
            "preview": snippet,
            "created_at": record['created_at'],
            "mode": record.get('mode', 'chat'),
            "model": record.get('model', 'deepseek-chat')
        })

    # Most recently created sessions first.
    summaries.sort(key=lambda entry: entry['created_at'], reverse=True)

    return jsonify({"sessions": summaries})

@app.route('/api/history/<session_id>', methods=['GET'])
def get_session_history(session_id):
    """Return the full message history for a single session.

    Unknown session ids yield an empty-history payload rather than a 404,
    so the frontend can always render the response shape.
    """
    record = sessions.get(session_id)
    if record is None:
        # Session not found: respond with a blank default history.
        return jsonify({
            "messages": [],
            "mode": "chat",
            "model": "deepseek-chat",
            "generation_params": None
        })

    return jsonify({
        "messages": record['messages'],
        "mode": record.get('mode', 'chat'),
        "model": record.get('model', 'deepseek-chat'),
        "generation_params": record.get('generation_params')
    })

@app.route('/api/history/<session_id>', methods=['DELETE'])
def delete_session_history(session_id):
    """Delete a session from the in-memory store.

    Always reports success, even when the id was unknown (idempotent).
    """
    sessions.pop(session_id, None)
    return jsonify({"success": True})

@socketio.on('message')
def handle_message(data):
    """Handle a raw WebSocket 'message' event.

    Echoes a canned acknowledgement back as a single chunk, followed by a
    completion event — a minimal stand-in for the streaming chat pipeline.
    """
    session_id = data.get('session_id', None)
    user_message = data.get('message', '')

    # Fixed-format acknowledgement of the user's request.
    reply = f"我理解您想要生成图片：{user_message}"

    emit('response_chunk', {'chunk': reply, 'session_id': session_id})
    emit('response_complete', {'session_id': session_id})

if __name__ == '__main__':
    # Start the Socket.IO development server on all interfaces, port 5000.
    # NOTE(review): debug=True enables the Werkzeug reloader/debugger — it
    # must be disabled before deploying this service anywhere public.
    socketio.run(app, host='0.0.0.0', port=5000, debug=True) 