#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
SD-LoRA校园图片生成系统 - 前端应用
"""

import hashlib
import json
import os
import time

import requests
from flask import Flask, render_template, request, jsonify, redirect, url_for, Response, stream_with_context
from flask_socketio import SocketIO

# Initialize the Flask application.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'sd-lora-campus-frontend-secret'
# Base URL of the backend service that this frontend proxies to.
app.config['BACKEND_URL'] = 'http://127.0.0.1:5000'

# Initialize SocketIO, used to push processing-status events to the browser.
socketio = SocketIO(app, cors_allowed_origins="*", ping_timeout=120, ping_interval=25)

@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')

@app.route('/chat')
def chat():
    """Serve the chat page, optionally restoring an existing session."""
    return render_template('chat.html',
                           session_id=request.args.get('session_id'))

@app.route('/draw')
def draw():
    """Serve the drawing page, optionally restoring an existing session."""
    return render_template('draw.html',
                           session_id=request.args.get('session_id'))

@app.route('/api/message', methods=['POST'])
def send_message():
    """Forward a chat/draw message to the backend (non-streaming fallback API).

    Transient backend failures (5xx / network errors) are retried a few
    times, with 'processing_status' SocketIO events emitted so the browser
    can show progress.  If the backend stays unavailable, a mock response is
    returned so the frontend demo keeps working.

    Returns:
        A JSON response: either the backend's reply, or a simulated reply
        containing 'response', 'session_id', 'generation_params' and
        'reasoning_content'.
    """
    # Tolerate missing or non-JSON request bodies instead of raising 400/415.
    data = request.get_json(silent=True) or {}
    user_message = data.get('message', '')
    session_id = data.get('session_id', None)
    mode = data.get('mode', 'chat')                      # 'chat' or 'draw'
    model = data.get('model', 'deepseek-chat')
    advanced_settings = data.get('advanced_settings', None)

    max_retries = 3   # maximum number of attempts against the backend
    retry_count = 0
    retry_delay = 3   # seconds to wait between attempts

    def _notify(payload):
        """Emit a processing_status SocketIO event when a session id exists."""
        if session_id:
            payload['session_id'] = session_id
            socketio.emit('processing_status', payload)

    _notify({'status': 'started', 'message': '正在处理您的请求...'})

    while retry_count < max_retries:
        try:
            request_data = {
                'message': user_message,
                'session_id': session_id,
                'mode': mode,
                'model': model
            }
            # Only forward advanced settings when the client supplied them.
            if advanced_settings:
                request_data['advanced_settings'] = advanced_settings

            response = requests.post(
                f"{app.config['BACKEND_URL']}/api/chat",
                json=request_data,
                timeout=90  # model calls can be slow; allow up to 90 s
            )

            if response.status_code == 200:
                _notify({'status': 'completed'})
                return jsonify(response.json())

            # 4xx responses are not transient -- stop retrying and fall
            # through to the mock reply.
            if response.status_code < 500:
                break

            # 5xx: server-side error, retry after a short pause.
            retry_count += 1
            if retry_count < max_retries:
                _notify({'status': 'retrying',
                         'retry': retry_count,
                         'max_retries': max_retries})
                time.sleep(retry_delay)
                continue

        except requests.RequestException:
            # Network-level failure, retry after a short pause.
            retry_count += 1
            if retry_count < max_retries:
                _notify({'status': 'connection_error',
                         'retry': retry_count,
                         'max_retries': max_retries})
                time.sleep(retry_delay)
                continue

    # All attempts failed -- tell the client and build a simulated response.
    _notify({'status': 'failed'})

    # For draw mode, hand back default generation parameters merged with any
    # client-side advanced settings.
    generation_params = None
    if mode == 'draw':
        generation_params = {
            'prompt': user_message,
            'negative_prompt': '模糊, 变形',
            'batch_size': 4,
            'width': 512,
            'height': 512,
            'steps': 30,
            'guidance_scale': 7.5
        }
        if advanced_settings:
            for key, value in advanced_settings.items():
                if key and value is not None:
                    generation_params[key] = value

    # Simulated reasoning trace for the deep-thinking model.
    reasoning_content = None
    if model == 'deepseek-reasoner':
        reasoning_content = "这是一个模拟的深度思考过程。在实际环境中，这里会显示模型的推理过程。\n\n首先，我需要理解用户的问题...\n然后，我会分析可能的解决方案...\n最后，我会给出最合理的答案。"

    # Fallback session id: md5 gives a stable, sign-free hex token, unlike
    # the previous str(hash(...))[:8] which could include a '-' and varied
    # between interpreter runs (PYTHONHASHSEED).
    fallback_id = 'temp_session_' + hashlib.md5(
        user_message.encode('utf-8')).hexdigest()[:8]

    return jsonify({
        'response': '抱歉，后端服务暂时不可用，但前端演示仍然可以继续。这是一个模拟的回复。',
        'session_id': session_id or fallback_id,
        'generation_params': generation_params,
        'reasoning_content': reasoning_content
    })

@app.route('/api/deepseek/stream')
def stream_chat():
    """Proxy the backend's DeepSeek SSE stream to the browser.

    On success the backend byte stream is forwarded verbatim.  When the
    backend is unreachable or answers with an error status, an SSE 'error'
    event is emitted followed by a simulated reply (including a simulated
    reasoning phase for the 'deepseek-reasoner' model) so the demo UI stays
    functional.  The two failure paths previously duplicated ~80 lines of
    mock-stream code; that logic now lives in one nested generator.
    """
    message = request.args.get('message', '')
    session_id = request.args.get('session_id', None)
    mode = request.args.get('mode', 'chat')
    model = request.args.get('model', 'deepseek-chat')
    advanced_settings = request.args.get('advanced_settings', None)

    backend_url = f"{app.config['BACKEND_URL']}/api/deepseek/stream"

    # Forward all query parameters to the backend.
    params = {
        'message': message,
        'session_id': session_id,
        'mode': mode,
        'model': model
    }
    if advanced_settings:
        params['advanced_settings'] = advanced_settings

    def sse(payload):
        """Format a dict as a single SSE 'data:' event."""
        return f"data: {json.dumps(payload)}\n\n"

    def mock_stream():
        """Yield a simulated SSE reply used when the backend is unavailable."""
        reasoning = None
        if model == 'deepseek-reasoner':
            # Simulated reasoning phase: start signal, chunked content,
            # then the content-start separator.
            yield sse({'type': 'reasoning_start', 'session_id': session_id})
            time.sleep(0.2)  # brief pause before the first chunk

            reasoning = "这是一个模拟的深度思考过程。在实际环境中，这里会显示模型的推理过程。\n\n首先，我需要理解用户的问题...\n然后，我会分析可能的解决方案...\n最后，我会给出最合理的答案。"
            for i in range(0, len(reasoning), 10):  # 10 characters per event
                yield sse({'type': 'reasoning_chunk',
                           'content': reasoning[i:i + 10],
                           'session_id': session_id})
                time.sleep(0.05)

            yield sse({'type': 'content_start', 'session_id': session_id})

        # Simulated final answer, drip-fed to mimic streaming.
        mock_response = "抱歉，后端服务暂时不可用，但前端演示仍然可以继续。这是一个模拟的回复。"
        for i in range(0, len(mock_response), 5):  # 5 characters per event
            yield sse({'type': 'chunk',
                       'content': mock_response[i:i + 5],
                       'session_id': session_id})
            time.sleep(0.05)

        # Completion event carries the full reply plus any reasoning text.
        yield sse({'type': 'complete',
                   'content': mock_response,
                   'reasoning_content': reasoning,
                   'session_id': session_id})

    def generate():
        try:
            response = requests.get(backend_url, params=params,
                                    stream=True, timeout=90)

            if response.status_code == 200:
                # Forward the backend SSE stream verbatim.
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        yield chunk
            else:
                yield sse({'type': 'error',
                           'content': f'后端服务返回错误: HTTP {response.status_code}',
                           'session_id': session_id})
                yield from mock_stream()

        except requests.RequestException as e:
            yield sse({'type': 'error',
                       'content': f'连接后端服务失败: {str(e)}',
                       'session_id': session_id})
            yield from mock_stream()

    return Response(stream_with_context(generate()), mimetype='text/event-stream')

@app.route('/api/generate', methods=['POST'])
def generate_images():
    """Forward an image-generation request to the backend.

    Returns:
        The backend's JSON result on success, otherwise a JSON error object
        with HTTP status 500.
    """
    # Tolerate missing or non-JSON request bodies instead of raising 400/415.
    data = request.get_json(silent=True) or {}

    # Accept both {'params': {...}} and a flat parameter dict from the client.
    params = data.get('params', data)

    batch_size = params.get('batch_size', 1)
    print(f"前端收到图片生成请求: {params}")
    print(f"批量生成数量(batch_size): {batch_size}")

    try:
        print(f"发送请求到后端: {app.config['BACKEND_URL']}/api/generate")
        response = requests.post(
            f"{app.config['BACKEND_URL']}/api/generate",
            json=params,
            timeout=240  # image generation is slow; allow up to 240 s
        )

        print(f"后端响应状态码: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            print(f"成功获取后端响应，返回{len(result.get('images', []))}张图片")
            return jsonify(result)

        error_msg = f"图片生成失败，状态码: {response.status_code}"
        print(error_msg)
        try:
            # Log a structured error body when the backend returned JSON;
            # was a bare `except:` which also swallowed system exceptions.
            error_detail = response.json()
            print(f"错误详情: {error_detail}")
        except ValueError:
            print(f"响应内容: {response.text}")

        return jsonify({
            'error': error_msg,
            'status_code': response.status_code
        }), 500

    except requests.RequestException as e:
        error_msg = f'连接后端服务失败: {str(e)}'
        print(error_msg)
        return jsonify({
            'error': error_msg
        }), 500

@app.route('/api/history', methods=['GET'])
def get_history():
    """Proxy the session-history list from the backend.

    Falls back to an empty list when the backend is unavailable.
    """
    try:
        resp = requests.get(f"{app.config['BACKEND_URL']}/api/history",
                            timeout=90)
        if resp.status_code == 200:
            return jsonify(resp.json())
    except requests.RequestException:
        pass

    # Backend unreachable or errored -- report no sessions.
    return jsonify({'sessions': []})

@app.route('/api/history/<session_id>', methods=['GET'])
def get_session(session_id):
    """Proxy one session's history from the backend.

    Falls back to an empty chat session when the backend is unavailable.
    """
    url = f"{app.config['BACKEND_URL']}/api/history/{session_id}"
    try:
        resp = requests.get(url, timeout=90)
        if resp.status_code == 200:
            payload = resp.json()
            print(f"成功获取会话历史: {session_id}")
            return jsonify(payload)
    except requests.RequestException as err:
        print(f"获取会话历史失败: {str(err)}")

    # Empty-session fallback so the UI still renders.
    return jsonify({
        'messages': [],
        'mode': 'chat',
        'model': 'deepseek-chat',
        'generation_params': None
    })

@app.route('/api/history/<session_id>', methods=['DELETE'])
def delete_session(session_id):
    """Ask the backend to delete a session.

    Deletion is best-effort: the frontend is always told it succeeded.
    """
    url = f"{app.config['BACKEND_URL']}/api/history/{session_id}"
    try:
        if requests.delete(url, timeout=5).status_code == 200:
            return jsonify({'success': True})
    except requests.RequestException:
        pass

    # Even on failure, report success so the UI removes the entry.
    return jsonify({'success': True})

@socketio.on('connect')
def handle_connect():
    """Log each new WebSocket connection."""
    print("Client connected: " + request.sid)

@socketio.on('message')
def handle_message(data):
    """Relay a client WebSocket message toward the backend."""
    try:
        print(f"收到WebSocket消息: {data}")
        # Forwarding to the backend WebSocket is not implemented here;
        # the mechanism depends on how the backend exposes its socket API.
    except Exception as exc:
        print(f"处理WebSocket消息时出错: {exc}")
        # Notify only the sender that their message could not be handled.
        socketio.emit('error', {'message': '消息处理失败'}, room=request.sid)

@socketio.on('disconnect')
def handle_disconnect():
    """Log each WebSocket disconnection."""
    print("Client disconnected: " + request.sid)

@app.route('/static/images/<path:filename>')
def backend_static_images(filename):
    """Proxy a generated image from the backend's static directory.

    Args:
        filename: Path (relative to /static/images/) of the requested image.

    Returns:
        A streamed image response, or a plain-text error with 404/500.
    """
    # BUG FIX: the URL previously ended in the literal placeholder
    # "(unknown)" instead of interpolating `filename`, so every request
    # fetched the same nonexistent resource.
    backend_url = f"{app.config['BACKEND_URL']}/static/images/{filename}"

    try:
        # Fetch the image from the backend and stream it through without
        # buffering the whole file in memory.
        response = requests.get(backend_url, stream=True, timeout=5)

        if response.status_code == 200:
            return Response(
                stream_with_context(response.iter_content(chunk_size=1024)),
                # .get() avoids a KeyError when the backend omits the header.
                content_type=response.headers.get('Content-Type', 'image/png')
            )
        return f"图片获取失败: HTTP {response.status_code}", 404
    except requests.RequestException as e:
        return f"连接后端服务失败: {str(e)}", 500

@app.route('/api/download-image')
def download_image():
    """Proxy an image download so the browser avoids cross-origin issues."""
    image_url = request.args.get('url')
    print(f"收到下载图片请求，URL: {image_url}")

    if not image_url:
        print("错误：URL参数缺失")
        return "URL参数缺失", 400

    try:
        # Resolve backend-relative URLs against the configured backend host.
        if image_url.startswith('/'):
            image_url = f"{app.config['BACKEND_URL']}{image_url}"
            print(f"转换为后端URL: {image_url}")

        # Timestamp-based ASCII filename sidesteps header-encoding problems.
        safe_filename = f"campus_image_{int(time.time())}.png"
        print(f"安全文件名: {safe_filename}")

        print(f"开始下载图片: {image_url}")
        upstream = requests.get(image_url, stream=True, timeout=10)

        if upstream.status_code != 200:
            print(f"图片获取失败: HTTP {upstream.status_code}")
            return f"图片获取失败: {upstream.status_code}", 404

        content_type = upstream.headers.get('Content-Type', 'image/png')
        print(f"内容类型: {content_type}")

        # Stream the bytes through and mark the response as an attachment
        # (RFC 6266) so the browser shows a save dialog.
        download = Response(upstream.iter_content(chunk_size=1024),
                            content_type=content_type)
        download.headers["Content-Disposition"] = f'attachment; filename="{safe_filename}"'
        print(f"生成下载响应，文件名: {safe_filename}")
        return download

    except Exception as e:
        print(f"下载图片异常: {str(e)}")
        return f"下载图片失败: {str(e)}", 500

if __name__ == '__main__':
    # Bind address and port come from the environment, with local defaults.
    host = os.environ.get('HOST', '127.0.0.1')
    port = int(os.environ.get('PORT', 5001))
    print(f"Starting SD-LoRA校园图片生成系统 on http://{host}:{port}")

    # Run under the SocketIO server so WebSocket handlers work.
    socketio.run(app, debug=True, host=host, port=port)