# app.py
from flask import Flask, request, jsonify
from flask_cors import CORS
from transformers import AutoTokenizer
import threading
import logging
import time
from datetime import datetime
import hashlib
from collections import deque

app = Flask(__name__)
# Enable CORS for all routes (TODO: restrict allowed origins before production)
CORS(app)

# Logging setup - verbose configuration for development
logging.basicConfig(
    level=logging.DEBUG,  # DEBUG level for maximum detail in output
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

# Global tokenizer state: lazily loaded on first use, guarded by tokenizer_lock
tokenizer = None
tokenizer_lock = threading.Lock()
tokenizer_name = "Qwen/Qwen2.5-7B-Instruct"

# ===== 配置区 =====
class Config:
    """Tunable application settings."""
    TOKEN_THRESHOLD = 4000       # token total that triggers summary generation
    SESSION_TIMEOUT = 1800       # session expiry, in seconds
    TOKEN_COST_PER_CHAR = 0.25   # rough tokens-per-character estimate (mixed CJK/Latin text)

# ===== 数据结构 =====
class SessionState:
    """Per-session bookkeeping: the authoritative message counter plus a
    holding queue for out-of-order messages."""

    def __init__(self):
        self.expected_count = 0          # next message number the backend expects (authoritative N)
        self.token_count = 0             # running token total for this session
        self.pending_queue = deque()     # out-of-order holding area: (message seq, message content)
        self.last_updated = time.time()  # timestamp of last activity
        # Fields added later:
        self.message_count = 0           # number of messages already seen in this session
        self.message_content = {}        # stored content for this session

    def __str__(self):
        """Human-readable snapshot for log output."""
        stamp = datetime.fromtimestamp(self.last_updated).strftime('%Y-%m-%d %H:%M:%S')
        return (f"SessionState(expected_count={self.expected_count}, "
                f"token_count={self.token_count}, "
                f"pending_queue_size={len(self.pending_queue)}, "
                f"last_updated={stamp})")

# ===== Global state store =====
# NOTE: in-memory only — replace with Redis or a database for production use
sessions = {
    # example key: '9904e55093f24e5096767570df76a5dc'
} # key: session_id, value: SessionState

# ===== 工具函数 =====
def calculate_token_count(text):
    """Return the token count for *text*.

    Accepts a plain string, a message dict with a 'content' field, None, or
    any other value (coerced via str()). Lazily loads the HuggingFace
    tokenizer on first use; if loading fails or encoding errors out, falls
    back to a character-based estimate.

    Args:
        text: the text (or message payload) to count tokens for.

    Returns:
        int: the token count (>= 1 when using the fallback estimate).
    """
    global tokenizer
    logger.debug(f"计算Token计数，输入类型: {type(text)}")

    # Normalize the input down to a plain string before tokenizing.
    if isinstance(text, dict) and 'content' in text:
        # Message dicts carry the real text in their 'content' field.
        text = text['content']
    # Fix: handle None *after* the dict extraction and *before* any str()
    # coercion — the original coerced None to the 4-char string "None" and
    # crashed on len() for {'content': None} payloads; its later None check
    # was dead code.
    if text is None:
        text = ""
        logger.debug("输入文本为None，已转换为空字符串")
    elif not isinstance(text, str):
        text = str(text)  # ints, floats, etc. become their string form
        logger.debug(f"输入文本非字符串，已转换为字符串: {text}")

    # Lazily load the tokenizer exactly once; the lock guards concurrent requests.
    with tokenizer_lock:
        if tokenizer is None:
            try:
                logger.info("尝试加载Tokenizer...")
                tokenizer = AutoTokenizer.from_pretrained(
                    tokenizer_name,
                    trust_remote_code=True
                )
                logger.info("✅ Tokenizer加载成功")
            except Exception as e:
                logger.error(f"❌ Tokenizer加载失败: {e}")
                tokenizer = None

    if tokenizer is None:
        # Fallback: rough character-based estimate, never below 1 token.
        estimated_tokens = max(1, int(len(text) * Config.TOKEN_COST_PER_CHAR))
        logger.debug(f"使用估算Token计数: {estimated_tokens} (字符长度 * {Config.TOKEN_COST_PER_CHAR})")
        return estimated_tokens

    try:
        encoded = tokenizer.encode(text)  # text is guaranteed to be a str here
        token_count = len(encoded)
        logger.debug(f"使用精确Token计数: {token_count}")
        return token_count
    except Exception as e:
        logger.error(f"Token计算错误: {e}")
        estimated_tokens = max(1, int(len(text) * Config.TOKEN_COST_PER_CHAR))
        logger.debug(f"Token计算出错，使用估算值: {estimated_tokens}")
        return estimated_tokens

def is_session_expired(session_state):
    """Return True if the session's idle time exceeds the configured timeout."""
    idle_seconds = time.time() - session_state.last_updated
    if idle_seconds <= Config.SESSION_TIMEOUT:
        return False
    # Only log when the session is actually expired (matches original behavior).
    logger.info(f"会话已过期，最后活动时间: {datetime.fromtimestamp(session_state.last_updated).strftime('%Y-%m-%d %H:%M:%S')}")
    return True

# ===== 核心业务逻辑 =====
def process_single_message(session_state, message_content, message_seq):
    """
    Core handling for a single message: count its tokens and fold them into
    the session total.

    Args:
        session_state: the SessionState to update in place.
        message_content: message text, or a dict carrying it in 'content'.
        message_seq: sequence number (used for logging only).

    Returns:
        tuple: (success: bool, tokens consumed: int, summary needed: bool)
    """
    # Initialize all locals up front so the final log/return never hits
    # an UnboundLocalError on the failure path.
    tokens_consumed = 0
    need_summary = False
    success = False

    logger.debug(f"处理单条消息 seq={message_seq}")

    try:
        # Pull the raw text out of a message dict when present.
        content_text = message_content
        if isinstance(message_content, dict) and 'content' in message_content:
            content_text = message_content['content']

        # Fix: the two log branches were byte-identical, and slicing a
        # non-str content (e.g. a dict without 'content') raised inside the
        # try, spuriously marking the message as failed. Build the preview
        # from a string form instead.
        preview = content_text if isinstance(content_text, str) else str(content_text)
        logger.debug(f"消息内容: '{preview[:50]}{'...' if len(preview) > 50 else ''}'")

        # Accumulate the token cost and refresh the activity timestamp.
        tokens_consumed = calculate_token_count(content_text)
        session_state.token_count += tokens_consumed
        session_state.last_updated = time.time()

        need_summary = session_state.token_count >= Config.TOKEN_THRESHOLD
        success = True

        if need_summary:
            logger.info(f"⚠️ Token计数达到阈值: {session_state.token_count} >= {Config.TOKEN_THRESHOLD}, 需要生成摘要")

    except Exception as e:
        logger.error(f"处理消息时发生错误: {e}")
        success = False

    logger.debug(f"消息处理完成: seq={message_seq}, tokens_consumed={tokens_consumed}, total_tokens={session_state.token_count}, need_summary={need_summary}")
    return (success, tokens_consumed, need_summary)

def generate_summary(session_id, session_state):
    """Produce a (mock) summary and reset the session's token accounting."""
    logger.info(f"生成摘要 session_id={session_id}, current_tokens={session_state.token_count}")

    # A real deployment would call an AI model API to produce the summary.
    summary_content = f"[摘要] 会话 {session_id} 的摘要内容 (基于Token计数: {session_state.token_count})"
    summary_tokens = calculate_token_count(summary_content)

    # Reset the session so only the summary's own tokens remain counted,
    # and drop anything still waiting in the queue.
    old_token_count, session_state.token_count = session_state.token_count, summary_tokens
    queue_size = len(session_state.pending_queue)
    session_state.pending_queue.clear()

    logger.info(f"摘要生成完成: summary_tokens={summary_tokens}, 重置前token_count={old_token_count}, 重置后token_count={session_state.token_count}, 清空队列(size={queue_size})")
    return summary_content

def process_pending_queue(session_state, session_id):
    """Drain the wait queue, processing messages that are now contiguous.

    Pops messages off the front of the (sorted) pending queue while the
    head's sequence number equals the session's expected count, advancing
    the counter and triggering a summary when the token threshold is hit.

    Args:
        session_state: the SessionState whose queue is drained.
        session_id: the session identifier (for logging / summary).

    Returns:
        list[dict]: per-message results, plus a summary entry when generated.
    """
    logger.debug(f"处理等待队列 session_id={session_id}, 队列大小: {len(session_state.pending_queue)}")

    results = []
    processed_count = 0

    while session_state.pending_queue and session_state.pending_queue[0][0] == session_state.expected_count:
        # Head of the queue is exactly the expected sequence number.
        seq, message_content = session_state.pending_queue.popleft()
        # Fix: queued content can be a dict (raw frontend payloads are queued
        # as-is) and dict[:30] raised an unhandled TypeError here — slice a
        # string form for the log preview instead.
        preview = message_content if isinstance(message_content, str) else str(message_content)
        logger.debug(f"从队列中处理消息 seq={seq}, content='{preview[:30]}...'")

        success, tokens, need_summary = process_single_message(session_state, message_content, seq)
        if success:
            session_state.expected_count += 1  # processed: advance the expected counter
            results.append({
                "seq": seq,
                "content": message_content,
                "tokens": tokens,
                "processed": True
            })
            processed_count += 1

            # Trigger a summary as soon as the threshold is crossed.
            if need_summary:
                summary = generate_summary(session_id, session_state)
                results.append({
                    "seq": "summary",
                    "content": summary,
                    "tokens": session_state.token_count,
                    "is_summary": True
                })
        else:
            results.append({
                "seq": seq,
                "content": message_content,
                "processed": False,
                "error": "处理失败"
            })

    if processed_count > 0:
        logger.info(f"从等待队列中处理了 {processed_count} 条消息")

    return results

# ===== API端点 =====
@app.route('/api/report-message', methods=['POST'])
def handle_message_report():
    """Core API: handle messages reported by the frontend.

    Expects JSON with: session_id, current_total (the frontend's running
    total, M) and new_messages (a list of new message payloads). Batches that
    are contiguous with the backend's expected counter are processed; batches
    ahead of it are queued; stale batches are ignored.

    Returns:
        JSON response with status "processed" / "queued" / "ignored" /
        "error" and the backend's authoritative expected_count.
    """
    try:
        data = request.get_json()
        session_id = data.get('session_id')
        current_total = data.get('current_total')  # frontend's running total (M)
        new_messages = data.get('new_messages')  # list of new message payloads

        logger.info(f"📨 收到前端消息: session_id={session_id}, current_total={current_total}")
        # Fix: removed debug lines that read .role/.date/.content attributes
        # on new_messages — it is a list, so they raised AttributeError and
        # turned every request into a 500.

        # Parameter validation
        if not session_id or current_total is None or not new_messages:
            logger.warning(f"❌ 缺少必要参数: session_id={session_id}, current_total={current_total}, new_messages={new_messages}")
            return jsonify({
                "status": "error",
                "message": "缺少必要参数: session_id, current_total 或 new_messages"
            }), 400

        # Get or create the session state
        if session_id not in sessions:
            logger.info(f"🆕 创建新会话: session_id={session_id}")
            sessions[session_id] = SessionState()

        session_state = sessions[session_id]

        # Reset the session if it has expired
        if is_session_expired(session_state):
            logger.info(f"🔄 会话已过期，重置会话: session_id={session_id}")
            sessions[session_id] = SessionState()
            session_state = sessions[session_id]

        expected = session_state.expected_count  # backend's expected counter (N)
        logger.debug(f"后端期望数量: expected_count={expected}, 前端当前总数: current_total={current_total}")

        # For a brand-new session, seed the counter with the sequence number
        # of the first message in this batch.
        if expected == 0:
            session_state.expected_count = current_total - len(new_messages) + 1
            expected = session_state.expected_count
            logger.info(f"📝 初始化期望值: expected_count={expected}")

        # Case 1: stale or duplicate report — everything already processed.
        if current_total < expected:
            logger.warning(f"⏮️ 过期或重复消息: current_total={current_total} < expected={expected}")
            return jsonify({
                "status": "ignored",
                "message": "重复或过时的消息",
                "expected_count": expected  # tell the frontend what the backend expects
            }), 200

        # Sequence number of the first message in this batch
        start_seq = current_total - len(new_messages) + 1
        logger.debug(f"消息起始序号: start_seq={start_seq}, 期望序号: expected={expected}")

        # Case 3: batch starts ahead of the expected counter — queue it.
        if start_seq > expected:
            logger.warning(f"📥 收到不连续消息，存入等待队列: start_seq={start_seq} > expected={expected}")

            queued_count = 0
            for i, msg_content in enumerate(new_messages):
                seq = start_seq + i
                # Only keep messages at or beyond the expected counter
                if seq >= expected:
                    session_state.pending_queue.append((seq, msg_content))
                    queued_count += 1
                    logger.debug(f"消息存入队列 seq={seq}")

            # Keep the queue ordered by sequence number
            session_state.pending_queue = deque(sorted(session_state.pending_queue, key=lambda x: x[0]))

            logger.info(f"📥 已存入队列: {queued_count} 条消息, 队列总大小: {len(session_state.pending_queue)}")

            return jsonify({
                "status": "queued",
                "message": "消息已存入等待队列",
                "expected_count": expected,
                "queued_count": len(session_state.pending_queue)
            }), 200

        # Case 2: batch is contiguous (start_seq == expected) or overlaps the
        # already-processed range (start_seq < expected <= current_total).
        # Fix: the overlap case previously fell through with no return value,
        # making Flask raise "view did not return a response"; now the
        # already-seen prefix is skipped and only the fresh tail is processed.
        skip = expected - start_seq  # messages already processed (0 when contiguous)
        fresh_messages = new_messages[skip:]
        if skip > 0:
            logger.warning(f"📥 批次与已处理范围重叠: start_seq={start_seq} < expected={expected}, 跳过前 {skip} 条")

        logger.info(f"✅ 收到连续消息，开始批量处理 {len(fresh_messages)} 条消息")

        processing_results = []
        batch_token_total = 0  # total tokens consumed by this batch

        # First pass: compute the token cost of the whole batch without
        # touching session state yet.
        for i, msg_content in enumerate(fresh_messages):
            seq = expected + i
            logger.debug(f"290 计算消息 seq={seq} 的Token")

            tokens_consumed = calculate_token_count(msg_content)
            batch_token_total += tokens_consumed

            # Stash this message's result for the response payload
            processing_results.append({
                "seq": seq,
                "processed": True,  # assumed success; per-message error handling could be added
                "tokens": tokens_consumed
            })

        # Commit the whole batch to the session state in one step.
        session_state.token_count += batch_token_total
        session_state.expected_count += len(fresh_messages)
        session_state.last_updated = time.time()

        # Decide on a summary once, based on the post-batch total.
        if session_state.token_count >= Config.TOKEN_THRESHOLD:
            logger.info(f"⚠️ 批量处理完成后Token计数达到阈值，需要生成摘要")
            summary_content = generate_summary(session_id, session_state)
            processing_results.append({
                "seq": "summary",
                "content": summary_content,
                "tokens": session_state.token_count,  # token count after the summary reset
                "is_summary": True
            })

        # Drain any queued messages that are now contiguous.
        logger.debug("处理完成后检查等待队列...")
        queue_results = process_pending_queue(session_state, session_id)

        logger.info(f"✅ 批量处理完成: 处理了 {len(fresh_messages)} 条消息, 消耗Token: {batch_token_total}, 当前总Token计数: {session_state.token_count}, 新期望值: {session_state.expected_count}")

        return jsonify({
            "status": "processed",
            "message": "消息已批量处理",
            "processed_messages": processing_results,
            "queue_processed": queue_results,
            "current_token_count": session_state.token_count,
            "expected_count": session_state.expected_count
        }), 200

    except Exception as e:
        logger.error(f"❌ 服务器内部错误: {str(e)}", exc_info=True)
        return jsonify({
            "status": "error",
            "message": f"服务器内部错误: {str(e)}"
        }), 500

@app.route('/api/session-status/<session_id>', methods=['GET'])
def get_session_status(session_id):
    """Return the current state of a session, or 404 if it is unknown."""
    logger.debug(f"获取会话状态: session_id={session_id}")

    state = sessions.get(session_id)
    if state is None:
        logger.warning(f"会话不存在: session_id={session_id}")
        return jsonify({"status": "not_found"}), 404

    logger.debug(f"会话状态: {state}")
    payload = {
        "expected_count": state.expected_count,
        "token_count": state.token_count,
        "queued_count": len(state.pending_queue),
        "last_updated": state.last_updated,
    }
    return jsonify(payload), 200

# ===== 定时任务: 清理过期会话 =====
def cleanup_expired_sessions():
    """Background task: drop sessions idle longer than SESSION_TIMEOUT.

    Runs forever on a daemon thread, sweeping every 5 minutes.
    """
    while True:
        time.sleep(300)  # sweep every 5 minutes
        now = time.time()
        # Fix: snapshot the items with list() — request-handler threads
        # mutate `sessions` concurrently, and iterating the live dict can
        # raise "dictionary changed size during iteration".
        expired_sessions = [
            sid for sid, state in list(sessions.items())
            if (now - state.last_updated) > Config.SESSION_TIMEOUT
        ]
        for sid in expired_sessions:
            # pop() tolerates a session already removed/reset by another thread
            sessions.pop(sid, None)
        if expired_sessions:
            logger.info(f"🧹 已清理 {len(expired_sessions)} 个过期会话")

# Start the background cleanup thread (daemon=True so it exits with the process)
cleanup_thread = threading.Thread(target=cleanup_expired_sessions, daemon=True)
cleanup_thread.start()

@app.route('/health', methods=['GET'])
def health_check():
    """Liveness endpoint: reports active session count and server time."""
    payload = {
        "status": "ok",
        "active_sessions": len(sessions),
        "timestamp": time.time(),
    }
    logger.debug(f"健康检查: {payload}")
    return jsonify(payload), 200

if __name__ == '__main__':
    logger.info("🚀 启动Flask服务器...")
    # NOTE(review): debug=True enables the werkzeug reloader/debugger —
    # confirm this is disabled before any production deployment.
    app.run(debug=True, port=5000)