# Monkey-patch torch.load so checkpoints saved by older PyTorch keep loading
# under PyTorch >= 2.6, where `weights_only` defaults to True.
# SECURITY NOTE: weights_only=False enables full unpickling, which can execute
# arbitrary code — only load checkpoint files from trusted sources.
import torch

original_torch_load = torch.load

def patched_torch_load(*args, **kwargs):
    """Delegate to the original torch.load, defaulting weights_only=False.

    An explicit `weights_only=` passed by the caller is respected.
    """
    kwargs.setdefault('weights_only', False)
    return original_torch_load(*args, **kwargs)

torch.load = patched_torch_load

import time  # 确保导入了 time 模块
from typing import Optional, List, Dict, Any
from contextlib import asynccontextmanager
from pydantic import BaseModel
from fastapi import FastAPI, HTTPException, Header, UploadFile, File, Form
import uvicorn
from fastapi.responses import StreamingResponse
from transformers import AutoTokenizer
import os
import sys
import argparse
import threading

# Configurable paths (override via environment variables)
LOCAL_MODEL_PATH = os.environ.get("LOCAL_MODEL_PATH", "/data/Matrix/yy/gjj_pre_merge-v31/xrayglm/XrayGLM/checkpoints/checkpoints-XrayGLM-3000")
XRAYGLM_PATH = os.environ.get("XRAYGLM_PATH", "/data/Matrix/yy/gjj_pre_merge-v31/xrayglm/XrayGLM")
API_TOKEN = os.environ.get("API_TOKEN", None)  # when unset, authentication is disabled

# Module-level state, populated by load_xrayglm_model() at startup
model = None
tokenizer = None
device = None

# Propagate the visible-GPU selection to the environment.
# NOTE(review): `available_gpus` is computed but never read elsewhere in this
# file — confirm whether multi-GPU support was intended.
gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0")  # default to GPU 0
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
available_gpus = len(gpu_ids.split(','))

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan handler: load the model on startup, log on shutdown."""
    # Load the model at startup; a failure is logged but does not stop the
    # service — inference endpoints will answer 503 until the model loads.
    try:
        load_xrayglm_model()
    except Exception as e:
        print(f"⚠️ 启动时模型加载失败，但服务将继续运行: {str(e)}")
        # Service keeps running; the model is simply not loaded
    
    yield  # application serves requests while suspended here
    
    # Cleanup on shutdown (currently just a log line)
    print("正在关闭模型推理服务...")

# FastAPI application; `lifespan` handles model load/unload
app = FastAPI(title="XrayGLM Inference Service", lifespan=lifespan)

class XrayGLMRequest(BaseModel):
    """Inference request payload for XrayGLM."""
    input_text: str  # the user's question / prompt
    image_path: str  # server-local filesystem path to the X-ray image
    history: Optional[List[List[str]]] = None  # XrayGLM-style history: [["question1", "answer1"], ...]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.7
    top_k: Optional[int] = 30
    max_length: Optional[int] = 512  # total sequence budget; may be raised server-side
    repetition_penalty: Optional[float] = 1.2

class XrayGLMResponse(BaseModel):
    """Inference response: generated text plus success/error status."""
    text: str
    success: bool = True
    error: Optional[str] = None  # populated only when success is False

def load_xrayglm_model():
    """Load the XrayGLM model and ChatGLM tokenizer into module globals.

    Sets the module-level ``model``, ``tokenizer`` and ``device``. On any
    failure all three are reset to None and the service keeps running; the
    inference endpoints then answer 503 until a successful load.
    """
    global model, tokenizer, device
    
    print("=" * 60)
    print("🚀 正在加载 XrayGLM 模型...")
    print(f"📁 模型路径: {LOCAL_MODEL_PATH}")
    print("=" * 60)
    
    # Make the XrayGLM repo importable (provides finetune_XrayGLM and model.chat)
    if XRAYGLM_PATH not in sys.path:
        sys.path.insert(0, XRAYGLM_PATH)
    
    # Select the GPU from the environment (default: card 0).
    # NOTE(review): setting CUDA_VISIBLE_DEVICES this late has no effect if
    # CUDA was already initialized in this process — confirm the load order.
    gpu_id = os.environ.get("GPU_ID", "0")
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    
    # The tokenizer path used to be hard-coded twice below; keep the same
    # default but allow overriding via TOKENIZER_PATH (mirrors LOCAL_MODEL_PATH).
    tokenizer_path = os.environ.get(
        "TOKENIZER_PATH",
        "/home/Matrix/.cache/huggingface/hub/models--THUDM--chatglm-6b/snapshots/bf0f5cfb575eebebf9b655c5861177acfee03f16",
    )
    
    try:
        from sat.model import AutoModel
        from sat.model.mixins import CachedAutoregressiveMixin
        from finetune_XrayGLM import FineTuneVisualGLMModel
        
        use_cuda = torch.cuda.is_available()
        
        # Minimal args namespace expected by FineTuneVisualGLMModel; the
        # token ids correspond to the ChatGLM-6B vocabulary.
        model_args = argparse.Namespace(
            fp16=True,
            skip_init=True,
            use_gpu_initialization=use_cuda,
            device='cuda' if use_cuda else 'cpu',
            image_length=32,  # number of image tokens prepended to the prompt
            eva_args={},
            qformer_args={},
            vocab_size=130528,
            pad_token_id=3,
            bos_token_id=130004,
            eos_token_id=130005,
            mask_token_id=130000,
            gmask_token_id=130001,
        )
        
        # Load the checkpoint
        print("⏳ 加载XrayGLM模型...")
        model, model_args = FineTuneVisualGLMModel.from_pretrained(
            LOCAL_MODEL_PATH,
            args=model_args
        )
        print("✅ 模型加载成功")
        
        # Move to GPU and cast to FP16 for inference
        if use_cuda:
            print("⏳ 将模型移动到GPU...")
            model = model.cuda().half()
            print("✅ 模型已移动到GPU")
        
        # Unwrap DataParallel so mixins attach to the underlying module
        if isinstance(model, torch.nn.DataParallel):
            model = model.module
        
        model = model.eval()
        
        # Required for sat's cached autoregressive generation
        print("⏳ 添加auto-regressive mixin...")
        model.add_mixin('auto-regressive', CachedAutoregressiveMixin())
        print("✅ Mixin添加成功")
        
        print(f"正在从{tokenizer_path}加载tokenizer...")
        # NOTE(review): `vocab_size` is not a standard from_pretrained kwarg;
        # kept to preserve original behavior — verify the ChatGLM tokenizer
        # actually honors it.
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_path,
            trust_remote_code=True,
            vocab_size=130528
        )
        print("✅ Tokenizer加载成功")
        
        device = torch.device("cuda:0" if use_cuda else "cpu")
        print(f"✅ 模型准备完成. Device: {device}, Using GPU ID: {gpu_id}")
        
    except Exception as e:
        print(f"❌ 模型加载失败: {str(e)}")
        import traceback
        traceback.print_exc()
        # Reset globals so /health and the endpoints report an unloaded model
        model = None
        tokenizer = None
        device = None
        print("⚠️ 服务将继续运行，但模型未加载。推理请求将返回错误。")

def check_auth(authorization: Optional[str]):
    """Validate the Authorization header against API_TOKEN.

    No-op when API_TOKEN is unset; otherwise raises 401 unless the header
    is exactly ``Bearer <API_TOKEN>``.
    """
    if not API_TOKEN:
        return
    expected = f"Bearer {API_TOKEN}"
    if authorization != expected:
        raise HTTPException(status_code=401, detail="Unauthorized")

@app.post("/generate", response_model=XrayGLMResponse)
def generate(req: XrayGLMRequest, authorization: Optional[str] = Header(None)):
    """Run a single (non-streaming) XrayGLM inference.

    Returns an XrayGLMResponse; model-side errors are reported in-band via
    ``success=False``, while auth (401), missing image (404) and unloaded
    model (503) conditions raise HTTPException.
    """
    check_auth(authorization)
    
    if not model or not tokenizer:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    try:
        # chat() lives in the XrayGLM repo put on sys.path at model-load time
        from model import chat
        
        if not os.path.exists(req.image_path):
            raise HTTPException(status_code=404, detail=f"Image file not found: {req.image_path}")
        
        # Vocabulary size, used only for diagnostics in the IndexError path
        vocab_size = None
        if hasattr(tokenizer, 'vocab_size'):
            vocab_size = tokenizer.vocab_size
        elif hasattr(tokenizer, '__len__'):
            vocab_size = len(tokenizer)
        
        # Rough input-length estimate: 32 image tokens + ~10 special tokens +
        # roughly 0.5 token per character of history/question text.
        estimated_input_length = 32 + 10
        if req.history:
            for hist_item in req.history:
                if isinstance(hist_item, list) and len(hist_item) >= 2:
                    q_len = len(str(hist_item[0])) if hist_item[0] else 0
                    a_len = len(str(hist_item[1])) if hist_item[1] else 0
                    estimated_input_length += (q_len + a_len) // 2 + 20
        estimated_input_length += len(req.input_text) // 2 + 10
        
        # BUG FIX: max_length is Optional, so a client sending
        # {"max_length": null} made max(None, int) raise TypeError.
        # Resolve None to the 512 default before comparing.
        requested_max = req.max_length if req.max_length is not None else 512
        # Ensure room for generation: at least 2x the estimated input, min 512
        safe_max_length = max(requested_max, estimated_input_length * 2, 512)
        if safe_max_length > requested_max:
            print(f"⚠️  调整 max_length: {requested_max} -> {safe_max_length} (输入长度估算: {estimated_input_length})")
        
        with torch.no_grad():
            try:
                response, _, _ = chat(
                    req.image_path,
                    model,
                    tokenizer,
                    req.input_text,
                    history=req.history or [],
                    image=None,
                    max_length=safe_max_length,  # adjusted budget, see above
                    top_p=req.top_p,
                    top_k=req.top_k,
                    temperature=req.temperature,
                    english=False,
                    repetition_penalty=req.repetition_penalty
                )
            except IndexError as idx_err:
                # Out-of-bounds indices usually mean the (adjusted) max_length
                # was still smaller than the real tokenized input length.
                error_msg = str(idx_err)
                print(f"❌ 索引越界错误: {error_msg}")
                
                if "out of bounds for dimension 0 with size" in error_msg:
                    import re
                    match = re.search(r'index (\d+) is out of bounds for dimension 0 with size (\d+)', error_msg)
                    if match:
                        index_val = int(match.group(1))
                        size_val = int(match.group(2))
                        print(f"   序列长度问题: 尝试访问索引 {index_val}，但序列大小只有 {size_val}")
                        print(f"   这通常是因为 max_length ({safe_max_length}) 小于输入序列长度")
                        print(f"   建议: 增加 max_length 或减少历史记录长度")
                
                if vocab_size:
                    print(f"   词汇表大小: {vocab_size}, 有效索引范围: [0, {vocab_size-1}]")
                
                import traceback
                traceback.print_exc()
                return XrayGLMResponse(
                    text="", 
                    success=False, 
                    error=f"Index out of bounds: {error_msg}. This may be due to max_length being too small for the input sequence."
                )
        
        return XrayGLMResponse(text=response, success=True)
        
    except HTTPException:
        # BUG FIX: the broad handler below used to swallow the 404 raised
        # above and return it as a 200 body with success=False.
        raise
    except Exception as e:
        print(f"❌ 推理失败: {str(e)}")
        import traceback
        traceback.print_exc()
        return XrayGLMResponse(text="", success=False, error=str(e))

@app.post("/generate_stream")
def generate_stream(req: XrayGLMRequest, authorization: Optional[str] = Header(None)):
    """Run XrayGLM inference and stream the reply as plain-text chunks.

    Note: generation itself is not incremental — the full reply is produced
    first, then re-chunked (10 chars per chunk, 10 ms apart) to simulate
    streaming. Errors inside the generator are yielded in-band as text.
    """
    check_auth(authorization)
    
    if not model or not tokenizer:
        raise HTTPException(status_code=503, detail="Model not loaded")
    
    try:
        # chat() lives in the XrayGLM repo put on sys.path at model-load time
        from model import chat
        
        if not os.path.exists(req.image_path):
            raise HTTPException(status_code=404, detail=f"Image file not found: {req.image_path}")
        
        # Vocabulary size, used only for diagnostics in the IndexError path
        vocab_size = None
        if hasattr(tokenizer, 'vocab_size'):
            vocab_size = tokenizer.vocab_size
        elif hasattr(tokenizer, '__len__'):
            vocab_size = len(tokenizer)
        
        # Rough input-length estimate: 32 image tokens + ~10 special tokens +
        # roughly 0.5 token per character of history/question text.
        estimated_input_length = 32 + 10
        if req.history:
            for hist_item in req.history:
                if isinstance(hist_item, list) and len(hist_item) >= 2:
                    q_len = len(str(hist_item[0])) if hist_item[0] else 0
                    a_len = len(str(hist_item[1])) if hist_item[1] else 0
                    estimated_input_length += (q_len + a_len) // 2 + 20
        estimated_input_length += len(req.input_text) // 2 + 10
        
        # BUG FIX: max_length is Optional, so a client sending
        # {"max_length": null} made max(None, int) raise TypeError.
        # Resolve None to the 512 default before comparing.
        requested_max = req.max_length if req.max_length is not None else 512
        # Ensure room for generation: at least 2x the estimated input, min 512
        safe_max_length = max(requested_max, estimated_input_length * 2, 512)
        if safe_max_length > requested_max:
            print(f"⚠️  调整 max_length: {requested_max} -> {safe_max_length} (输入长度估算: {estimated_input_length})")
        
        def generate_chunks():
            """Yield the generated reply in fixed-size chunks."""
            try:
                with torch.no_grad():
                    try:
                        response, _, _ = chat(
                            req.image_path,
                            model,
                            tokenizer,
                            req.input_text,
                            history=req.history or [],
                            image=None,
                            max_length=safe_max_length,  # adjusted budget, see above
                            top_p=req.top_p,
                            top_k=req.top_k,
                            temperature=req.temperature,
                            english=False,
                            repetition_penalty=req.repetition_penalty
                        )
                    except IndexError as idx_err:
                        # Usually means max_length was still smaller than the
                        # real tokenized input length.
                        error_msg = str(idx_err)
                        print(f"❌ 索引越界错误: {error_msg}")
                        
                        if "out of bounds for dimension 0 with size" in error_msg:
                            import re
                            match = re.search(r'index (\d+) is out of bounds for dimension 0 with size (\d+)', error_msg)
                            if match:
                                index_val = int(match.group(1))
                                size_val = int(match.group(2))
                                print(f"   序列长度问题: 尝试访问索引 {index_val}，但序列大小只有 {size_val}")
                                print(f"   这通常是因为 max_length ({safe_max_length}) 小于输入序列长度")
                        
                        if vocab_size:
                            print(f"   词汇表大小: {vocab_size}, 有效索引范围: [0, {vocab_size-1}]")
                        
                        import traceback
                        traceback.print_exc()
                        yield f"\n\n❌ 生成失败: Index out of bounds: {error_msg}"
                        yield f"\n这通常是因为 max_length 小于输入序列长度，已自动调整但可能仍不足。"
                        return
                
                # Re-chunk the finished reply with a short delay per chunk
                chunk_size = 10  # characters per chunk
                for i in range(0, len(response), chunk_size):
                    chunk = response[i:i+chunk_size]
                    yield chunk
                    time.sleep(0.01)  # pacing to simulate streaming
                    
            except Exception as e:
                yield f"\n\n❌ 生成失败: {str(e)}"
        
        return StreamingResponse(generate_chunks(), media_type="text/plain")
        
    except HTTPException:
        # BUG FIX: the broad handler below used to convert the 404 raised
        # above into a 500, masking the real status code.
        raise
    except Exception as e:
        print(f"❌ 流式推理失败: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/health")
def health():
    """Liveness probe: report model/tokenizer load state and active device."""
    has_model = model is not None
    has_tokenizer = tokenizer is not None
    return {
        "status": "healthy",
        "model_loaded": has_model,
        "tokenizer_loaded": has_tokenizer,
        "device": str(device) if device else None,
    }

if __name__ == "__main__":
    # Launch the service directly (single worker: the model lives in-process)
    listen_port = int(os.environ.get("PORT", 8010))
    uvicorn.run("xrayglm_inference_service:app", host="0.0.0.0", port=listen_port, workers=1)