#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Debug script for the Chinese-CLIP text-encoding path.

Loads the project config, initializes the Chinese-CLIP wrapper on CPU, and
prints intermediate tokenizer/model outputs to diagnose text encoding.
"""
import os
import sys
import torch
from pathlib import Path

# Add the project root to the Python path so project-local imports below resolve
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from configs.config import Config
from image_processing_modules.clip_model import ChineseClipModelWrapper
from utils.logger_config import get_logger_config

def debug_chinese_clip_text_encoding():
    """Step through the Chinese-CLIP text-encoding pipeline, printing each stage.

    Runs three probes: the raw processor output, the text tower's pooled
    output fed through the projection layer, and the model's public
    ``get_text_features`` API. Any failure is printed with a traceback.
    """
    print("调试Chinese-CLIP文本编码...")

    # Load run configuration from the local config file
    config = Config("configs/local_22_config.json")

    # Pull CLIP-related settings; defaults match the upstream HF checkpoint
    model_name = config.get("clip.model_name", "OFA-Sys/chinese-clip-vit-base-patch16")
    local_model_path = config.get("clip.local_model_path", "")
    offline_mode = config.get("clip.offline_mode", True)
    device = "cpu"  # force CPU so the debug run is reproducible anywhere

    try:
        print("正在初始化Chinese-CLIP模型...")
        model = ChineseClipModelWrapper(
            model_name=model_name,
            local_model_path=local_model_path,
            offline_mode=offline_mode,
            device=device,
        )
        print("模型初始化成功!")

        sample_text = "这是一只猫"
        print(f"测试文本: {sample_text}")

        # Probe 1: run the processor directly and inspect its raw tensors
        print("\n直接测试处理器...")
        inputs = model.processor(text=[sample_text], return_tensors="pt", padding=True)
        print(f"处理器输出 keys: {list(inputs.keys())}")
        for key, value in inputs.items():
            shape_or_type = value.shape if hasattr(value, "shape") else type(value)
            print(f"  {key}: {shape_or_type}")

        # Probe 2: call the text tower manually and project its pooled output
        print("\n测试文本特征提取器...")
        model.model = model.model.to(device)
        with torch.no_grad():
            moved_inputs = {name: tensor.to(device) for name, tensor in inputs.items()}
            # index 1 of the text-model output tuple is the pooler_output
            pooled_output = model.model.text_model(**moved_inputs)[1]
            print(f"pooled_output type: {type(pooled_output)}")
            print(f"pooled_output: {pooled_output}")

            if pooled_output is None:
                print("pooled_output is None!")
            else:
                text_features = model.model.text_projection(pooled_output)
                print(f"text_features shape: {text_features.shape}")

        # Probe 3: the high-level feature API, for comparison with probe 2
        print("\n尝试获取文本特征...")
        with torch.no_grad():
            moved_inputs = {name: tensor.to(device) for name, tensor in inputs.items()}
            text_features_direct = model.model.get_text_features(**moved_inputs)
            print(f"text_features_direct: {text_features_direct}")
            print(f"text_features_direct shape: {text_features_direct.shape}")

    except Exception as e:  # broad on purpose: this is a diagnostic script
        print(f"调试过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    get_logger_config(name="debug_chinese_clip").get_logger()  # 初始化日志
    debug_chinese_clip_text_encoding()