#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简单的Qwen3模型测试
直接使用llama-cpp-python测试向量化功能
"""

import os
from llama_cpp import Llama

def main(model_path=None):
    """Smoke-test embedding generation with a local Qwen3 GGUF model.

    Loads the model via llama-cpp-python in embedding mode and encodes a
    few short test strings, printing the vector dimension and a preview of
    each resulting embedding.

    Args:
        model_path: Optional path to the GGUF model file. When omitted,
            falls back to the ``QWEN_MODEL_PATH`` environment variable and
            then to the original hard-coded local cache location, so
            existing zero-argument callers keep working.

    Returns:
        None. All results are reported via stdout; a missing model file or
        a load failure is printed rather than raised.
    """
    print("🧪 简单Qwen3向量化测试")
    print("=" * 40)

    # Resolve the model path: explicit argument > env var > historical default.
    if model_path is None:
        model_path = os.environ.get(
            "QWEN_MODEL_PATH",
            "/Users/baimu/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B-GGUF/Qwen3-Embedding-8B-Q8_0.gguf",
        )

    if not os.path.exists(model_path):
        print(f"❌ 模型文件不存在: {model_path}")
        return

    print(f"📁 模型文件: {os.path.basename(model_path)}")
    print(f"📊 文件大小: {os.path.getsize(model_path) / (1024**3):.2f} GB")

    try:
        print("\n🔄 加载模型...")
        model = Llama(
            model_path=model_path,
            n_ctx=512,  # a small context window is enough for short test strings
            n_threads=4,  # keep the thread count modest for a smoke test
            embedding=True,  # embedding mode instead of text generation
            verbose=False,
        )
        print("✅ 模型加载成功!")

        # Encode a few short strings and report each embedding's dimension.
        print("\n🔍 测试向量化...")
        test_texts = ["测试", "hello", "人工智能"]

        for text in test_texts:
            try:
                print(f"  正在编码: '{text}'")
                embedding = model.embed(text)

                # An empty/None result is a failure; truthiness covers both.
                if embedding:
                    print(f"    ✅ 成功! 维度: {len(embedding)}, 前3维: {embedding[:3]}")
                else:
                    print(f"    ❌ 失败: 返回空向量")

            except Exception as e:
                # Report per-text failures but keep testing the remaining texts.
                print(f"    ❌ 错误: {str(e)}")

        print("\n🎉 测试完成!")

    except Exception as e:
        print(f"❌ 模型加载失败: {str(e)}")

if __name__ == "__main__":
    main()
