import sys
import os  # NOTE(review): `os` appears unused in this script — confirm before removing
from pathlib import Path
import time

# Add the project root directory to the Python path so the `backend`
# package resolves when this script is executed directly.
sys.path.append(str(Path(__file__).parent))

from backend.common.config import config_manager
from backend.llm import llm_manager
from backend.logger_setup import get_logger

# Module-level logger for this verification script.
logger = get_logger('verify_ollama_fix')

# Verification script body: checks that the OllamaProvider fix works by
# exercising provider acquisition, one-shot generation, streaming
# generation, and embedding generation, printing timings for each step.
print("=== 验证OllamaProvider修复 ===")

# Give the backend service a moment to finish starting up before probing it.
time.sleep(2)

# Report the currently active LLM provider configuration.
print(f"\n激活的提供商: {config_manager.llm_providers.active_provider}")
print(f"Ollama模型: {config_manager.llm_providers.ollama.model}")
print(f"Ollama基础URL: {config_manager.llm_providers.ollama.base_url}")

# Try to obtain an Ollama provider instance and run the test suite.
try:
    print("\n尝试获取Ollama提供商实例...")
    # Pass the Ollama config dict explicitly to get_provider.
    ollama_config = config_manager.llm_providers.ollama.model_dump()
    provider = llm_manager.get_provider('ollama', ollama_config)
    print("成功获取Ollama提供商实例!")
    print(f"使用的模型: {provider.model_name}")

    # --- One-shot generation test, with response-time measurement ---
    test_message = "你好，这是一个测试消息。"
    print(f"\n发送测试消息: {test_message}")

    start_time = time.time()
    response = provider.generate(test_message)
    end_time = time.time()

    print("\nOllama响应成功!")
    print(f"响应内容: {response}")
    print(f"响应时间: {end_time - start_time:.2f}秒")

    # --- Streaming generation test: collect chunks and print each one ---
    print("\n测试流式生成...")
    stream_start = time.time()
    stream_responses = []

    for chunk in provider.generate_stream("请简单介绍一下自己。"):
        if chunk:
            print(f"流式响应片段: {chunk}")
            stream_responses.append(chunk)

    stream_end = time.time()
    print(f"流式响应完成! 总时间: {stream_end - stream_start:.2f}秒")
    print(f"完整流式响应: {''.join(stream_responses)}")

    # --- Embedding generation test ---
    print("\n测试嵌入生成...")
    embedding = provider.generate_embedding("测试嵌入文本")
    print(f"嵌入向量维度: {len(embedding)}")
    print(f"嵌入向量前5个值: {embedding[:5]}")

    print("\n=== 验证成功完成 ===")
except Exception as e:
    # Record the full traceback through the module logger (it was created
    # at the top of the file but previously never used).
    logger.exception("OllamaProvider verification failed")
    print(f"验证失败: {str(e)}")
    import traceback
    print(f"详细错误信息: {traceback.format_exc()}")
    print("\n=== 验证失败 ===")
    # Exit nonzero so shell callers / CI can detect the failed verification
    # (previously the script exited 0 even on failure).
    sys.exit(1)