#!/usr/bin/env python3
from llama_cpp import Llama
import time
import os

def main(model_path="/model/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
         prompt="What is the capital of France?",
         max_tokens=50):
    """Cold-start benchmark for a local GGUF model via llama-cpp-python.

    Loads the model, runs one cold (first) inference, and prints the load
    time, inference time, and their share of total wall-clock time.

    Args:
        model_path: Path to the GGUF model file to benchmark.
        prompt: Prompt used for the first inference.
        max_tokens: Maximum number of tokens to generate.

    Returns:
        Tuple of (total_time, load_time, inference_time) in seconds.

    Raises:
        FileNotFoundError: If ``model_path`` does not exist.
    """
    print("=== 大模型冷启动测试开始 ===")
    start_total = time.time()

    # Fail fast with a clear message instead of letting Llama() crash
    # with an opaque backend error on a missing file.
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"model file not found: {model_path}")

    # 1. Model load phase
    start_load = time.time()
    print("1. 正在加载模型...")

    llm = Llama(
        model_path=model_path,
        n_ctx=2048,
        n_threads=4,
        n_gpu_layers=0,  # CPU-only run
        verbose=False
    )

    load_time = time.time() - start_load
    print(f"   模型加载时间: {load_time:.3f}s")

    # 2. First (cold) inference phase
    start_inference = time.time()
    print("2. 执行首次推理...")

    output = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=0.7,
        top_p=0.9,
        echo=False
    )

    inference_time = time.time() - start_inference

    # Extract the generated text defensively, but catch only the lookup
    # failures that can actually occur — a broad Exception would mask bugs.
    try:
        response = output['choices'][0]['text'].strip()
    except (KeyError, IndexError, TypeError):
        response = str(output)

    print(f"   问题: {prompt}")
    print(f"   回答: {response}")

    # dict.get never raises, so no try/except is needed here; guard only
    # against output not being a dict at all.
    if isinstance(output, dict):
        tokens = output.get('usage', {}).get('completion_tokens')
    else:
        tokens = None
    print(f"   推理时间: {inference_time:.3f}s")
    print(f"   生成令牌数: {tokens}")

    # 3. Simulated memory-initialization phase (placeholder, effectively 0s).
    start_mem = time.time()
    print("3. 模拟内存初始化...")
    mem_time = time.time() - start_mem
    print(f"   内存初始化时间: {mem_time:.3f}s")

    total_time = time.time() - start_total

    print("=========================================")
    print(f"总执行时间: {total_time:.3f} 秒")
    print(f"模型加载占比: {(load_time / total_time)*100:.1f}%")
    print(f"推理计算占比: {(inference_time / total_time)*100:.1f}%")
    print("=========================================")

    return total_time, load_time, inference_time

# Run the benchmark only when executed as a script, not on import.
if __name__ == "__main__":
    main()
