#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
测试ERNIE-4.5-0.3B-Paddle模型的程序
根据rule.md和demo.md的要求开发
"""

import torch
import sys
import os

def _generate_reply(model, tokenizer, prompt, use_paddle):
    """Run one chat-style generation round and return the decoded reply.

    Args:
        model: Causal LM loaded via PaddleNLP or HuggingFace Transformers.
        tokenizer: Matching tokenizer exposing ``apply_chat_template``.
        prompt: The user message to send to the model.
        use_paddle: True when the PaddleNLP backend is active; selects the
            tensor framework for tokenization and skips torch-only handling.

    Returns:
        The generated continuation, with special tokens stripped.
    """
    import contextlib

    messages = [
        {"role": "user", "content": prompt}
    ]

    # Render the conversation into the model's expected prompt format.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    # PaddleNLP tokenizers return Paddle tensors via "pd"; HF uses "pt".
    model_inputs = tokenizer(
        [text],
        add_special_tokens=False,
        return_tensors="pd" if use_paddle else "pt"
    )

    # Only torch tensors need an explicit move to the model's GPU device.
    if not use_paddle and torch.cuda.is_available():
        model_inputs = model_inputs.to(model.device)

    # torch.no_grad() saves memory but only applies to the torch backend.
    grad_guard = contextlib.nullcontext() if use_paddle else torch.no_grad()
    with grad_guard:
        generated_ids = model.generate(
            model_inputs.input_ids,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True
        )

    # NOTE(review): assumes generate() echoes the prompt tokens before the
    # reply, as HF causal LMs do -- confirm for the PaddleNLP backend.
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    return tokenizer.decode(output_ids, skip_special_tokens=True).strip()


def test_ernie_model():
    """Smoke-test basic text generation with ERNIE-4.5-0.3B-Paddle.

    Prefers the PaddleNLP backend and falls back to HuggingFace
    Transformers when PaddleNLP is not installed. Generates replies for
    two sample prompts and prints them. Errors are caught, printed with a
    traceback, and not re-raised.
    """
    print("正在加载ERNIE-4.5-0.3B-Paddle模型...")

    # Make sibling modules importable (trust_remote_code may need them).
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

    # Local model directory.
    model_path = "./ERNIE-4.5-0.3B-Paddle"

    try:
        # Prefer PaddleNLP; fall back to HF Transformers if unavailable.
        try:
            from paddlenlp.transformers import AutoModelForCausalLM, AutoTokenizer
            use_paddle = True
            print("使用PaddleNLP加载模型...")
        except ImportError:
            from transformers import AutoModelForCausalLM, AutoTokenizer
            use_paddle = False
            print("使用HuggingFace Transformers加载模型...")

        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

        if use_paddle:
            # Fix: PaddleNLP's from_pretrained does not accept the
            # torch-specific torch_dtype / device_map keywords, so they
            # must only be passed on the HF Transformers path.
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                trust_remote_code=True
            )
        else:
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                trust_remote_code=True,
                # Half precision on GPU, full precision on CPU.
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None
            )

        print("模型加载成功！")

        # First sample prompt.
        prompt = "请简要介绍一下大语言模型。"
        print("正在生成文本...")
        generated_text = _generate_reply(model, tokenizer, prompt, use_paddle)

        print("输入提示:", prompt)
        print("生成结果:")
        print(generated_text)
        print("\n" + "="*50 + "\n")

        # Second sample prompt, exercising the same pipeline again.
        prompt2 = "什么是人工智能？"
        print("正在生成第二个文本...")
        generated_text2 = _generate_reply(model, tokenizer, prompt2, use_paddle)

        print("输入提示:", prompt2)
        print("生成结果:")
        print(generated_text2)

    except Exception as e:
        # Best-effort smoke test: report the failure but do not re-raise.
        print(f"测试过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    test_ernie_model()