#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
ERNIE-4.5-0.3B-Paddle模型推理程序
根据rule.md和demo.md的要求开发
"""

import json
import torch
from transformers import AutoModelForCausalLM

def load_tokenizer_from_config(model_path):
    """Build a SentencePiece tokenizer from the files under *model_path*.

    Reads ``tokenizer_config.json`` (special-token settings) and loads the
    ``tokenizer.model`` SentencePiece file.

    Returns:
        (SentencePieceProcessor, dict) on success, or (None, None) on any
        failure (missing files, missing ``sentencepiece`` package, ...).
    """
    try:
        # Imported lazily so the module loads even without sentencepiece.
        import sentencepiece as spm

        config_path = f"{model_path}/tokenizer_config.json"
        with open(config_path, "r", encoding="utf-8") as cfg_file:
            config = json.load(cfg_file)

        processor = spm.SentencePieceProcessor()
        processor.load(f"{model_path}/tokenizer.model")
        return processor, config
    except Exception as e:
        # Best-effort loader: report and signal failure via the sentinel pair.
        print(f"加载tokenizer时出错: {e}")
        return None, None

def apply_chat_template(messages, tokenizer_config):
    """Render a chat transcript into the ERNIE prompt format.

    Mirrors the chat_template from tokenizer_config.json: the prompt opens
    with the CLS token, user/system turns end with a newline, assistant
    turns end with the SEP token, and a trailing ``"Assistant: "`` invites
    the model to generate. Messages with unknown roles are skipped.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` dicts.
        tokenizer_config: dict that may override ``cls_token``/``sep_token``.

    Returns:
        The fully rendered prompt string.
    """
    cls_token = tokenizer_config.get("cls_token", "<|begin_of_sentence|>")
    sep_token = tokenizer_config.get("sep_token", "<|end_of_sentence|>")

    # Per-role renderers; roles absent from this table contribute nothing.
    renderers = {
        "user": lambda content: f"User: {content}\n",
        "assistant": lambda content: f"Assistant: {content}{sep_token}",
        "system": lambda content: f"{content}\n",
    }

    pieces = [cls_token + "\n"]
    for message in messages:
        render = renderers.get(message["role"])
        if render is not None:
            pieces.append(render(message["content"]))

    # Generation prompt: cue the model to answer as the assistant.
    pieces.append("Assistant: ")
    return "".join(pieces)

def encode_text(text, sp_model, max_length=2048, pad_token_id=0):
    """Encode *text* into left-padded model inputs of exactly *max_length*.

    Bug fix vs. the previous version: the attention mask is now derived
    from the padding *positions*, not from comparing token ids against 0.
    The old ``attention_mask[input_ids == 0] = 0`` also masked genuine
    content tokens whose id happened to be 0 (e.g. ``<unk>`` in many
    SentencePiece vocabularies), silently hiding them from the model.

    Args:
        text: the prompt string to encode.
        sp_model: object with an ``encode(str) -> list[int]`` method
            (a SentencePieceProcessor in this script).
        max_length: fixed sequence length; longer inputs are truncated,
            shorter ones are left-padded.
        pad_token_id: id used for padding (defaults to 0, matching the
            previous hard-coded behavior).

    Returns:
        dict with ``input_ids`` and ``attention_mask`` tensors of shape
        (1, max_length); mask is 0 over padding and 1 over real tokens.
    """
    token_ids = sp_model.encode(text)

    # Truncate from the right if the prompt is too long.
    if len(token_ids) > max_length:
        token_ids = token_ids[:max_length]

    # Left-pad so the generated continuation is adjacent to the prompt.
    pad_len = max_length - len(token_ids)
    padded_ids = [pad_token_id] * pad_len + token_ids

    input_ids = torch.tensor([padded_ids], dtype=torch.long)

    # Mask by position: only the leading pad_len slots are padding.
    attention_mask = torch.ones_like(input_ids)
    attention_mask[0, :pad_len] = 0

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask
    }

def decode_text(token_ids, sp_model):
    """Decode token ids into text, dropping id-0 (pad) tokens first.

    Args:
        token_ids: iterable of ints produced by the model.
        sp_model: object with a ``decode(list[int]) -> str`` method.

    Returns:
        The decoded string.
    """
    # Strip the pad id (0) before decoding; this mirrors the left-padding
    # convention used by encode_text.
    kept = []
    for token_id in token_ids:
        if token_id != 0:
            kept.append(token_id)
    return sp_model.decode(kept)

def test_ernie_model():
    """
    测试ERNIE-4.5-0.3B-Paddle模型的基本功能
    """
    print("正在加载ERNIE-4.5-0.3B-Paddle模型...")
    
    # 指定本地模型路径
    model_path = "./ERNIE-4.5-0.3B-Paddle"
    
    try:
        # 加载tokenizer
        sp_model, tokenizer_config = load_tokenizer_from_config(model_path)
        if sp_model is None:
            print("无法加载tokenizer")
            return
            
        # 加载模型
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            trust_remote_code=True,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            low_cpu_mem_usage=True
        )
        
        print("模型加载成功！")
        
        # 准备输入
        prompt = "请简要介绍一下大语言模型。"
        messages = [
            {"role": "user", "content": prompt}
        ]
        
        # 应用聊天模板
        text = apply_chat_template(messages, tokenizer_config)
        print(f"应用模板后的文本: {text}")
        
        # 编码输入
        model_inputs = encode_text(text, sp_model)
        
        # 如果使用CUDA，将输入移到GPU上
        if torch.cuda.is_available():
            model_inputs = {k: v.to(model.device) for k, v in model_inputs.items()}
        
        print("正在生成文本...")
        
        # 生成文本
        with torch.no_grad():
            generated_ids = model.generate(
                input_ids=model_inputs["input_ids"],
                attention_mask=model_inputs["attention_mask"],
                max_new_tokens=512,
                temperature=0.7,
                top_p=0.9,
                do_sample=True
            )
        
        # 解码输出
        # 只解码新生成的部分
        new_ids = generated_ids[0][len(model_inputs["input_ids"][0]):]
        generated_text = decode_text(new_ids.tolist(), sp_model)
        
        print("输入提示:", prompt)
        print("生成结果:")
        print(generated_text)
        print("\n" + "="*50 + "\n")
        
        # 测试另一个示例
        prompt2 = "什么是人工智能？"
        messages2 = [
            {"role": "user", "content": prompt2}
        ]
        
        text2 = apply_chat_template(messages2, tokenizer_config)
        model_inputs2 = encode_text(text2, sp_model)
        
        # 如果使用CUDA，将输入移到GPU上
        if torch.cuda.is_available():
            model_inputs2 = {k: v.to(model.device) for k, v in model_inputs2.items()}
        
        print("正在生成第二个文本...")
        
        # 生成文本
        with torch.no_grad():
            generated_ids2 = model.generate(
                input_ids=model_inputs2["input_ids"],
                attention_mask=model_inputs2["attention_mask"],
                max_new_tokens=512,
                temperature=0.7,
                top_p=0.9,
                do_sample=True
            )
        
        # 解码输出
        new_ids2 = generated_ids2[0][len(model_inputs2["input_ids"][0]):]
        generated_text2 = decode_text(new_ids2.tolist(), sp_model)
        
        print("输入提示:", prompt2)
        print("生成结果:")
        print(generated_text2)
        
    except Exception as e:
        print(f"测试过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    test_ernie_model()