#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
增强版ERNIE-4.5-0.3B-Paddle测试程序
结合Infinity-Instruct数据集，实现更丰富的功能
"""

import json
import os
import random
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

class ERNIEEnhancedTester:
    """Interactive tester for a locally stored ERNIE-4.5-0.3B model.

    Loads the model/tokenizer via Hugging Face Transformers, optionally
    samples prompts from a local Infinity-Instruct JSON dump, runs a short
    batch demo over those samples, and finally drops into an interactive
    prompt loop. All user-facing messages are intentionally kept in Chinese.
    """

    def __init__(self, model_path="./ERNIE-4.5-0.3B-Paddle"):
        """
        Initialize the tester.

        Args:
            model_path (str): Local filesystem path to the model directory.
        """
        self.model_path = model_path
        self.model = None                    # populated by load_model()
        self.tokenizer = None                # populated by load_model()
        self.infinity_instruct_data = None   # populated by load_infinity_instruct_dataset()

    def load_model(self):
        """
        Load the ERNIE model and tokenizer from ``self.model_path``.

        Returns:
            bool: True on success, False if loading failed (error printed).
        """
        print("正在加载ERNIE-4.5-0.3B-Paddle模型...")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                trust_remote_code=True,
                # fp16 halves memory on GPU; fall back to fp32 on CPU where
                # fp16 inference is slow/unsupported.
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                low_cpu_mem_usage=True
            )
            print("✓ 模型加载成功！")
            return True
        except Exception as e:
            print(f"✗ 模型加载失败: {e}")
            print("提示：请确保已正确安装PaddlePaddle或使用最新版本的Transformers库")
            return False

    def load_infinity_instruct_dataset(self, sample_size=5):
        """
        Load a small sample from a local Infinity-Instruct JSON dump.

        Reads the first ``*.json`` file found under ``./Infinity-Instruct``
        and keeps at most ``sample_size`` randomly chosen records in
        ``self.infinity_instruct_data``.

        Args:
            sample_size (int): Maximum number of records to keep.

        Returns:
            bool: True if data was loaded, False otherwise (error printed).
        """
        print("正在加载Infinity-Instruct数据集示例...")
        try:
            # The dataset is expected as a sibling directory of the script.
            instruct_dir = "./Infinity-Instruct"
            if not os.path.exists(instruct_dir):
                print("✗ 未找到Infinity-Instruct数据集")
                return False

            # Pick up any JSON file; only the first one found is read.
            json_files = [f for f in os.listdir(instruct_dir) if f.endswith('.json')]
            if not json_files:
                print("✗ Infinity-Instruct目录中未找到JSON文件")
                return False

            sample_file = os.path.join(instruct_dir, json_files[0])
            with open(sample_file, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Down-sample only when the file holds more than we need.
            if len(data) > sample_size:
                self.infinity_instruct_data = random.sample(data, sample_size)
            else:
                self.infinity_instruct_data = data

            print(f"✓ 成功加载{len(self.infinity_instruct_data)}条示例数据")
            return True
        except Exception as e:
            print(f"✗ 加载Infinity-Instruct数据集时出错: {e}")
            return False

    def apply_chat_template(self, messages):
        """
        Render a message list into model-ready prompt text.

        Prefers the tokenizer's own chat template; falls back to a plain
        ``User:`` / ``Assistant:`` transcript if the tokenizer does not
        support ``apply_chat_template`` (or is not loaded).

        Args:
            messages (list): List of ``{"role": ..., "content": ...}`` dicts.

        Returns:
            str: Prompt text ending with a generation cue for the assistant.
        """
        try:
            text = self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
            return text
        except Exception as e:
            # Fallback: hand-built transcript. Roles other than
            # user/assistant (e.g. system) are intentionally skipped to
            # preserve the original behavior.
            print(f"应用聊天模板时出错，使用默认格式: {e}")
            text = ""
            for message in messages:
                if message["role"] == "user":
                    text += f"User: {message['content']}\n"
                elif message["role"] == "assistant":
                    text += f"Assistant: {message['content']}\n"
            text += "Assistant: "
            return text

    def generate_text(self, prompt, max_new_tokens=512, temperature=0.7, top_p=0.9):
        """
        Generate a completion for ``prompt`` with the loaded model.

        Args:
            prompt (str): User prompt (wrapped in the chat template).
            max_new_tokens (int): Maximum number of tokens to generate.
            temperature (float): Sampling temperature.
            top_p (float): Nucleus-sampling probability mass.

        Returns:
            str: Generated text, or "" if the model is not loaded or
                 generation failed (error printed).
        """
        if not self.model or not self.tokenizer:
            print("错误：模型未加载")
            return ""

        try:
            messages = [{"role": "user", "content": prompt}]
            text = self.apply_chat_template(messages)

            # The chat template already added special tokens, hence
            # add_special_tokens=False.
            model_inputs = self.tokenizer(
                [text],
                add_special_tokens=False,
                return_tensors="pt"
            )

            # Move inputs next to the model when it lives on GPU.
            if torch.cuda.is_available():
                model_inputs = model_inputs.to(self.model.device)

            print("正在生成文本...")

            # Bug fix: pass an explicit pad_token_id so generate() does not
            # have to guess one (silences the HF warning and makes padding
            # behavior deterministic).
            pad_token_id = self.tokenizer.pad_token_id
            if pad_token_id is None:
                pad_token_id = self.tokenizer.eos_token_id

            with torch.no_grad():
                generated_ids = self.model.generate(
                    model_inputs.input_ids,
                    # Bug fix: pass the attention mask explicitly instead of
                    # letting generate() infer it from pad_token_id, which is
                    # ambiguous when pad == eos.
                    attention_mask=model_inputs.attention_mask,
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    do_sample=True,
                    pad_token_id=pad_token_id
                )

            # Strip the prompt tokens; decode only the newly generated tail.
            output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
            generated_text = self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()

            return generated_text
        except Exception as e:
            print(f"文本生成过程中出错: {e}")
            return ""

    def test_with_instruct_samples(self):
        """
        Run generation over the first three loaded Infinity-Instruct samples.

        Each sample's first "human" turn is used as the prompt; input and
        output are printed truncated to keep the console readable.
        """
        if not self.infinity_instruct_data:
            print("错误：未加载Infinity-Instruct数据集")
            return

        print("\n" + "="*60)
        print("使用Infinity-Instruct数据集样本进行测试")
        print("="*60)

        for i, sample in enumerate(self.infinity_instruct_data[:3]):  # limit to first 3 samples
            print(f"\n--- 测试样本 {i+1} ---")

            # Extract the first human turn from the conversation.
            user_query = None
            for conversation in sample.get("conversations", []):
                if conversation.get("from") == "human":
                    user_query = conversation.get("value")
                    break

            if user_query:
                print(f"输入: {user_query[:100]}..." if len(user_query) > 100 else f"输入: {user_query}")

                response = self.generate_text(user_query)
                if response:
                    print(f"生成结果: {response[:200]}..." if len(response) > 200 else f"生成结果: {response}")
                else:
                    print("生成失败")
            else:
                print("未找到有效的用户查询")

    def interactive_mode(self):
        """
        Interactive REPL: read prompts from stdin until the user quits.

        Exits on 'quit'/'exit'/'退出' or Ctrl-C; other errors are printed
        and the loop continues.
        """
        print("\n" + "="*60)
        print("交互模式 - 输入 'quit' 退出")
        print("="*60)

        while True:
            try:
                user_input = input("\n请输入您的问题: ").strip()
                if user_input.lower() in ['quit', 'exit', '退出']:
                    break

                if user_input:
                    response = self.generate_text(user_input)
                    if response:
                        print(f"\nERNIE回答:\n{response}")
                    else:
                        print("抱歉，未能生成有效回答。")
                else:
                    print("请输入有效问题。")

            except KeyboardInterrupt:
                print("\n\n程序被用户中断。")
                break
            except Exception as e:
                print(f"处理输入时出错: {e}")

    def run_demo(self):
        """
        Full demo flow: load model, load dataset, batch test, then REPL.

        Aborts early if the model cannot be loaded; a missing dataset is
        tolerated (the batch test just reports it).
        """
        print("ERNIE-4.5-0.3B-Paddle增强测试程序")
        print("="*60)

        if not self.load_model():
            print("无法加载模型，程序退出。")
            return

        self.load_infinity_instruct_dataset()

        self.test_with_instruct_samples()

        self.interactive_mode()

        print("\n感谢使用ERNIE-4.5-0.3B-Paddle增强测试程序！")

def main():
    """Program entry point: construct a tester and run the full demo."""
    ERNIEEnhancedTester().run_demo()

# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()