# -*- coding: utf-8 -*-
# time: 2025/4/21 09:19
# file: qwen_distill_myModel.py
# author: hanson
import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Select GPU 0 when CUDA is available, otherwise fall back to CPU (-1).
_device = 0 if torch.cuda.is_available() else -1

# Load the distilled model and its tokenizer from the local checkpoint directory.
distilled_model = AutoModelForCausalLM.from_pretrained("./distilled_medical_model")
distilled_tokenizer = AutoTokenizer.from_pretrained("./distilled_medical_model")

# Wrap the distilled model in a text-generation pipeline for easy inference.
medical_chatbot = pipeline(
    "text-generation",
    model=distilled_model,
    tokenizer=distilled_tokenizer,
    device=_device,
)


# 测试对话 - 适配 instruction/input 格式
def test_medical_dialogue():
    """Interactive console loop for testing the distilled medical model.

    Repeatedly prompts the user for an instruction (and an optional input),
    formats them into the ``Instruction/Input/Output`` template, generates a
    completion with the module-level ``medical_chatbot`` pipeline, and prints
    only the model's answer. Typing '退出' at the instruction prompt exits.
    """
    print("医疗对话测试 (输入'退出'结束)")
    while True:
        instruction = input("Instruction (输入指令): ")
        if instruction.lower() == '退出':
            break

        user_input = input("Input (输入内容，若无直接回车): ")

        # Build the prompt template; the "Input:" line is omitted when the
        # user supplies no input text.
        if user_input:
            prompt = f"Instruction: {instruction}\nInput: {user_input}\nOutput:"
        else:
            prompt = f"Instruction: {instruction}\nOutput:"

        response = medical_chatbot(
            prompt,
            # max_new_tokens bounds only the generated continuation; the
            # previous max_length=200 counted prompt tokens too, so a long
            # prompt could leave little or no room for the answer.
            max_new_tokens=200,
            num_return_sequences=1,
            # do_sample=True is required for temperature/top_p to take
            # effect; without it the pipeline greedy-decodes and silently
            # ignores both sampling parameters.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )

        # The pipeline returns prompt + continuation; strip the prompt
        # prefix so only the model's answer is shown.
        generated_text = response[0]['generated_text']
        output = generated_text[len(prompt):].strip()
        print("Output:", output)


# Run the interactive test loop only when executed as a script, not when
# this module is imported by other code.
if __name__ == "__main__":
    test_medical_dialogue()