# -*- coding: utf-8 -*-
# time: 2025/5/10 09:18
# file: tf01.py
# author: hanson
"""
小模型推理
Pythia-70m需要约1GB GPU内存

"""
import torch
from datasets import load_dataset

from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments

from base.tansformers_pip import generator
from turn_found.small_tf.tf02_dataSet_modify import get_modify_dataSet

# ===== Fine-tuning Test Code =====
def generate_response(prompt, model, tokenizer, max_length=100):
    """Generate a sampled completion for *prompt* with a causal LM.

    Args:
        prompt: Input text to feed to the tokenizer.
        model: A Hugging Face causal language model exposing ``.generate``.
        tokenizer: The tokenizer matching *model*.
        max_length: Cap on total sequence length (prompt + generated tokens).

    Returns:
        str: The decoded output with special tokens stripped. Note the
        decoded text includes the prompt, since the whole sequence is decoded.
    """
    # GPT-NeoX/Pythia tokenizers ship without a pad token; padding=True
    # below raises ValueError unless one is supplied first.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    # Keep inputs on the same device as the model (CPU or GPU).
    device = next(model.parameters()).device
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # Inference only: no autograd graph needed.
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=max_length,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            # Explicit pad id avoids the "Setting pad_token_id to eos" warning.
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Test the fine-tuned model
# The same prompt is repeated: with do_sample=True each generation may
# differ, so repeats probe the variance of the sampled answers.
test_questions = ["中国中山大学在哪里?"] * 3


# Evaluate on test dataset

# Load saved model for testing (verifies save/load works)
print("\nTesting saved model loading...")
loaded_model = AutoModelForCausalLM.from_pretrained("./my_finetuned")
loaded_tokenizer = AutoTokenizer.from_pretrained("./my_finetuned")
# Inference only: switch off dropout and other train-time behavior.
loaded_model.eval()
# GPT-NeoX/Pythia tokenizers have no pad token by default; generate_response
# tokenizes with padding=True, which raises ValueError without one.
if loaded_tokenizer.pad_token is None:
    loaded_tokenizer.pad_token = loaded_tokenizer.eos_token

test_question = "请用中文介绍自己?"
prompt = f"question: {test_question}\nanswer:"
response = generate_response(prompt, loaded_model, loaded_tokenizer)
print(f"Q: {test_question}")
print(f"A: {response}")