import os
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Define the model and tokenizer paths
model_path = "./tiny-gpt2-model/models--sshleifer--tiny-gpt2/snapshots/5f91d94bd9cd7190a9f3216ff93cd1dd95f2c7be"
tokenizer_path = "./tiny-gpt2-model/models--sshleifer--tiny-gpt2/snapshots/5f91d94bd9cd7190a9f3216ff93cd1dd95f2c7be"
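# Note: these paths point into a local Hugging Face cache layout
# (models--<org>--<name>/snapshots/<revision>); adjust the snapshot hash if your
# local copy was downloaded at a different revision.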
# Verify the directory contents
if not os.path.exists(model_path) or not os.path.exists(tokenizer_path):
    print(f"Error: Directory not found at {model_path}")
    exit(1)
required_files = ["config.json", "pytorch_model.bin", "vocab.json", "merges.txt"]
for file in required_files:
    if not os.path.exists(os.path.join(model_path, file)):
        print(f"Error: {file} not found in {model_path}")
        exit(1)
# Load the tokenizer and model
try:
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, local_files_only=True)
    model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float32, local_files_only=True)
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    exit(1)
# Set pad_token_id to eos_token_id if not already set
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id
# Set model to evaluation mode
model.eval()
# Prepare input text
prompt = "Once upon a time"
inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to("cpu")
# Generate text
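# Sampling setup: do_sample=True with top_k/top_p/temperature produces varied
# continuations, no_repeat_ngram_size=2 blocks repeated bigrams, and passing
# pad_token_id explicitly avoids the missing-pad-token warning during generation.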
outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=50,
    num_return_sequences=1,
    no_repeat_ngram_size=2,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
    pad_token_id=tokenizer.eos_token_id
)
# Decode and print the generated text
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("Generated Text:", generated_text)
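# Optional alternative (assumes network access and a writable HF cache): the same
# tiny model can be pulled directly from the Hub instead of the local snapshot path
# used above, e.g.:
# tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
# model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")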