# Load the model directly and generate text from a test string
from transformers import AutoTokenizer, AutoModelForCausalLM

# Download the tokenizer and model weights from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("Nbardy/mini-mistral")
model = AutoModelForCausalLM.from_pretrained("Nbardy/mini-mistral")

# Tokenize the test string; calling the tokenizer directly also returns
# the attention mask, which generate() expects alongside the input IDs
input_text = "This is a test string"
inputs = tokenizer(input_text, return_tensors="pt")

# Generate a continuation; max_new_tokens bounds the output length
output = model.generate(**inputs, max_new_tokens=50)

# Decode the generated token IDs back into text, dropping special tokens
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_text)
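
# A minimal variant (a sketch, not part of the original snippet): the call
# above uses greedy decoding, which is deterministic. Passing do_sample=True
# with a temperature samples from the model's distribution instead, giving
# varied continuations on each run. The parameter values are illustrative.
sampled = model.generate(
    **inputs,
    max_new_tokens=50,
    do_sample=True,   # sample tokens instead of taking the argmax
    temperature=0.8,  # illustrative value; lower means more conservative
    top_p=0.95,       # nucleus sampling cutoff (illustrative)
)
print(tokenizer.decode(sampled[0], skip_special_tokens=True))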