Spaces:
Sleeping
Sleeping
File size: 1,566 Bytes
9deb214 4230190 22a3dda f600486 8fceb5b 22a3dda f600486 4230190 f600486 8fceb5b 4230190 f600486 8fceb5b f600486 8fceb5b 4230190 8fceb5b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
# App header shown at the top of the Streamlit page.
# NOTE(review): the "Know Your Medicine" prefix looks copied from another
# app — this Space generates multiplication tables; confirm the intended title.
st.header("Know Your Medicine - Multiplication Table Generator")
# Load the model and tokenizer
@st.cache_resource
def load_model():
    """Load and cache the GPT-2 model and tokenizer.

    Decorated with ``st.cache_resource`` so the expensive download/load
    runs once per server process instead of on every Streamlit rerun.

    Returns:
        tuple: ``(model, tokenizer)`` — the causal-LM model and its
        matching tokenizer from the Hugging Face hub.
    """
    model_name = "gpt2"  # Using GPT-2 for faster builds
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer
# Load (or fetch from Streamlit's resource cache) the model and tokenizer.
model, tokenizer = load_model()
# Number whose multiplication table we ask for; Streamlit clamps it to [1, 100].
number = st.number_input("Enter a number:", min_value=1, max_value=100, value=5)
# Natural-language prompt fed to the language model.
# NOTE(review): GPT-2 is a free-text generator, not a calculator — the output
# is not guaranteed to be a correct table; verify this is acceptable.
prompt = f"Give me the multiplication table of {number} up to 12."
# Generate text based on the input when the user clicks the button.
if st.button("Generate Multiplication Table"):
    # Tokenize the input prompt into PyTorch tensors (CPU for simplicity).
    tokenized_input = tokenizer(prompt, return_tensors="pt")
    input_ids = tokenized_input["input_ids"]
    attention_mask = tokenized_input["attention_mask"]

    # Generate the response from the model.
    # pad_token_id is set to EOS because GPT-2 has no dedicated pad token;
    # passing it explicitly also silences the transformers warning.
    response_token_ids = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=150,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated tokens back to text, dropping special tokens.
    generated_text = tokenizer.decode(response_token_ids[0], skip_special_tokens=True)

    # Display the generated multiplication table.
    st.write("Generated Multiplication Table:")
    st.write(generated_text)
|