import streamlit as st
from transformers import pipeline


def main():
    st.title("GPT-Neo 2.7B Text Generator")

    # User input
    prompt = st.text_input("Enter a prompt:", "olympics")
    max_length = st.slider("Max Length", min_value=20, max_value=200, value=50)
    temperature = st.slider("Temperature", min_value=0.1, max_value=1.5, value=0.9, step=0.1)

    # Load the model with the PyTorch backend; device=-1 pins the pipeline to CPU
    try:
        generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', device=-1)

        if st.button("Generate Text"):
            with st.spinner("Generating..."):
                # do_sample=True enables temperature-controlled sampling
                res = generator(prompt, max_length=max_length, do_sample=True, temperature=temperature)
                generated_text = res[0]['generated_text']
                st.subheader("Generated Text")
                st.write(generated_text)
    except Exception as e:
        st.error(f"Error loading model: {e}")
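# --- Optional sketch, not part of the original app ---------------------------
# Streamlit reruns the whole script on every widget interaction, so the
# pipeline above is rebuilt each time. A common pattern is to cache the loader
# with st.cache_resource (available in Streamlit 1.18+). The name
# load_generator is illustrative; main() would call it in place of the direct
# pipeline(...) call.
@st.cache_resource
def load_generator():
    # Created once per process and reused across reruns.
    return pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', device=-1)
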
if __name__ == "__main__":
    main()