"""Streamlit app: interactive text generation with the BLOOM language model."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# NOTE(review): bigscience/bloom is the full 176B-parameter checkpoint and
# needs hundreds of GB of memory; consider "bigscience/bloom-560m" for a
# single-machine deployment — confirm against the target hardware.
MODEL_NAME = "bigscience/bloom"


@st.cache_resource  # load once per server process, not on every Streamlit rerun
def _load_model(name: str = MODEL_NAME):
    """Return (tokenizer, model) for *name*, cached across reruns.

    Without caching, Streamlit re-executes the script on every widget
    interaction and would re-download/re-load the checkpoint each time.
    """
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)
    return tokenizer, model


st.title("Text Generation with Bloom")

user_input = st.text_area("Enter your prompt:", height=100)

if st.button('Generate Text'):
    if not user_input.strip():
        # Guard: generating from an empty prompt is a user error, not a crash.
        st.warning("Please enter a prompt first.")
    else:
        tokenizer, model = _load_model()
        inputs = tokenizer(user_input, return_tensors="pt")
        # max_new_tokens bounds only the generated continuation; the original
        # max_length=100 also counted the prompt tokens, so a long prompt
        # could leave no room for any output at all.
        outputs = model.generate(**inputs, max_new_tokens=100)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Generated Text:")
        st.write(generated_text)