import streamlit as st
import os
from transformers import AutoTokenizer, pipeline
import torch
# get the Hugging Face API token from environment variables
hugging_face_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
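
# A small sanity check (not in the original code, added as an explicit
# assumption): gated or private checkpoints cannot be downloaded without a
# valid token. print() is used instead of st.warning() because no Streamlit
# command may run before st.set_page_config() below.
if hugging_face_token is None:
    print("Warning: HUGGINGFACEHUB_API_TOKEN is not set; model download may fail.")
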
def getLLamaresponse(input_text, no_words, blog_style):
    model = "daryl149/llama-2-7b-chat-hf"
    tokenizer = AutoTokenizer.from_pretrained(model, use_auth_token=hugging_face_token)
    # Configure the model's device (CPU or GPU) based on the environment
    device = 0 if torch.cuda.is_available() else -1
    text_generation_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        use_auth_token=hugging_face_token,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device=device,
    )
    # Build the prompt from the user's inputs
    prompt = f"Write a blog for a {blog_style} job profile about {input_text} within {no_words} words.\n"
    # Generate. max_new_tokens bounds only the newly generated tokens (the
    # prompt is not counted against it); tokens are not words, so the word
    # limit is approximate.
    sequences = text_generation_pipeline(
        prompt,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        max_new_tokens=int(no_words),
    )
    # Log and return the text of the first generated sequence
    for seq in sequences:
        print(f"Result: {seq['generated_text']}")
    return sequences[0]['generated_text']
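
# Optional sketch, not wired into the app: getLLamaresponse rebuilds the
# tokenizer and pipeline on every button press, which reloads the 7B
# checkpoint each time. A common Streamlit pattern is to cache the heavy
# object across reruns with st.cache_resource (available in Streamlit
# >= 1.18). The loader below is illustrative; load_llama_pipeline is a
# hypothetical name, not part of the original code.
@st.cache_resource
def load_llama_pipeline(model_name="daryl149/llama-2-7b-chat-hf"):
    """Build the text-generation pipeline once and reuse it across reruns."""
    cached_tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hugging_face_token)
    return pipeline(
        "text-generation",
        model=model_name,
        tokenizer=cached_tokenizer,
        use_auth_token=hugging_face_token,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device=0 if torch.cuda.is_available() else -1,
    )
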
# Streamlit UI
st.set_page_config(page_title="Generate Blogs",
                   page_icon='🤖',
                   layout='centered',
                   initial_sidebar_state='collapsed')
st.header("Generate Blogs 🤖")
input_text = st.text_input("Enter the Blog Topic")
# Create two columns for the two additional input fields
col1, col2 = st.columns([5, 5])
with col1:
    no_words = st.text_input('No. of Words')
with col2:
    blog_style = st.selectbox('Writing the blog for',
                              ('Researchers', 'Data Scientist', 'Common People'), index=0)
submit = st.button("Generate")
# Final response: validate the inputs first, since int(no_words) in
# getLLamaresponse would raise on an empty or non-numeric value
if submit:
    if input_text and no_words.strip().isdigit():
        st.write(getLLamaresponse(input_text, no_words, blog_style))
    else:
        st.error("Please enter a blog topic and a numeric word count.")
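
# How to run locally (an assumption about the environment, not from the
# source; the token value is a placeholder):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxx streamlit run app.py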