stelladai commited on
Commit
3acc7a3
1 Parent(s): ce6cac3
Files changed (2)
  1. app.py +63 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ import streamlit as st
+ import os
+ from transformers import AutoTokenizer, pipeline
+ import torch
+
+ # Get the Hugging Face API token from the environment
+ hugging_face_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
+
+ def getLLamaresponse(input_text, no_words, blog_style):
+     model = "daryl149/llama-2-7b-chat-hf"
+     tokenizer = AutoTokenizer.from_pretrained(model, use_auth_token=hugging_face_token)
+
+     # Configure the model's device (CPU or GPU) based on the environment
+     device = 0 if torch.cuda.is_available() else -1
+
+     text_generation_pipeline = pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         use_auth_token=hugging_face_token,
+         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+         device=device
+     )
+
+     # Build the prompt from the topic, target length, and audience
+     prompt = f"Write a blog for a {blog_style} job profile about {input_text} within {no_words} words.\n"
+
+     # Generate one sequence; max_length counts tokens (prompt included), not words
+     sequences = text_generation_pipeline(
+         prompt,
+         do_sample=True,
+         top_k=10,
+         num_return_sequences=1,
+         max_length=int(no_words),
+     )
+
+     # Print and return the text of the first generated sequence
+     for seq in sequences:
+         print(f"Result: {seq['generated_text']}")
+         return seq['generated_text']
+
+ # Streamlit UI
+ st.set_page_config(page_title="Generate Blogs",
+                    page_icon='🤖',
+                    layout='centered',
+                    initial_sidebar_state='collapsed')
+
+ st.header("Generate Blogs 🤖")
+ input_text = st.text_input("Enter the Blog Topic")
+
+ # Create two more columns for the two additional input fields
+ col1, col2 = st.columns([5, 5])
+ with col1:
+     no_words = st.text_input('No of Words')
+ with col2:
+     blog_style = st.selectbox('Writing the blog for',
+                               ('Researchers', 'Data Scientist', 'Common People'), index=0)
+
+ submit = st.button("Generate")
+
+ # Final response
+ if submit:
+     st.write(getLLamaresponse(input_text, no_words, blog_style))
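
Note: as written, getLLamaresponse rebuilds the pipeline, reloading the 7B checkpoint, on every click of "Generate". A minimal sketch of one way to avoid that with Streamlit's st.cache_resource, assuming the same model ID and HUGGINGFACEHUB_API_TOKEN variable used in app.py; the load_pipeline helper name is illustrative and not part of this commit:

import os

import streamlit as st
import torch
from transformers import AutoTokenizer, pipeline

@st.cache_resource
def load_pipeline(model_name="daryl149/llama-2-7b-chat-hf"):
    # Load the tokenizer and model once per session and reuse them across reruns
    token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=token)
    return pipeline(
        "text-generation",
        model=model_name,
        tokenizer=tokenizer,
        use_auth_token=token,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device=0 if torch.cuda.is_available() else -1,
    )

# getLLamaresponse would then call load_pipeline() instead of constructing the pipeline itself.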
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ langchain
+ streamlit
+ transformers
+ torch