sachitksh123 committed
Commit 5321eaf · verified · 1 Parent(s): 65c7cad

Update app.py

Files changed (1)
  1. app.py +19 -38
app.py CHANGED
@@ -1,45 +1,26 @@
- import os
  import streamlit as st
- from dotenv import load_dotenv
- from langchain.llms import HuggingFaceEndpoint
-
- load_dotenv()
-
- os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("hf_token")
-
- huggingface_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
-
- # Function to return the response
- def load_answer(question):
-     # The "text-davinci-003" model is deprecated, so using a current one: https://platform.openai.com/docs/deprecations
-     if question:
-         llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2")
-
-         # LangChain now recommends using invoke() for the call below
-         answer = llm.invoke(question)
-         return answer
-
-
- # App UI starts here
- st.set_page_config(page_title="LangChain Demo - Mistral", page_icon=":robot:")
- st.header("LangChain Demo - Mistral")
-
- # Gets the user input
- def get_text():
-     input_text = st.text_input("You: ", key="input")
-     return input_text
-
-
- user_input = get_text()
- response = load_answer(user_input)
-
- submit = st.button('Generate')
-
- # If generate button is clicked
- if submit:
-     st.subheader("Answer:")
-     st.write(response)
  import streamlit as st
+ from transformers import pipeline
+
+ # Load the text generation model
+ generator = pipeline("text-generation", model="distilgpt2")
+
+ # Streamlit app title
+ st.title("Text Generation with DistilGPT-2")
+
+ # Input text from the user
+ input_text = st.text_area("Enter your prompt:", "In this course, we will teach you how to")
+
+ # Number of sequences to generate
+ num_sequences = st.slider("Number of sequences to generate:", 1, 5, 2)
+
+ # Generate button
+ if st.button("Generate Text"):
+     with st.spinner("Generating..."):
+         # Generate text using the model
+         generated_texts = generator(input_text, max_length=30, num_return_sequences=num_sequences)
+
+         # Display generated texts
+         for i, generated in enumerate(generated_texts):
+             st.subheader(f"Generated Text {i + 1}")
+             st.write(generated['generated_text'])
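
Note on the new version: the transformers text-generation pipeline returns a list of dicts, one per requested sequence, each containing a 'generated_text' key, which is why the display loop reads generated['generated_text']. A minimal sketch of the same call outside Streamlit (this assumes the transformers package plus a backend such as torch are installed and that distilgpt2 can be downloaded on first use):

    from transformers import pipeline

    # Same pipeline configuration as in the updated app.py
    generator = pipeline("text-generation", model="distilgpt2")

    # Returns a list like [{'generated_text': '...'}, {'generated_text': '...'}]
    outputs = generator("In this course, we will teach you how to",
                        max_length=30, num_return_sequences=2)
    for out in outputs:
        print(out["generated_text"])

The updated app itself can then be started with: streamlit run app.py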