acloudfan committed on
Commit
a11fd35
1 Parent(s): ab5d43a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -11
app.py CHANGED
@@ -1,14 +1,27 @@
1
- # Demonstrates the basic usage of Streamlit
2
- # Requires a Hugging Face secret value : HUGGINGFACEHUB_API_TOKEN
 
 
3
 
4
  import streamlit as st
 
5
  import os
6
- import time
7
 
8
- from langchain_community.llms import HuggingFaceHub
9
  from langchain_community.llms import HuggingFaceEndpoint
10
 
11
 
 
 
 
 
 
 
 
 
 
 
 
12
  # Title
13
  st.title('Try out the model')
14
 
@@ -76,10 +89,10 @@ max_tokens = st.sidebar.number_input(
76
  value=50
77
  )
78
 
79
- # invoke the LLM
80
- model_kwargs={ "temperature": "0.1" }
81
- def invoke():
82
- llm_hf = HuggingFaceEndpoint(
83
  repo_id=model_id,
84
  temperature=temperature,
85
  top_k = top_k,
@@ -87,11 +100,14 @@ def invoke():
87
  repetition_penalty = repetition_penalty,
88
  max_new_tokens=max_tokens
89
  )
 
 
 
 
90
 
91
  # Show spinner, while we are waiting for the response
92
  with st.spinner('Invoking LLM ... '):
93
- time.sleep(5)
94
- st.session_state['model-response'] = llm_hf.invoke(query)
95
- print(query)
96
 
97
  st.button("Invoke", on_click=invoke)
 
1
+ # Demonstrates the basic usage
2
+ # 1. Select model
3
+ # 2. Provide a query
4
+ # 3. Invoke the model
5
 
6
  import streamlit as st
7
+ from dotenv import load_dotenv
8
  import os
 
9
 
10
+ # from langchain_community.llms import HuggingFaceHub
11
  from langchain_community.llms import HuggingFaceEndpoint
12
 
13
 
14
+ # Load the API keys, if running locally
15
+ # CHANGE the path to the env file
16
+
17
+ # If HF space is used then set the env var HUGGINGFACEHUB_API_TOKEN in the settings
18
+ try:
19
+ load_dotenv('C:\\Users\\raj\\.jupyter\\.env')
20
+ except:
21
+ print("Environment file not found !! MUST find the env var HUGGINGFACEHUB_API_TOKEN to work.")
22
+
23
+
24
+
25
  # Title
26
  st.title('Try out the model')
27
 
 
89
  value=50
90
  )
91
 
92
+ # cached
93
+ @st.cache_resource
94
+ def get_llm(model_id):
95
+ return HuggingFaceEndpoint(
96
  repo_id=model_id,
97
  temperature=temperature,
98
  top_k = top_k,
 
100
  repetition_penalty = repetition_penalty,
101
  max_new_tokens=max_tokens
102
  )
103
+
104
+ # invoke the LLM
105
+ def invoke():
106
+ llm_hf = get_llm(model_id)
107
 
108
  # Show spinner, while we are waiting for the response
109
  with st.spinner('Invoking LLM ... '):
110
+ st.session_state['model-response'] = llm_hf.invoke(query)
111
+
 
112
 
113
  st.button("Invoke", on_click=invoke)