Sharathhebbar24 commited on
Commit
b52c479
β€’
1 Parent(s): 8869ff1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +169 -88
app.py CHANGED
@@ -1,96 +1,177 @@
1
  import os
2
  import streamlit as st
3
  from langchain.llms import HuggingFaceHub
 
4
 
5
 
6
  from models import return_models, return_text2text_generation_models, return_task_name, return_text_generation_models
7
- dummy_parent = "google"
8
- models_count = return_text2text_generation_models(dummy_parent, True) + return_text_generation_models(dummy_parent, True)
9
- st.warning("Warning: Some models may not work and some models may require GPU to run")
10
- st.text(f"As of now there are {models_count} model available")
11
- st.text("Made with Langchain, StreamLit, Hugging Face and πŸ’–")
12
- st.header('πŸ¦œπŸ”— One stop for Open Source Models')
13
-
14
- API_KEY = st.sidebar.text_input(
15
- 'API Key',
16
- type='password',
17
- help="Type in your HuggingFace API key to use this app")
18
-
19
- task_name = st.sidebar.selectbox(
20
- label = "Choose the task you want to perform",
21
- options = return_task_name(),
22
- help="Choose your open source LLM to get started"
23
- )
24
- if task_name is None:
25
- model_parent_visibility = True
26
- else:
27
- model_parent_visibility = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
- model_parent_options = return_models(task_name)
30
- model_parent = st.sidebar.selectbox(
31
- label = "Choose your Source",
32
- options = model_parent_options,
33
- help="Choose your source of models",
34
- disabled=model_parent_visibility
35
- )
36
-
37
- if model_parent is None:
38
- model_name_visibility = True
39
- else:
40
- model_name_visibility = False
41
- if task_name == "text2text-generation":
42
- options = return_text2text_generation_models(model_parent)
43
- else:
44
- options = return_text_generation_models(model_parent)
45
- model_name = st.sidebar.selectbox(
46
- label = "Choose your Models",
47
- options = options,
48
- help="Choose your open source LLM to get started",
49
- disabled=model_name_visibility
50
- )
51
-
52
- temperature = st.sidebar.slider(
53
- label="Temperature",
54
- min_value=0.1,
55
- max_value=1.0,
56
- step=0.1,
57
- value=0.9,
58
- help="Set the temperature to get accurate results"
59
- )
60
-
61
- max_token_length = st.sidebar.slider(
62
- label="Token Length",
63
- min_value=32,
64
- max_value=1024,
65
- step=32,
66
- value=1024,
67
- help="Set the max tokens to get accurate results"
68
- )
69
-
70
-
71
- os.environ['HUGGINGFACEHUB_API_TOKEN'] = API_KEY
72
- def generate_response(input_text):
73
 
74
- model_kwargs = {
75
- "temperature": temperature,
76
- "max_length": max_token_length
77
- }
78
- llm = HuggingFaceHub(
79
- repo_id = model_name,
80
- model_kwargs = model_kwargs
81
- )
82
-
83
- st.info(llm(input_text))
84
-
85
-
86
- with st.form('my_form'):
87
- try:
88
- text = st.text_area('Enter Your Prompt', 'What are the three key pieces of advice for learning how to code?')
89
- submitted = st.form_submit_button('Submit')
90
- if not API_KEY.startswith('hf_'):
91
- st.warning('Please enter your API key!', icon='⚠')
92
- if submitted and API_KEY.startswith('hf_'):
93
- with st.spinner("Running...."):
94
- generate_response(text)
95
- except Exception as e:
96
- st.error(e, icon="🚨")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import streamlit as st
3
  from langchain.llms import HuggingFaceHub
4
+ from llm import similarity
5
 
6
 
7
  from models import return_models, return_text2text_generation_models, return_task_name, return_text_generation_models
8
class LLM_Langchain():
    """Streamlit front end for querying Hugging Face Hub models via LangChain.

    Constructing the object renders the sidebar controls (API key, task,
    model source, model name, temperature, token length); `form_data()`
    then drives a chat loop that either queries the selected model
    directly ("Inference") or answers against an uploaded PDF through
    `similarity` ("FineTune").
    """

    def __init__(self):
        # The count helpers return totals when their second argument is
        # True; the parent value is then irrelevant, hence "dummy".
        dummy_parent = "google"
        self.models_count = (
            return_text2text_generation_models(dummy_parent, True)
            + return_text_generation_models(dummy_parent, True)
        )
        st.warning("Warning: Some models may not work and some models may require GPU to run")
        # Fix: pluralized the user-facing count message ("models available").
        st.text(f"As of now there are {self.models_count} models available")
        st.text("Made with Langchain, StreamLit, Hugging Face and 💖")
        st.header('🦜🔗 One stop for Open Source Models')

        self.API_KEY = st.sidebar.text_input(
            'API Key',
            type='password',
            help="Type in your HuggingFace API key to use this app")

        self.task_name = st.sidebar.selectbox(
            label="Choose the task you want to perform",
            options=return_task_name(),
            help="Choose your open source LLM to get started"
        )
        # The source selectbox stays disabled until a task is chosen.
        model_parent_visibility = self.task_name is None

        model_parent_options = return_models(self.task_name)
        model_parent = st.sidebar.selectbox(
            label="Choose your Source",
            options=model_parent_options,
            help="Choose your source of models",
            disabled=model_parent_visibility
        )

        # Likewise, the model selectbox stays disabled until a source is chosen.
        model_name_visibility = model_parent is None
        if self.task_name == "text2text-generation":
            options = return_text2text_generation_models(model_parent)
        else:
            options = return_text_generation_models(model_parent)
        self.model_name = st.sidebar.selectbox(
            label="Choose your Models",
            options=options,
            help="Choose your open source LLM to get started",
            disabled=model_name_visibility
        )

        self.temperature = st.sidebar.slider(
            label="Temperature",
            min_value=0.1,
            max_value=1.0,
            step=0.1,
            value=0.9,
            help="Set the temperature to get accurate results"
        )

        self.max_token_length = st.sidebar.slider(
            label="Token Length",
            min_value=32,
            max_value=1024,
            step=32,
            value=1024,
            help="Set the max tokens to get accurate results"
        )

        # Shared generation kwargs for both direct inference and the
        # PDF-similarity path.
        self.model_kwargs = {
            "temperature": self.temperature,
            "max_length": self.max_token_length
        }

        os.environ['HUGGINGFACEHUB_API_TOKEN'] = self.API_KEY

    def generate_response(self, input_text):
        """Query the selected Hub model with *input_text* and return its reply."""
        llm = HuggingFaceHub(
            repo_id=self.model_name,
            model_kwargs=self.model_kwargs
        )
        return llm(input_text)

    def radio_button(self):
        """Render the mode selector and return 'FineTune' or 'Inference'."""
        options = ['FineTune', 'Inference']
        selected_option = st.radio(
            label="Choose your options",
            options=options
        )
        return selected_option

    def pdf_uploader(self):
        """Render the PDF uploader and persist any uploaded file under assets/.

        The uploader is only meaningful in FineTune mode, so it is disabled
        when the user picked Inference. Sets ``self.file_upload_status`` and,
        on upload, ``self.pdf_file_path``.
        """
        self.uploader_visibility = self.selected_option == "Inference"

        self.file_upload_status = st.file_uploader(
            label="Upload PDF file",
            disabled=self.uploader_visibility
        )

        if self.file_upload_status is not None:
            # Fix: ensure the target directory exists before writing —
            # previously this raised FileNotFoundError on a fresh checkout.
            os.makedirs("assets", exist_ok=True)
            self.pdf_file_path = f"assets/{self.file_upload_status.name}"

            with open(self.pdf_file_path, "wb") as f:
                f.write(self.file_upload_status.getbuffer())
            st.write("File Uploaded Successfully")

    def form_data(self):
        """Drive the chat loop: collect a prompt, run the selected mode,
        and render the conversation kept in Streamlit session state."""
        try:
            if not self.API_KEY.startswith('hf_'):
                st.warning('Please enter your API key!', icon='⚠')

            self.selected_option = self.radio_button()
            self.pdf_uploader()

            # FineTune requires a PDF before the chat input is enabled;
            # Inference is always ready.
            if self.selected_option == "FineTune":
                text_input_visibility = self.file_upload_status is None
            else:
                text_input_visibility = False

            if "messages" not in st.session_state:
                st.session_state.messages = []

            st.write(f"You are using {self.model_name} model")

            # Replay the conversation so far.
            for message in st.session_state.messages:
                with st.chat_message(message.get('role')):
                    st.write(message.get("content"))
            text = st.chat_input(disabled=text_input_visibility)

            if text:
                st.session_state.messages.append(
                    {
                        "role": "user",
                        "content": text
                    }
                )
                with st.chat_message("user"):
                    st.write(text)

                # Typing "clear" wipes the conversation history.
                if text.lower() == "clear":
                    del st.session_state.messages
                    return
                if self.selected_option == 'FineTune':
                    result = similarity(self.pdf_file_path, self.model_name, self.model_kwargs, text)
                else:
                    result = self.generate_response(text)
                st.session_state.messages.append(
                    {
                        "role": "assistant",
                        "content": result
                    }
                )
                with st.chat_message('assistant'):
                    st.markdown(result)

        except Exception as e:
            # Surface any failure (bad model id, HTTP error, missing PDF)
            # in the UI instead of crashing the app.
            st.error(e, icon="🚨")
176
# Build the UI and run the chat loop. Guarded so importing this module
# (e.g. from tests) does not trigger the Streamlit page build; Streamlit
# executes scripts with __name__ == "__main__", so app behavior is unchanged.
if __name__ == "__main__":
    model = LLM_Langchain()
    model.form_data()