ProPerNounpYK committed on
Commit 086ee32 · verified
1 Parent(s): 2be2ddd

Update app.py

Files changed (1)
  1. app.py +82 -16
app.py CHANGED
@@ -6,7 +6,7 @@ import sys
 from dotenv import load_dotenv, dotenv_values
 load_dotenv()
 
-# initialize the client
 # client = OpenAI(
 #     base_url="https://api-inference.huggingface.co/v1",
 #     api_key=os.environ.get('API_KEY')  # Replace with your token
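For reference, the commented-out client expects its token to come from a local .env file loaded by python-dotenv. A minimal sketch of that wiring (API_KEY is this app's own variable name, not a library default):

import os
from dotenv import load_dotenv

load_dotenv()                         # copy KEY=value pairs from ./.env into os.environ
api_key = os.environ.get("API_KEY")   # assumed name; match whatever the .env file defines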
@@ -14,7 +14,7 @@ load_dotenv()
 
 # Create supported models
 model_links = {
-    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct"
 }
 
 # Random dog images for error message
@@ -33,7 +33,6 @@ random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
 
 
-
 def reset_conversation():
     '''
     Resets Conversation
@@ -81,11 +80,11 @@ repo_id = model_links[selected_model]
 
 
 st.subheader(f'TypeGPT.net - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -106,11 +105,11 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
         st.markdown(prompt)
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
-
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         try:
             # stream = client.chat.completions.create(
             #     model=model_links[selected_model],
             #     messages=[
@@ -122,16 +121,83 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
             #     max_tokens=3000,
             # )
 
-            # Before the fix (OpenAI)
-            # response = st.write_stream(stream)
-
-            # After the fix (Hugging Face)
-            tokenizer = AutoTokenizer.from_pretrained(repo_id)
-            model = AutoModelForCausalLM.from_pretrained(repo_id)
-
-            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-            output = model.generate(input_ids, max_length=1000, temperature=temp_values)
-            response = tokenizer.decode(output[0], skip_special_tokens=True)
 
         except Exception as e:
             # st.empty()
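For context, the removed branch above ran generation locally with transformers. A standalone sketch of that approach, with a tiny placeholder model substituted so it runs on any machine:

from transformers import AutoTokenizer, AutoModelForCausalLM

repo_id = "sshleifer/tiny-gpt2"  # placeholder; the app pointed this at meta-llama/Meta-Llama-3-8B-Instruct
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

input_ids = tokenizer("Hi, how are you?", return_tensors="pt").input_ids
output = model.generate(input_ids, max_length=100, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0], skip_special_tokens=True))

Loading an 8B-parameter model this way needs tens of gigabytes of memory per replica, which is presumably why this commit swaps it for a hosted inference call.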
 
 from dotenv import load_dotenv, dotenv_values
 load_dotenv()
 
+# Before the fix (OpenAI)
 # client = OpenAI(
 #     base_url="https://api-inference.huggingface.co/v1",
 #     api_key=os.environ.get('API_KEY')  # Replace with your token
 
 
 # Create supported models
 model_links = {
+    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
 }
 
 # Random dog images for error message
 
             "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
 
 
 def reset_conversation():
     '''
     Resets Conversation
 
 
 
 st.subheader(f'TypeGPT.net - {selected_model}')
+st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
+    st.session_state[selected_model] = model_links[selected_model]
 
 # Initialize chat history
 if "messages" not in st.session_state:
 
         st.markdown(prompt)
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         try:
+            # Before the fix (OpenAI)
             # stream = client.chat.completions.create(
             #     model=model_links[selected_model],
             #     messages=[
 
             #     max_tokens=3000,
             # )
 
+            # After the fix (gradio & InferenceClient)
+            import gradio as gr
+            from huggingface_hub import InferenceClient
+
+            """
+            For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+            """
+            client = InferenceClient(repo_id)
+
+            def respond(
+                message,
+                history: list[tuple[str, str]],
+                system_message,
+                max_tokens,
+                temperature,
+                top_p,
+            ):
+                messages = [{"role": "system", "content": system_message}]
+
+                for val in history:
+                    if val[0]:
+                        messages.append({"role": "user", "content": val[0]})
+                    if val[1]:
+                        messages.append({"role": "assistant", "content": val[1]})
+
+                messages.append({"role": "user", "content": message})
+
+                response = ""
+
+                for message in client.chat_completion(
+                    messages,
+                    max_tokens=max_tokens,
+                    stream=True,
+                    temperature=temperature,
+                    top_p=top_p,
+                ):
+                    token = message.choices[0].delta.content
+
+                    response += token
+                    yield response
+
+            """
+            For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+            """
+            demo = gr.ChatInterface(
+                respond,
+                additional_inputs=[
+                    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+                    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+                    gr.Slider(
+                        minimum=0.1,
+                        maximum=1.0,
+                        value=0.95,
+                        step=0.05,
+                        label="Top-p (nucleus sampling)",
+                    ),
+                ],
+            )
+
+            response = ""
+
+            for message in demo(
+                prompt,
+                st.session_state.messages[1:],
+                "You are a friendly Chatbot.",
+                512,
+                0.7,
+                0.95,
+            ):
+                response += message
 
         except Exception as e:
             # st.empty()
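Note that the added block builds a gr.ChatInterface inside the Streamlit handler and then iterates over demo(...), mixing two UI frameworks in one callback. A leaner sketch of the same intent, streaming chat_completion straight into Streamlit (assumes streamlit >= 1.31 for st.write_stream, huggingface_hub installed, and a valid HF_TOKEN in the environment):

import streamlit as st
from huggingface_hub import InferenceClient

repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # same model as model_links above
client = InferenceClient(repo_id)  # reads HF_TOKEN from the environment if set

def token_stream(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    # Yield plain-text deltas so st.write_stream can render them as they arrive.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final chunk's delta can be None
            yield token

if prompt := st.chat_input("Ask me a question"):
    with st.chat_message("assistant"):
        response = st.write_stream(token_stream([{"role": "user", "content": prompt}]))

This keeps the whole round trip inside Streamlit's own chat primitives and drops the gradio dependency from the request path.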