onuri committed on
Commit
dd21102
1 Parent(s): 9710daa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -34
app.py CHANGED
@@ -1,46 +1,54 @@
 
1
  import os
2
  import gradio as gr
3
- from gradio import blocks
4
- from text_generation import Client, InferenceAPIClient
5
 
6
- model = "OpenAssistant/oasst-sft-1-pythia-12b"
7
- client = InferenceAPIClient(model)
8
 
9
- # creating a list to store the chat history
10
- history = []
11
 
12
- def generate_response(inputs):
13
- # input validation
14
- if not inputs:
15
- return "Please enter a valid input."
 
16
 
17
- # checking if the input is a continuation of a conversation or a new conversation
18
- if history and history[-1]["speaker"] == "user":
19
- inputs = history[-1]["text"] + " " + inputs
20
 
21
- # generating a response using the inference api client
22
- response = client.generate_utterance(inputs)
 
 
 
 
23
 
24
- # adding the user input and the model's response to the history list
25
- history.append({"speaker": "user", "text": inputs})
26
- history.append({"speaker": "model", "text": response})
27
-
28
- return response
29
 
 
30
  iface = gr.Interface(
31
- generate_response,
32
- [
33
- gr.inputs.Textbox(
34
- placeholder="Hi, how can I help you?",
35
- label="User Input"
36
- )
37
- ],
38
- [gr.outputs.Textbox(label="Model Response")],
39
- title="OpenAssistant: AI Powered Chatbot",
40
- live=True,
41
- layout="vertical",
42
- theme="compact"
43
  )
44
 
45
- if __name__ == '__main__':
46
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # importing necessary libraries
2
  import os
3
  import gradio as gr
4
+ from gradio import interface, blocks
5
+ from text_generation import InferenceAPIClient
6
 
7
+ # Assigning the model name as a constant
8
+ MODEL = "OpenAssistant/oasst-sft-1-pythia-12b"
9
 
 
 
10
 
11
def handle_error(exception):
    """Format an exception from an inference-API call as a user-facing message.

    Args:
        exception: The caught exception instance.

    Returns:
        A plain-English error string suitable for display in the UI.
    """
    # f-string interpolation already applies str(); the explicit str() call
    # in the original was redundant.
    return f"An error occurred: {exception}"
16
 
 
 
 
17
 
18
def get_usernames(model: str):
    """Return the prompt-delimiter strings for *model*.

    Presumably the 4-tuple is (user_name, user_token, assistant_token, sep)
    used to build the chat prompt — TODO confirm against the caller.

    Args:
        model: Fully-qualified model identifier.

    Returns:
        A 4-tuple of delimiter strings for the supported model.

    Raises:
        ValueError: If *model* is not supported. The original implementation
            silently returned None here, which would surface later as an
            opaque TypeError when callers unpack the tuple.
    """
    if model == MODEL:
        return "", "<|prompter|>", "<|assistant|>", ""
    raise ValueError(f"Unsupported model: {model!r}")
24
 
 
 
 
 
 
25
 
26
# Client for the hosted inference API serving MODEL.
client = InferenceAPIClient(MODEL)


def generate_response(prompt):
    """Generate a model reply for *prompt* via the inference API.

    Args:
        prompt: The user's input text from the Gradio textbox.

    Returns:
        The generated text with surrounding whitespace stripped, or a
        formatted error message (via handle_error) if the API call fails.
    """
    # NOTE(review): assumes the client returns a list of dicts carrying a
    # "generated_text" key — confirm against the text_generation client API.
    try:
        text = client.generate_text(prompt)[0]["generated_text"]
    except Exception as exc:
        # Surface API failures to the UI instead of crashing the app; this
        # also puts the previously-unused handle_error helper to work.
        return handle_error(exc)
    return text.strip()


# Gradio UI wired directly to generate_response at construction time.
# (The original passed fn="" and monkey-patched iface.fn afterwards.)
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.inputs.Textbox(label="Enter Your Prompt Here..."),
    outputs=gr.outputs.Textbox(),
    title="AI Assistant",
    description="An AI assistant for answering your queries and concerns.",
)

if __name__ == "__main__":
    # share=True exposes a temporary public link to the interface.
    # The stray iface.test_launch() debug call (flagged for removal in the
    # original's own comment) has been dropped.
    iface.launch(share=True)