paloma99 committed
Commit eb09c16
1 Parent(s): 70eb704

Update app.py

Files changed (1)
  1. app.py +18 -20
app.py CHANGED
@@ -1,7 +1,9 @@
-# Cell 1: Image Classification Model
+# Combined Interface
 import gradio as gr
-from transformers import pipeline
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+import torch

+# Image Classification Model
 image_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

 def predict_image(input_img):
@@ -9,17 +11,13 @@ def predict_image(input_img):
     return input_img, {p["label"]: p["score"] for p in predictions}

 image_gradio_app = gr.Interface(
-    predict_image,
+    fn=predict_image,
     inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
     outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
     title="Hot Dog? Or Not?",
 )

-# Cell 2: Chatbot Model
-import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-
+# Chatbot Model
 tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
 chatbot_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

@@ -32,15 +30,15 @@ def predict_chatbot(input, history=[]):
     response_tuples = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)]
     return response_tuples, history

-chatbot_gradio_app = gr.Blocks()
-with chatbot_gradio_app as demo:
-    chatbot = gr.Chatbot()
-    state = gr.State([])
-    with gr.Row():
-        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter")
-    txt.submit(predict_chatbot, [txt, state], [chatbot, state])
-
-# Launch the interfaces
-if __name__ == "__main__":
-    image_gradio_app.launch()
-    chatbot_gradio_app.launch()
+chatbot_gradio_app = gr.Interface(
+    fn=predict_chatbot,
+    inputs=[gr.Textbox(show_label=False, placeholder="Enter text and press enter"), gr.State()],
+    outputs=[gr.Chatbot(), gr.State()],
+    live=True
+)
+
+# Display both interfaces vertically
+gr.Interface(
+    columns=2,
+    children=[image_gradio_app, chatbot_gradio_app]
+).launch()
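
A note on the image half of the diff: the dict comprehension in predict_image works because the transformers image-classification pipeline returns a list of label/score dicts, which maps directly onto the dict-of-confidences format that gr.Label renders. A minimal sketch outside Gradio, where the file name and the scores in the comments are illustrative placeholders, not values from this commit:

# Illustrative pipeline call; "example.jpg" is a hypothetical local file.
from PIL import Image
from transformers import pipeline

image_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
predictions = image_pipeline(Image.open("example.jpg"))
# e.g. [{'label': 'hot dog', 'score': 0.97}, {'label': 'not hot dog', 'score': 0.03}]
label_scores = {p["label"]: p["score"] for p in predictions}
# e.g. {'hot dog': 0.97, 'not hot dog': 0.03}, ready to hand to gr.Label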
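
The body of predict_chatbot lies almost entirely outside the hunk context, so only its final two lines are visible above. A sketch of a DialoGPT turn function ending in exactly those two lines, following the stock transformers chat pattern; everything before those two lines is an assumption about the elided code, not a quote of it:

# Hypothetical reconstruction; assumes the tokenizer, chatbot_model and torch
# imports defined earlier in app.py.
def predict_chatbot(input, history=[]):
    # Encode the new user message, terminated by the end-of-sequence token.
    new_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
    # Prepend the token ids accumulated over earlier turns, if any.
    bot_input_ids = torch.cat([torch.LongTensor([history]), new_ids], dim=-1) if history else new_ids
    # Let DialoGPT continue the conversation.
    output_ids = chatbot_model.generate(
        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
    )
    history = output_ids[0].tolist()
    # Decode the full exchange and split on EOS into alternating user/bot turns.
    response = tokenizer.decode(history).split(tokenizer.eos_token)
    response_tuples = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)]
    return response_tuples, history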
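
One caveat on the closing block: columns and children are not documented gr.Interface parameters, and Interface requires a fn argument, so the final call as committed is unlikely to run. A minimal sketch of a supported way to put both finished apps on one page, assuming a recent Gradio release (the combined_demo name is illustrative):

# Sketch only: renders the two Interfaces one above the other inside a Blocks
# page; gr.TabbedInterface([image_gradio_app, chatbot_gradio_app], ["Image", "Chat"])
# would be an alternative if tabs are acceptable.
with gr.Blocks() as combined_demo:
    image_gradio_app.render()
    chatbot_gradio_app.render()

if __name__ == "__main__":
    combined_demo.launch()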