mintaeng committed on
Commit
27c2f4a
•
1 Parent(s): 02c6ccd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -69
app.py CHANGED
@@ -1,76 +1,21 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- import torch
5
 
 
 
6
 
7
# UI copy shown by the Gradio ChatInterface.
title = "πŸ€–AI ChatBot"
description = "Building open-domain chatbots is a challenging area for machine learning research."

# Seed prompts offered to the user (Korean, futsal/FA-service domain).
examples = [
    ["FA μ„œλΉ„μŠ€λŠ” μ–΄λ–€ μ„œλΉ„μŠ€μΈκ°€μš”?"],
    ["ν’‹μ‚΄ κ²½κΈ°μž₯ κ·œκ²©μ€ μ–΄λ–»κ²Œ λ˜λ‚˜μš”?"],
    ["ν’‹μ‚΄ν™”λŠ” μ–΄λ–€κ±Έ μ‹ μ–΄μ•Ό ν•˜λ‚˜μš”?"],
]

# For more information on `huggingface_hub` Inference API support, see:
# https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
17
-
18
-
19
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* given the prior *history*.

    Builds an OpenAI-style message list (system prompt, alternating
    user/assistant turns, then the new user message) and yields the
    accumulated response text after every streamed chunk, so the Gradio
    ChatInterface can render it incrementally.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turn pairs; empty strings/None
            entries are skipped.
        system_message: System prompt placed first in the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The response text accumulated so far (grows with each chunk).
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # BUGFIX: the original loop variable was also named `message`, shadowing
    # and clobbering the user-message parameter; use `chunk` instead.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # BUGFIX: the final streamed delta can carry no content (None), which
        # previously raised TypeError on string concatenation.
        if token is not None:
            response += token
        yield response
50
-
51
- """
52
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
53
- """
54
- demo = gr.ChatInterface(
55
- respond,
56
- title=title,
57
- description=description,
58
- examples=examples,
59
- theme='ParityError/Anime',
60
- additional_inputs=[
61
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
62
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
63
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
64
- gr.Slider(
65
- minimum=0.1,
66
- maximum=1.0,
67
- value=0.95,
68
- step=0.05,
69
- label="Top-p (nucleus sampling)",
70
- ),
71
- ],
72
- )
73
-
74
 
75
  if __name__ == "__main__":
76
  demo.launch()
 
1
from interface import create_demo
from pdfchatbot import PDFChatBot

# Build the Gradio UI and keep handles to the components we must wire up.
demo, chat_history, show_img, txt, submit_button, uploaded_pdf = create_demo()

# A single chatbot instance backs every event handler below.
pdf_chatbot = PDFChatBot()

# Register event handlers on the interface.
with demo:
    # When a PDF is uploaded, render its page into the image panel.
    uploaded_pdf.upload(
        pdf_chatbot.render_file,
        inputs=[uploaded_pdf],
        outputs=[show_img],
    )

    # On submit: append the user's text to the chat, then generate the
    # response, then re-render the PDF page.
    (
        submit_button.click(
            pdf_chatbot.add_text,
            inputs=[chat_history, txt],
            outputs=[chat_history],
            queue=False,
        )
        .success(
            pdf_chatbot.generate_response,
            inputs=[chat_history, txt, uploaded_pdf],
            outputs=[chat_history, txt],
        )
        .success(
            pdf_chatbot.render_file,
            inputs=[uploaded_pdf],
            outputs=[show_img],
        )
    )


if __name__ == "__main__":
    demo.launch()