mateoluksenberg committed on
Commit
b250460
1 Parent(s): 46a2ec1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -8
app.py CHANGED
@@ -36,9 +36,12 @@ h1 {
36
 
37
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
38
 
 
 
39
def extract_text(path):
    """Return the entire contents of the plain-text file at *path*.

    Opens the file with explicit UTF-8 encoding and a context manager so the
    handle is always closed — the previous version leaked the file object and
    relied on the platform-default encoding.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()
41
 
 
42
  def extract_pdf(path):
43
  doc = pymupdf.open(path)
44
  text = ""
@@ -46,6 +49,7 @@ def extract_pdf(path):
46
  text += page.get_text()
47
  return text
48
 
 
49
  def extract_docx(path):
50
  doc = docx.Document(path)
51
  data = []
@@ -54,6 +58,7 @@ def extract_docx(path):
54
  content = '\n\n'.join(data)
55
  return content
56
 
 
57
  def extract_pptx(path):
58
  prs = Presentation(path)
59
  text = ""
@@ -63,6 +68,7 @@ def extract_pptx(path):
63
  text += shape.text + "\n"
64
  return text
65
 
 
66
  def mode_load(path):
67
  choice = ""
68
  file_type = path.split(".")[-1]
@@ -79,13 +85,17 @@ def mode_load(path):
79
  choice = "doc"
80
  print(content[:100])
81
  return choice, content[:5000]
 
 
82
  elif file_type in ["png", "jpg", "jpeg", "bmp", "tiff", "webp"]:
83
  content = Image.open(path).convert('RGB')
84
  choice = "image"
85
  return choice, content
 
86
  else:
87
  raise gr.Error("Oops, unsupported files.")
88
 
 
89
  @spaces.GPU()
90
  def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
91
 
@@ -100,9 +110,8 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
100
  print(f'history is - {history}')
101
  conversation = []
102
  prompt_files = []
103
-
104
- if "files" in message and message["files"]:
105
- choice, contents = mode_load(message["files"][-1].path)
106
  if choice == "image":
107
  conversation.append({"role": "user", "image": contents, "content": message['text']})
108
  elif choice == "doc":
@@ -110,9 +119,11 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
110
  conversation.append({"role": "user", "content": format_msg})
111
  else:
112
  if len(history) == 0:
 
113
  contents = None
114
  conversation.append({"role": "user", "content": message['text']})
115
  else:
 
116
  for prompt, answer in history:
117
  if answer is None:
118
  prompt_files.append(prompt[0])
@@ -120,17 +131,17 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
120
  else:
121
  conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
122
  if len(prompt_files) > 0:
123
- choice, contents = mode_load(prompt_files[-1].path)
124
  else:
125
  choice = ""
126
  conversation.append({"role": "user", "image": "", "content": message['text']})
127
 
 
128
  if choice == "image":
129
  conversation.append({"role": "user", "image": contents, "content": message['text']})
130
  elif choice == "doc":
131
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
132
  conversation.append({"role": "user", "content": format_msg})
133
-
134
  print(f"Conversation is -\n{conversation}")
135
 
136
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True,
@@ -157,14 +168,19 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
157
  buffer += new_text
158
  yield buffer
159
 
160
- chatbot = gr.Chatbot()
161
 
 
 
 
162
  chat_input = gr.MultimodalTextbox(
163
  interactive=True,
164
  placeholder="Enter message or upload a file ...",
165
  show_label=False,
166
- )
 
167
 
 
 
168
  EXAMPLES = [
169
  [{"text": "Write a poem about spring season in French Language", }],
170
  [{"text": "what does this chart mean?", "files": ["sales.png"]}],
@@ -178,6 +194,8 @@ with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
178
  gr.ChatInterface(
179
  fn=stream_chat,
180
  multimodal=True,
 
 
181
  textbox=chat_input,
182
  chatbot=chatbot,
183
  fill_height=True,
@@ -228,4 +246,5 @@ with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
228
  gr.Examples(EXAMPLES, [chat_input])
229
 
230
  if __name__ == "__main__":
231
- demo.queue(api_open=False).launch(show_api=False, share=False)
 
 
36
 
37
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
38
 
39
+
40
+
41
def extract_text(path):
    """Return the entire contents of the plain-text file at *path*.

    Opens the file with explicit UTF-8 encoding and a context manager so the
    handle is always closed — the previous version leaked the file object and
    relied on the platform-default encoding.
    """
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()
43
 
44
+
45
  def extract_pdf(path):
46
  doc = pymupdf.open(path)
47
  text = ""
 
49
  text += page.get_text()
50
  return text
51
 
52
+
53
  def extract_docx(path):
54
  doc = docx.Document(path)
55
  data = []
 
58
  content = '\n\n'.join(data)
59
  return content
60
 
61
+
62
  def extract_pptx(path):
63
  prs = Presentation(path)
64
  text = ""
 
68
  text += shape.text + "\n"
69
  return text
70
 
71
+
72
  def mode_load(path):
73
  choice = ""
74
  file_type = path.split(".")[-1]
 
85
  choice = "doc"
86
  print(content[:100])
87
  return choice, content[:5000]
88
+
89
+
90
  elif file_type in ["png", "jpg", "jpeg", "bmp", "tiff", "webp"]:
91
  content = Image.open(path).convert('RGB')
92
  choice = "image"
93
  return choice, content
94
+
95
  else:
96
  raise gr.Error("Oops, unsupported files.")
97
 
98
+
99
  @spaces.GPU()
100
  def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
101
 
 
110
  print(f'history is - {history}')
111
  conversation = []
112
  prompt_files = []
113
+ if message["files"]:
114
+ choice, contents = mode_load(message["files"][-1])
 
115
  if choice == "image":
116
  conversation.append({"role": "user", "image": contents, "content": message['text']})
117
  elif choice == "doc":
 
119
  conversation.append({"role": "user", "content": format_msg})
120
  else:
121
  if len(history) == 0:
122
+ # raise gr.Error("Please upload an image first.")
123
  contents = None
124
  conversation.append({"role": "user", "content": message['text']})
125
  else:
126
+ # image = Image.open(history[0][0][0])
127
  for prompt, answer in history:
128
  if answer is None:
129
  prompt_files.append(prompt[0])
 
131
  else:
132
  conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
133
  if len(prompt_files) > 0:
134
+ choice, contents = mode_load(prompt_files[-1])
135
  else:
136
  choice = ""
137
  conversation.append({"role": "user", "image": "", "content": message['text']})
138
 
139
+
140
  if choice == "image":
141
  conversation.append({"role": "user", "image": contents, "content": message['text']})
142
  elif choice == "doc":
143
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
144
  conversation.append({"role": "user", "content": format_msg})
 
145
  print(f"Conversation is -\n{conversation}")
146
 
147
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True,
 
168
  buffer += new_text
169
  yield buffer
170
 
 
171
 
172
# Conversation-history display panel for the chat UI.
chatbot = gr.Chatbot()

# Combined input widget: free text plus file uploads, no visible label.
chat_input = gr.MultimodalTextbox(
    interactive=True,
    placeholder="Enter message or upload a file ...",
    show_label=False,
)
184
  EXAMPLES = [
185
  [{"text": "Write a poem about spring season in French Language", }],
186
  [{"text": "what does this chart mean?", "files": ["sales.png"]}],
 
194
  gr.ChatInterface(
195
  fn=stream_chat,
196
  multimodal=True,
197
+
198
+
199
  textbox=chat_input,
200
  chatbot=chatbot,
201
  fill_height=True,
 
246
  gr.Examples(EXAMPLES, [chat_input])
247
 
248
if __name__ == "__main__":
    # Run the Gradio app with the programmatic API surface disabled
    # and no public share link.
    demo.queue(api_open=False).launch(show_api=False, share=False)