mateoluksenberg committed on
Commit
0c44805
1 Parent(s): 9ab7e35

Update app.py

Files changed (1)
  1. app.py +63 -79
app.py CHANGED
@@ -10,14 +10,13 @@ import pymupdf
 import docx
 from pptx import Presentation
 
-
 MODEL_LIST = ["nikravan/glm-4vq"]
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 MODEL_ID = MODEL_LIST[0]
 MODEL_NAME = "GLM-4vq"
 
-TITLE = "<h1>3ML-bot</h1>"
+TITLE = "<h1>AI Chat con Documentos </h1>"
 
 DESCRIPTION = f"""
 <center>
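A note on the unchanged lines above: HF_TOKEN is read from the environment but never forwarded in the hunks shown here. If the model repo were gated, the token would have to be passed explicitly; a minimal sketch (the token= kwarg is standard transformers API, but wiring it in is not part of this commit):

    # Sketch only, not in this commit: forward the token for gated/private repos.
    # token=None is harmless for public models such as nikravan/glm-4vq.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, token=HF_TOKEN)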
@@ -31,17 +30,19 @@ h1 {
 text-align: center;
 display: block;
 }
-"""
 
+#chatbot-container {
+    width: 90%;
+    height: 600px; /* Adjust the height as needed */
+    margin: auto;
+}
+"""
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
 
-
-
 def extract_text(path):
     return open(path, 'r').read()
 
-
 def extract_pdf(path):
     doc = pymupdf.open(path)
     text = ""
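One nit in the unchanged extract_text: the handle returned by open() is never closed and the encoding is left implicit. A safer variant, as a sketch (errors="ignore" is an assumption about how tolerant the app wants to be of odd bytes):

    def extract_text(path):
        # Context manager closes the file; explicit encoding avoids locale surprises.
        with open(path, 'r', encoding='utf-8', errors='ignore') as f:
            return f.read()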
@@ -49,7 +50,6 @@ def extract_pdf(path):
         text += page.get_text()
     return text
 
-
 def extract_docx(path):
     doc = docx.Document(path)
     data = []
@@ -58,7 +58,6 @@ def extract_docx(path):
     content = '\n\n'.join(data)
     return content
 
-
 def extract_pptx(path):
     prs = Presentation(path)
     text = ""
@@ -68,7 +67,6 @@ def extract_pptx(path):
             text += shape.text + "\n"
     return text
 
-
 def mode_load(path):
     choice = ""
     file_type = path.split(".")[-1]
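For context, extract_pptx walks slides and shapes, but in python-pptx only shapes with a text frame expose .text, so the usual idiom guards that access. A sketch of the full function under that assumption (the hunk only shows its last two lines):

    def extract_pptx(path):
        prs = Presentation(path)
        text = ""
        for slide in prs.slides:
            for shape in slide.shapes:
                if shape.has_text_frame:  # pictures/charts have no text to read
                    text += shape.text + "\n"
        return text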
@@ -85,20 +83,15 @@ def mode_load(path):
         choice = "doc"
         print(content[:100])
         return choice, content[:5000]
-
-
     elif file_type in ["png", "jpg", "jpeg", "bmp", "tiff", "webp"]:
         content = Image.open(path).convert('RGB')
         choice = "image"
         return choice, content
-
     else:
         raise gr.Error("Oops, unsupported files.")
 
-
 @spaces.GPU()
 def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
-
     model = AutoModelForCausalLM.from_pretrained(
         MODEL_ID,
         torch_dtype=torch.bfloat16,
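Note that stream_chat re-creates the model on every request. Under ZeroGPU, loading inside the @spaces.GPU() function keeps CUDA allocation within the granted GPU window; on a Space with a persistent GPU, a one-time module-level load would cut per-request latency. A sketch under that persistent-GPU assumption (trust_remote_code and device_map are assumptions about kwargs not visible in the hunk):

    # Assumption: persistent GPU, so load once at import time instead of per call.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,  # GLM-4V repos ship custom modeling code
        device_map="auto",
    )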
@@ -119,11 +112,9 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
         conversation.append({"role": "user", "content": format_msg})
     else:
         if len(history) == 0:
-            # raise gr.Error("Please upload an image first.")
             contents = None
             conversation.append({"role": "user", "content": message['text']})
         else:
-            # image = Image.open(history[0][0][0])
             for prompt, answer in history:
                 if answer is None:
                     prompt_files.append(prompt[0])
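The answer-is-None test above leans on how Gradio's tuples-format history encodes uploads: a file-only turn arrives with the file path wrapped in a tuple as the prompt and None as the answer. Roughly the structure the loop assumes (file name hypothetical):

    # Hypothetical history in Gradio's "tuples" format:
    history = [
        (("demanda.pdf",), None),           # upload turn: prompt[0] is the file path
        ("Quien es el Demandado?", "..."),  # ordinary text turn with a model reply
    ]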
@@ -135,8 +126,6 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
                 else:
                     choice = ""
                     conversation.append({"role": "user", "image": "", "content": message['text']})
-
-
     if choice == "image":
         conversation.append({"role": "user", "image": contents, "content": message['text']})
     elif choice == "doc":
@@ -168,7 +157,6 @@ def stream_chat(message, history: list, temperature: float, max_length: int, top
         buffer += new_text
         yield buffer
 
-
 chatbot = gr.Chatbot(
     #rtl=True,
 )
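The buffer/yield tail above is the standard TextIteratorStreamer pattern: generate() runs in a background thread while the streamer is consumed as an iterator, so the Gradio callback can yield partial text. A sketch of the machinery the hunk omits (the generation kwargs are assumptions matched to the slider names):

    from threading import Thread
    from transformers import TextIteratorStreamer

    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        **inputs,                  # tokenized conversation, built earlier in stream_chat
        streamer=streamer,
        max_new_tokens=max_length,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=penalty,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    buffer = ""
    for new_text in streamer:      # yields decoded text as it is generated
        buffer += new_text
        yield buffer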
@@ -177,73 +165,69 @@ chat_input = gr.MultimodalTextbox(
     placeholder="Enter message or upload a file ...",
     show_label=False,
     #rtl=True,
-
-
-
 )
+
 EXAMPLES = [
     [{"text": "Quien es el Demandado?", }],
-    [{"text": "Resumir el Documento?", }],
-    [{"text": "Explicar el Documento?", }]
+    [{"text": "Resumir el Documento", }],
+    [{"text": "Explicar el Documento", }]
 ]
 
 with gr.Blocks(css=CSS, theme="soft", fill_height=True) as demo:
     gr.HTML(TITLE)
     gr.HTML(DESCRIPTION)
-    gr.ChatInterface(
-        fn=stream_chat,
-        multimodal=True,
-
-
-        textbox=chat_input,
-        chatbot=chatbot,
-        fill_height=True,
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-        additional_inputs=[
-            gr.Slider(
-                minimum=0,
-                maximum=1,
-                step=0.1,
-                value=0.8,
-                label="Temperature",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=1024,
-                maximum=8192,
-                step=1,
-                value=4096,
-                label="Max Length",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=0.0,
-                maximum=1.0,
-                step=0.1,
-                value=1.0,
-                label="top_p",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=1,
-                maximum=20,
-                step=1,
-                value=10,
-                label="top_k",
-                render=False,
-            ),
-            gr.Slider(
-                minimum=0.0,
-                maximum=2.0,
-                step=0.1,
-                value=1.0,
-                label="Repetition penalty",
-                render=False,
-            ),
-        ],
-    ),
-    gr.Examples(EXAMPLES, [chat_input])
+    with gr.Column(id="chatbot-container"):
+        gr.ChatInterface(
+            fn=stream_chat,
+            multimodal=True,
+            textbox=chat_input,
+            chatbot=chatbot,
+            fill_height=True,
+            additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+            additional_inputs=[
+                gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.1,
+                    value=0.8,
+                    label="Temperature",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=1024,
+                    maximum=8192,
+                    step=1,
+                    value=4096,
+                    label="Max Length",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.1,
+                    value=1.0,
+                    label="top_p",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=1,
+                    maximum=20,
+                    step=1,
+                    value=10,
+                    label="top_k",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=0.0,
+                    maximum=2.0,
+                    step=0.1,
+                    value=1.0,
+                    label="Repetition penalty",
+                    render=False,
+                ),
+            ],
+        ),
+        gr.Examples(EXAMPLES, [chat_input])
 
 if __name__ == "__main__":
-
-    demo.queue(api_open=False).launch(show_api=False, share=False, )#server_name="0.0.0.0", )
+    demo.queue(api_open=False).launch(show_api=False, share=False)
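One likely bug in the added layout code: Gradio layout blocks take elem_id, not id, so gr.Column(id="chatbot-container") should fail with a TypeError, and the new #chatbot-container CSS rule matches a DOM element id. The presumably intended call:

    # elem_id sets the DOM id that the #chatbot-container CSS selector targets.
    with gr.Column(elem_id="chatbot-container"):
        ...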
 