alexkueck committed
Commit 5f41473
1 Parent(s): b92c57d

Update app.py

Files changed (1):
  app.py  +9 -6
app.py CHANGED
@@ -114,13 +114,15 @@ def clear_all():
 def add_text(chatbot, history, prompt, file):
     if (file == None):
         chatbot = chatbot +[(prompt, None)]
+        history = history +[(prompt, None)]
     else:
         if (prompt == ""):
             chatbot=chatbot + [((file.name,), "Prompt fehlt!")]
+            history=history + [((file.name,), "Prompt fehlt!")]
         else:
             chatbot = chatbot +[((file.name,), None), (prompt, None)]
-    print("chatbot nach add_text............")
-    print(chatbot)
+            history = history +[((file.name,), None), (prompt, None)]
+
     return chatbot, history, prompt, file, gr.Image(visible = False), "" #gr.Image( label=None, size=(30,30), visible=False, scale=1) #gr.Textbox(value="", interactive=False)
 
 def add_text2(chatbot, prompt):
@@ -204,6 +206,7 @@ def process_image(image_path, prompt):
 ##################################################
 #openassistant um uploaded Files zu analysieren
 def create_assistant(prompt, file):
+    #zentral einmal erzeugen!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     client = OpenAI()
     assistant = client.beta.assistants.create(name="File Analysator",instructions=template, model="gpt-4-1106-preview",)
     thread = client.beta.threads.create()
@@ -213,8 +216,6 @@ def create_assistant(prompt, file):
     thread, run = create_thread_and_run(prompt, client, assistant.id)
     run = wait_on_run(run, thread, client)
     response = get_response(thread, client, assistant.id)
-    print("response...................")
-    print(response)
     result = response.data[1].content[0].text.value
     return result
 
@@ -228,7 +229,7 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
     #kein Bild hochgeladen -> auf Text antworten...
     if (file == None):
         result = generate_text(prompt, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
-        history = history + [(prompt, result)]
+        #history = history + [(prompt, result)]
     else:
         #Es wurde ein Bild angehängt -> wenn prompt dazu, das Bild analysieren
         #geht nur über spezielle OpenAI-Schnittstelle...
@@ -238,11 +239,13 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
         else:
             result = generate_text_zu_doc(file, prompt, k, rag_option, chatbot)
 
-        history = history + [((file,), None),(prompt, result)]
+        #history = history + [((file,), None),(prompt, result)]
 
     chatbot[-1][1] = ""
+    history[-1][1] = ""
     for character in result:
         chatbot[-1][1] += character
+        history[-1][1] += character
         time.sleep(0.03)
         yield chatbot, history, None, "Generating"
         if shared_state.interrupted:
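
Taken together, the change makes history mirror chatbot: add_text now appends the same (user, bot) pairs to both lists, the per-turn history bookkeeping in generate_auswahl is commented out, and the streaming loop fills in the last pair of both lists character by character. A minimal sketch of that pattern, with hypothetical helpers append_user_turn and stream_answer standing in for add_text and the tail of generate_auswahl, and assuming both states are plain lists of [user, bot] pairs:

import time

def append_user_turn(chatbot, history, prompt):
    # Add the new user turn to both the visible chat and the model history;
    # the bot slot stays None until an answer is generated.
    chatbot = chatbot + [[prompt, None]]
    history = history + [[prompt, None]]
    return chatbot, history

def stream_answer(chatbot, history, result):
    # Fill the last turn of both lists character by character and yield
    # intermediate states so the UI can update progressively.
    chatbot[-1][1] = ""
    history[-1][1] = ""
    for character in result:
        chatbot[-1][1] += character
        history[-1][1] += character
        time.sleep(0.03)
        yield chatbot, history

chatbot, history = append_user_turn([], [], "Hallo")
for chatbot, history in stream_answer(chatbot, history, "Hallo zurück!"):
    pass
print(chatbot == history)  # True: both end as [['Hallo', 'Hallo zurück!']]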
 
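
The new comment in create_assistant ("zentral einmal erzeugen", i.e. create it centrally, once) flags that the OpenAI client and the assistant are still recreated on every call. One way to follow up on that note, sketched here as an assumption rather than anything the commit does, is to cache the client/assistant pair and only create threads per request; the API calls are exactly the ones visible in the diff, while get_assistant and the lru_cache wrapper are hypothetical:

from functools import lru_cache
from openai import OpenAI

@lru_cache(maxsize=1)
def get_assistant(instructions):
    # Create the OpenAI client and the "File Analysator" assistant only once
    # and reuse them for every uploaded file; threads stay per-request.
    client = OpenAI()
    assistant = client.beta.assistants.create(
        name="File Analysator",
        instructions=instructions,
        model="gpt-4-1106-preview",
    )
    return client, assistant

create_assistant(prompt, file) could then start with client, assistant = get_assistant(template), with the rest of its body (create_thread_and_run, wait_on_run, get_response) unchanged.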