alexkueck committed
Commit 62cee3f
1 parent: dd535b6

Update app.py

Files changed (1)
  1. app.py +17 -19
app.py CHANGED
@@ -8,6 +8,7 @@ import re
 import io
 from PIL import Image, ImageDraw, ImageOps, ImageFont
 from base64 import b64encode
+import shutil
 
 from langchain.chains import LLMChain, RetrievalQA
 from langchain.chat_models import ChatOpenAI
@@ -76,6 +77,7 @@ HEADERS = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"}
 PATH_WORK = "."
 CHROMA_DIR = "/chroma"
 YOUTUBE_DIR = "/youtube"
+HISTORY_PFAD = "/data/history"
 
 ###############################################
 #URLs to documents or other content to be included
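On Hugging Face Spaces, persistent storage is mounted under /data, which appears to be why the new HISTORY_PFAD points there. A minimal sketch, assuming the directory should exist before anything is copied into it (the helper below is illustrative and not part of this commit):

import os

HISTORY_PFAD = "/data/history"  # same constant the commit introduces

def ensure_history_dir(path: str = HISTORY_PFAD) -> str:
    # Create the persistent history directory if it is missing; exist_ok
    # keeps this safe to call again after a Space restart.
    os.makedirs(path, exist_ok=True)
    return path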
@@ -154,10 +156,21 @@ def umwandeln_fuer_anzeige(image):
     return buffer.getvalue()
 
 def process_image(image_path, prompt):
+    #Store the image from tmp in the Space
+    #temp_image_path = "/tmp/gradio/01834b95fcf793903d65ab947cc410dc1600d0df/bbb 1.png"
+    # Target directory for the persistent image
+    #target_directory = HISTORY_PFAD
+    # Target path for the persistent image
+    #target_image_path = os.path.join(target_directory, "bbb 1.png")
+    # Copy the temporary image into the target directory
+    #shutil.copy(temp_image_path, target_image_path)
+
     # Convert image to base64
-    with open(image_path, "rb") as image_file:
+    #with open(image_path, "rb") as image_file:
+    with open("data/history/bbb 1.png", "rb") as image_file:
         encoded_string = b64encode(image_file.read()).decode()
-
+    print("encoded string.................")
+    print(encoded_string)
     # Prepare the data for the API request (specific to the API you're using)
     data = {
         'image': encoded_string,
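The committed version hard-codes "data/history/bbb 1.png" and leaves the shutil copy commented out. A minimal sketch of the generic flow the comments seem to aim for, assuming the upload arrives as a temporary Gradio path and should be persisted under HISTORY_PFAD before encoding (the helper name is chosen here for illustration; it is not part of the commit):

import os
import shutil
from base64 import b64encode

HISTORY_PFAD = "/data/history"

def persist_and_encode(image_path: str, target_dir: str = HISTORY_PFAD) -> str:
    # Copy the temporary upload (e.g. a /tmp/gradio/... path) into the
    # persistent directory so it survives Space restarts.
    os.makedirs(target_dir, exist_ok=True)
    target_path = os.path.join(target_dir, os.path.basename(image_path))
    shutil.copy(image_path, target_path)
    # Read the persisted copy and return it base64-encoded, as process_image does.
    with open(target_path, "rb") as image_file:
        return b64encode(image_file.read()).decode()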
@@ -392,15 +405,10 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
         result = response.content
         #Output the image
         image = Image.open(io.BytesIO(result))
-        print("result image...............")
-        print (image)
         image_64 = umwandeln_fuer_anzeige(image)
-        print("result image 64...............")
-        print (image_64)
         chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode('utf-8'))
         history = history + [(prompt, result)]
-        print("history zeichnen......................")
-        print(chatbot)
+
         return chatbot, history, "Success"
     else:
         result = generate_text(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
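In the image branch above, umwandeln_fuer_anzeige presumably turns the PIL image back into PNG bytes, which are then base64-embedded into an <img> tag for the Gradio chatbot. A self-contained sketch of that round trip, assuming a PNG data URI is what the chatbot HTML expects (the function name here is illustrative):

import io
from base64 import b64encode
from PIL import Image

def image_to_img_tag(image: Image.Image) -> str:
    # Serialize the PIL image to PNG bytes in memory ...
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    png_bytes = buffer.getvalue()
    # ... and embed them as a base64 data URI, as the chatbot message does.
    return "<img src='data:image/png;base64,{0}'/>".format(b64encode(png_bytes).decode("utf-8"))

# Example: tag = image_to_img_tag(Image.new("RGB", (64, 64), "white"))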
@@ -410,11 +418,7 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
             history = history + [(prompt, result)]
         else:
             history = history + [((file,), None),(prompt, result)]
-
-        print("history nach Zusatz und mit KI Antwort...........")
-        print(history)
-        print("chatbot nach Zusatz und mit KI Antwort...........")
-        print(chatbot)
+
         return chatbot, history, "Success"
     """
     for character in result:
@@ -452,13 +456,7 @@ def generate_text (prompt, file, chatbot, history, rag_option, model_option, ope
         if (file == None):
             history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
         else:
-            print("file bild uplad....................")
-            print(file)
-            #image = Image.open(io.BytesIO(file_anzeigen))
-            #image_64 = umwandeln_fuer_anzeige(image)
             prompt_neu = process_image(file, prompt)
-            print("prompt_neu............................")
-            print(prompt_neu)
             history_text_und_prompt = generate_prompt_with_history_openai(prompt_neu, history)
 
     #format history for HuggingFace models
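process_image thus returns prompt_neu, a prompt that already carries the encoded image, and generate_text feeds it through the same history builder as plain text. If the target were one of OpenAI's vision-capable chat models (an assumption; the commit only notes that the request payload is API-specific), the combined prompt would more typically be structured like this:

from base64 import b64encode

def build_vision_message(image_path: str, prompt: str) -> dict:
    # Encode the image and pair it with the text prompt in the content-list
    # format accepted by OpenAI's vision-capable chat models.
    with open(image_path, "rb") as image_file:
        encoded = b64encode(image_file.read()).decode()
    return {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url",
             "image_url": {"url": f"data:image/png;base64,{encoded}"}},
        ],
    }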
 