GianJSX committed on
Commit
38bc0ad
1 Parent(s): 800575a

Update app.py

Files changed (1): app.py (+14 -4)
app.py CHANGED
@@ -9,13 +9,18 @@ os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
 model = "gpt-3.5-turbo-1106"
 model_vision = "gpt-4-vision-preview"
 setup_langsmith_config()
-
+
+
 def process_images(msg: cl.Message):
     # Processing images exclusively
     images = [file for file in msg.elements if "image" in file.mime]
 
     # Accessing the bytes of a specific image
     image_bytes = images[0].content # take the first image just for demo purposes
+    print(len(image_bytes))
+    # check the size of the image, max 1mb
+    if len(image_bytes) > 1000000:
+        return "too_large"
 
     # we need base64 encoded image
     image_base64 = base64.b64encode(image_bytes).decode('utf-8')
@@ -29,6 +34,8 @@ async def process_stream(stream, msg: cl.Message):
 def handle_vision_call(msg, image_history):
     image_base64 = None
     image_base64 = process_images(msg)
+    if image_base64 == "too_large":
+        return "too_large"
 
     if image_base64:
         # add the image to the image history
@@ -47,6 +54,7 @@ def handle_vision_call(msg, image_history):
         }
     )
     stream = gpt_vision_call(image_history)
+    # clear the image history
     image_history.clear()
     return stream
 
@@ -62,7 +70,7 @@ async def gpt_call(message_history: list = []):
 
     return stream
 
-@traceable(run_type="llm", name="gpt 4 turbo vision call")
+
 def gpt_vision_call(image_history: list = []):
     client = OpenAI()
 
@@ -84,7 +92,6 @@ def start_chat():
     cl.user_session.set("image_history", [{"role": "system", "content": "You are a helpful assistant."}])
 
 @cl.on_message
-@traceable(run_type="chain", name="gpt 4 turbo")
 async def on_message(msg: cl.Message):
     message_history = cl.user_session.get("message_history")
     image_history = cl.user_session.get("image_history")
@@ -94,6 +101,9 @@ async def on_message(msg: cl.Message):
 
     if msg.elements:
         stream = handle_vision_call(msg, image_history)
+        if stream == "too_large":
+            return await cl.Message(content="Image too large, max 1mb").send()
+
 
     else:
         # add the message in both to keep the coherence between the two histories
@@ -106,4 +116,4 @@ async def on_message(msg: cl.Message):
     await process_stream(stream, msg=stream_msg)
     message_history.append({"role": "system", "content": stream_msg.content})
 
-    return stream_msg.content
+    return stream_msg.content
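
The pattern this commit introduces can be read in isolation: process_images now returns the string sentinel "too_large" instead of encoded data when the upload exceeds 1 MB, and each caller checks for that sentinel before continuing. The sketch below is a minimal, hedged reproduction of that flow without Chainlit or OpenAI; the names MAX_IMAGE_BYTES, encode_image_or_flag, and handle_upload are illustrative and do not exist in app.py.

import base64

MAX_IMAGE_BYTES = 1_000_000  # same 1 MB cutoff as len(image_bytes) > 1000000 in the commit

def encode_image_or_flag(image_bytes: bytes) -> str:
    # Mirrors process_images: return base64 data, or the "too_large" sentinel.
    if len(image_bytes) > MAX_IMAGE_BYTES:
        return "too_large"
    return base64.b64encode(image_bytes).decode("utf-8")

def handle_upload(image_bytes: bytes) -> str:
    # Mirrors the caller side: check the sentinel before using the result,
    # the way handle_vision_call and on_message do after this commit.
    result = encode_image_or_flag(image_bytes)
    if result == "too_large":
        return "Image too large, max 1mb"
    return result

if __name__ == "__main__":
    print(handle_upload(b"tiny png payload"))   # base64 string
    print(handle_upload(b"\x00" * 2_000_000))   # Image too large, max 1mb

The string sentinel keeps the diff small, but it means process_images and handle_vision_call now return either a flag string or their normal result, so every caller has to remember to test for "too_large" before using the value.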