import chainlit as cl
from openai import OpenAI
from langsmith.run_helpers import traceable
from langsmith_config import setup_langsmith_config
import base64
import os

# OpenAI() picks up OPENAI_API_KEY from the environment automatically.
model = "gpt-3.5-turbo-1106"
model_vision = "gpt-4-vision-preview"
setup_langsmith_config()
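# Note: setup_langsmith_config comes from the local langsmith_config module;
# it is assumed to set the LangSmith tracing environment (API key, project,
# tracing flag) so that the @traceable decorator below can record runs.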

def process_images(msg: cl.Message):
    # keep only the image attachments on the incoming message
    images = [file for file in msg.elements if "image" in file.mime]
    if not images:
        return None

    # take the first image only, for demo purposes
    image_bytes = images[0].content
    # reject images larger than 1 MB
    if len(image_bytes) > 1_000_000:
        return "too_large"

    # the vision endpoint expects the image base64-encoded
    return base64.b64encode(image_bytes).decode("utf-8")

async def process_stream(stream, msg: cl.Message):
    # forward each streamed token to the Chainlit message as it arrives
    for part in stream:
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)
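
# Note: iterating the sync OpenAI stream inside a coroutine blocks the event
# loop between chunks; openai's AsyncOpenAI client with `async for` would be
# the fully non-blocking alternative.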

def handle_vision_call(msg, image_history):
    image_base64 = process_images(msg)
    if image_base64 == "too_large":
        return "too_large"
    if not image_base64:
        return None

    # add the user's text and the base64 image to the vision history
    image_history.append(
        {
            "role": "user",
            "content": [
                {"type": "text", "text": msg.content},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{image_base64}"
                    },
                },
            ],
        }
    )
    stream = gpt_vision_call(image_history)
    # clear the image history so each vision request starts fresh
    image_history.clear()
    return stream

@traceable(run_type="llm", name="gpt-3.5-turbo call")
async def gpt_call(message_history: list):
    client = OpenAI()

    # the sync client returns an iterable of streamed completion chunks
    stream = client.chat.completions.create(
        model=model,
        messages=message_history,
        stream=True,
    )

    return stream


def gpt_vision_call(image_history: list):
    client = OpenAI()

    stream = client.chat.completions.create(
        model=model_vision,
        messages=image_history,
        max_tokens=300,
        stream=True,
    )

    return stream

@cl.on_chat_start
def start_chat():
    # keep two parallel histories: one for text-only chat, one for vision calls
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    cl.user_session.set(
        "image_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )

@cl.on_message
async def on_message(msg: cl.Message):
    message_history = cl.user_session.get("message_history")
    image_history = cl.user_session.get("image_history")

    stream_msg = cl.Message(content="")
    stream = None

    if msg.elements:
        stream = handle_vision_call(msg, image_history)
        if stream == "too_large":
            return await cl.Message(content="Image too large, max 1 MB").send()
    else:
        # add the message to both histories to keep them coherent
        message_history.append({"role": "user", "content": msg.content})
        image_history.append({"role": "user", "content": msg.content})

        stream = await gpt_call(message_history)

    if stream:
        await process_stream(stream, msg=stream_msg)
        # the model's reply is an assistant turn, not a system message
        message_history.append({"role": "assistant", "content": stream_msg.content})

    return stream_msg.content
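
# A minimal sketch of how to launch the app, assuming this file is app.py:
#   chainlit run app.py -w
# (-w watches the file and reloads on changes.)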