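"""Chainlit chat app that routes plain text messages to GPT-3.5-turbo and
messages with image attachments to GPT-4 vision, tracing every call to
LangSmith under a random per-process user ID."""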
import chainlit as cl
from openai import OpenAI
from langsmith.run_helpers import traceable
from langsmith_config import setup_langsmith_config
import base64
import os
import uuid

# The OpenAI client reads OPENAI_API_KEY from the environment; fail fast if it is missing.
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY is not set")
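
# Text model for plain chat; vision model for messages that carry image attachments.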
model = "gpt-3.5-turbo-1106"
model_vision = "gpt-4-vision-preview"
setup_langsmith_config()
# Generate a random user ID for this process, used to tag traces and API calls.
user_id = str(uuid.uuid4())

def process_images(msg: cl.Message):
    """Extract the first image attachment and return it base64-encoded.

    Returns "too_large" if the image exceeds 1 MB, or None if no image is attached.
    """
    images = [file for file in msg.elements if "image" in file.mime]
    if not images:
        return None

    # Take the first image only, for demo purposes.
    image_bytes = images[0].content
    # Reject images over 1 MB to keep the request payload small.
    if len(image_bytes) > 1_000_000:
        return "too_large"

    # The vision API expects a base64-encoded data URL, so encode the raw bytes.
    return base64.b64encode(image_bytes).decode("utf-8")

async def process_stream(stream, msg: cl.Message):
    """Forward tokens from an OpenAI streaming response into a Chainlit message."""
    for part in stream:
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

def handle_vision_call(msg: cl.Message, image_history: list):
    """Run the vision model on an image message.

    Returns the response stream, "too_large" if the image was rejected,
    or None if no image was found among the attachments.
    """
    image_base64 = process_images(msg)
    if image_base64 == "too_large":
        return "too_large"
    if not image_base64:
        return None

    # Add the user's text plus the image (as a data URL) to the image history.
    # NOTE: the data URL assumes JPEG; use the attachment's actual MIME type for other formats.
    image_history.append(
        {
            "role": "user",
            "content": [
                {"type": "text", "text": msg.content},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
                },
            ],
        }
    )
    stream = gpt_vision_call(image_history)
    # Clear the image history so each vision call only carries the current image.
    image_history.clear()
    return stream

@traceable(run_type="llm", name="gpt 3 turbo call", metadata={"user": user_id})
async def gpt_call(message_history: list = []):
    client = OpenAI()

    stream = client.chat.completions.create(
        model=model,
        messages=message_history,
        stream=True,
        user=user_id,
    )
    return stream

@traceable(run_type="llm", name="gpt 4 turbo vision call", metadata={"user": user_id})
def gpt_vision_call(image_history: list = []):
    client = OpenAI()
  
    stream = client.chat.completions.create(
        model=model_vision,
        messages=image_history,
        max_tokens=1000,
        stream=True,
        user=user_id,
    )

    return stream

@cl.on_chat_start
def start_chat():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant powered by GPT-3.5-turbo-1106, the latest version developed by OpenAI. You cannot see images yourself, but if the user uploads an image with a message, GPT-4-vision-preview handles it instead. If a user asks whether you can analyze images, tell them that, and mention that they can attach images with the upload button at the bottom left (above the text input), drag them into the chat, or simply copy-paste them."}],
    )
    cl.user_session.set(
        "image_history",
        [{"role": "system", "content": "You are a helpful assistant powered by GPT-4-vision-preview: when the user uploads an image, you can understand it. Plain text messages are handled by GPT-3.5-turbo-1106. If the user asks about your capabilities, you can tell them that."}],
    )

@cl.on_message
@traceable(run_type="chain", name="message", metadata={"user": user_id})
async def on_message(msg: cl.Message):
    message_history = cl.user_session.get("message_history")
    image_history = cl.user_session.get("image_history")

    stream_msg = cl.Message(content="")
    stream = None

    if msg.elements:
        stream = handle_vision_call(msg, image_history)
        if stream == "too_large":
            return await cl.Message(content="Image too large, max 1 MB").send()
    else:
        # Add the message to both histories to keep them coherent.
        message_history.append({"role": "user", "content": msg.content})
        image_history.append({"role": "user", "content": msg.content})

        stream = await gpt_call(message_history)

    if stream:
        await process_stream(stream, msg=stream_msg)
        # Finalize the streamed message so it is persisted in the UI.
        await stream_msg.send()
        # Record the model's reply as an assistant turn, not a system message.
        message_history.append({"role": "assistant", "content": stream_msg.content})
        image_history.append({"role": "assistant", "content": stream_msg.content})

    return stream_msg.content
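
# Launch with the Chainlit CLI (assuming this file is saved as app.py):
#   chainlit run app.py -w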