File size: 7,720 Bytes
814d02e
 
811a038
814d02e
 
d20cc85
814d02e
f29b427
 
 
 
 
 
 
 
814d02e
abd856e
d20cc85
814d02e
 
 
 
 
b754110
 
814d02e
6a3d30f
814d02e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b754110
 
 
 
 
 
 
814d02e
 
 
 
abd856e
 
 
 
 
 
 
 
 
 
814d02e
77388cc
f281c76
950b488
814d02e
fefff88
814d02e
abd856e
 
f281c76
17a9c23
 
abd856e
 
 
 
 
 
e3ebf83
814d02e
 
bf8e2a6
814d02e
 
77388cc
814d02e
 
 
 
 
 
17a9c23
5b2530d
814d02e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eac8bcc
814d02e
 
 
 
 
eac8bcc
814d02e
 
 
 
 
85871a1
814d02e
 
 
 
 
8004243
 
5da4e93
d301cba
814d02e
 
 
863d8e4
814d02e
 
 
e4715e7
 
6a3d30f
e4715e7
 
 
6a3d30f
e4715e7
 
 
 
 
814d02e
 
7436282
814d02e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
import gradio as gr
import openai
from openai import OpenAI
import requests
import csv
import os

# Default system prompt: the assistant plays IBS doctor + nutritionist + chef
# and must recommend a single low-FODMAP recipe or restaurant item.
# NOTE(review): "You role" looks like a typo for "Your role" — left as-is here
# because the string is sent to the model at runtime; confirm before changing.
default_role = """You role is a combination of Irritable Bowel Syndrome doctor, Nutritionist and 
Chef. The user needs food recommendations using low FODMAP diet. You need to 
recommend a single delicious recipe or an item from a restaurant, that uses 
low FODMAP ingredients. 
If it is a restaurant recommendation do not give instructions or directions to
cook but suggest how to order.
If it is a recipe explain the substitutions that were made to make it low FODMAP.
"""
# Follow-up user turn appended after each answer to request a markdown table
# classifying that recipe's ingredients by FODMAP level (second API call).
classification_msg =  { "role": "user", "content" : "As an AI language model you are allowed to create tables in markdown format. Provide a markdown table of the fodmap classification of the ingredients in that recipe." }
# Chat model used for both completions in submit_message().
LLM_MODEL = 'gpt-4-1106-preview'
# API key read from the environment; None when unset (requests will then fail
# and the error is surfaced in the chat window).
OPEN_AI_KEY = os.environ.get('OPEN_AI_KEY')

def get_empty_state():
    """Return a fresh per-session state: no chat history, zero tokens used."""
    return {"messages": [], "total_tokens": 0}

def on_token_change(user_token):
    """No-op placeholder for the (disabled) per-user API-key override.

    The key now comes from the OPEN_AI_KEY environment variable instead of
    the UI, so this handler intentionally does nothing.
    """
    # openai.api_key = user_token  -- disabled; see OPEN_AI_KEY above
    pass

def _chat_pairs(history):
    """Pair the flat [user, assistant, user, assistant, ...] message list
    into (user_text, assistant_text) tuples for the gr.Chatbot component."""
    return [(history[i]['content'], history[i + 1]['content'])
            for i in range(0, len(history) - 1, 2)]


def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature, max_tokens, context_length, state):
    """Run one chat turn against the OpenAI API.

    Sends the user prompt (with the system role, optional food allow/deny
    lists, and the last `context_length` exchanges of history) to the model,
    then issues a second request asking for a markdown table classifying the
    answer's ingredients by FODMAP level.

    Returns a 5-tuple matching the Gradio callback outputs:
    (input_box_value, chatbot_pairs, token_usage_markdown, state, table_markdown).
    """
    history = state['messages']

    if not prompt:
        # BUG FIX: this early return previously produced only 4 values, but the
        # click/submit callbacks declare 5 outputs (the last being `table`);
        # include an empty table so the output arity matches.
        return (gr.update(value=''), _chat_pairs(history),
                f"Total tokens used: {state['total_tokens']}", state, "")

    if not prompt_template:
        prompt_template = default_role
    system_prompt = [{ "role": "system", "content": prompt_template }]

    # Optional per-user overrides of the FODMAP classification.
    food_priming_prompt = []
    if good_foods:
        food_priming_prompt.append({ "role": "system", "content": "Even if they are high fodmap, the following foods are known to be ok: " + good_foods + ". These ingredients can be included in any recipes that are suggested even if they are classified as high fodmap."})
    if bad_foods:
        food_priming_prompt.append({ "role": "system", "content": "Exclude the following ingredients: " + bad_foods + ". Recipes that include these excluded ingredients should not be returned, or should be modified to not include any of the excluded ingredients."})

    prompt_msg = { "role": "user", "content": prompt }
    table = ""

    try:
        client = OpenAI(api_key=OPEN_AI_KEY)
        # First call: the actual recommendation. Only the last
        # `context_length` user/assistant exchanges are sent to bound cost.
        messages1 = system_prompt + food_priming_prompt + history[-context_length*2:] + [prompt_msg]
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=messages1,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=False)

        history.append(prompt_msg)
        answer = {'role': 'assistant', 'content': completion.choices[0].message.content }
        history.append(answer)
        state['total_tokens'] += completion.usage.total_tokens

        # Second call: markdown FODMAP-classification table for the answer
        # just produced (history itself is not sent again).
        messages2 = system_prompt + food_priming_prompt + [answer] + [classification_msg]
        completion2 = client.chat.completions.create(
            model=LLM_MODEL,
            messages=messages2,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=False)
        table = completion2.choices[0].message.content
        state['total_tokens'] += completion2.usage.total_tokens

    except Exception as e:
        # Surface API failures in the chat window instead of crashing the UI.
        # BUG FIX: only append prompt_msg if it is not already in the history —
        # a failure in the *second* call used to append it a second time,
        # corrupting the user/assistant pairing.
        if prompt_msg not in history:
            history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
    return '', _chat_pairs(history), total_tokens_used_msg, state, table

def clear_conversation():
    """Reset the UI: input box, chat window, token counter, state, and table."""
    cleared_input = gr.update(value=None, visible=True)
    return cleared_input, None, "", get_empty_state(), ""


# Page-level CSS applied to the gr.Blocks layout below; the #ids match the
# elem_id values given to the individual components.
css = """
      #col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
      #chatbox {min-height: 400px;}
      #header {text-align: center;}
      #total_tokens_str {text-align: right; font-size: 0.8em; color: #666;}
      #label {font-size: 0.8em; padding: 0.5em; margin: 0;}
      .message { font-size: 1.2em; }
      """

# UI construction: a two-column layout (chat on the left, hidden prompt
# controls + advanced sliders on the right), then the event wiring.
with gr.Blocks(css=css, title='Low FODMAP Assistant') as demo:
    
    # Per-session state: {"total_tokens": int, "messages": [...]}.
    state = gr.State(get_empty_state())


    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# GutWise""",
                    elem_id="header")

        with gr.Row():
            with gr.Column(scale=7):
                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True)
                btn_submit = gr.Button("Submit")
                chatbot = gr.Chatbot(elem_id="chatbox")
                # Markdown area for the FODMAP classification table returned
                # by the second API call in submit_message.
                table = gr.Markdown()
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
            with gr.Column(scale=3, min_width=100):
                # gr.Markdown("Enter your OpenAI API Key. You can get one [here](https://platform.openai.com/account/api-keys).", elem_id="label")
                # user_token = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", show_label=False)
                # Key comes from the environment now; this is a plain string,
                # not a component, and is not wired to any callback.
                user_token = OPEN_AI_KEY
                # These three textboxes are invisible but still passed as
                # inputs to submit_message (prompt template + food lists).
                prompt_template = gr.Textbox(value=default_role, show_label=False, placeholder="Role", visible=False)
                good_foods = gr.Textbox(show_label=False, placeholder="Can have foods", visible=False)
                bad_foods = gr.Textbox(show_label=False, placeholder="Can't have foods", visible=False)
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.3, step=0.1, label="Temperature", info="Higher = more creative/chaotic")
                    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, label="Max tokens per response")
                    context_length = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Context length", info="Number of previous messages to send to the chatbot. Be careful with high values, it can blow up the token budget quickly.")

    # Both the Submit button and pressing Enter route through submit_message;
    # note the 5 declared outputs (submit_message must always return 5 values).
    btn_submit.click(
        submit_message, 
        [input_message, prompt_template, good_foods, bad_foods, temperature, max_tokens, context_length, state], 
        [input_message, chatbot, total_tokens_str, state, table])
    input_message.submit(
        submit_message, 
        [input_message, prompt_template, good_foods, bad_foods, temperature, max_tokens, context_length, state], 
        [input_message, chatbot, total_tokens_str, state, table])
    btn_clear_conversation.click(
        clear_conversation, [], 
        [input_message, chatbot, total_tokens_str, state, table])
    # user_token.change(on_token_change, inputs=[user_token], outputs=[])


# demo.queue(concurrency_count=10)  # (disabled) request queuing for concurrent users
# Start the Gradio server; blocks until the app is stopped.
demo.launch(height='800px')