import streamlit as st
from openai import OpenAI
import json, os

# Enable verbose console logging for testing
debug_mode = True

# Initialize the OpenAI client; the key is read from the OPENAI_API_KEY environment variable
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Initialize assistants and vector stores.
# Note: Streamlit re-executes this whole script on every user interaction, so
# in production this setup should run once per process rather than at module
# level (see the cached-factory sketch below).
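# A cached-factory sketch: st.cache_resource runs the decorated function once
# per process and reuses the result across reruns, so the assistants and
# vector stores would not be re-created on every interaction. Shown as a
# commented outline only; the module-level setup below is what actually runs.
#
# @st.cache_resource
# def get_resources():
#     # create the three assistants and vector stores exactly as done below
#     return assistant1, assistant2, assistant3
#
# assistant1, assistant2, assistant3 = get_resources()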
# Processing Level assistant
assistant1 = client.beta.assistants.create(
  name="Processing Level",
  instructions="You are an expert dietician. Use your knowledge base to answer questions about the processing level of a food product.",
  model="gpt-4o",
  tools=[{"type": "file_search"}],
  temperature=0,
  top_p=0.85,
)

# Harmful Ingredients assistant
assistant2 = client.beta.assistants.create(
  name="Harmful Ingredients",
  instructions="You are an expert dietician. Use your knowledge base to answer questions about the ingredients in a food product.",
  model="gpt-4o",
  tools=[{"type": "file_search"}],
  temperature=0,
  top_p=0.85,
)

# Misleading Claims assistant
assistant3 = client.beta.assistants.create(
  name="Misleading Claims",
  instructions="You are an expert dietician. Use your knowledge base to answer questions about misleading claims made about a food product.",
  model="gpt-4o",
  tools=[{"type": "file_search"}],
  temperature=0,
  top_p=0.85,
)
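
# The three assistants above differ only in name and instructions. A small
# factory like this sketch could create them in a loop (left unwired to keep
# the original flow intact):
def make_assistant(name, instructions):
    """Create a file-search assistant with the shared model settings."""
    return client.beta.assistants.create(
        name=name,
        instructions=instructions,
        model="gpt-4o",
        tools=[{"type": "file_search"}],
        temperature=0,
        top_p=0.85,
    )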

# Create a vector store
vector_store1 = client.beta.vector_stores.create(name="Processing Level Vec")

# Ready the files for upload to OpenAI
file_paths = ["Processing_Level.docx"]
file_streams = [open(path, "rb") for path in file_paths]

# Use the upload and poll SDK helper to upload the files, add them to the vector store,
# and poll the status of the file batch for completion.
file_batch1 = client.beta.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store1.id, files=file_streams
)

# You can print the status and the file counts of the batch to see the result of this operation.
print(file_batch1.status)
print(file_batch1.file_counts)

# Create a vector store
vector_store2 = client.beta.vector_stores.create(name="Harmful Ingredients Vec")

# Ready the files for upload to OpenAI
file_paths = ["Ingredients.docx"]
file_streams = [open(path, "rb") for path in file_paths]

# Use the upload and poll SDK helper to upload the files, add them to the vector store,
# and poll the status of the file batch for completion.
file_batch2 = client.beta.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store2.id, files=file_streams
)

# You can print the status and the file counts of the batch to see the result of this operation.
print(file_batch2.status)
print(file_batch2.file_counts)

# Create a vector store
vector_store3 = client.beta.vector_stores.create(name="Misleading Claims Vec")

# Ready the files for upload to OpenAI
file_paths = ["MisLeading_Claims.docx"]
file_streams = [open(path, "rb") for path in file_paths]

# Use the upload and poll SDK helper to upload the files, add them to the vector store,
# and poll the status of the file batch for completion.
file_batch3 = client.beta.vector_stores.file_batches.upload_and_poll(
  vector_store_id=vector_store3.id, files=file_streams
)

# You can print the status and the file counts of the batch to see the result of this operation.
print(file_batch3.status)
print(file_batch3.file_counts)
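
# The three create-and-upload blocks above repeat the same pattern. A helper
# like this sketch could replace them (left unwired to keep the original flow
# intact); ExitStack also closes the file handles after the upload, which the
# inline blocks above never do.
from contextlib import ExitStack

def create_vector_store_with_files(name, file_paths):
    """Create a vector store, upload the given files, and poll to completion."""
    vector_store = client.beta.vector_stores.create(name=name)
    with ExitStack() as stack:
        streams = [stack.enter_context(open(path, "rb")) for path in file_paths]
        file_batch = client.beta.vector_stores.file_batches.upload_and_poll(
            vector_store_id=vector_store.id, files=streams
        )
    print(file_batch.status, file_batch.file_counts)
    return vector_store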

# Processing Level
assistant1 = client.beta.assistants.update(
  assistant_id=assistant1.id,
  tool_resources={"file_search": {"vector_store_ids": [vector_store1.id]}},
)

# Harmful Ingredients
assistant2 = client.beta.assistants.update(
  assistant_id=assistant2.id,
  tool_resources={"file_search": {"vector_store_ids": [vector_store2.id]}},
)

# Misleading Claims
assistant3 = client.beta.assistants.update(
  assistant_id=assistant3.id,
  tool_resources={"file_search": {"vector_store_ids": [vector_store3.id]}},
)
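
# Note: the Assistants v2 create call also accepts tool_resources, so if the
# vector stores were created first, each update above could be folded into the
# corresponding create. A commented sketch for assistant1:
#
# assistant1 = client.beta.assistants.create(
#     name="Processing Level",
#     instructions="...",
#     model="gpt-4o",
#     tools=[{"type": "file_search"}],
#     tool_resources={"file_search": {"vector_store_ids": [vector_store1.id]}},
# )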

def analyze_processing_level(ingredients, product_info, assistant_id):
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "Categorize product " + product_info["brandName"] + " " + product_info["productName"] + " that has the following ingredients: " + ', '.join(ingredients) + " into Group A, Group B, or Group C based on the document. The output must only be the group category name (Group A, Group B, or Group C) along with the definition of the respective category. If the group category cannot be determined, output 'NOT FOUND'.",
            }
        ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )

    messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))

    message_content = messages[0].content[0].text

    # Strip inline citation markers (file-search annotations) from the text
    for annotation in message_content.annotations:
        message_content.value = message_content.value.replace(annotation.text, "")

    if debug_mode:
        print(message_content.value)
    return message_content.value

def analyze_harmful_ingredients(ingredients, product_info, assistant_id):
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "Provide detailed information about product " + product_info["brandName"] + " " + product_info["productName"] + " that has the following ingredients: " + ', '.join(ingredients) + ". The output must be in JSON format: {<ingredient_name>: <information from the document>}. If information about an ingredient is not found in the documents, the value for that ingredient must start with the prefix '(NOT FOUND IN DOCUMENT)' followed by the LLM's response based on its own knowledge.",
            }
        ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )

    messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
    message_content = messages[0].content[0].text

    # Strip inline citation markers (file-search annotations) from the text
    for annotation in message_content.annotations:
        if getattr(annotation, "file_citation", None):
            message_content.value = message_content.value.replace(annotation.text, "")

    # Strip the surrounding ```json code fence before parsing
    cleaned = message_content.value.replace("```json", "").replace("```", "")

    if debug_mode:
        print(message_content.value)
        ingredients_not_found_in_doc = []
        for key, value in json.loads(cleaned).items():
            if value.startswith("(NOT FOUND IN DOCUMENT)"):
                ingredients_not_found_in_doc.append(key)
        print(f"Ingredients not found in the harmful ingredients doc: {', '.join(ingredients_not_found_in_doc)}")

    harmful_ingredient_analysis = json.loads(cleaned.replace("(NOT FOUND IN DOCUMENT) ", ""))

    harmful_ingredient_analysis_str = ""
    for key, value in harmful_ingredient_analysis.items():
        harmful_ingredient_analysis_str += f"{key}: {value}\n"
    return harmful_ingredient_analysis_str

def analyze_claims(claims, assistant_id):
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "Provide detailed information about the following claims: " + ', '.join(claims) + ". The output must be in JSON format: {<claim_name>: <information from the document>}. If information about a claim is not found in the documents, the value for that claim must start with the prefix '(NOT FOUND IN DOCUMENT)' followed by the LLM's response based on its own knowledge.",
            }
        ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )

    messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
    message_content = messages[0].content[0].text

    # Strip inline citation markers (file-search annotations) from the text
    for annotation in message_content.annotations:
        if getattr(annotation, "file_citation", None):
            message_content.value = message_content.value.replace(annotation.text, "")

    # Strip the surrounding ```json code fence before parsing
    cleaned = message_content.value.replace("```json", "").replace("```", "")

    if debug_mode:
        print(message_content.value)
        claims_not_found_in_doc = []
        for key, value in json.loads(cleaned).items():
            if value.startswith("(NOT FOUND IN DOCUMENT)"):
                claims_not_found_in_doc.append(key)
        print(f"Claims not found in the doc: {', '.join(claims_not_found_in_doc)}")

    claims_analysis = json.loads(cleaned.replace("(NOT FOUND IN DOCUMENT) ", ""))

    claims_analysis_str = ""
    for key, value in claims_analysis.items():
        claims_analysis_str += f"{key}: {value}\n"

    return claims_analysis_str
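
# The fence handling above uses plain string replacement; a regex-based helper
# (a sketch, not wired in) would strip only a true surrounding markdown fence
# and leave any backticks or the word "json" inside the payload untouched:
import re

def extract_json(text):
    """Parse JSON from a response that may be wrapped in a ```json ... ``` fence."""
    match = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
    return json.loads(match.group(1) if match else text)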

def generate_final_analysis(product_info, processing_level, harmful_ingredient_analysis, claims_analysis):
    system_prompt = """You are provided with a detailed analysis of a food product. Your task is to generate actionable insights to help the user decide whether to consume the product, at what frequency, and identify any potential harms or benefits. Consider the context of consumption to ensure the advice is personalized and practical.

Use the following criteria to generate your response:

1. **Nutrition Analysis:**
- How processed is the product?

2. **Harmful Ingredients:**
- Identify any harmful or questionable ingredients.

3. **Misleading Claims:**
- Are there any misleading claims made by the brand?

Additionally, consider the following while generating insights:

1. **Consumption Context:**
- Is the product being consumed for health reasons or as a treat?
- Could the consumer be overlooking hidden harms?
- If the product is something they could consume daily, should they?
- If they are consuming it daily, what potential harm are they not noticing?
- If the product is intended for health purposes, are there concerns the user might miss?

**Output:**
- Recommend whether the product should be consumed or avoided.
- If recommended, specify the appropriate frequency and intended functionality (e.g., treat vs. health).
- Highlight any risks or benefits at that level of consumption."""

    user_prompt = f"""
Product Name: {product_info['brandName']} {product_info['productName']}

Processing Level:
{processing_level}

Ingredient Analysis:
{harmful_ingredient_analysis}

Claims Analysis:
{claims_analysis}
"""
    if debug_mode:
        print(f"\nuser_prompt : \n {user_prompt}")
        
    completion = client.chat.completions.create(
        model="gpt-4o",  # Make sure to use an appropriate model
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )

    return completion.choices[0].message.content

# Streamlit app
def main():
    st.title("Food Product Analysis")

    # Create a form for all inputs
    with st.form("product_analysis_form"):
        st.write("Please enter the following information about the product:")
        brand_name = st.text_input("Brand Name")
        product_name = st.text_input("Product Name")
        ingredients = st.text_area("Ingredients (separated by commas)")
        claims = st.text_area("Product Claims (separated by commas)")

        submitted = st.form_submit_button("Analyze Product")

    if submitted:
        # Process inputs
        ingredients_list = []
        claims_list = []
        if ingredients:
            ingredients_list = [ingredient.strip() for ingredient in ingredients.split(',')]
        if claims:
            claims_list = [claim.strip() for claim in claims.split(',')]
        product_info = {
            "brandName": brand_name,
            "productName": product_name
        }

        # Display a message while analyzing
        with st.spinner("Analyzing the product... This may take a moment."):
            # Perform analysis
            processing_level = ""
            harmful_ingredient_analysis = ""
            if ingredients:
                processing_level = analyze_processing_level(ingredients_list, product_info, assistant1.id)
                harmful_ingredient_analysis = analyze_harmful_ingredients(ingredients_list, product_info, assistant2.id)
            else:
                print("No ingredients specified by the user!")
                
            claims_analysis = ""
            if claims:
                claims_analysis = analyze_claims(claims_list, assistant3.id)
            else:
                print("No claims specified by the user!")
            
            # Generate final analysis
            final_analysis = generate_final_analysis(
                product_info,
                processing_level,
                harmful_ingredient_analysis,
                claims_analysis
            )

        # Display results
        st.success("Analysis complete!")
        st.subheader("Final Analysis:")
        st.write(final_analysis)

        # Option to start over. Note: clicking this button triggers a Streamlit
        # rerun on which `submitted` is False again, so the results above are
        # cleared and the form is shown fresh.
        if st.button("Analyze Another Product"):
            st.rerun()

if __name__ == "__main__":
    main()
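
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py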