import streamlit as st
from openai import OpenAI
import json, os
import requests, time
from data_extractor import extract_data, find_product, get_product
from nutrient_analyzer import analyze_nutrients
from rda import find_nutrition
from typing import Dict, Any
from calc_cosine_similarity import find_cosine_similarity, find_embedding, find_relevant_file_paths
import pickle

#The @st.cache_resource decorator ensures this function runs only once and its result
#(the OpenAI client) is cached; subsequent calls return the cached client instead of
#recreating it.

@st.cache_resource
def get_openai_client():
    #Returns (debug_mode, client); debug mode is hard-coded to True for testing only
    return True, OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


@st.cache_resource
def get_backend_urls():
    data_extractor_url = "https://data-extractor-67qj89pa0-sonikas-projects-9936eaad.vercel.app/"
    return data_extractor_url

debug_mode, client = get_openai_client()
data_extractor_url = get_backend_urls()
assistant_default_doc = None

def extract_data_from_product_image(image_links, data_extractor_url):
    response = extract_data(image_links)
    return response

def get_product_data_from_db(product_name, data_extractor_url):
    response = get_product(product_name)
    return response

def get_product_list(product_name_by_user, data_extractor_url):
    response = find_product(product_name_by_user)
    return response


def rda_analysis(product_info_from_db_nutritionalInformation: Dict[str, Any], 
                product_info_from_db_servingSize: float) -> Dict[str, Any]:
    """
    Analyze nutritional information and return RDA analysis data in a structured format.
    
    Args:
        product_info_from_db_nutritionalInformation: Dictionary containing nutritional information
        product_info_from_db_servingSize: Serving size value
        
    Returns:
        Dictionary containing nutrition per serving and user serving size
    """
    nutrient_name_list = [
        'energy', 'protein', 'carbohydrates', 'addedSugars', 'dietaryFiber',
        'totalFat', 'saturatedFat', 'monounsaturatedFat', 'polyunsaturatedFat',
        'transFat', 'sodium'
    ]

    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "system",
                    "content": """You will be given nutritional information of a food product. 
                                Return the data in the exact JSON format specified in the schema, 
                                with all required fields."""
                },
                {
                    "role": "user",
                    "content": f"Nutritional content of food product is {json.dumps(product_info_from_db_nutritionalInformation)}. "
                              f"Extract the values of the following nutrients: {', '.join(nutrient_name_list)}."
                }
            ],
            response_format={"type": "json_schema", "json_schema": {
                "name": "Nutritional_Info_Label_Reader",
                "schema": {
                    "type": "object",
                    "properties": {
                        "energy": {"type": "number"},
                        "protein": {"type": "number"},
                        "carbohydrates": {"type": "number"},
                        "addedSugars": {"type": "number"},
                        "dietaryFiber": {"type": "number"},
                        "totalFat": {"type": "number"},
                        "saturatedFat": {"type": "number"},
                        "monounsaturatedFat": {"type": "number"},
                        "polyunsaturatedFat": {"type": "number"},
                        "transFat": {"type": "number"},
                        "sodium": {"type": "number"},
                        "servingSize": {"type": "number"},
                    },
                    "required": nutrient_name_list + ["servingSize"],
                    "additionalProperties": False
                },
                "strict": True
            }}
        )
        
        # Parse the JSON response
        nutrition_data = json.loads(response.choices[0].message.content)
        
        # Validate that all required fields are present
        missing_fields = [field for field in nutrient_name_list + ["servingSize"] 
                         if field not in nutrition_data]
        if missing_fields:
            print(f"Missing required fields in API response: {missing_fields}")
        
        # Validate that all values are numbers
        non_numeric_fields = [field for field, value in nutrition_data.items() 
                            if not isinstance(value, (int, float))]
        if non_numeric_fields:
            raise ValueError(f"Non-numeric values found in fields: {non_numeric_fields}")
        
        return {
            'nutritionPerServing': nutrition_data,
            'userServingSize': product_info_from_db_servingSize
        }
        
    except Exception as e:
        # Log the error and raise it for proper handling
        print(f"Error in RDA analysis: {str(e)}")
        raise
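
# Illustrative shape of the dictionary returned by rda_analysis (a sketch only; the
# nutrient numbers come from the model's structured-output response, so the values
# shown here are hypothetical):
#   {
#       'nutritionPerServing': {'energy': 462, 'protein': 6.7, 'carbohydrates': 76.0,
#                               'addedSugars': 26.9, 'dietaryFiber': 0, 'totalFat': 14.6,
#                               'saturatedFat': 6.8, 'monounsaturatedFat': 0,
#                               'polyunsaturatedFat': 0, 'transFat': 0, 'sodium': 281,
#                               'servingSize': 100},
#       'userServingSize': 18.8
#   }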


def find_product_nutrients(product_info_from_db):
    #GET Response: {'_id': '6714f0487a0e96d7aae2e839',
    #'brandName': 'Parle', 'claims': ['This product does not contain gold'],
    #'fssaiLicenseNumbers': [10013022002253],
    #'ingredients': [{'metadata': '', 'name': 'Refined Wheat Flour (Maida)', 'percent': '63%'}, {'metadata': '', 'name': 'Sugar', 'percent': ''}, {'metadata': '', 'name': 'Refined Palm Oil', 'percent': ''}, {'metadata': '(Glucose, Levulose)', 'name': 'Invert Sugar Syrup', 'percent': ''}, {'metadata': 'I', 'name': 'Sugar Citric Acid', 'percent': ''}, {'metadata': '', 'name': 'Milk Solids', 'percent': '1%'}, {'metadata': '', 'name': 'Iodised Salt', 'percent': ''}, {'metadata': '503(I), 500 (I)', 'name': 'Raising Agents', 'percent': ''}, {'metadata': '1101 (i)', 'name': 'Flour Treatment Agent', 'percent': ''}, {'metadata': 'Diacetyl Tartaric and Fatty Acid Esters of Glycerol (of Vegetable Origin)', 'name': 'Emulsifier', 'percent': ''}, {'metadata': 'Vanilla', 'name': 'Artificial Flavouring Substances', 'percent': ''}],
    
    #'nutritionalInformation': [{'name': 'Energy', 'unit': 'kcal', 'values': [{'base': 'per 100 g','value': 462}]},
    #{'name': 'Protein', 'unit': 'g', 'values': [{'base': 'per 100 g', 'value': 6.7}]},
    #{'name': 'Carbohydrate', 'unit': 'g', 'values': [{'base': 'per 100 g', 'value': 76.0}, {'base': 'of which sugars', 'value': 26.9}]},
    #{'name': 'Fat', 'unit': 'g', 'values': [{'base': 'per 100 g', 'value': 14.6}, {'base': 'Saturated Fat', 'value': 6.8}, {'base': 'Trans Fat', 'value': 0}]},
    #{'name': 'Total Sugars', 'unit': 'g', 'values': [{'base': 'per 100 g', 'value': 27.7}]},
    #{'name': 'Added Sugars', 'unit': 'g', 'values': [{'base': 'per 100 g', 'value': 26.9}]},
    #{'name': 'Cholesterol', 'unit': 'mg', 'values': [{'base': 'per 100 g', 'value': 0}]},
    #{'name': 'Sodium', 'unit': 'mg', 'values': [{'base': 'per 100 g', 'value': 281}]}],
    
    #'packagingSize': {'quantity': 82, 'unit': 'g'},
    #'productName': 'Parle-G Gold Biscuits',
    #'servingSize': {'quantity': 18.8, 'unit': 'g'},
    #'servingsPerPack': 3.98,
    #'shelfLife': '7 months from packaging'}

    product_type = None
    calories = None
    sugar = None
    total_sugar = None
    added_sugar = None
    salt = None
    serving_size = None

    if product_info_from_db["servingSize"]["unit"].lower() == "g":
        product_type = "solid"
    elif product_info_from_db["servingSize"]["unit"].lower() == "ml":
        product_type = "liquid"
    serving_size = product_info_from_db["servingSize"]["quantity"]

    for item in product_info_from_db["nutritionalInformation"]:
        if 'energy' in item['name'].lower():
            calories = item['values'][0]['value']
        if 'total sugar' in item['name'].lower():
            total_sugar = item['values'][0]['value']
        if 'added sugar' in item['name'].lower():
            added_sugar = item['values'][0]['value']
        if 'sugar' in item['name'].lower() and 'added sugar' not in item['name'].lower() and 'total sugar' not in item['name'].lower():
            sugar = item['values'][0]['value']
        if 'salt' in item['name'].lower():
            if salt is None:
                salt = 0
            salt += item['values'][0]['value']

    if salt is None:
        salt = 0
        for item in product_info_from_db["nutritionalInformation"]:
            if 'sodium' in item['name'].lower():
                salt += item['values'][0]['value']

    if added_sugar is not None and added_sugar > 0 and sugar is None:
        sugar = added_sugar
    elif total_sugar is not None and total_sugar > 0 and added_sugar is None and sugar is None:
        sugar = total_sugar

    return product_type, calories, sugar, salt, serving_size
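
# For the sample record shown in the comments at the top of find_product_nutrients
# (Parle-G Gold Biscuits), the function above would return approximately:
#   ("solid", 462, 26.9, 281, 18.8)
# i.e. product type, calories, sugar (falling back to added sugar when no plain "sugar"
# entry exists), salt (summed from sodium when no salt entry exists), and serving size.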
    
# Initialize assistants and vector stores
# Function to initialize vector stores and assistants
@st.cache_resource
def initialize_assistants_and_vector_stores():
    #Processing Level
    global client
    assistant1 = client.beta.assistants.create(
      name="Processing Level",
      instructions="You are an expert dietician. Use your knowledge base to answer questions about the processing level of a food product.",
      model="gpt-4o",
      tools=[{"type": "file_search"}],
      temperature=0,
      top_p = 0.85
      )
    
    #Harmful Ingredients
    assistant3 = client.beta.assistants.create(
      name="Misleading Claims",
      instructions="You are an expert dietician. Use your knowledge base to answer questions about misleading claims made about a food product.",
      model="gpt-4o",
      tools=[{"type": "file_search"}],
      temperature=0,
      top_p = 0.85
      )
    
    # Create a vector store
    vector_store1 = client.beta.vector_stores.create(name="Processing Level Vec")
    
    # Ready the files for upload to OpenAI
    file_paths = ["Processing_Level.docx"]
    file_streams = [open(path, "rb") for path in file_paths]
    
    # Use the upload and poll SDK helper to upload the files, add them to the vector store,
    # and poll the status of the file batch for completion.
    file_batch1 = client.beta.vector_stores.file_batches.upload_and_poll(
      vector_store_id=vector_store1.id, files=file_streams
    )
    
    # You can print the status and the file counts of the batch to see the result of this operation.
    print(file_batch1.status)
    print(file_batch1.file_counts)
    
    # Create a vector store
    vector_store3 = client.beta.vector_stores.create(name="Misleading Claims Vec")
    
    # Ready the files for upload to OpenAI
    file_paths = ["MisLeading_Claims.docx"]
    file_streams = [open(path, "rb") for path in file_paths]
    
    # Use the upload and poll SDK helper to upload the files, add them to the vector store,
    # and poll the status of the file batch for completion.
    file_batch3 = client.beta.vector_stores.file_batches.upload_and_poll(
      vector_store_id=vector_store3.id, files=file_streams
    )
    
    # You can print the status and the file counts of the batch to see the result of this operation.
    print(file_batch3.status)
    print(file_batch3.file_counts)
    
    #Processing Level
    assistant1 = client.beta.assistants.update(
      assistant_id=assistant1.id,
      tool_resources={"file_search": {"vector_store_ids": [vector_store1.id]}},
    )
    
    
    #Misleading Claims
    assistant3 = client.beta.assistants.update(
      assistant_id=assistant3.id,
      tool_resources={"file_search": {"vector_store_ids": [vector_store3.id]}},
    )

    embeddings_titles = []
    if not os.path.exists('embeddings.pkl'):
        #Find embeddings of titles from titles.txt
        titles = []
        #embeddings.pkl is absent, so compute the embeddings now
        with open('titles.txt', 'r') as file:
            lines = file.readlines()
    
        titles = [line.strip() for line in lines]
        
        embeddings_titles = find_embedding(titles, lim=50)
        #Save embeddings_titles to embeddings.pkl
        data = {
            'sentences': titles[:50],
            'embeddings': embeddings_titles
        }
        with open('embeddings.pkl', 'wb') as f:
            pickle.dump(data, f)

        if os.path.exists("embeddings.pkl"):
            print("embeddings.pkl successfully written!")

    else: 
        print("Reading embeddings.pkl")
        # Load both sentences and embeddings
        with open('embeddings.pkl', 'rb') as f:
            loaded_data = pickle.load(f)
        embeddings_titles = loaded_data['embeddings']

    return assistant1, assistant3, embeddings_titles
    

assistant1, assistant3, embeddings_titles = initialize_assistants_and_vector_stores()
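
# A minimal sketch of what the cached embeddings.pkl is expected to hold, based on how it
# is written above (the embedding vectors depend on find_embedding, so their contents are
# an assumption here):
#   {'sentences': ['<title 1>', ..., '<title 50>'],
#    'embeddings': [<vector for title 1>, ..., <vector for title 50>]}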

def get_files_with_ingredient_info(ingredient, N=1):
    file_paths = []
    #Find embedding for title of all files
    global embeddings_titles

    with open('titles.txt', 'r') as file:
        lines = file.readlines()
    
    titles = [line.strip() for line in lines]

    #Apply cosine similarity between embedding of ingredient name and title of all files
    file_paths_abs, file_titles = find_relevant_file_paths(ingredient, embeddings_titles, titles, N=N)
    #Find the top N titles that are most similar to the ingredient's name
    #Find file names for those titles
    if len(file_paths_abs) == 0:
        file_paths.append("Ingredients.docx")
    else:
        for file_path in file_paths_abs:
            file_paths.append(f"articles/{file_path}")

        print(f"Titles are {file_titles}")
            
    return file_paths
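
# Example usage (a sketch; the returned paths depend on titles.txt and on the
# cosine-similarity ranking, so the article names below are hypothetical):
#   get_files_with_ingredient_info("Refined Palm Oil", N=2)
#   -> ["articles/<closest matching title>", "articles/<second closest title>"]
# Falls back to ["Ingredients.docx"] when no sufficiently similar title is found.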
    
def get_assistant_for_ingredient(ingredient, N=2):
    global client
    global assistant_default_doc
    
    #Harmful Ingredients
    assistant2 = client.beta.assistants.create(
      name="Harmful Ingredients",
      instructions=f"You are an expert dietician. Use your knowledge base to answer questions about the ingredient {ingredient} in a food product.",
      model="gpt-4o",
      tools=[{"type": "file_search"}],
      temperature=0,
      top_p = 0.85
      )

    # Create a vector store
    vector_store2 = client.beta.vector_stores.create(name="Harmful Ingredients Vec")
    
    # Ready the files for upload to OpenAI.     
    file_paths = get_files_with_ingredient_info(ingredient, N)
    if file_paths[0] == "Ingredients.docx" and assistant_default_doc:
        print(f"Using Ingredients.docx for analyzing ingredient {ingredient}")
        return assistant_default_doc
        
    print(f"DEBUG : Creating vector store for files {file_paths} to analyze ingredient {ingredient}")
    
    file_streams = [open(path, "rb") for path in file_paths]
    
    # Use the upload and poll SDK helper to upload the files, add them to the vector store,
    # and poll the status of the file batch for completion.
    file_batch2 = client.beta.vector_stores.file_batches.upload_and_poll(
      vector_store_id=vector_store2.id, files=file_streams
    )
    
    # You can print the status and the file counts of the batch to see the result of this operation.
    print(file_batch2.status)
    print(file_batch2.file_counts)

    #harmful Ingredients
    assistant2 = client.beta.assistants.update(
      assistant_id=assistant2.id,
      tool_resources={"file_search": {"vector_store_ids": [vector_store2.id]}},
    )

    if file_paths[0] == "Ingredients.docx" and assistant_default_doc is None:
        assistant_default_doc = assistant2
        
    return assistant2

def analyze_nutrition_icmr_rda(nutrient_analysis, nutrient_analysis_rda):
    global debug_mode, client
    system_prompt = """
Task: Analyze the nutritional content of the food item and compare it to the Recommended Daily Allowance (RDA) or threshold limits defined by ICMR. Provide practical, contextual insights based on the following nutrients:

Nutrient Breakdown and Analysis:
Calories:

Compare the calorie content to a well-balanced meal.
Calculate how many meals' worth of calories the product contains, providing context for balanced eating.
Sugar & Salt:

Convert the amounts of sugar and salt into teaspoons to help users easily understand their daily intake.
Explain whether the levels exceed the ICMR-defined limits and what that means for overall health.
Fat & Calories:

Analyze fat content, specifying whether it is high or low in relation to a balanced diet.
Offer insights on how the fat and calorie levels may impact the user’s overall diet, including potential risks or benefits.
Contextual Insights:
For each nutrient, explain how its levels (whether high or low) affect health and diet balance.
Provide actionable recommendations for the user, suggesting healthier alternatives or adjustments to consumption if necessary.
Tailor the advice to the user's lifestyle, such as recommending lower intake if sedentary or suggesting other dietary considerations based on the product's composition.

Output Structure:
For each nutrient (Calories, Sugar, Salt, Fat), specify if the levels exceed or are below the RDA or ICMR threshold.
Provide clear, concise comparisons (e.g., sugar exceeds the RDA by 20%, equivalent to X teaspoons).    
    """

    user_prompt = f"""
Nutrition Analysis :
{nutrient_analysis}
{nutrient_analysis_rda}
"""
    if debug_mode:
        print(f"\nuser_prompt : \n {user_prompt}")
        
    completion = client.chat.completions.create(
        model="gpt-4o",  # Make sure to use an appropriate model
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )

    return completion.choices[0].message.content
    
def analyze_processing_level(ingredients, assistant_id):
    global debug_mode, client
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "Categorize the food product that has the following ingredients: " + ', '.join(ingredients) + " into Group A, Group B, or Group C based on the document. The output must only be the group category name (Group A, Group B, or Group C) along with the reason behind assigning that respective category to the product. If the group category cannot be determined, output 'NOT FOUND'.",
            }
        ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )

    # Polling loop to wait for a response in the thread
    messages = []
    max_retries = 10  # You can set a maximum retry limit
    retries = 0
    wait_time = 2  # Seconds to wait between retries

    while retries < max_retries:
        messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
        if messages:  # If we receive any messages, break the loop
            break
        retries += 1
        time.sleep(wait_time)

    # Check if we got the message content
    if not messages:
        raise TimeoutError("Processing Level : No messages were returned after polling.")
        
    message_content = messages[0].content[0].text
    annotations = message_content.annotations
    #citations = []
    for index, annotation in enumerate(annotations):
        message_content.value = message_content.value.replace(annotation.text, "")
        #if file_citation := getattr(annotation, "file_citation", None):
        #    cited_file = client.files.retrieve(file_citation.file_id)
        #    citations.append(f"[{index}] {cited_file.filename}")

    if debug_mode:
        print(message_content.value)
    processing_level_str = message_content.value
    return processing_level_str
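
# Expected shape of processing_level_str, per the prompt above: the group name
# ("Group A", "Group B", or "Group C") together with the reason for that categorization,
# or the literal string "NOT FOUND" when the document does not support a determination.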

def analyze_harmful_ingredients(ingredient, assistant_id):
    global debug_mode, client
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "A food product has the ingredient: " + ingredient + ". Is this ingredient safe to eat? The output must be in JSON format: {<ingredient_name>: <information from the document about why ingredient is harmful>}. If information about an ingredient is not found in the documents, the value for that ingredient must start with the prefix '(NOT FOUND IN DOCUMENT)' followed by the LLM's response based on its own knowledge.",
            }
        ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )

    # Polling loop to wait for a response in the thread
    messages = []
    max_retries = 10  # You can set a maximum retry limit
    retries = 0
    wait_time = 2  # Seconds to wait between retries

    while retries < max_retries:
        messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
        if messages:  # If we receive any messages, break the loop
            break
        retries += 1
        time.sleep(wait_time)

    # Check if we got the message content
    if not messages:
        raise TimeoutError("Processing Ingredients : No messages were returned after polling.")
        
    message_content = messages[0].content[0].text
    annotations = message_content.annotations

    #citations = []

    #print(f"Length of annotations is {len(annotations)}")

    for index, annotation in enumerate(annotations):
      if file_citation := getattr(annotation, "file_citation", None):
          #cited_file = client.files.retrieve(file_citation.file_id)
          #citations.append(f"[{index}] {cited_file.filename}")
          message_content.value = message_content.value.replace(annotation.text, "")
  
    if debug_mode:
      ingredients_not_found_in_doc = []        
      print(message_content.value)
      for key, value in json.loads(message_content.value.replace("```", "").replace("json", "")).items():
          if value.startswith("(NOT FOUND IN DOCUMENT)"):
              ingredients_not_found_in_doc.append(key)
      print(f"Ingredients not found in database {','.join(ingredients_not_found_in_doc)}")
    
    harmful_ingredient_analysis = json.loads(message_content.value.replace("```", "").replace("json", "").replace("(NOT FOUND IN DOCUMENT) ", ""))
        
    harmful_ingredient_analysis_str = ""
    for key, value in harmful_ingredient_analysis.items():
      harmful_ingredient_analysis_str += f"{key}: {value}\n"
    return harmful_ingredient_analysis_str
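
# The assistant is prompted to reply with JSON of the form
#   {"<ingredient_name>": "<information about why the ingredient may be harmful>"}
# which the code above flattens into one "<ingredient_name>: <information>" line per
# ingredient before returning the combined string.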

def analyze_claims(claims, ingredients, assistant_id):
    global debug_mode, client
    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": "A food product has the following claims: " + ', '.join(claims) + " and ingredients: " + ', '.join(ingredients) + """. Please evaluate the validity of each claim as well as assess if the product name is misleading.
The output must be in JSON format as follows: 

{
  <claim_name>: {
    'Verdict': <A judgment on the claim's accuracy, ranging from 'Accurate' to varying degrees of 'Misleading'>,
    'Why?': <A concise, bulleted summary explaining the specific ingredients or aspects contributing to the discrepancy>,
    'Detailed Analysis': <An in-depth explanation of the claim, incorporating relevant regulatory guidelines and health perspectives to support the verdict>
  }
}
"""
            }
                ]
    )
    
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=assistant_id,
        include=["step_details.tool_calls[*].file_search.results[*].content"]
    )
    
    # Polling loop to wait for a response in the thread
    messages = []
    max_retries = 10  # You can set a maximum retry limit
    retries = 0
    wait_time = 2  # Seconds to wait between retries

    while retries < max_retries:
        messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
        if messages:  # If we receive any messages, break the loop
            break
        retries += 1
        time.sleep(wait_time)

    # Check if we got the message content
    if not messages:
        raise TimeoutError("Processing Claims : No messages were returned after polling.")
        
    message_content = messages[0].content[0].text
    
      
    annotations = message_content.annotations
    
    #citations = []
    
    #print(f"Length of annotations is {len(annotations)}")
    
    for index, annotation in enumerate(annotations):
          if file_citation := getattr(annotation, "file_citation", None):
              #cited_file = client.files.retrieve(file_citation.file_id)
              #citations.append(f"[{index}] {cited_file.filename}")
              message_content.value = message_content.value.replace(annotation.text, "")
      
    #if debug_mode:
    #    claims_not_found_in_doc = []
    #    print(message_content.value)
    #    for key, value in json.loads(message_content.value.replace("```", "").replace("json", "")).items():
    #          if value.startswith("(NOT FOUND IN DOCUMENT)"):
    #              claims_not_found_in_doc.append(key)
    #    print(f"Claims not found in the doc are {','.join(claims_not_found_in_doc)}")
    #claims_analysis = json.loads(message_content.value.replace("```", "").replace("json", "").replace("(NOT FOUND IN DOCUMENT) ", ""))
    claims_analysis = {}
    if message_content.value != "":
        claims_analysis = json.loads(message_content.value.replace("```", "").replace("json", ""))

    claims_analysis_str = ""
    for key, value in claims_analysis.items():
      claims_analysis_str += f"{key}: {value}\n"
    
    return claims_analysis_str

def generate_final_analysis(brand_name, product_name, nutritional_level, processing_level, harmful_ingredient_analysis, claims_analysis, system_prompt):
    global debug_mode, client
    system_prompt_orig = """You are provided with a detailed analysis of a food product. Your task is to generate actionable insights to help the user decide whether to consume the product, at what frequency, and identify any potential harms or benefits. Consider the context of consumption to ensure the advice is personalized and practical.

Use the following criteria to generate your response:

1. **Nutrition Analysis:**
- How much do sugar, calories, or salt exceed the threshold limit?
- How processed is the product?
- How much of the Recommended Dietary Allowance (RDA) does the product provide for each nutrient?

2. **Harmful Ingredients:**
- Identify any harmful or questionable ingredients.

3. **Misleading Claims:**
- Are there any misleading claims made by the brand?

Additionally, consider the following while generating insights:

1. **Consumption Context:**
- Is the product being consumed for health reasons or as a treat?
- Could the consumer be overlooking hidden harms?
- If the product is something they could consume daily, should they?
- If they are consuming it daily, what potential harm are they not noticing?
- If the product is intended for health purposes, are there concerns the user might miss?

**Output:**
- Recommend whether the product should be consumed or avoided.
- If recommended, specify the appropriate frequency and intended functionality (e.g., treat vs. health).
- Highlight any risks or benefits at that level of consumption."""

    user_prompt = f"""
Product Name: {brand_name} {product_name}

Nutrition Analysis :
{nutritional_level}

Processing Level:
{processing_level}

Ingredient Analysis:
{harmful_ingredient_analysis}

Claims Analysis:
{claims_analysis}
"""
    if debug_mode:
        print(f"\nuser_prompt : \n {user_prompt}")
        
    completion = client.chat.completions.create(
        model="gpt-4o",  # Make sure to use an appropriate model
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )

    return f"Brand: {brand_name}\n\nProduct: {product_name}\n\nAnalysis:\n\n{completion.choices[0].message.content}"


def analyze_product(product_info_raw, system_prompt):
    
    global assistant1, assistant3
    
    if product_info_raw != "{}":
        product_info_from_db = json.loads(product_info_raw)
        brand_name = product_info_from_db.get("brandName", "")
        product_name = product_info_from_db.get("productName", "")
        ingredients_list = [ingredient["name"] for ingredient in product_info_from_db.get("ingredients", [])]
        claims_list = product_info_from_db.get("claims", [])
        nutritional_information = product_info_from_db['nutritionalInformation']
        serving_size = product_info_from_db["servingSize"]["quantity"]

        nutrient_analysis_rda = ""
        nutrient_analysis = ""
        nutritional_level = ""
        processing_level = ""
        harmful_ingredient_analysis = ""
        claims_analysis = ""
        
        if nutritional_information:
            product_type, calories, sugar, salt, serving_size = find_product_nutrients(product_info_from_db)
            if product_type is not None and serving_size is not None and serving_size > 0:                                                          
                nutrient_analysis = analyze_nutrients(product_type, calories, sugar, salt, serving_size)                       
            else:                                                                                                              
                return "product not found because product information in the db is corrupt"   
            print(f"DEBUG ! nutrient analysis is {nutrient_analysis}")

            nutrient_analysis_rda_data = rda_analysis(nutritional_information, serving_size)
            print(f"DEBUG ! Data for RDA nutrient analysis is of type {type(nutrient_analysis_rda_data)} - {nutrient_analysis_rda_data}")
            print(f"DEBUG : nutrient_analysis_rda_data['nutritionPerServing'] : {nutrient_analysis_rda_data['nutritionPerServing']}")
            print(f"DEBUG : nutrient_analysis_rda_data['userServingSize'] : {nutrient_analysis_rda_data['userServingSize']}")
            
            nutrient_analysis_rda = find_nutrition(nutrient_analysis_rda_data)
            print(f"DEBUG ! RDA nutrient analysis is {nutrient_analysis_rda}")
            
            #Call GPT for nutrient analysis
            nutritional_level = analyze_nutrition_icmr_rda(nutrient_analysis, nutrient_analysis_rda)
        
        if len(ingredients_list) > 0:    
            processing_level = analyze_processing_level(ingredients_list, assistant1.id) if ingredients_list else ""
            for ingredient in ingredients_list:
                assistant_id_ingredient = get_assistant_for_ingredient(ingredient, 2)
                harmful_ingredient_analysis += analyze_harmful_ingredients(ingredient, assistant_id_ingredient.id) + "\n"
        
        if len(claims_list) > 0:                    
            claims_analysis = analyze_claims(claims_list, ingredients_list, assistant3.id) if claims_list else ""
                
        final_analysis = generate_final_analysis(brand_name, product_name, nutritional_level, processing_level, harmful_ingredient_analysis, claims_analysis, system_prompt)

        return final_analysis
    else:
        return "I'm sorry, product information could not be extracted from the url."

# Streamlit app
# Initialize session state
if 'messages' not in st.session_state:
    st.session_state.messages = []

def chatbot_response(image_urls_str, product_name_by_user, data_extractor_url, system_prompt, extract_info = True):
    # Process the user input and generate a response
    processing_level = ""
    harmful_ingredient_analysis = ""
    claims_analysis = ""
    image_urls = []
    if product_name_by_user != "":
        similar_product_list_json = get_product_list(product_name_by_user, data_extractor_url)
        
        if similar_product_list_json and extract_info == False:
            with st.spinner("Fetching product information from our database... This may take a moment."):
                print(f"similar_product_list_json : {similar_product_list_json}")
                if 'error' not in similar_product_list_json.keys():
                    similar_product_list = similar_product_list_json['products']
                    return similar_product_list, "Product list found from our database"
                else:
                    return [], "Product list not found"
            
        elif extract_info == True:
            with st.spinner("Analyzing the product... This may take a moment."):
                product_info_raw = get_product_data_from_db(product_name_by_user, data_extractor_url)
                print(f"DEBUG product_info_raw from name: {product_info_raw}")
                if product_info_raw == "{}":
                    return [], "product not found because product information in the db is corrupt"
                if 'error' not in json.loads(product_info_raw).keys():
                    final_analysis = analyze_product(product_info_raw, system_prompt)
                    return [], final_analysis
                else:
                    return [], f"Product information could not be extracted from our database because of {json.loads(product_info_raw)['error']}"
                
        else:
            return [], "Product not found in our database."
                
    elif "http:/" in image_urls_str.lower() or "https:/" in image_urls_str.lower():
        # Extract image URL from user input
        if "," not in image_urls_str:
            image_urls.append(image_urls_str)
        else:
            for url in image_urls_str.split(","):
                if "http:/" in url.lower() or "https:/" in url.lower():
                    image_urls.append(url)

        with st.spinner("Analyzing the product... This may take a moment."):
            product_info_raw = extract_data_from_product_image(image_urls, data_extractor_url)
            print(f"DEBUG product_info_raw from image : {product_info_raw}")
            if 'error' not in json.loads(product_info_raw).keys():
                final_analysis = analyze_product(product_info_raw, system_prompt)
                return [], final_analysis
            else:
                return [], f"Product information could not be extracted from the image because of {json.loads(product_info_raw)['error']}"

            
    else:
        return [], "I'm here to analyze food products. Please provide an image URL (Example : http://example.com/image.jpg) or product name (Example : Harvest Gold Bread)"
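
# Routing summary for chatbot_response (illustrative calls; product names and URLs are
# placeholders and the return values are sketches):
#   chatbot_response("", "Parle-G Gold Biscuits", data_extractor_url, prompt, extract_info=False)
#     -> (["<similar product 1>", "<similar product 2>", ...], "Product list found from our database")
#   chatbot_response("http://example.com/image.jpg", "", data_extractor_url, prompt, extract_info=True)
#     -> ([], "<final analysis text>")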

class SessionState:
    """Handles all session state variables in a centralized way"""
    @staticmethod
    def initialize():
        initial_states = {
            "messages": [],
            "product_selected": False,
            "product_shared": False,
            "analyze_more": True,
            "welcome_shown": False,
            "yes_no_choice": None,
            "welcome_msg": "Welcome to ConsumeWise! What product would you like me to analyze today?",
            "system_prompt": "",
            "similar_products": [],
            "awaiting_selection": False,
            "current_user_input": "",
            "selected_product": None
        }
        
        for key, value in initial_states.items():
            if key not in st.session_state:
                st.session_state[key] = value

class SystemPromptManager:
    """Manages the system prompt input and related functionality"""
    @staticmethod
    def render_sidebar():
        st.sidebar.header("System Prompt")
        system_prompt = st.sidebar.text_area(
            "Enter your system prompt here (required):",
            value=st.session_state.system_prompt,
            height=150,
            key="system_prompt_input"
        )
        
        if st.sidebar.button("Submit Prompt"):
            if system_prompt.strip():
                st.session_state.system_prompt = system_prompt
                SessionState.initialize()  # Reset all states
                st.rerun()
            else:
                st.sidebar.error("Please enter a valid system prompt.")
        
        return system_prompt.strip()

class ProductSelector:
    """Handles product selection logic"""
    @staticmethod
    def handle_selection():
        if st.session_state.similar_products:
            # Create a container for the selection UI
            selection_container = st.container()
            
            with selection_container:
                # Radio button for product selection
                choice = st.radio(
                    "Select a product:",
                    st.session_state.similar_products + ["None of the above"],
                    key="product_choice"
                )
                
                # Confirm button
                confirm_clicked = st.button("Confirm Selection")
                msg = ""
                # Only process the selection when confirm is clicked
                if confirm_clicked:
                    st.session_state.awaiting_selection = False
                    if choice != "None of the above":
                        #st.session_state.selected_product = choice
                        st.session_state.messages.append({"role": "assistant", "content": f"You selected {choice}"})
                        _, msg = chatbot_response("", choice, "", st.session_state.system_prompt, extract_info=True)
                        #Check if analysis couldn't be done because db had incomplete information
                        if msg != "product not found because product information in the db is corrupt":
                            #Only when msg is acceptable
                            st.session_state.messages.append({"role": "assistant", "content": msg})
                            with st.chat_message("assistant"):
                                st.markdown(msg)
                                
                            st.session_state.product_selected = True
                            
                            keys_to_keep = ["system_prompt", "messages", "welcome_msg"]
                            keys_to_delete = [key for key in st.session_state.keys() if key not in keys_to_keep]
                        
                            for key in keys_to_delete:
                                del st.session_state[key]
                            st.session_state.welcome_msg = "What product would you like me to analyze next?"
                            
                    if choice == "None of the above" or msg == "product not found because product information in the db is corrupt":
                        st.session_state.messages.append(
                            {"role": "assistant", "content": "Please provide the image URL of the product to analyze based on the latest information."}
                        )
                        with st.chat_message("assistant"):
                            st.markdown("Please provide the image URL of the product to analyze based on the latest information.")
                        #st.session_state.selected_product = None
                        
                    st.rerun()
                
                # Prevent further chat input while awaiting selection
                return True  # Indicates selection is in progress
            
        return False  # Indicates no selection in progress

class ChatManager:
    """Manages chat interactions and responses"""
    @staticmethod
    def process_response(user_input):
        response, status = "", "no success"
        if not st.session_state.product_selected:
            if "http:/" not in user_input and "https:/" not in user_input:
                response, status = ChatManager._handle_product_name(user_input)
            else:
                response, status = ChatManager._handle_product_url(user_input)
                
        return response, status

    @staticmethod
    def _handle_product_name(user_input):
        st.session_state.product_shared = True
        st.session_state.current_user_input = user_input
        similar_products, _ = chatbot_response(
            "", user_input, data_extractor_url, 
            st.session_state.system_prompt, extract_info=False
        )
        
        if len(similar_products) > 0:
            st.session_state.similar_products = similar_products
            st.session_state.awaiting_selection = True
            return "Here are some similar products from our database. Please select:", "no success"
            
        return "Product not found in our database. Please provide the image URL of the product.", "no success"

    @staticmethod
    def _handle_product_url(user_input):
        is_valid_url = (".jpeg" in user_input or ".jpg" in user_input) and \
                       ("http:/" in user_input or "https:/" in user_input)
        
        if not st.session_state.product_shared:
            return "Please provide the product name first", "no success"
        
        if is_valid_url and st.session_state.product_shared:
            _, msg = chatbot_response(
                user_input, "", data_extractor_url, 
                st.session_state.system_prompt, extract_info=True
            )
            st.session_state.product_selected = True
            if msg != "product not found because image is not clear" and "Product information could not be extracted from the image" not in msg:
                response = msg
                status = "success"
            elif msg == "product not found because image is not clear":
                response = msg + ". Please share clear image URLs!"
                status = "no success"
            else:
                response = msg + ". Please re-try!"
                status = "no success"
                
            return response, status
            
        return "Please provide valid image URL of the product.", "no success"

def main():
    #Initialize session state
    SessionState.initialize()
    
    # Display title
    st.title("ConsumeWise - Your Food Product Analysis Assistant")
    
    # Handle system prompt
    system_prompt = SystemPromptManager.render_sidebar()
    
    if not system_prompt:
        st.warning("⚠️ Please enter a system prompt in the sidebar before proceeding.")
        st.chat_input("Enter your message:", disabled=True)
        return
    
    # Show welcome message
    if not st.session_state.welcome_shown:
        st.session_state.messages.append({
            "role": "assistant", 
            "content": st.session_state.welcome_msg
        })
        st.session_state.welcome_shown = True
    
    # Display chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    
    # Handle product selection if awaiting
    selection_in_progress = False
    if st.session_state.awaiting_selection:
        selection_in_progress = ProductSelector.handle_selection()
    
    # Only show chat input if not awaiting selection
    if not selection_in_progress:
        user_input = st.chat_input("Enter your message:", key="user_input")
        if user_input:
            # Add user message to chat
            st.session_state.messages.append({"role": "user", "content": user_input})
            with st.chat_message("user"):
                st.markdown(user_input)
            
            # Process response
            response, status = ChatManager.process_response(user_input)

            st.session_state.messages.append({"role": "assistant", "content": response})
            with st.chat_message("assistant"):
                st.markdown(response)
            
            if status == "success":
                SessionState.initialize()  # Reset states for next product
                #st.session_state.welcome_msg = "What is the next product you would like me to analyze today?"
                keys_to_keep = ["system_prompt", "messages", "welcome_msg"]
                keys_to_delete = [key for key in st.session_state.keys() if key not in keys_to_keep]
                    
                for key in keys_to_delete:
                    del st.session_state[key]
                st.session_state.welcome_msg = "What product would you like me to analyze next?"
                
            #elif response:  # Only add response if it's not None
            #    print(f"DEBUG : st.session_state.awaiting_selection : {st.session_state.awaiting_selection}")
            #    print(f"response : {response}")
            st.rerun()
    else:
        # Disable chat input while selection is in progress
        st.chat_input("Please confirm your selection above first...", disabled=True)
    
    # Clear chat history button
    if st.button("Clear Chat History"):
        st.session_state.clear()
        st.rerun()

if __name__ == "__main__":
    main()