ishworrsubedii committed
Commit
2a77d6d
1 Parent(s): e5394ac

Added endpoints for graph

Files changed (1):
  1. app.py +231 -45
app.py CHANGED
@@ -1,5 +1,8 @@
 import io
 import tempfile
+from ipaddress import ip_address
+from typing import Optional
+
 import jwt
 import base64
 import json
@@ -8,7 +11,7 @@ from jwt import ExpiredSignatureError, InvalidTokenError
 from starlette import status
 from functions import *
 import pandas as pd
-from fastapi import FastAPI, File, UploadFile, HTTPException,Request
+from fastapi import FastAPI, File, UploadFile, HTTPException, Request, Query
 from pydantic import BaseModel
 from fastapi.middleware.cors import CORSMiddleware
 from src.api.speech_api import speech_translator_router
@@ -19,8 +22,6 @@ from collections import Counter, defaultdict
 from datetime import datetime, timedelta
 from dateutil.parser import isoparse
 
-
-
 nltk.download('punkt_tab')
 
 app = FastAPI(title="ConversAI", root_path="/api/v1")
@@ -265,15 +266,15 @@ async def loadPDF(vectorstore: str, pdf: UploadFile = File(...)):
         "output": text,
         "source": source
     }
-    dct = json.dumps(dct, indent = 1).encode("utf-8")
-    fileName = createDataSourceName(sourceName = source)
+    dct = json.dumps(dct, indent=1).encode("utf-8")
+    fileName = createDataSourceName(sourceName=source)
     response = supabase.storage.from_("ConversAI").upload(file=dct, path=f"{fileName}_data.json")
     response = (
         supabase.table("ConversAI_ChatbotDataSources")
-        .insert({"username": username,
-                 "chatbotName": chatbotName,
-                 "dataSourceName": fileName,
-                 "sourceEndpoint": "/loadPDF",
+        .insert({"username": username,
+                 "chatbotName": chatbotName,
+                 "dataSourceName": fileName,
+                 "sourceEndpoint": "/loadPDF",
                  "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")})
         .execute()
     )
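The same dump-upload-insert sequence recurs in /loadPDF, /loadImagePDF, /loadText, /loadWebURLs, and the YouTube endpoint below, differing only in the source label and the endpoint string. A minimal sketch of a shared helper that could collapse the repetition (the name registerDataSource and its signature are hypothetical, not part of this commit):

    def registerDataSource(username: str, chatbotName: str, text, source: str, endpoint: str):
        # Hypothetical helper: one home for the repeated JSON-dump -> storage
        # upload -> table-insert sequence used by every load* endpoint.
        payload = json.dumps({"output": text, "source": source}, indent=1).encode("utf-8")
        fileName = createDataSourceName(sourceName=source)
        supabase.storage.from_("ConversAI").upload(file=payload, path=f"{fileName}_data.json")
        supabase.table("ConversAI_ChatbotDataSources").insert(
            {"username": username,
             "chatbotName": chatbotName,
             "dataSourceName": fileName,
             "sourceEndpoint": endpoint,
             "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")}
        ).execute()
        return fileName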
@@ -282,7 +283,6 @@ async def loadPDF(vectorstore: str, pdf: UploadFile = File(...)):
     }
 
 
-
 @app.post("/loadImagePDF")
 async def loadImagePDF(vectorstore: str, pdf: UploadFile = File(...)):
     username, chatbotName = vectorstore.split("$")[1], vectorstore.split("$")[2]
@@ -293,15 +293,15 @@ async def loadImagePDF(vectorstore: str, pdf: UploadFile = File(...)):
         "output": text,
         "source": source
     }
-    dct = json.dumps(dct, indent = 1).encode("utf-8")
-    fileName = createDataSourceName(sourceName = source)
+    dct = json.dumps(dct, indent=1).encode("utf-8")
+    fileName = createDataSourceName(sourceName=source)
     response = supabase.storage.from_("ConversAI").upload(file=dct, path=f"{fileName}_data.json")
     response = (
         supabase.table("ConversAI_ChatbotDataSources")
-        .insert({"username": username,
-                 "chatbotName": chatbotName,
-                 "dataSourceName": fileName,
-                 "sourceEndpoint": "/loadImagePDF",
+        .insert({"username": username,
+                 "chatbotName": chatbotName,
+                 "dataSourceName": fileName,
+                 "sourceEndpoint": "/loadImagePDF",
                  "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")})
         .execute()
     )
@@ -310,7 +310,6 @@ async def loadImagePDF(vectorstore: str, pdf: UploadFile = File(...)):
     }
 
 
-
 class AddText(BaseModel):
     vectorstore: str
     text: str
@@ -324,15 +323,15 @@ async def loadText(addTextConfig: AddText):
         "output": text,
         "source": "Text"
     }
-    dct = json.dumps(dct, indent = 1).encode("utf-8")
-    fileName = createDataSourceName(sourceName = "Text")
+    dct = json.dumps(dct, indent=1).encode("utf-8")
+    fileName = createDataSourceName(sourceName="Text")
     response = supabase.storage.from_("ConversAI").upload(file=dct, path=f"{fileName}_data.json")
     response = (
         supabase.table("ConversAI_ChatbotDataSources")
-        .insert({"username": username,
-                 "chatbotName": chatbotName,
-                 "dataSourceName": fileName,
-                 "sourceEndpoint": "/loadText",
+        .insert({"username": username,
+                 "chatbotName": chatbotName,
+                 "dataSourceName": fileName,
+                 "sourceEndpoint": "/loadText",
                  "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")})
         .execute()
     )
@@ -366,7 +365,6 @@ async def addQAPairData(addQaPair: AddQAPair):
     }
 
 
-
 class LoadWebsite(BaseModel):
     vectorstore: str
     urls: list[str]
@@ -377,20 +375,20 @@ class LoadWebsite(BaseModel):
 async def loadWebURLs(loadWebsite: LoadWebsite):
     vectorstore, urls, source = loadWebsite.vectorstore, loadWebsite.urls, loadWebsite.source
     username, chatbotName = vectorstore.split("$")[1], vectorstore.split("$")[2]
-    text = extractTextFromUrlList(urls=urls)
+    text = extractTextFromUrlList(urls=urls)
     dct = {
         "output": text,
         "source": source
     }
-    dct = json.dumps(dct, indent = 1).encode("utf-8")
-    fileName = createDataSourceName(sourceName = source)
+    dct = json.dumps(dct, indent=1).encode("utf-8")
+    fileName = createDataSourceName(sourceName=source)
     response = supabase.storage.from_("ConversAI").upload(file=dct, path=f"{fileName}_data.json")
     response = (
         supabase.table("ConversAI_ChatbotDataSources")
-        .insert({"username": username,
-                 "chatbotName": chatbotName,
-                 "dataSourceName": fileName,
-                 "sourceEndpoint": "/loadWebURLs",
+        .insert({"username": username,
+                 "chatbotName": chatbotName,
+                 "dataSourceName": fileName,
+                 "sourceEndpoint": "/loadWebURLs",
                  "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")})
         .execute()
     )
@@ -399,15 +397,19 @@ async def loadWebURLs(loadWebsite: LoadWebsite):
     }
 
 
-
 @app.post("/answerQuery")
-async def answerQuestion(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192"):
+async def answerQuestion(request: Request, query: str, vectorstore: str, llmModel: str = "llama3-70b-8192"):
     username, chatbotName = vectorstore.split("$")[1], vectorstore.split("$")[2]
     output = answerQuery(query=query, vectorstore=vectorstore, llmModel=llmModel)
+    ip_address = request.client.host
+    response_token_count = len(output["output"])
+    city = get_ip_info(ip_address)
+
     response = (
         supabase.table("ConversAI_ChatHistory")
         .insert({"username": username, "chatbotName": chatbotName, "llmModel": llmModel, "question": query,
-                 "response": output["output"]})
+                 "response": output["output"], "IpAddress": ip_address, "ResponseTokenCount": response_token_count,
+                 "vectorstore": vectorstore, "City": city})
         .execute()
     )
     return output
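Two notes on this hunk: the module-level `from ipaddress import ip_address` added above is immediately shadowed by the local assignment here, so that import is unused; and ResponseTokenCount is len(output["output"]), a character count rather than a model-token count. If real token counts matter for the /token_usages graph below, something along these lines could be stored instead (a rough sketch, not what this commit does):

    def approximate_token_count(text: str) -> int:
        # Hypothetical replacement for len(text): whitespace-delimited words as
        # a crude proxy for LLM tokens; a proper tokenizer would be more faithful.
        return len(text.split())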
@@ -456,15 +458,15 @@ async def loadYoutubeTranscript(ytTranscript: YtTranscript):
         "output": text,
         "source": "www.youtube.com"
     }
-    dct = json.dumps(dct, indent = 1).encode("utf-8")
-    fileName = createDataSourceName(sourceName = "youtube")
+    dct = json.dumps(dct, indent=1).encode("utf-8")
+    fileName = createDataSourceName(sourceName="youtube")
     response = supabase.storage.from_("ConversAI").upload(file=dct, path=f"{fileName}_data.json")
     response = (
         supabase.table("ConversAI_ChatbotDataSources")
-        .insert({"username": username,
-                 "chatbotName": chatbotName,
-                 "dataSourceName": fileName,
-                 "sourceEndpoint": "/getYoutubeTranscript",
+        .insert({"username": username,
+                 "chatbotName": chatbotName,
+                 "dataSourceName": fileName,
+                 "sourceEndpoint": "/getYoutubeTranscript",
                  "sourceContentURL": os.path.join(os.environ["SUPABASE_PUBLIC_BASE_URL"], f"{fileName}_data.json")})
        .execute()
    )
@@ -506,7 +508,8 @@ async def chatHistory(vectorstore: str):
 @app.post("/listChatbotSources")
 async def listChatbotSources(vectorstore: str):
     username, chatbotName = vectorstore.split("$")[1], vectorstore.split("$")[2]
-    result = supabase.table("ConversAI_ChatbotDataSources").select("*").eq("username", username).eq("chatbotName", chatbotName).execute().data
+    result = supabase.table("ConversAI_ChatbotDataSources").select("*").eq("username", username).eq("chatbotName",
+                                                                                                    chatbotName).execute().data
     return result
 
 
@@ -520,14 +523,18 @@ async def trainChatbot(trainChatbotConfig: TrainChatbot):
     vectorstore, UrlSources = trainChatbotConfig.vectorstore, trainChatbotConfig.urls
     texts = []
     sources = []
-    fileTypes = [supabase.table("ConversAI_ChatbotDataSources").select("sourceEndpoint").eq("sourceContentURL", x).execute().data[0]["sourceEndpoint"] for x in UrlSources]
+    fileTypes = [supabase.table("ConversAI_ChatbotDataSources").select("sourceEndpoint").eq("sourceContentURL",
+                                                                                            x).execute().data[0][
+                     "sourceEndpoint"] for x in UrlSources]
     for source, fileType in zip(UrlSources, fileTypes):
         if ((fileType == "/loadPDF") | (fileType == "/loadImagePDF")):
             r = requests.get(source)
             file = eval(r.content.decode("utf-8"))
             content = file["output"]
             fileSource = file["source"]
-            texts.append(".".join([base64.b64decode(content[key].encode("utf-8")).decode("utf-8") for key in content.keys()]).replace("\n", " "))
+            texts.append(".".join(
+                [base64.b64decode(content[key].encode("utf-8")).decode("utf-8") for key in content.keys()]).replace(
+                "\n", " "))
             sources.append(fileSource)
         elif fileType == "/loadText":
             r = requests.get(source)
@@ -541,9 +548,188 @@ async def trainChatbot(trainChatbotConfig: TrainChatbot):
             file = eval(r.content.decode("utf-8"))
             content = file["output"]
             fileSource = file["source"]
-            texts.append(".".join([base64.b64decode(content[key].encode("utf-8")).decode("utf-8") for key in content.keys()]).replace("\n", " "))
+            texts.append(".".join(
+                [base64.b64decode(content[key].encode("utf-8")).decode("utf-8") for key in content.keys()]).replace(
+                "\n", " "))
             sources.append(fileSource)
         else:
             pass
     texts = [(text, source) for text, source in zip(texts, sources)]
-    return addDocuments(texts = texts, vectorstore = vectorstore)
+    return addDocuments(texts=texts, vectorstore=vectorstore)
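The files fetched in both trainChatbot branches were written with json.dumps by the load* endpoints, yet they are parsed here with eval, which executes whatever Python the response body contains. A safer parse, assuming the stored files are plain JSON (a sketch, not part of this commit):

    r = requests.get(source)
    file = json.loads(r.content.decode("utf-8"))  # parses the data without executing it
    content = file["output"]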
+
+
+def get_ip_info(ip: str):
+    try:
+        response = requests.get(f"https://ipinfo.io/{ip}/json")
+        data = response.json()
+        return data.get("city", "Unknown")
+    except Exception as e:
+        return "Unknown"
+
+
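get_ip_info issues a blocking request to ipinfo.io on every chat message, with no timeout, so a slow lookup stalls /answerQuery. A hedged variant with a bounded wait and a small cache (hypothetical, not part of this commit):

    from functools import lru_cache

    @lru_cache(maxsize=1024)
    def get_ip_info_cached(ip: str) -> str:
        # Hypothetical variant: the timeout bounds a slow ipinfo.io response,
        # and the cache avoids re-resolving the same client IP on every message.
        try:
            response = requests.get(f"https://ipinfo.io/{ip}/json", timeout=3)
            return response.json().get("city", "Unknown")
        except Exception:
            return "Unknown"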
+@app.post("/daily_chat_count")
+async def daily_chat_count(
+        start_date: Optional[str] = Query(None, description="Start date in ISO format (YYYY-MM-DD)"),
+        end_date: Optional[str] = Query(None, description="End date in ISO format (YYYY-MM-DD)")
+):
+    if not start_date or not end_date:
+        end_date = datetime.now().astimezone().date()
+        start_date = end_date - timedelta(days=7)
+    else:
+        start_date = isoparse(start_date).date()
+        end_date = isoparse(end_date).date()
+
+    response = supabase.table("ConversAI_ChatHistory").select("*").execute().data
+
+    dates = [
+        isoparse(i["timestamp"]).date()
+        for i in response
+        if start_date <= isoparse(i["timestamp"]).date() <= end_date
+    ]
+
+    date_count = Counter(dates)
+
+    data = [{"date": date.isoformat(), "count": count} for date, count in date_count.items()]
+
+    return {"data": data}
+
+
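daily_chat_count, like each endpoint below, selects the entire ConversAI_ChatHistory table and filters by date in Python. The supabase-py query builder can push the range into the database instead; a sketch, assuming "timestamp" is a timestamp column:

    # Hypothetical variant: filter server-side; the exclusive upper bound keeps
    # every row from the final day of the range.
    rows = (
        supabase.table("ConversAI_ChatHistory")
        .select("timestamp")
        .gte("timestamp", start_date.isoformat())
        .lt("timestamp", (end_date + timedelta(days=1)).isoformat())
        .execute()
        .data
    )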
+@app.post("/daily_active_end_user")
+async def daily_active_end_user(
+        start_date: Optional[str] = Query(None, description="Start date in ISO format (YYYY-MM-DD)"),
+        end_date: Optional[str] = Query(None, description="End date in ISO format (YYYY-MM-DD)")
+):
+    if not start_date or not end_date:
+        end_date = datetime.now().astimezone().date()
+        start_date = end_date - timedelta(days=7)
+    else:
+        start_date = isoparse(start_date).date()
+        end_date = isoparse(end_date).date()
+
+    response = supabase.table("ConversAI_ChatHistory").select("*").execute().data
+
+    ip_by_date = defaultdict(set)
+
+    for i in response:
+        timestamp = isoparse(i["timestamp"])
+        ip_address = i["IpAddress"]
+        if start_date <= timestamp.date() <= end_date:
+            date = timestamp.date()
+            ip_by_date[date].add(ip_address)
+
+    data = [{"date": date.isoformat(), "terminal": len(ips)} for date, ips in ip_by_date.items() if len(ips) > 1]
+
+    return {"data": data}
+
+
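Here "terminal" is the count of distinct client IPs per day, and the len(ips) > 1 guard means days with exactly one unique visitor never appear in the response. If single-visitor days should also chart (an assumption about intent; the commit keeps the filter), the guard can simply be dropped:

    data = [{"date": date.isoformat(), "terminal": len(ips)} for date, ips in ip_by_date.items()]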
+@app.post("/average_session_interaction")
+async def average_session_interaction(
+        start_date: Optional[str] = Query(None, description="Start date in ISO format (YYYY-MM-DD)"),
+        end_date: Optional[str] = Query(None, description="End date in ISO format (YYYY-MM-DD)")
+):
+    if not start_date or not end_date:
+        end_date = datetime.now().astimezone().date()
+        start_date = end_date - timedelta(days=7)
+    else:
+        start_date = isoparse(start_date).date()
+        end_date = isoparse(end_date).date()
+
+    response = supabase.table("ConversAI_ChatHistory").select("*").execute().data
+
+    total_messages_by_date = defaultdict(int)
+    unique_ips_by_date = defaultdict(set)
+
+    for i in response:
+        timestamp = isoparse(i["timestamp"])
+        ip_address = i["IpAddress"]
+        if start_date <= timestamp.date() <= end_date:
+            date = timestamp.date()
+            total_messages_by_date[date] += 1
+            unique_ips_by_date[date].add(ip_address)
+
+    data = []
+    for date in sorted(total_messages_by_date.keys()):
+        total_messages = total_messages_by_date[date]
+        unique_ips = len(unique_ips_by_date[date])
+        average_interactions = total_messages / unique_ips if unique_ips > 0 else 0
+        data.append({"date": date.isoformat(), "interactions": average_interactions})
+
+    return {"data": data}
+
+
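The metric here is messages per distinct IP per day. A worked example of the arithmetic:

    # 12 messages from 4 distinct IPs on one day -> 12 / 4 = 3.0 interactions.
    total_messages, unique_ips = 12, 4
    average_interactions = total_messages / unique_ips if unique_ips > 0 else 0
    assert average_interactions == 3.0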
+@app.post("/token_usages")
+async def token_usages(
+        start_date: Optional[str] = Query(None, description="Start date in ISO format (YYYY-MM-DD)"),
+        end_date: Optional[str] = Query(None, description="End date in ISO format (YYYY-MM-DD)")
+):
+    if not start_date or not end_date:
+        end_date = datetime.now().astimezone().date()
+        start_date = end_date - timedelta(days=7)
+    else:
+        start_date = isoparse(start_date).date()
+        end_date = isoparse(end_date).date()
+
+    response = supabase.table("ConversAI_ChatHistory").select("*").execute().data
+
+    token_usage_by_date = defaultdict(int)
+
+    for i in response:
+        timestamp = isoparse(i["timestamp"])
+        if start_date <= timestamp.date() <= end_date:
+            date = timestamp.date()
+            response_token_count = i.get("ResponseTokenCount")
+            if response_token_count is not None:
+                token_usage_by_date[date] += response_token_count
+
+    data = [{"date": date.isoformat(), "total_tokens": total_tokens} for date, total_tokens in
+            token_usage_by_date.items()]
+
+    return {"data": data}
+
+
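start_date and end_date are declared with Query, so they are optional query parameters and default to the trailing seven days. A hypothetical client call (placeholder host and dates):

    r = requests.post("https://<host>/api/v1/token_usages",
                      params={"start_date": "2024-08-01", "end_date": "2024-08-07"})
    print(r.json())  # {"data": [{"date": ..., "total_tokens": ...}, ...]}

The totals are sums of the ResponseTokenCount values logged by /answerQuery, which, as stored in this commit, are character counts.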
+@app.post("/add_feedback")
+async def add_feedback(request: Request, feedback: str, user_id: str):
+    client_ip = request.client.host
+    city = get_ip_info(client_ip)
+
+    response = supabase.table("ConversAI_Feedback").insert(
+        {"feedback": feedback, "user_id": user_id, "city": city, "ip": client_ip}).execute()
+
+    return {"message": "success"}
+
+
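feedback and user_id are plain query parameters, and /user_satisfaction_rate below only counts rows whose feedback is exactly "like" or "dislike", so callers should send one of those two strings. A hypothetical client call (placeholder host):

    requests.post("https://<host>/api/v1/add_feedback",
                  params={"feedback": "like", "user_id": "u123"})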
+@app.post("/user_satisfaction_rate")
+async def user_satisfaction_rate(
+        start_date: Optional[str] = Query(None, description="Start date in ISO format (YYYY-MM-DD)"),
+        end_date: Optional[str] = Query(None, description="End date in ISO format (YYYY-MM-DD)")
+):
+    if not start_date or not end_date:
+        end_date = datetime.now().astimezone().date()
+        start_date = end_date - timedelta(days=7)
+    else:
+        start_date = isoparse(start_date).date()
+        end_date = isoparse(end_date).date()
+
+    response = supabase.table("ConversAI_Feedback").select("*").execute().data
+
+    feedback_counts = defaultdict(lambda: {"like": 0, "dislike": 0})
+
+    for i in response:
+        timestamp = isoparse(i["timestamp"])
+        if start_date <= timestamp.date() <= end_date:
+            date = timestamp.date()
+            feedback = i.get("feedback")
+            if feedback == "like":
+                feedback_counts[date]["like"] += 1
+            elif feedback == "dislike":
+                feedback_counts[date]["dislike"] += 1
+
+    data = []
+    for date in sorted(feedback_counts.keys()):
+        like_count = feedback_counts[date]["like"]
+        dislike_count = feedback_counts[date]["dislike"]
+        total_feedback = like_count + dislike_count
+        satisfaction_rate = (like_count / total_feedback * 100) if total_feedback > 0 else 0
+        data.append({"date": date.isoformat(), "rate": satisfaction_rate})
+
+    return {"data": data}
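The rate is the share of likes among all feedback on a given day, as a percentage. A worked example:

    # 3 likes and 1 dislike on one day -> 3 / 4 * 100 = 75.0.
    like_count, dislike_count = 3, 1
    total_feedback = like_count + dislike_count
    satisfaction_rate = (like_count / total_feedback * 100) if total_feedback > 0 else 0
    assert satisfaction_rate == 75.0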