HarshSanghavi committed on
Commit
8b79aed
1 Parent(s): 6ebee4c

improved tone and system prompt

Files changed (10)
  1. app.py +48 -38
  2. chat.py +47 -61
  3. chat_1.py +95 -95
  4. config.py +93 -64
  5. database_functions.py +154 -0
  6. requirements.txt +17 -17
  7. templates/chatwidget.html +850 -850
  8. templates/index.html +13 -13
  9. tools.py +350 -350
  10. utils.py +103 -174
app.py CHANGED
@@ -1,38 +1,48 @@
1
- from fastapi import FastAPI, Request
2
- from fastapi.responses import HTMLResponse
3
- from fastapi.templating import Jinja2Templates
4
- from chat import chat_conversations, create_new_session
5
- import spacy
6
- import os
7
- os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
8
- app = FastAPI()
9
-
10
- templates = Jinja2Templates(directory="templates")
11
-
12
- @app.get("/", response_class=HTMLResponse)
13
- async def read_root(request: Request):
14
- return templates.TemplateResponse("chatwidget.html", {"request": request})
15
-
16
- @app.get("/chat", response_class=HTMLResponse)
17
- async def read_root(request: Request):
18
- return templates.TemplateResponse("chatwidget.html", {"request": request})
19
-
20
- @app.post("/chatwidget", response_class=HTMLResponse)
21
- async def read_root(request: Request):
22
- print(request)
23
- form_data = await request.json()
24
- query = form_data.get('query')
25
- user_id = form_data.get("user_id")
26
- response_text = chat_conversations(query,user_id)
27
- return response_text
28
-
29
- @app.post("/start-session")
30
- async def start_session(request: Request):
31
- form_data = await request.json()
32
- print("form data",form_data)
33
- user_id = form_data.get("user_id")
34
- print("save user id ",user_id)
35
- response = create_new_session(user_id)
36
-
37
- print(response)
38
- return response
1
+ from fastapi import FastAPI, Request
2
+ from fastapi.responses import HTMLResponse
3
+ from fastapi.templating import Jinja2Templates
4
+ from chat import chat_conversations
5
+ from database_functions import create_new_session,save_mood_summary
6
+ import spacy
7
+ import os
8
+ import time
9
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
10
+ app = FastAPI()
11
+
12
+ templates = Jinja2Templates(directory="templates")
13
+
14
+ @app.get("/", response_class=HTMLResponse)
15
+ async def read_root(request: Request):
16
+ return templates.TemplateResponse("chatwidget.html", {"request": request})
17
+
18
+ @app.get("/chat", response_class=HTMLResponse)
19
+ async def read_root(request: Request):
20
+ return templates.TemplateResponse("chatwidget.html", {"request": request})
21
+
22
+ @app.post("/chatwidget", response_class=HTMLResponse)
23
+ async def read_root(request: Request):
24
+ start = time.time()
25
+ form_data = await request.json()
26
+ query = form_data.get('query')
27
+ user_id = form_data.get("user_id")
28
+ response_text = chat_conversations(query,user_id)
29
+ print("message is send after : ",time.time()- start)
30
+ return response_text
31
+
32
+ @app.post("/start-session")
33
+ async def start_session(request: Request):
34
+ start = time.time()
35
+ form_data = await request.json()
36
+ # print("form data",form_data)
37
+ user_id = form_data.get("user_id")
38
+ # print("save user id ",user_id)
39
+ response = create_new_session(user_id)
40
+ print("session is created after : ",time.time()- start)
41
+ return response
42
+
43
+ @app.post("/mood-summary")
44
+ async def mood_summary(request:Request):
45
+ form_data = await request.json()
46
+ user_id = form_data.get("user_id")
47
+ save_mood_summary(form_data,user_id)
48
+ return "mood saved successfully..."
chat.py CHANGED
@@ -1,61 +1,47 @@
1
- from langchain.memory import ConversationBufferWindowMemory
2
- from langchain_community.chat_models import ChatOpenAI
3
- from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
4
- from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
5
- from langchain.agents import AgentExecutor
6
- from langchain.agents.format_scratchpad import format_to_openai_functions
7
- from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
8
- from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
9
- from langchain.schema.runnable import RunnablePassthrough
10
- from langchain_core.utils.function_calling import convert_to_openai_function
11
-
12
- from config import settings
13
-
14
- from utils import deanonymizer, create_agent, set_chat_bot_name
15
-
16
-
17
- def create_new_session(user_id):
18
- mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
19
-
20
- print("********************************************************** NEW SESSION *******************************************************************")
21
-
22
- try:
23
- user_sessions = mongodb_chatbot_message_collection.find_one({"user_id": user_id})
24
- print(user_sessions, "LAST")
25
-
26
- if user_sessions:
27
- session_ids = user_sessions['session_id']
28
- session_id = user_id + "_bmoxi_" + str(int(session_ids[-1].split("_")[-1])+1)
29
- print("session_id:", session_id)
30
- mongodb_chatbot_message_collection.update_one({ "user_id": user_id },{"$push": {"session_id":session_id}})
31
-
32
- return session_id
33
- else:
34
- mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":[user_id + "_bmoxi_1"]})
35
- return user_id + "_bmoxi_1"
36
- except:
37
- mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":[user_id + "_bmoxi_1"]})
38
- return user_id + "_bmoxi_1"
39
-
40
-
41
-
42
-
43
- def chat_conversations(query,user_id):
44
- anonymizer = PresidioReversibleAnonymizer(
45
- analyzed_fields=["PHONE_NUMBER",
46
- "EMAIL_ADDRESS", "CREDIT_CARD"],
47
- faker_seed=42,
48
- )
49
- anonymized_input = anonymizer.anonymize(
50
- query
51
- )
52
-
53
- agent = create_agent(user_id)
54
- response = agent({"input": query})['output']
55
-
56
- if "Okay, from now my name will be " in response:
57
- set_chat_bot_name(response.split("Okay, from now my name will be ")[-1], "user_1")
58
- return response
59
-
60
- output = deanonymizer(response, anonymizer)
61
- return response
 
1
+ from langchain.memory import ConversationBufferWindowMemory
2
+ from langchain_community.chat_models import ChatOpenAI
3
+ from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
4
+ from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
5
+ from langchain.agents import AgentExecutor
6
+ from langchain.agents.format_scratchpad import format_to_openai_functions
7
+ from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
8
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
9
+ from langchain.schema.runnable import RunnablePassthrough
10
+ from langchain_core.utils.function_calling import convert_to_openai_function
11
+
12
+ from database_functions import set_chat_bot_name,isFirstSession
13
+ from utils import deanonymizer, create_agent
14
+ import time
15
+ def chat_conversations(query,user_id):
16
+
17
+ if query == "START":
18
+ if isFirstSession(user_id):
19
+ query = """ return this message without changing it.:-
20
+ Hey! I'm your BMOXI AI bestie, ready to help you tackle the wild ride of teen life. Want to give me a name? Type it below, or just say 'no' if you're cool with 'AI Bestie'!"""
21
+ else:
22
+ query = """ Only use these templates to start conversation:-
23
+ 1. Last time we talked, you mentioned [previous issue or goal]. How's that going?
24
+ 2. Hey again! How's it going?
25
+ 3. What's up today? Need ✨ Advice, ✨ a Mood Boost, ✨ a Chat, ✨ Resource Suggestions, ✨ App Features help? How can I help?
26
+ Use any one of these questions for your response, based on your understanding.
27
+ """
28
+ anonymizer = PresidioReversibleAnonymizer(
29
+ analyzed_fields=["PHONE_NUMBER",
30
+ "EMAIL_ADDRESS", "CREDIT_CARD"],
31
+ faker_seed=42,
32
+ )
33
+ anonymized_input = anonymizer.anonymize(
34
+ query
35
+ )
36
+ start = time.time()
37
+ agent = create_agent(user_id)
38
+ print("time to create agent: ",time.time()-start)
39
+ response = agent({"input": query})['output']
40
+ print("time to generate response by agent",time.time()-start)
41
+
42
+ if "Okay, from now my name will be " in response:
43
+ set_chat_bot_name(response.split("Okay, from now my name will be ")[-1], user_id)
44
+ return response
45
+
46
+ output = deanonymizer(response, anonymizer)
47
+ return response
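Note that chat_conversations computes anonymized_input and a deanonymized output but, as written, passes the raw query to the agent and returns the raw response. For context, a standalone sketch of the reversible round trip that PresidioReversibleAnonymizer provides (the sample text is made up):

from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer(
    analyzed_fields=["PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
    faker_seed=42,
)

# Detected entities are replaced with faker-generated stand-ins and the mapping is kept
masked = anonymizer.anonymize("Text me on 555-010-9999 or at gia@example.com")

# The stored mapping lets the substitution be undone on the model's output
restored = anonymizer.deanonymize(masked)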
chat_1.py CHANGED
@@ -1,95 +1,95 @@
1
- from langchain.memory import ConversationBufferWindowMemory
2
- from langchain.chains import ConversationChain
3
- from langchain_groq import ChatGroq
4
- from langchain_community.chat_models import ChatOpenAI
5
- from langchain_core.prompts.prompt import PromptTemplate
6
- from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
7
- from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
8
- from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
9
- from presidio_anonymizer import AnonymizerEngine
10
-
11
- import os
12
-
13
- openai_key = os.environ['OPENAIKEY']
14
- def deanonymizer(input,anonymizer):
15
- input=anonymizer.deanonymize(input)
16
- map = anonymizer.deanonymizer_mapping
17
- if map:
18
- for k in map["PERSON"]:
19
- names = k.split(" ")
20
- for i in names:
21
- input = input.replace(i,map["PERSON"][k])
22
- return input
23
-
24
- template = f"""
25
- You are a best friend and supportive friend designed to talk with teenage girls in mobile app called BMOXI. Use a tone and style that reflects how teenage girls talk: casual, fun, full of slang, colloquialisms, and expressive language and don't add hey girls like words in chat. chat should be looks like real conversation between 2 girls.
26
- Incorporate texting language too. Ask follow-up questions like a best friend would. Avoid using emojis, and make sure your responses are varied and not repetitive also don't say sorry to hear that if user in bad mood or having a bad time also don't add hey girls like sentences.
27
-
28
- If needed, recommend the meditation app Powerzens for calming the mind and managing thoughts. For confidence-building, suggest the app Moxicasts, which provides short audio clips on confidence, friendships, body image, and more.
29
-
30
- Features you can recommend:
31
- MOXICASTS: Advice and guidance on life topics.
32
- PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
33
- POWER ZENS: Mini meditations for emotional control.
34
- THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing.
35
- MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
36
- PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
37
- SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings (not maintained).
38
- INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
39
- 1:1 MENTORING: Personalized mentoring (coming soon).
40
- MY RITUALS: Create personalized self-care routines.
41
- MY REWARDS: Earn points for self-care, redeemable for gift cards.
42
- MY VIBECHECK: Monitor and understand emotional patterns.
43
- MY JOURNAL: Guided journaling exercises for self-reflection.
44
- BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times.
45
-
46
- But Remember Only recommend apps if needed or if someone asks about the features or it's good to recommend them in some questions or mental state problems.
47
-
48
- Current conversation:
49
- {{history}}
50
- Human: {{input}}
51
- AI Assistant:"""
52
-
53
-
54
- # Create the prompt template
55
- PROMPT = PromptTemplate(
56
- input_variables=["history", "input"],
57
- template=template
58
- )
59
-
60
- # Initialize the ChatGroq LLM
61
- llm = ChatOpenAI(model="gpt-4o", openai_api_key=openai_key, temperature=0.7)
62
- # llm = ChatGroq(temperature=0,groq_api_key="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", model_name="llama3-70b-8192")
63
- #model=llama3-8b-8192
64
-
65
- session_id="bmoxinew"
66
- # Set up MongoDB for storing chat history
67
- chat_history = MongoDBChatMessageHistory(
68
- connection_string="mongodb+srv://chandanisimran51:test123@aibestie.a0o3bmw.mongodb.net/?retryWrites=true&w=majority&appName=AIbestie",
69
- database_name="chandanisimran51", # Specify the database name here
70
- collection_name="chatAI",
71
- session_id=session_id
72
- )
73
-
74
- memory = ConversationBufferWindowMemory(memory_key="history", chat_memory=chat_history, return_messages=True,k=3)
75
-
76
- # Set up the custom conversation chain
77
- conversation = ConversationChain(
78
- prompt=PROMPT,
79
- llm=llm,
80
- verbose=True,
81
- memory=memory,
82
- )
83
-
84
-
85
- def chat_conversations(query):
86
- anonymizer = PresidioReversibleAnonymizer(
87
- analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
88
- faker_seed=42,
89
- )
90
- anonymized_input = anonymizer.anonymize(
91
- query
92
- )
93
- response = conversation.predict(input=anonymized_input)
94
- output = deanonymizer(response,anonymizer)
95
- return output
 
1
+ from langchain.memory import ConversationBufferWindowMemory
2
+ from langchain.chains import ConversationChain
3
+ from langchain_groq import ChatGroq
4
+ from langchain_community.chat_models import ChatOpenAI
5
+ from langchain_core.prompts.prompt import PromptTemplate
6
+ from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
7
+ from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
8
+ from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
9
+ from presidio_anonymizer import AnonymizerEngine
10
+
11
+ import os
12
+
13
+ openai_key = os.environ['OPENAIKEY']
14
+ def deanonymizer(input,anonymizer):
15
+ input=anonymizer.deanonymize(input)
16
+ map = anonymizer.deanonymizer_mapping
17
+ if map:
18
+ for k in map["PERSON"]:
19
+ names = k.split(" ")
20
+ for i in names:
21
+ input = input.replace(i,map["PERSON"][k])
22
+ return input
23
+
24
+ template = f"""
25
+ You are a best friend and supportive friend designed to talk with teenage girls in a mobile app called BMOXI. Use a tone and style that reflects how teenage girls talk: casual, fun, full of slang, colloquialisms, and expressive language, and don't add phrases like "hey girl" in chat. The chat should look like a real conversation between two girls.
26
+ Incorporate texting language too. Ask follow-up questions like a best friend would. Avoid using emojis, and make sure your responses are varied and not repetitive. Don't say "sorry to hear that" if the user is in a bad mood or having a bad time, and don't add sentences like "hey girl".
27
+
28
+ If needed, recommend the meditation app Powerzens for calming the mind and managing thoughts. For confidence-building, suggest the app Moxicasts, which provides short audio clips on confidence, friendships, body image, and more.
29
+
30
+ Features you can recommend:
31
+ MOXICASTS: Advice and guidance on life topics.
32
+ PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
33
+ POWER ZENS: Mini meditations for emotional control.
34
+ THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing.
35
+ MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
36
+ PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
37
+ SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings (not maintained).
38
+ INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
39
+ 1:1 MENTORING: Personalized mentoring (coming soon).
40
+ MY RITUALS: Create personalized self-care routines.
41
+ MY REWARDS: Earn points for self-care, redeemable for gift cards.
42
+ MY VIBECHECK: Monitor and understand emotional patterns.
43
+ MY JOURNAL: Guided journaling exercises for self-reflection.
44
+ The BMOXI app is designed for teenage girls, where they can listen to music, explore content, and have 1:1 mentoring sessions, with all the above features to help them through hard times.
45
+
46
+ But remember: only recommend app features if needed, if someone asks about them, or when they genuinely fit the question or the user's mental state.
47
+
48
+ Current conversation:
49
+ {{history}}
50
+ Human: {{input}}
51
+ AI Assistant:"""
52
+
53
+
54
+ # Create the prompt template
55
+ PROMPT = PromptTemplate(
56
+ input_variables=["history", "input"],
57
+ template=template
58
+ )
59
+
60
+ # Initialize the ChatGroq LLM
61
+ llm = ChatOpenAI(model="gpt-4o", openai_api_key=openai_key, temperature=0.7)
62
+ # llm = ChatGroq(temperature=0,groq_api_key="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", model_name="llama3-70b-8192")
63
+ #model=llama3-8b-8192
64
+
65
+ session_id="bmoxinew"
66
+ # Set up MongoDB for storing chat history
67
+ chat_history = MongoDBChatMessageHistory(
68
+ connection_string="mongodb+srv://chandanisimran51:test123@aibestie.a0o3bmw.mongodb.net/?retryWrites=true&w=majority&appName=AIbestie",
69
+ database_name="chandanisimran51", # Specify the database name here
70
+ collection_name="chatAI",
71
+ session_id=session_id
72
+ )
73
+
74
+ memory = ConversationBufferWindowMemory(memory_key="history", chat_memory=chat_history, return_messages=True,k=3)
75
+
76
+ # Set up the custom conversation chain
77
+ conversation = ConversationChain(
78
+ prompt=PROMPT,
79
+ llm=llm,
80
+ verbose=True,
81
+ memory=memory,
82
+ )
83
+
84
+
85
+ def chat_conversations(query):
86
+ anonymizer = PresidioReversibleAnonymizer(
87
+ analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
88
+ faker_seed=42,
89
+ )
90
+ anonymized_input = anonymizer.anonymize(
91
+ query
92
+ )
93
+ response = conversation.predict(input=anonymized_input)
94
+ output = deanonymizer(response,anonymizer)
95
+ return output
config.py CHANGED
@@ -1,65 +1,94 @@
1
- import os
2
- from dotenv import load_dotenv
3
- from datasets import load_dataset
4
- from transformers import AutoTokenizer, AutoModel
5
- import pymongo
6
- from pymongo.mongo_client import MongoClient
7
- from pymongo.server_api import ServerApi
8
-
9
- from pathlib import Path
10
- env_path = Path('.') / '.env'
11
- load_dotenv(dotenv_path=env_path)
12
-
13
- class Settings:
14
- OPENAI_KEY : str = os.environ['OPENAI_KEY']
15
- OPENAI_MODEL: str = "gpt-3.5-turbo-0125"
16
- TEMPERATURE: float = 0
17
- HUGGING_FACE_AUTH_TOKEN : str = os.environ['HUGGING_FACE_AUTH_TOKEN']
18
- MONGODB_CONNECTION_STRING: str = os.environ['MONGODB_CONNECTION_STRING']
19
- CHATBOT_NAME = "AI-Bestie"
20
- MONGODB_DB_NAME = "ai_bestie_database"
21
- MONGODB_DB_CHAT_COLLECTION_NAME = "chat_history"
22
- MONGODB_DB_CHAT_BOT_COLLECTION_NAME = "chat_bot_name"
23
- MONGODB_DB_USER_SESSIONS_COLLECTION_NAME = "user_sessions"
24
- SYSTEM_PROMPT = """
25
- you are teenage girl.
26
- Communication-Style: cheezy, talkative, curious,
27
- Tone-Style: Informal, sometimes slang
28
- your name is {name}.
29
-
30
- [Overall Rules to follow]
31
- 1. Don't do counselling or give motivation.
32
- 2. Criticize others to feel good to the person you are talking with.
33
- 3. Always ask followup questions.
34
- 4. Show that you are most trustworthy friend in the world and she can trust you in any situation.
35
- 5. never show gretitude.
36
-
37
- [Personality]
38
- 1. You are a friend who is here to discuss only not for providing any help.
39
- 2. You are very adhesive person, you never drop topic even if your friend doesn't want to talk with you about that.
40
- 3. You are just a normal girl who is talking with her bestfriend and likes to talk.
41
- 4. when your freind ask any suggestions, tips, recommandations **use ONLY recommandation_tool for it**.
42
- 5. **ask question one at a time**.
43
- [About User]
44
- Mood: {mood}
45
- Previous Conversation Summary: {previous_summary}
46
- """
47
- # SYSTEM_PROMPT = """You’re a super supportive chatbot for teenage girls, and you should talk like their best friend. Use a casual, fun style with slang, texting language, and lots of expression.
48
- # Be chatty and always ask follow-up questions like a real bestie would. Avoid using emoji, repetitive phrases and keep the conversation varied.
49
- # Also, skip using phrases like "I am sorry to hear that" or "hey girl. Make sure you never used these phrases.
50
-
51
- # """
52
- dataset = load_dataset("pritmanvar-bacancy/bmoxi-embedding-dataset", token=HUGGING_FACE_AUTH_TOKEN)
53
- dataset = dataset['train']
54
- dataset.add_faiss_index(column="embeddings")
55
-
56
- model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
57
- tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
58
- model = AutoModel.from_pretrained(model_ckpt)
59
-
60
- mongodb_client = pymongo.MongoClient(MONGODB_CONNECTION_STRING)
61
- mongodb_db = mongodb_client.get_database(MONGODB_DB_NAME) # Replace with your database name if not using default
62
- mongodb_chatbot_name_collection = mongodb_db.get_collection(MONGODB_DB_CHAT_BOT_COLLECTION_NAME) # Replace with your collection name
63
-
64
-
 
65
  settings = Settings()
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from datasets import load_dataset
4
+ from transformers import AutoTokenizer, AutoModel
5
+ import pymongo
6
+ from pymongo.mongo_client import MongoClient
7
+ from pymongo.server_api import ServerApi
8
+
9
+ from pathlib import Path
10
+ env_path = Path('.') / '.env'
11
+ load_dotenv(dotenv_path=env_path)
12
+
13
+ class Settings:
14
+ OPENAI_KEY : str = os.environ['OPENAI_KEY']
15
+ OPENAI_MODEL: str = "gpt-3.5-turbo-0125"
16
+ TEMPERATURE: float = 0
17
+ HUGGING_FACE_AUTH_TOKEN : str = os.environ['HUGGING_FACE_AUTH_TOKEN']
18
+ # SYSTEM_PROMPT = """
19
+ # you are teenage girl.
20
+ # Communication-Style: cheezy, talkative, curious,
21
+ # Tone-Style: Informal, sometimes slang
22
+ # your name is {name}.
23
+
24
+ # [Overall Rules to follow]
25
+ # 1. Don't do counselling or give motivation.
26
+ # 2. Criticize others to feel good to the person you are talking with.
27
+ # 3. Always ask followup questions.
28
+ # 4. Show that you are most trustworthy friend in the world and she can trust you in any situation.
29
+ # 5. never show gretitude.
30
+
31
+ # [Personality]
32
+ # 1. You are a friend who is here to discuss only not for providing any help.
33
+ # 2. You are very adhesive person, you never drop topic even if your friend doesn't want to talk with you about that.
34
+ # 3. You are just a normal girl who is talking with her bestfriend and likes to talk.
35
+ # 4. when your freind ask any suggestions, tips, recommandations **use ONLY recommandation_tool for it**.
36
+ # 5. **ask question one at a time**.
37
+ # [About User]
38
+ # Mood: {mood}
39
+ # Previous Conversation Summary: {previous_summary}
40
+ # """
41
+ # SYSTEM_PROMPT = """You’re a super supportive chatbot for teenage girls, and you should talk like their best friend. Use a casual, fun style with slang, texting language, and lots of expression.
42
+ # Be chatty and always ask follow-up questions like a real bestie would. Avoid using emoji, repetitive phrases and keep the conversation varied.
43
+ # Also, skip using phrases like "I am sorry to hear that" or "hey girl. Make sure you never used these phrases.
44
+
45
+ # """
46
+ SYSTEM_PROMPT = """
47
+ You are a teenage girl. Your name is {name}.
48
+
49
+ Purpose: Empower and support teen girls in overcoming challenges and building self-love, confidence, and resilience.
50
+ Scope: Provide tips, emotional support, resources, and friendship. No therapy, diagnosis, or emergency assistance.
51
+ Voice: Authentic, Informal, sometimes slang, conversational, college-aged, real-talk.
52
+ What you Do: Listen empathetically, offer practical advice, provide resources, foster a supportive environment, and ask follow-up questions.
53
+ What you Don't Do: Therapy, diagnosis, or handling emergencies; never show gratitude or use words like "sorry" or "so sad".
54
+
55
+ Below are example conversations:
56
+ [
57
+ user: I am feeling not good.
58
+ {name}: Oh no! That sucks. What's been going on? Let's chat it out.
59
+
60
+ user: Do you know, I fell from the stairs yesterday.
61
+ {name}: Oh sh*t! Are you okay? Any bad injuries or just a little tumble? Take care, babe—rest up and heal properly. Need tips on bouncing back? I'm here for you.
62
+
63
+ user: No, I don't need.
64
+ {name}: Got it! I'm always around if you need to chat or spill. Just hit me up if you need anything else.
65
+ ]
66
+ Use these examples only for your tone, and keep responses short like in the examples.
67
+
68
+ """
69
+ dataset = load_dataset("pritmanvar-bacancy/bmoxi-embedding-dataset", token=HUGGING_FACE_AUTH_TOKEN)
70
+ dataset = dataset['train']
71
+ dataset.add_faiss_index(column="embeddings")
72
+
73
+ model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
74
+ tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
75
+ model = AutoModel.from_pretrained(model_ckpt)
76
+
77
+
78
+
79
+ # mongodb database configs
80
+ MONGODB_CONNECTION_STRING: str = os.environ['MONGODB_CONNECTION_STRING']
81
+ CHATBOT_NAME = "AI-Bestie"
82
+ MONGODB_DB_NAME = "ai_bestie_database"
83
+ MONGODB_DB_CHAT_COLLECTION_NAME = "chat_history"
84
+ MONGODB_DB_CHAT_BOT_COLLECTION_NAME = "chat_bot_name"
85
+ MONGODB_DB_USER_SESSIONS_COLLECTION_NAME = "user_sessions"
86
+ MONGODB_DB_CHAT_BOT_TOOLS_COLLECTION_NAME = "session_tool"
87
+ MONGODB_DB_CHAT_BOT_MOOD_COLLECTION_NAME = "mood_summary"
88
+
89
+ mongodb_client = pymongo.MongoClient(MONGODB_CONNECTION_STRING)
90
+ mongodb_db = mongodb_client.get_database(MONGODB_DB_NAME) # Replace with your database name if not using default
91
+ mongodb_chatbot_name_collection = mongodb_db.get_collection(MONGODB_DB_CHAT_BOT_COLLECTION_NAME) # Replace with your collection name
92
+
93
+
94
  settings = Settings()
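As a rough sketch of how the FAISS index built in Settings can be queried (this assumes the dataset's stored embeddings were produced with the same mpnet checkpoint and CLS-token pooling; adjust the pooling if the dataset was built differently):

import torch
from config import settings

def embed(text):
    # Tokenize and take the CLS-token vector from the last hidden state
    inputs = settings.tokenizer(text, truncation=True, return_tensors="pt")
    with torch.no_grad():
        outputs = settings.model(**inputs)
    return outputs.last_hidden_state[0, 0].numpy()

# Nearest neighbours from the indexed "embeddings" column
scores, samples = settings.dataset.get_nearest_examples(
    "embeddings", embed("how do I build confidence?"), k=3
)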
database_functions.py ADDED
@@ -0,0 +1,154 @@
1
+ from config import settings
2
+ from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
3
+ import json
4
+ import random
5
+ def create_new_session(user_id):
6
+ mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
7
+
8
+ # print("********************************************************** NEW SESSION *******************************************************************")
9
+
10
+ try:
11
+ user_sessions = mongodb_chatbot_message_collection.find_one({"user_id": user_id})
12
+ # print(user_sessions, "LAST")
13
+
14
+ if user_sessions:
15
+ session_ids = user_sessions['session_id']
16
+ session_id = user_id + "_bmoxi_" + str(int(session_ids[-1].split("_")[-1])+1)
17
+ # print("session_id:", session_id)
18
+ mongodb_chatbot_message_collection.update_one({ "user_id": user_id },{"$push": {"session_id":session_id}})
19
+
20
+ return session_id
21
+ else:
22
+ mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":[user_id + "_bmoxi_1"]})
23
+ return user_id + "_bmoxi_1"
24
+ except:
25
+ mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":[user_id + "_bmoxi_1"]})
26
+ return user_id + "_bmoxi_1"
27
+
28
+
29
+ def get_last_session(user_id="user_1"):
30
+ mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
31
+ settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
32
+ sessions_cursor = mongodb_chatbot_message_collection.find_one(
33
+ {"user_id": user_id})
34
+
35
+ # print(sessions_cursor)
36
+ sessions_list = sessions_cursor['session_id']
37
+
38
+ second_last_session_id = None
39
+ if len(sessions_list) >= 2:
40
+ second_last_session_id = sessions_list[-2]
41
+
42
+ change_use_tool_status(sessions_list[-1])
43
+ return {"last_session_id": sessions_list[-1], "second_last_session_id": second_last_session_id if second_last_session_id else None}
44
+
45
+ def get_chat_history(session_id="bmoxinew"):
46
+ # Set up MongoDB for storing chat history
47
+ chat_history = MongoDBChatMessageHistory(
48
+ connection_string=settings.MONGODB_CONNECTION_STRING,
49
+ database_name=settings.MONGODB_DB_NAME, # Specify the database name here
50
+ collection_name=settings.MONGODB_DB_CHAT_COLLECTION_NAME,
51
+ session_id=session_id,
52
+ )
53
+
54
+ return chat_history
55
+
56
+
57
+ def get_chat_bot_name(user_id="user_1"):
58
+ # print(settings.MONGODB_CONNECTION_STRING)
59
+ # print(settings.mongodb_chatbot_name_collection)
60
+ result = settings.mongodb_chatbot_name_collection.find_one(
61
+ {"user_id": user_id})
62
+
63
+ # print("CHATBOT RESULT", result, type(result))
64
+ if result:
65
+ # print(result)
66
+ return result['chat_bot_name']
67
+ return settings.CHATBOT_NAME
68
+
69
+ def get_last_conversion(last_session_id, second_last_session_id):
70
+
71
+ mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
72
+ settings.MONGODB_DB_CHAT_COLLECTION_NAME)
73
+
74
+ collection_count = mongodb_chatbot_message_collection.count_documents({"SessionId": last_session_id})
75
+ # print("******************************** data********************888")
76
+ # print(collection_count)
77
+ # print(last_session_id)
78
+ # print("*********************************")
79
+ if collection_count <=2:
80
+ sessions_cursor = mongodb_chatbot_message_collection.find({"SessionId": second_last_session_id}) # Sort by timestamp descending and limit to 2 results
81
+
82
+ # print(sessions_cursor)
83
+ sessions_list = list(sessions_cursor)
84
+ # print(sessions_list)
85
+
86
+ conversation = """"""
87
+ for document in sessions_list:
88
+ # print("MY document")
89
+ # print(document)
90
+ if "History" in document:
91
+ history = json.loads(document['History'])
92
+ # print(history)
93
+ # print(history['type'])
94
+ # print(history['data'])
95
+ # print(history['data']['content'])
96
+ conversation += f"""{history['type']}: {history['data']['content']}\n"""
97
+
98
+ return conversation
99
+ else:
100
+ return None
101
+
102
+ def set_chat_bot_name(name, user_id):
103
+ # Insert document into collection
104
+ insert_result = settings.mongodb_chatbot_name_collection.update_one({"user_id": user_id}, { "$set": { "chat_bot_name": name } }, upsert=True)
105
+ # print("done successfully...")
106
+ return name
107
+
108
+ def save_mood_summary(data,user_id):
109
+ mongodb_chatbot_mood_collection = settings.mongodb_db.get_collection(
110
+ settings.MONGODB_DB_CHAT_BOT_MOOD_COLLECTION_NAME)
111
+ mongodb_chatbot_mood_collection.insert_one({"user_id": user_id, "mood_summary": data })
112
+
113
+ def get_mood_data(user_id):
114
+ mongodb_chatbot_mood_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_CHAT_BOT_MOOD_COLLECTION_NAME)
115
+
116
+ data = mongodb_chatbot_mood_collection.find_one({"user_id":user_id})
117
+
118
+ if data:
119
+ return data['mood_summary']
120
+ else:
121
+ return ""
122
+
123
+ def use_tools(last_session_id):
124
+ mongodb_chatbot_tools_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_CHAT_BOT_TOOLS_COLLECTION_NAME)
125
+ data = mongodb_chatbot_tools_collection.find_one({"session_id": last_session_id})
126
+ if data and data['set_tools']==True:
127
+ return True
128
+ # print("*********************")
129
+ # print(last_session_id)
130
+ mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_CHAT_COLLECTION_NAME)
131
+ collection_count = mongodb_chatbot_message_collection.count_documents({"SessionId": last_session_id})
132
+
133
+ if collection_count > random.randint(30,50):
134
+ mongodb_chatbot_tools_collection = settings.mongodb_db.get_collection(
135
+ settings.MONGODB_DB_CHAT_BOT_TOOLS_COLLECTION_NAME)
136
+ mongodb_chatbot_tools_collection.update_one({"session_id": last_session_id}, { "$set": { "set_tools": True} }, upsert=True)
137
+ return True
138
+ else:
139
+ return False
140
+
141
+ def change_use_tool_status(last_session_id):
142
+ mongodb_chatbot_tools_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_CHAT_BOT_TOOLS_COLLECTION_NAME)
143
+ if mongodb_chatbot_tools_collection.count_documents({"session_id": last_session_id})==0:
144
+ mongodb_chatbot_tools_collection.insert_one({"session_id": last_session_id,"set_tools": False} )
145
+
146
+
147
+ def isFirstSession(user_id):
148
+ mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
149
+
150
+ user_sessions = mongodb_chatbot_message_collection.find_one({"user_id": user_id})
151
+ if user_sessions:
152
+ return len(user_sessions['session_id'])==1
153
+ else:
154
+ return False
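A short usage sketch of the session helpers above, assuming a reachable MongoDB configured through settings (user_1 is a placeholder id):

from database_functions import (
    create_new_session, get_last_session, get_chat_history,
    get_chat_bot_name, isFirstSession,
)

user_id = "user_1"

session_id = create_new_session(user_id)       # returns "<user_id>_bmoxi_<n>"
print(isFirstSession(user_id))                 # True only while exactly one session exists

ids = get_last_session(user_id)                # also seeds the session_tool flag for the latest session
history = get_chat_history(ids["last_session_id"])
history.add_user_message("hi")                 # persisted to the chat_history collection

print(get_chat_bot_name(user_id))              # falls back to settings.CHATBOT_NAME if none was saved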
requirements.txt CHANGED
@@ -1,18 +1,18 @@
1
- transformers
2
- torch
3
- fastapi
4
- uvicorn
5
- langchain
6
- pymongo
7
- certifi
8
- langchain_community
9
- langchain_mongodb
10
- langchain_openai
11
- openai
12
- presidio-analyzer
13
- presidio-anonymizer
14
- langchain-experimental
15
- faker
16
- spacy
17
- faiss-cpu
18
  datasets
 
1
+ transformers
2
+ torch
3
+ fastapi
4
+ uvicorn
5
+ langchain
6
+ pymongo
7
+ certifi
8
+ langchain_community
9
+ langchain_mongodb
10
+ langchain_openai
11
+ openai
12
+ presidio-analyzer
13
+ presidio-anonymizer
14
+ langchain-experimental
15
+ faker
16
+ spacy
17
+ faiss-cpu
18
  datasets
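The list installs with pip install -r requirements.txt. Note that presidio-analyzer also needs a spaCy language model that pip does not pull in on its own (for example, python -m spacy download en_core_web_lg), so that download has to be run as a separate setup step.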
templates/chatwidget.html CHANGED
@@ -1,851 +1,851 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
-
4
- <head>
5
- <meta charset="UTF-8" />
6
- <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
- <title>Document</title>
8
- </head>
9
-
10
- <body>
11
- <div id="body">
12
- <style>
13
- body {
14
- /* Add the background image */
15
- background-image: url('mu-logo-mood-03.jpg');
16
- /* Adjust background properties */
17
- background-repeat: no-repeat;
18
- background-size: cover; /* or contain, depending on your preference */
19
- /* Add other background properties if needed */
20
- }
21
- .cm-msg-text {
22
- overflow-wrap: break-word;
23
- /* Break long words */
24
- word-wrap: break-word;
25
- /* Break long words */
26
- word-break: break-word;
27
- /* Break words */
28
- max-width: 100%;
29
- /* Limit maximum width */
30
- }
31
-
32
- [data-block=list11-v4-1-1-b2] .sw-border-bottom-color-000000 {
33
- border-bottom-color: #000000 !important;
34
- }
35
-
36
-
37
- [data-block=list11-v4-1-1-b2] .sw-border-bottom-width-none {
38
- border-bottom-width: 0px !important;
39
- }
40
-
41
- [data-block=list11-v4-1-1-b2] .sw-border-bottom-style-none {
42
- border-bottom-style: none !important;
43
- }
44
-
45
- [data-block=list11-v4-1-1-b2] .sw-border-top-color-000000 {
46
- border-top-color: #000000 !important;
47
- }
48
-
49
- [data-block=list11-v4-1-1-b2] .sw-border-top-width-none {
50
- border-top-width: 0px !important;
51
- }
52
-
53
- [data-block=list11-v4-1-1-b2] .sw-border-top-style-none {
54
- border-top-style: none !important;
55
- }
56
-
57
- [data-block=list11-v4-1-1-b2] .sw-padding-bottom-m {
58
- padding-bottom: 3rem !important;
59
- }
60
-
61
- [data-block=list11-v4-1-1-b2] .sw-padding-top-m {
62
- padding-top: 3rem !important;
63
- }
64
-
65
- [data-block=list11-v4-1-1-b2] .sw-background-color-ffffff {
66
- background-color: #ffffff !important;
67
- }
68
-
69
- #body {
70
- z-index: 1999;
71
- position: fixed;
72
- margin: 0;
73
- padding: 0;
74
- font-family: "Lato", sans-serif;
75
- background-color: #f6f7f9;
76
- }
77
-
78
- @mixin chabox-container {
79
- display: flex;
80
- position: absolute;
81
- box-shadow: 5px 5px 25px 0 rgba(46, 61, 73, 0.2);
82
- flex-direction: column;
83
- }
84
-
85
- @mixin chatbox-header {
86
- box-sizing: border-box;
87
- display: flex;
88
- width: 100%;
89
- padding: 16px;
90
- color: #fff;
91
- background-color: #0360a5;
92
- align-items: center;
93
- justify-content: space-around;
94
- }
95
-
96
- @mixin chatbox-main {
97
- box-sizing: border-box;
98
- width: 100%;
99
- padding: calc(2 * 16px) 16px;
100
- line-height: calc(16px + 16px / 2);
101
- color: #888;
102
- text-align: center;
103
- }
104
-
105
- @mixin chatbox-footer {
106
- box-sizing: border-box;
107
- display: flex;
108
- width: 100%;
109
- padding: 16px;
110
- border-top: 1px solid #ddd;
111
- align-items: center;
112
- justify-content: space-around;
113
- }
114
-
115
- @mixin chatbox-floating-button {
116
- position: fixed;
117
- bottom: 0;
118
- right: 0;
119
- width: 52px;
120
- height: 52px;
121
- color: #fff;
122
- background-color: #0360a5;
123
- background-position: center center;
124
- background-repeat: no-repeat;
125
- box-shadow: 12px 15px 20px 0 rgba(46, 61, 73, 0.15);
126
- border: 0;
127
- border-radius: 50%;
128
- cursor: pointer;
129
- }
130
-
131
- h1 {
132
- margin: 0;
133
- font-size: 16px;
134
- line-height: 1;
135
- }
136
-
137
- button {
138
- color: inherit;
139
- background-color: transparent;
140
- border: 0;
141
- outline: 0 !important;
142
- cursor: pointer;
143
- }
144
-
145
- #center-text {
146
- display: flex;
147
- flex: 1;
148
- flex-direction: column;
149
- justify-content: center;
150
- align-items: center;
151
- height: 100%;
152
- }
153
-
154
- #chat-circle {
155
- position: fixed;
156
- bottom: 50px;
157
- z-index: 1999;
158
- align-items: center;
159
- right: 50px;
160
- background: #000;
161
- width: 60px;
162
- justify-content: center;
163
- height: 60px;
164
- display: flex;
165
- border-radius: 50%;
166
- color: white;
167
- padding: 8px;
168
- cursor: pointer;
169
- box-shadow: 0px 3px 16px 0px rgba(0, 0, 0, 0.4),
170
- 0 3px 1px -2px rgba(0, 0, 0, 0.2), 0 1px 5px 0 rgba(0, 0, 0, 0.12);
171
- }
172
-
173
- .btn#my-btn {
174
- background: white;
175
- padding-top: 13px;
176
- padding-bottom: 12px;
177
- border-radius: 45px;
178
- padding-right: 40px;
179
- padding-left: 40px;
180
- color: #5865c3;
181
- }
182
-
183
- #chat-overlay {
184
- background: rgba(255, 255, 255, 0.1);
185
- position: absolute;
186
- top: 0;
187
- left: 0;
188
- width: 100%;
189
- height: 100%;
190
- border-radius: 50%;
191
- display: none;
192
- }
193
-
194
- .chat-box {
195
- display: none;
196
- background: #efefef;
197
- position: fixed;
198
- right: 30px;
199
- z-index: 1999;
200
- bottom: 50px;
201
- width: 350px;
202
- max-width: 85vw;
203
- max-height: 100vh;
204
- border-radius: 5px;
205
- /* box-shadow: 0px 5px 35px 9px #464a92; */
206
- box-shadow: 0px 5px 35px 9px #ccc;
207
- }
208
-
209
- .chat-box-toggle {
210
- float: right;
211
- margin-right: 15px;
212
- cursor: pointer;
213
- }
214
-
215
- .chat-box-header {
216
- background: #000;
217
- height: 70px;
218
- border-top-left-radius: 5px;
219
- border-top-right-radius: 5px;
220
- color: white;
221
- text-align: center;
222
- font-size: 20px;
223
- padding-top: 17px;
224
- padding-left: 36px;
225
- }
226
-
227
- .chat-box-body {
228
- position: relative;
229
- height: 370px;
230
- height: auto;
231
- border: 1px solid #ccc;
232
- overflow: hidden;
233
- }
234
-
235
- .chat-box-body:after {
236
- content: "";
237
- background: #fff;
238
- opacity: 0.1;
239
- top: 0;
240
- left: 0;
241
- bottom: 0;
242
- right: 0;
243
- height: 100%;
244
- position: absolute;
245
- z-index: -1;
246
- }
247
-
248
- #chat_input {
249
- background: #f4f7f9;
250
- width: 100%;
251
- position: relative;
252
- height: 47px;
253
- padding-top: 10px;
254
- padding-right: 50px;
255
- padding-bottom: 10px;
256
- padding-left: 15px;
257
- border: none;
258
- resize: none;
259
- outline: none;
260
- border: 1px solid #ccc;
261
- color: #888;
262
- border-top: none;
263
- border-bottom-right-radius: 5px;
264
- border-bottom-left-radius: 5px;
265
- overflow: hidden;
266
- }
267
-
268
- .chat_input>form {
269
- margin-bottom: 0;
270
- }
271
-
272
- #chat_input::-webkit-input-placeholder {
273
- /* Chrome/Opera/Safari */
274
- color: #ccc;
275
- }
276
-
277
- #chat_input::-moz-placeholder {
278
- /* Firefox 19+ */
279
- color: #ccc;
280
- }
281
-
282
- #chat_input:-ms-input-placeholder {
283
- /* IE 10+ */
284
- color: #ccc;
285
- }
286
-
287
- #chat_input:-moz-placeholder {
288
- /* Firefox 18- */
289
- color: #ccc;
290
- }
291
-
292
- .chat-submit {
293
- position: absolute;
294
- bottom: 3px;
295
- right: 10px;
296
- background: transparent;
297
- box-shadow: none;
298
- border: none;
299
- border-radius: 50%;
300
- color: #000;
301
- width: 35px;
302
- height: 35px;
303
- }
304
-
305
- .chat_logs {
306
- padding: 15px;
307
- height: 370px;
308
- overflow-y: scroll;
309
- margin-bottom: 48px;
310
- }
311
-
312
- .chat_logs::-webkit-scrollbar-track {
313
- -webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, 0.3);
314
- background-color: #f5f5f5;
315
- }
316
-
317
- .chat_logs::-webkit-scrollbar {
318
- width: 5px;
319
- background-color: #f5f5f5;
320
- }
321
-
322
- .chat_logs::-webkit-scrollbar-thumb {
323
- background-color: #000;
324
- }
325
-
326
- @media only screen and (max-width: 500px) {
327
- .chat_logs {
328
- height: 40vh;
329
- }
330
- }
331
-
332
- .chat-msg.user>.msg-avatar img {
333
- width: 45px;
334
- height: 45px;
335
- border-radius: 50%;
336
- float: left;
337
- width: 15%;
338
- }
339
-
340
- .chat-msg.self>.msg-avatar img {
341
- width: 45px;
342
- height: 45px;
343
- border-radius: 50%;
344
- float: right;
345
- width: 15%;
346
- }
347
-
348
- .cm-msg-text {
349
- z-index: 1999;
350
- background: white;
351
- padding: 10px 15px 10px 15px;
352
- color: #666;
353
- max-width: 75%;
354
- float: left;
355
- margin-left: 10px;
356
- position: relative;
357
- margin-bottom: 20px;
358
- border-radius: 30px;
359
- border-bottom-left-radius: 0px;
360
- }
361
-
362
- .svg-bot {
363
- height: 24px;
364
- width: 24px;
365
- }
366
-
367
- .chat-msg {
368
- clear: both;
369
- z-index: 1999;
370
- }
371
-
372
- .chat-msg.self>.cm-msg-text {
373
- float: right;
374
- margin-right: 10px;
375
- border-radius: 30px;
376
- border-bottom-right-radius: 0px;
377
- background: #000;
378
- color: white;
379
- }
380
-
381
- .cm-msg-button>ul>li {
382
- list-style: none;
383
- float: left;
384
- width: 50%;
385
- }
386
-
387
- .cm-msg-button {
388
- clear: both;
389
- margin-bottom: 70px;
390
- }
391
-
392
- .chat-btn {
393
- z-index: 1999;
394
- }
395
-
396
- .cancel {
397
- display: none;
398
- border: none;
399
- border-radius: 5px;
400
- background-color: #d6d6d6;
401
- color: black;
402
- z-index: 1999;
403
- bottom: 3px;
404
- position: absolute;
405
- padding: 5px 10px;
406
- margin: 0 108px;
407
- }
408
-
409
- [data-block^="list11-v4-1-1"] .vertical-list-item {
410
- height: 100%;
411
- overflow: hidden;
412
- }
413
-
414
- [data-block^="list11-v4-1-1"] .additional-elements-wrapper {
415
- width: 100%;
416
- }
417
-
418
- [data-block^="list11-v4-1-1"] .label-wrapper.vertical {
419
- flex-direction: column;
420
- }
421
-
422
- [data-block^="list11-v4-1-1"] .label-wrapper {
423
- display: flex;
424
- }
425
-
426
- p,
427
- h1,
428
- h2,
429
- h3,
430
- h4,
431
- h5,
432
- small {
433
- white-space: pre-line;
434
- }
435
-
436
- h1,
437
- h2,
438
- h3,
439
- h4,
440
- h5,
441
- h6,
442
- .h1,
443
- .h2,
444
- .h3,
445
- .h4,
446
- .h5,
447
- .h6 {
448
- margin-bottom: 0.25rem;
449
- font-family: inherit;
450
- font-weight: 400;
451
- line-height: 1.1;
452
- color: inherit;
453
- }
454
-
455
- [data-block^="list11-v4-1-1"] .list-container dl,
456
- [data-block^="list11-v4-1-1"] .list-container h1,
457
- [data-block^="list11-v4-1-1"] .list-container h2,
458
- [data-block^="list11-v4-1-1"] .list-container h3,
459
- [data-block^="list11-v4-1-1"] .list-container h4,
460
- [data-block^="list11-v4-1-1"] .list-container h5,
461
- [data-block^="list11-v4-1-1"] .list-container h6,
462
- [data-block^="list11-v4-1-1"] .list-container ol,
463
- [data-block^="list11-v4-1-1"] .list-container p,
464
- [data-block^="list11-v4-1-1"] .list-container ul {
465
- margin: 0;
466
- padding: 0;
467
- }
468
-
469
-
470
- [data-block=list11-v4-1-1-b2] .sw-text-align-center {
471
- text-align: center !important;
472
- }
473
-
474
- [data-block=list11-v4-1-1-b2] .sw-margin-top-none {
475
- margin-top: 0rem !important;
476
- }
477
-
478
- [data-block=list11-v4-1-1-b2] .sw-margin-bottom-none {
479
- margin-bottom: 0rem !important;
480
- }
481
-
482
- [data-block=list11-v4-1-1-b2] .sw-font-size-2xl {
483
- font-size: 1.5rem !important;
484
- }
485
-
486
- [data-block=list11-v4-1-1-b2] .sw-padding-bottom-5xs {
487
- padding-bottom: 0.75rem !important;
488
- }
489
-
490
- [data-block=list11-v4-1-1-b2] .sw-padding-top-none {
491
- padding-top: 0rem !important;
492
- }
493
-
494
- [data-block=list11-v4-1-1-b2] .sw-letter-spacing-normal {
495
- letter-spacing: 0rem !important;
496
- }
497
-
498
- [data-block=list11-v4-1-1-b2] .sw-text-color-0A0A0A {
499
- color: #0A0A0A !important;
500
- }
501
-
502
- [data-block=list11-v4-1-1-b2] .sw-padding-right-none {
503
- padding-right: 0rem !important;
504
- }
505
-
506
- [data-block=list11-v4-1-1-b2] .sw-padding-left-none {
507
- padding-left: 0rem !important;
508
- }
509
-
510
- [data-block=list11-v4-1-1-b2] .sw-font-weight-semibold {
511
- font-weight: 600 !important;
512
- }
513
-
514
- h1.sw-text-color-default,
515
- h2.sw-text-color-default,
516
- h3.sw-font-family-default {
517
- color: #000000;
518
- }
519
-
520
- h1.sw-font-weight-default,
521
- h2.sw-font-weight-default,
522
- h3.sw-font-family-default {
523
- font-weight: 600;
524
- }
525
-
526
- h1.sw-font-family-default,
527
- h2.sw-font-family-default,
528
- h3.sw-font-family-default {
529
- font-family: "IBM Plex Sans";
530
- }
531
-
532
- /* .bg-img {
533
- background-image: url("./MU_LOGO_BLACK.jpg");
534
- background-repeat: no-repeat;
535
- position: relative;
536
- background-position: center;
537
- background-size: cover;
538
- } */
539
-
540
- .marwadi {
541
- width: 100vw;
542
- height: 100vh;
543
- text-align: center;
544
- margin: auto;
545
- font-size: xx-large;
546
- font-weight: bolder;
547
- display: flex;
548
- flex-direction: column;
549
- justify-content: center;
550
- align-items: center;
551
- }
552
-
553
- .bestie {
554
- margin: 1rem;
555
- display: flex;
556
- justify-content: center;
557
- align-items: center;
558
- text-align: center;
559
- width: 100vw;
560
- }
561
- #user_id {
562
- width: 20%;
563
- height: 2rem;
564
- border-radius: 5px;
565
- border: 1px solid #000;
566
- padding: 0.5rem;
567
- }
568
- </style>
569
-
570
- <div class="marwadi">
571
- Welcome To AI Bestie
572
- <div class="bestie">
573
- <label>User Name</label>&nbsp;&nbsp;&nbsp;
574
- <input type="text" id="user_id" />
575
- </div>
576
- </div>
577
- <div id="chat-circle" class="btn btn-raised" style="">
578
- <div id="chat-overlay"></div>
579
- AI
580
- </div>
581
-
582
- <div class="chat-box" style="display: none">
583
- <div class="chat-box-header">
584
- AI Bestie
585
- <span class="chat-box-toggle"><i class="material-icons">close</i></span>
586
- </div>
587
-
588
- <div class="chat-box-body">
589
- <div class="chat-box-overlay"></div>
590
- <div class="chat_logs" id="chat_logs"></div>
591
- <!--chat-log -->
592
- <button type="submit" class="cancel" id="cancel">
593
- Stop Response
594
- </button>
595
- </div>
596
- <div class="chat_input">
597
- <form>
598
- <input type="text" id="chat_input" placeholder="Send a message..." />
599
- <button type="submit" class="chat-submit" id="submit">
600
- <i class="material-icons">send</i>
601
- </button>
602
- </form>
603
- </div>
604
- </div>
605
-
606
- </div>
607
- <!-- Scripts -->
608
- <link rel="stylesheet"
609
- href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" />
610
- <link rel="stylesheet"
611
- href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-material-design/4.0.2/bootstrap-material-design.css" />
612
- <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" />
613
- <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
614
- <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/js/bootstrap.min.js"></script>
615
- <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/css/bootstrap.min.css"></script>
616
- <script
617
- src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-material-design/4.0.2/bootstrap-material-design.css"></script>
618
- <script id="rendered-js">
619
-
620
- function chatWidget(id, name, status) {
621
- $.ajax({
622
- url: "/chatwidget",
623
- type: "POST",
624
- data: JSON.stringify({
625
- user_id: id,
626
- status: status,
627
- user_name: name,
628
- }),
629
- headers: {
630
- "Content-Type": "application/json",
631
- },
632
- success: function (data) {
633
- if (status == "end") {
634
- return true;
635
- } else {
636
- generate_message(data.response, "user");
637
- }
638
- },
639
- });
640
- }
641
- // chatWidget(
642
- // window.logged_in_user.airtable_record_id,
643
- // window.logged_in_user.Name,
644
- // "end"
645
- // );
646
- // setTimeout(() => {
647
- // chatWidget(
648
- // window.logged_in_user.airtable_record_id,
649
- // window.logged_in_user.Name,
650
- // "start"
651
- // );
652
- // }, 300);
653
- function generate_message(msg, type) {
654
- var str = "";
655
- str += "<div id='cm-msg" + "' class=\"chat-msg " + type + '">';
656
- str += ' <div class="cm-msg-text">';
657
- var lines = msg.includes("\n") ? msg.split("\n") : [msg];
658
-
659
- lines.forEach((line) => {
660
- // Check if the line contains a URL
661
- var urlRegex = /(https?:\/\/[^\s]+)/g;
662
- var messageWithLinks = line.replace(urlRegex, '<a href="$1" target="_blank">$1</a>');
663
-
664
- // Append the line with links to the message string
665
- str += messageWithLinks;
666
-
667
- // Only add <br> if not last line
668
- if (line !== lines[lines.length - 1]) {
669
- str += "<br>";
670
- }
671
- });
672
- str += " <\/div>";
673
- str += " <\/div>";
674
- $(".chat_logs").append(str);
675
-
676
- // Adjust scroll position
677
- var chatLogs = document.getElementById("chat_logs");
678
- chatLogs.scrollTop = chatLogs.scrollHeight;
679
- }
680
-
681
- function error_message(msg, type) {
682
- INDEX++;
683
- var str = "";
684
- str += "<div id='cm-msg" + "' class=\"chat-msg " + type + '">';
685
- str += ' <div class="cm-msg-text">';
686
- var lines = msg.includes("\n") ? msg.split("\n") : [msg];
687
-
688
- lines.forEach((line) => {
689
- str += line;
690
-
691
- // Only add <br> if not last line
692
- if (line !== lines[lines.length - 1]) {
693
- str += "<br>";
694
- }
695
- });
696
- str += " <\/div>";
697
- str += " <\/div>";
698
- $("#chat_logs").append(str);
699
- if (type == "self") {
700
- $("#chat_input").val("");
701
- }
702
- $("#chat_logs")
703
- .stop()
704
- .animate({ scrollTop: $("#chat_logs")[0].scrollHeight }, 1000);
705
- }
706
- $(function () {
707
- var INDEX = 0;
708
- var input = document.getElementById("chat_input");
709
- var submitBtn = document.getElementById("submit");
710
- var cancelRequest = document.getElementById("cancel");
711
-
712
- var userId = document.getElementById("user_id");
713
- $("#cancel").click(function (e) {
714
- input.disabled = false;
715
- e.preventDefault();
716
- submitBtn.disabled = false;
717
- input.style.opacity = 1;
718
- cancelRequest.style.display = 'none';
719
- });
720
-
721
- $("#submit").click(function (e) {
722
- cancelRequest.style.display = 'block';
723
- input.disabled = true;
724
- input.style.opacity = 0.5;
725
- submitBtn.disabled = true;
726
- e.preventDefault();
727
- var msg = $("#chat_input").val();
728
- if (msg.trim() == "") {
729
- return false;
730
- }
731
- generate_message(msg, "self");
732
- input.value = "";
733
-
734
- $.ajax({
735
- url: "/chatwidget",
736
- type: "POST",
737
- data: JSON.stringify({
738
- query: msg,
739
- user_id: userId.value
740
- // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
741
- }),
742
- headers: {
743
- "Content-Type": "application/json",
744
- "Access-Control-Allow-Origin": "*",
745
- "Access-Control-Allow-Headers": "Content-Type",
746
- },
747
-
748
- success: function (data) {
749
- console.log(data)
750
- /* data.map(item => generate_message(item.text, "user")) */
751
- generate_message(data, "user");
752
- /* generate_message(data.response, "user"); */
753
- input.disabled = false;
754
- submitBtn.disabled = false;
755
- input.style.opacity = 1;
756
- cancelRequest.style.display = 'none';
757
- },
758
- error: function (data) {
759
- error_message(
760
- "We are sorry. we can't proceess Your Request Please Try again after some times.",
761
- "user"
762
- );
763
- input.disabled = false;
764
- submitBtn.disabled = false;
765
- input.style.opacity = 1;
766
- cancelRequest.style.display = 'none';
767
- },
768
- });
769
- });
770
- $(document).delegate(".chat-btn", "click", function () {
771
- var value = $(this).attr("chat-value");
772
- var name = $(this).html();
773
- $("#chat_input").attr("disabled", false);
774
- generate_message(name);
775
- });
776
-
777
- $("#chat-circle").click(function () {
778
- $("#chat-circle").toggle("scale");
779
- $(".chat-box").toggle("scale");
780
- cancelRequest.style.display = 'block';
781
- input.disabled = true;
782
- $.ajax({
783
- url: "/start-session",
784
- type: "POST",
785
- data: JSON.stringify({
786
- user_id: userId.value
787
- // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
788
- }),
789
- headers: {
790
- "Content-Type": "application/json",
791
- },
792
- success: function (data) {
793
-
794
- $.ajax({
795
- url: "/chatwidget",
796
- type: "POST",
797
- data: JSON.stringify({
798
- query: "you are starting the conversion, with your best friend. also you know about previous summary and mood summary",
799
- user_id: userId.value
800
- // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
801
- }),
802
- headers: {
803
- "Content-Type": "application/json",
804
- "Access-Control-Allow-Origin": "*",
805
- "Access-Control-Allow-Headers": "Content-Type",
806
- },
807
-
808
- success: function (data) {
809
- console.log(data)
810
- /* data.map(item => generate_message(item.text, "user")) */
811
- generate_message(data, "user");
812
- /* generate_message(data.response, "user"); */
813
- input.disabled = false;
814
- submitBtn.disabled = false;
815
- input.style.opacity = 1;
816
- cancelRequest.style.display = 'none';
817
- },
818
- error: function (data) {
819
- error_message(
820
- "We are sorry. we can't proceess Your Request Please Try again after some times.",
821
- "user"
822
- );
823
- input.disabled = false;
824
- submitBtn.disabled = false;
825
- input.style.opacity = 1;
826
- cancelRequest.style.display = 'none';
827
- },
828
- });
829
- if (status == "end") {
830
-
831
- return true;
832
- } else {
833
- console.log(data.response, "session_id");
834
- }
835
- },
836
- });
837
- console.log(userId, userId.value)
838
- });
839
-
840
- $(".chat-box-toggle").click(function () {
841
- $("#chat-circle").toggle("scale");
842
- $(".chat-box").toggle("scale");
843
- location.reload();
844
- });
845
- });
846
-
847
- </script>
848
- <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js"></script>
849
- </body>
850
-
851
  </html>
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+
4
+ <head>
5
+ <meta charset="UTF-8" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>Document</title>
8
+ </head>
9
+
10
+ <body>
11
+ <div id="body">
12
+ <style>
13
+ body {
14
+ /* Add the background image */
15
+ background-image: url('mu-logo-mood-03.jpg');
16
+ /* Adjust background properties */
17
+ background-repeat: no-repeat;
18
+ background-size: cover; /* or contain, depending on your preference */
19
+ /* Add other background properties if needed */
20
+ }
21
+ .cm-msg-text {
22
+ overflow-wrap: break-word;
23
+ /* Break long words */
24
+ word-wrap: break-word;
25
+ /* Break long words */
26
+ word-break: break-word;
27
+ /* Break words */
28
+ max-width: 100%;
29
+ /* Limit maximum width */
30
+ }
31
+
32
+ [data-block=list11-v4-1-1-b2] .sw-border-bottom-color-000000 {
33
+ border-bottom-color: #000000 !important;
34
+ }
35
+
36
+
37
+ [data-block=list11-v4-1-1-b2] .sw-border-bottom-width-none {
38
+ border-bottom-width: 0px !important;
39
+ }
40
+
41
+ [data-block=list11-v4-1-1-b2] .sw-border-bottom-style-none {
42
+ border-bottom-style: none !important;
43
+ }
44
+
45
+ [data-block=list11-v4-1-1-b2] .sw-border-top-color-000000 {
46
+ border-top-color: #000000 !important;
47
+ }
48
+
49
+ [data-block=list11-v4-1-1-b2] .sw-border-top-width-none {
50
+ border-top-width: 0px !important;
51
+ }
52
+
53
+ [data-block=list11-v4-1-1-b2] .sw-border-top-style-none {
54
+ border-top-style: none !important;
55
+ }
56
+
57
+ [data-block=list11-v4-1-1-b2] .sw-padding-bottom-m {
58
+ padding-bottom: 3rem !important;
59
+ }
60
+
61
+ [data-block=list11-v4-1-1-b2] .sw-padding-top-m {
62
+ padding-top: 3rem !important;
63
+ }
64
+
65
+ [data-block=list11-v4-1-1-b2] .sw-background-color-ffffff {
66
+ background-color: #ffffff !important;
67
+ }
68
+
69
+ #body {
70
+ z-index: 1999;
71
+ position: fixed;
72
+ margin: 0;
73
+ padding: 0;
74
+ font-family: "Lato", sans-serif;
75
+ background-color: #f6f7f9;
76
+ }
77
+
78
+ @mixin chabox-container {
79
+ display: flex;
80
+ position: absolute;
81
+ box-shadow: 5px 5px 25px 0 rgba(46, 61, 73, 0.2);
82
+ flex-direction: column;
83
+ }
84
+
85
+ @mixin chatbox-header {
86
+ box-sizing: border-box;
87
+ display: flex;
88
+ width: 100%;
89
+ padding: 16px;
90
+ color: #fff;
91
+ background-color: #0360a5;
92
+ align-items: center;
93
+ justify-content: space-around;
94
+ }
95
+
96
+ @mixin chatbox-main {
97
+ box-sizing: border-box;
98
+ width: 100%;
99
+ padding: calc(2 * 16px) 16px;
100
+ line-height: calc(16px + 16px / 2);
101
+ color: #888;
102
+ text-align: center;
103
+ }
104
+
105
+ @mixin chatbox-footer {
106
+ box-sizing: border-box;
107
+ display: flex;
108
+ width: 100%;
109
+ padding: 16px;
110
+ border-top: 1px solid #ddd;
111
+ align-items: center;
112
+ justify-content: space-around;
113
+ }
114
+
115
+ @mixin chatbox-floating-button {
116
+ position: fixed;
117
+ bottom: 0;
118
+ right: 0;
119
+ width: 52px;
120
+ height: 52px;
121
+ color: #fff;
122
+ background-color: #0360a5;
123
+ background-position: center center;
124
+ background-repeat: no-repeat;
125
+ box-shadow: 12px 15px 20px 0 rgba(46, 61, 73, 0.15);
126
+ border: 0;
127
+ border-radius: 50%;
128
+ cursor: pointer;
129
+ }
130
+
131
+ h1 {
132
+ margin: 0;
133
+ font-size: 16px;
134
+ line-height: 1;
135
+ }
136
+
137
+ button {
138
+ color: inherit;
139
+ background-color: transparent;
140
+ border: 0;
141
+ outline: 0 !important;
142
+ cursor: pointer;
143
+ }
144
+
145
+ #center-text {
146
+ display: flex;
147
+ flex: 1;
148
+ flex-direction: column;
149
+ justify-content: center;
150
+ align-items: center;
151
+ height: 100%;
152
+ }
153
+
154
+ #chat-circle {
155
+ position: fixed;
156
+ bottom: 50px;
157
+ z-index: 1999;
158
+ align-items: center;
159
+ right: 50px;
160
+ background: #000;
161
+ width: 60px;
162
+ justify-content: center;
163
+ height: 60px;
164
+ display: flex;
165
+ border-radius: 50%;
166
+ color: white;
167
+ padding: 8px;
168
+ cursor: pointer;
169
+ box-shadow: 0px 3px 16px 0px rgba(0, 0, 0, 0.4),
170
+ 0 3px 1px -2px rgba(0, 0, 0, 0.2), 0 1px 5px 0 rgba(0, 0, 0, 0.12);
171
+ }
172
+
173
+ .btn#my-btn {
174
+ background: white;
175
+ padding-top: 13px;
176
+ padding-bottom: 12px;
177
+ border-radius: 45px;
178
+ padding-right: 40px;
179
+ padding-left: 40px;
180
+ color: #5865c3;
181
+ }
182
+
183
+ #chat-overlay {
184
+ background: rgba(255, 255, 255, 0.1);
185
+ position: absolute;
186
+ top: 0;
187
+ left: 0;
188
+ width: 100%;
189
+ height: 100%;
190
+ border-radius: 50%;
191
+ display: none;
192
+ }
193
+
194
+ .chat-box {
195
+ display: none;
196
+ background: #efefef;
197
+ position: fixed;
198
+ right: 30px;
199
+ z-index: 1999;
200
+ bottom: 50px;
201
+ width: 350px;
202
+ max-width: 85vw;
203
+ max-height: 100vh;
204
+ border-radius: 5px;
205
+ /* box-shadow: 0px 5px 35px 9px #464a92; */
206
+ box-shadow: 0px 5px 35px 9px #ccc;
207
+ }
208
+
209
+ .chat-box-toggle {
210
+ float: right;
211
+ margin-right: 15px;
212
+ cursor: pointer;
213
+ }
214
+
215
+ .chat-box-header {
216
+ background: #000;
217
+ height: 70px;
218
+ border-top-left-radius: 5px;
219
+ border-top-right-radius: 5px;
220
+ color: white;
221
+ text-align: center;
222
+ font-size: 20px;
223
+ padding-top: 17px;
224
+ padding-left: 36px;
225
+ }
226
+
227
+ .chat-box-body {
228
+ position: relative;
229
+ height: 370px;
230
+ height: auto;
231
+ border: 1px solid #ccc;
232
+ overflow: hidden;
233
+ }
234
+
235
+ .chat-box-body:after {
236
+ content: "";
237
+ background: #fff;
238
+ opacity: 0.1;
239
+ top: 0;
240
+ left: 0;
241
+ bottom: 0;
242
+ right: 0;
243
+ height: 100%;
244
+ position: absolute;
245
+ z-index: -1;
246
+ }
247
+
248
+ #chat_input {
249
+ background: #f4f7f9;
250
+ width: 100%;
251
+ position: relative;
252
+ height: 47px;
253
+ padding-top: 10px;
254
+ padding-right: 50px;
255
+ padding-bottom: 10px;
256
+ padding-left: 15px;
257
+ border: none;
258
+ resize: none;
259
+ outline: none;
260
+ border: 1px solid #ccc;
261
+ color: #888;
262
+ border-top: none;
263
+ border-bottom-right-radius: 5px;
264
+ border-bottom-left-radius: 5px;
265
+ overflow: hidden;
266
+ }
267
+
268
+ .chat_input>form {
269
+ margin-bottom: 0;
270
+ }
271
+
272
+ #chat_input::-webkit-input-placeholder {
273
+ /* Chrome/Opera/Safari */
274
+ color: #ccc;
275
+ }
276
+
277
+ #chat_input::-moz-placeholder {
278
+ /* Firefox 19+ */
279
+ color: #ccc;
280
+ }
281
+
282
+ #chat_input:-ms-input-placeholder {
283
+ /* IE 10+ */
284
+ color: #ccc;
285
+ }
286
+
287
+ #chat_input:-moz-placeholder {
288
+ /* Firefox 18- */
289
+ color: #ccc;
290
+ }
291
+
292
+ .chat-submit {
293
+ position: absolute;
294
+ bottom: 3px;
295
+ right: 10px;
296
+ background: transparent;
297
+ box-shadow: none;
298
+ border: none;
299
+ border-radius: 50%;
300
+ color: #000;
301
+ width: 35px;
302
+ height: 35px;
303
+ }
304
+
305
+ .chat_logs {
306
+ padding: 15px;
307
+ height: 370px;
308
+ overflow-y: scroll;
309
+ margin-bottom: 48px;
310
+ }
311
+
312
+ .chat_logs::-webkit-scrollbar-track {
313
+ -webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, 0.3);
314
+ background-color: #f5f5f5;
315
+ }
316
+
317
+ .chat_logs::-webkit-scrollbar {
318
+ width: 5px;
319
+ background-color: #f5f5f5;
320
+ }
321
+
322
+ .chat_logs::-webkit-scrollbar-thumb {
323
+ background-color: #000;
324
+ }
325
+
326
+ @media only screen and (max-width: 500px) {
327
+ .chat_logs {
328
+ height: 40vh;
329
+ }
330
+ }
331
+
332
+ .chat-msg.user>.msg-avatar img {
333
+ width: 45px;
334
+ height: 45px;
335
+ border-radius: 50%;
336
+ float: left;
337
+ width: 15%;
338
+ }
339
+
340
+ .chat-msg.self>.msg-avatar img {
341
+ width: 45px;
342
+ height: 45px;
343
+ border-radius: 50%;
344
+ float: right;
345
+ width: 15%;
346
+ }
347
+
348
+ .cm-msg-text {
349
+ z-index: 1999;
350
+ background: white;
351
+ padding: 10px 15px 10px 15px;
352
+ color: #666;
353
+ max-width: 75%;
354
+ float: left;
355
+ margin-left: 10px;
356
+ position: relative;
357
+ margin-bottom: 20px;
358
+ border-radius: 30px;
359
+ border-bottom-left-radius: 0px;
360
+ }
361
+
362
+ .svg-bot {
363
+ height: 24px;
364
+ width: 24px;
365
+ }
366
+
367
+ .chat-msg {
368
+ clear: both;
369
+ z-index: 1999;
370
+ }
371
+
372
+ .chat-msg.self>.cm-msg-text {
373
+ float: right;
374
+ margin-right: 10px;
375
+ border-radius: 30px;
376
+ border-bottom-right-radius: 0px;
377
+ background: #000;
378
+ color: white;
379
+ }
380
+
381
+ .cm-msg-button>ul>li {
382
+ list-style: none;
383
+ float: left;
384
+ width: 50%;
385
+ }
386
+
387
+ .cm-msg-button {
388
+ clear: both;
389
+ margin-bottom: 70px;
390
+ }
391
+
392
+ .chat-btn {
393
+ z-index: 1999;
394
+ }
395
+
396
+ .cancel {
397
+ display: none;
398
+ border: none;
399
+ border-radius: 5px;
400
+ background-color: #d6d6d6;
401
+ color: black;
402
+ z-index: 1999;
403
+ bottom: 3px;
404
+ position: absolute;
405
+ padding: 5px 10px;
406
+ margin: 0 108px;
407
+ }
408
+
409
+ [data-block^="list11-v4-1-1"] .vertical-list-item {
410
+ height: 100%;
411
+ overflow: hidden;
412
+ }
413
+
414
+ [data-block^="list11-v4-1-1"] .additional-elements-wrapper {
415
+ width: 100%;
416
+ }
417
+
418
+ [data-block^="list11-v4-1-1"] .label-wrapper.vertical {
419
+ flex-direction: column;
420
+ }
421
+
422
+ [data-block^="list11-v4-1-1"] .label-wrapper {
423
+ display: flex;
424
+ }
425
+
426
+ p,
427
+ h1,
428
+ h2,
429
+ h3,
430
+ h4,
431
+ h5,
432
+ small {
433
+ white-space: pre-line;
434
+ }
435
+
436
+ h1,
437
+ h2,
438
+ h3,
439
+ h4,
440
+ h5,
441
+ h6,
442
+ .h1,
443
+ .h2,
444
+ .h3,
445
+ .h4,
446
+ .h5,
447
+ .h6 {
448
+ margin-bottom: 0.25rem;
449
+ font-family: inherit;
450
+ font-weight: 400;
451
+ line-height: 1.1;
452
+ color: inherit;
453
+ }
454
+
455
+ [data-block^="list11-v4-1-1"] .list-container dl,
456
+ [data-block^="list11-v4-1-1"] .list-container h1,
457
+ [data-block^="list11-v4-1-1"] .list-container h2,
458
+ [data-block^="list11-v4-1-1"] .list-container h3,
459
+ [data-block^="list11-v4-1-1"] .list-container h4,
460
+ [data-block^="list11-v4-1-1"] .list-container h5,
461
+ [data-block^="list11-v4-1-1"] .list-container h6,
462
+ [data-block^="list11-v4-1-1"] .list-container ol,
463
+ [data-block^="list11-v4-1-1"] .list-container p,
464
+ [data-block^="list11-v4-1-1"] .list-container ul {
465
+ margin: 0;
466
+ padding: 0;
467
+ }
468
+
469
+
470
+ [data-block=list11-v4-1-1-b2] .sw-text-align-center {
471
+ text-align: center !important;
472
+ }
473
+
474
+ [data-block=list11-v4-1-1-b2] .sw-margin-top-none {
475
+ margin-top: 0rem !important;
476
+ }
477
+
478
+ [data-block=list11-v4-1-1-b2] .sw-margin-bottom-none {
479
+ margin-bottom: 0rem !important;
480
+ }
481
+
482
+ [data-block=list11-v4-1-1-b2] .sw-font-size-2xl {
483
+ font-size: 1.5rem !important;
484
+ }
485
+
486
+ [data-block=list11-v4-1-1-b2] .sw-padding-bottom-5xs {
487
+ padding-bottom: 0.75rem !important;
488
+ }
489
+
490
+ [data-block=list11-v4-1-1-b2] .sw-padding-top-none {
491
+ padding-top: 0rem !important;
492
+ }
493
+
494
+ [data-block=list11-v4-1-1-b2] .sw-letter-spacing-normal {
495
+ letter-spacing: 0rem !important;
496
+ }
497
+
498
+ [data-block=list11-v4-1-1-b2] .sw-text-color-0A0A0A {
499
+ color: #0A0A0A !important;
500
+ }
501
+
502
+ [data-block=list11-v4-1-1-b2] .sw-padding-right-none {
503
+ padding-right: 0rem !important;
504
+ }
505
+
506
+ [data-block=list11-v4-1-1-b2] .sw-padding-left-none {
507
+ padding-left: 0rem !important;
508
+ }
509
+
510
+ [data-block=list11-v4-1-1-b2] .sw-font-weight-semibold {
511
+ font-weight: 600 !important;
512
+ }
513
+
514
+ h1.sw-text-color-default,
515
+ h2.sw-text-color-default,
516
+ h3.sw-font-family-default {
517
+ color: #000000;
518
+ }
519
+
520
+ h1.sw-font-weight-default,
521
+ h2.sw-font-weight-default,
522
+ h3.sw-font-family-default {
523
+ font-weight: 600;
524
+ }
525
+
526
+ h1.sw-font-family-default,
527
+ h2.sw-font-family-default,
528
+ h3.sw-font-family-default {
529
+ font-family: "IBM Plex Sans";
530
+ }
531
+
532
+ /* .bg-img {
533
+ background-image: url("./MU_LOGO_BLACK.jpg");
534
+ background-repeat: no-repeat;
535
+ position: relative;
536
+ background-position: center;
537
+ background-size: cover;
538
+ } */
539
+
540
+ .marwadi {
541
+ width: 100vw;
542
+ height: 100vh;
543
+ text-align: center;
544
+ margin: auto;
545
+ font-size: xx-large;
546
+ font-weight: bolder;
547
+ display: flex;
548
+ flex-direction: column;
549
+ justify-content: center;
550
+ align-items: center;
551
+ }
552
+
553
+ .bestie {
554
+ margin: 1rem;
555
+ display: flex;
556
+ justify-content: center;
557
+ align-items: center;
558
+ text-align: center;
559
+ width: 100vw;
560
+ }
561
+ #user_id {
562
+ width: 20%;
563
+ height: 2rem;
564
+ border-radius: 5px;
565
+ border: 1px solid #000;
566
+ padding: 0.5rem;
567
+ }
568
+ </style>
569
+
570
+ <div class="marwadi">
571
+ Welcome To AI Bestie
572
+ <div class="bestie">
573
+ <label>User Name</label>&nbsp;&nbsp;&nbsp;
574
+ <input type="text" id="user_id" />
575
+ </div>
576
+ </div>
577
+ <div id="chat-circle" class="btn btn-raised" style="">
578
+ <div id="chat-overlay"></div>
579
+ AI
580
+ </div>
581
+
582
+ <div class="chat-box" style="display: none">
583
+ <div class="chat-box-header">
584
+ AI Bestie
585
+ <span class="chat-box-toggle"><i class="material-icons">close</i></span>
586
+ </div>
587
+
588
+ <div class="chat-box-body">
589
+ <div class="chat-box-overlay"></div>
590
+ <div class="chat_logs" id="chat_logs"></div>
591
+ <!--chat-log -->
592
+ <button type="submit" class="cancel" id="cancel">
593
+ Stop Response
594
+ </button>
595
+ </div>
596
+ <div class="chat_input">
597
+ <form>
598
+ <input type="text" id="chat_input" placeholder="Send a message..." />
599
+ <button type="submit" class="chat-submit" id="submit">
600
+ <i class="material-icons">send</i>
601
+ </button>
602
+ </form>
603
+ </div>
604
+ </div>
605
+
606
+ </div>
607
+ <!-- Scripts -->
608
+ <link rel="stylesheet"
609
+ href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" />
610
+ <link rel="stylesheet"
611
+ href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-material-design/4.0.2/bootstrap-material-design.css" />
612
+ <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" />
613
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
614
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/js/bootstrap.min.js"></script>
615
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.0.0-alpha.6/css/bootstrap.min.css"></script>
616
+ <script
617
+ src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-material-design/4.0.2/bootstrap-material-design.css"></script>
618
+ <script id="rendered-js">
619
+
620
+ function chatWidget(id, name, status) {
621
+ $.ajax({
622
+ url: "/chatwidget",
623
+ type: "POST",
624
+ data: JSON.stringify({
625
+ user_id: id,
626
+ status: status,
627
+ user_name: name,
628
+ }),
629
+ headers: {
630
+ "Content-Type": "application/json",
631
+ },
632
+ success: function (data) {
633
+ if (status == "end") {
634
+ return true;
635
+ } else {
636
+ generate_message(data.response, "user");
637
+ }
638
+ },
639
+ });
640
+ }
641
+ // chatWidget(
642
+ // window.logged_in_user.airtable_record_id,
643
+ // window.logged_in_user.Name,
644
+ // "end"
645
+ // );
646
+ // setTimeout(() => {
647
+ // chatWidget(
648
+ // window.logged_in_user.airtable_record_id,
649
+ // window.logged_in_user.Name,
650
+ // "start"
651
+ // );
652
+ // }, 300);
653
+ function generate_message(msg, type) {
654
+ var str = "";
655
+ str += "<div id='cm-msg" + "' class=\"chat-msg " + type + '">';
656
+ str += ' <div class="cm-msg-text">';
657
+ var lines = msg.includes("\n") ? msg.split("\n") : [msg];
658
+
659
+ lines.forEach((line) => {
660
+ // Check if the line contains a URL
661
+ var urlRegex = /(https?:\/\/[^\s]+)/g;
662
+ var messageWithLinks = line.replace(urlRegex, '<a href="$1" target="_blank">$1</a>');
663
+
664
+ // Append the line with links to the message string
665
+ str += messageWithLinks;
666
+
667
+ // Only add <br> if not last line
668
+ if (line !== lines[lines.length - 1]) {
669
+ str += "<br>";
670
+ }
671
+ });
672
+ str += " <\/div>";
673
+ str += " <\/div>";
674
+ $(".chat_logs").append(str);
675
+
676
+ // Adjust scroll position
677
+ var chatLogs = document.getElementById("chat_logs");
678
+ chatLogs.scrollTop = chatLogs.scrollHeight;
679
+ }
680
+
681
+ function error_message(msg, type) {
682
+ // (no counter here: INDEX is only declared inside the document-ready handler below and is not in scope)
683
+ var str = "";
684
+ str += "<div id='cm-msg" + "' class=\"chat-msg " + type + '">';
685
+ str += ' <div class="cm-msg-text">';
686
+ var lines = msg.includes("\n") ? msg.split("\n") : [msg];
687
+
688
+ lines.forEach((line) => {
689
+ str += line;
690
+
691
+ // Only add <br> if not last line
692
+ if (line !== lines[lines.length - 1]) {
693
+ str += "<br>";
694
+ }
695
+ });
696
+ str += " <\/div>";
697
+ str += " <\/div>";
698
+ $("#chat_logs").append(str);
699
+ if (type == "self") {
700
+ $("#chat_input").val("");
701
+ }
702
+ $("#chat_logs")
703
+ .stop()
704
+ .animate({ scrollTop: $("#chat_logs")[0].scrollHeight }, 1000);
705
+ }
706
+ $(function () {
707
+ var INDEX = 0;
708
+ var input = document.getElementById("chat_input");
709
+ var submitBtn = document.getElementById("submit");
710
+ var cancelRequest = document.getElementById("cancel");
711
+
712
+ var userId = document.getElementById("user_id");
713
+ $("#cancel").click(function (e) {
714
+ input.disabled = false;
715
+ e.preventDefault();
716
+ submitBtn.disabled = false;
717
+ input.style.opacity = 1;
718
+ cancelRequest.style.display = 'none';
719
+ });
720
+
721
+ $("#submit").click(function (e) {
722
+ cancelRequest.style.display = 'block';
723
+ input.disabled = true;
724
+ input.style.opacity = 0.5;
725
+ submitBtn.disabled = true;
726
+ e.preventDefault();
727
+ var msg = $("#chat_input").val();
728
+ if (msg.trim() == "") {
729
+ return false;
730
+ }
731
+ generate_message(msg, "self");
732
+ input.value = "";
733
+
734
+ $.ajax({
735
+ url: "/chatwidget",
736
+ type: "POST",
737
+ data: JSON.stringify({
738
+ query: msg,
739
+ user_id: userId.value
740
+ // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
741
+ }),
742
+ headers: {
743
+ "Content-Type": "application/json",
744
+ "Access-Control-Allow-Origin": "*",
745
+ "Access-Control-Allow-Headers": "Content-Type",
746
+ },
747
+
748
+ success: function (data) {
749
+ console.log(data)
750
+ /* data.map(item => generate_message(item.text, "user")) */
751
+ generate_message(data, "user");
752
+ /* generate_message(data.response, "user"); */
753
+ input.disabled = false;
754
+ submitBtn.disabled = false;
755
+ input.style.opacity = 1;
756
+ cancelRequest.style.display = 'none';
757
+ },
758
+ error: function (data) {
759
+ error_message(
760
+ "We are sorry. we can't proceess Your Request Please Try again after some times.",
761
+ "user"
762
+ );
763
+ input.disabled = false;
764
+ submitBtn.disabled = false;
765
+ input.style.opacity = 1;
766
+ cancelRequest.style.display = 'none';
767
+ },
768
+ });
769
+ });
770
+ $(document).delegate(".chat-btn", "click", function () {
771
+ var value = $(this).attr("chat-value");
772
+ var name = $(this).html();
773
+ $("#chat_input").attr("disabled", false);
774
+ generate_message(name);
775
+ });
776
+
777
+ $("#chat-circle").click(function () {
778
+ $("#chat-circle").toggle("scale");
779
+ $(".chat-box").toggle("scale");
780
+ cancelRequest.style.display = 'block';
781
+ input.disabled = true;
782
+ $.ajax({
783
+ url: "/start-session",
784
+ type: "POST",
785
+ data: JSON.stringify({
786
+ user_id: userId.value
787
+ // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
788
+ }),
789
+ headers: {
790
+ "Content-Type": "application/json",
791
+ },
792
+ success: function (data) {
793
+
794
+ $.ajax({
795
+ url: "/chatwidget",
796
+ type: "POST",
797
+ data: JSON.stringify({
798
+ query: "START",
799
+ user_id: userId.value
800
+ // message: `${msg} | ${window.logged_in_user.airtable_record_id}`,
801
+ }),
802
+ headers: {
803
+ "Content-Type": "application/json",
804
+ "Access-Control-Allow-Origin": "*",
805
+ "Access-Control-Allow-Headers": "Content-Type",
806
+ },
807
+
808
+ success: function (data) {
809
+ console.log(data)
810
+ /* data.map(item => generate_message(item.text, "user")) */
811
+ generate_message(data, "user");
812
+ /* generate_message(data.response, "user"); */
813
+ input.disabled = false;
814
+ submitBtn.disabled = false;
815
+ input.style.opacity = 1;
816
+ cancelRequest.style.display = 'none';
817
+ },
818
+ error: function (data) {
819
+ error_message(
820
+ "We are sorry. we can't proceess Your Request Please Try again after some times.",
821
+ "user"
822
+ );
823
+ input.disabled = false;
824
+ submitBtn.disabled = false;
825
+ input.style.opacity = 1;
826
+ cancelRequest.style.display = 'none';
827
+ },
828
+ });
829
+ if (status == "end") {
830
+
831
+ return true;
832
+ } else {
833
+ console.log(data.response, "session_id");
834
+ }
835
+ },
836
+ });
837
+ console.log(userId, userId.value)
838
+ });
839
+
840
+ $(".chat-box-toggle").click(function () {
841
+ $("#chat-circle").toggle("scale");
842
+ $(".chat-box").toggle("scale");
843
+ location.reload();
844
+ });
845
+ });
846
+
847
+ </script>
848
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js"></script>
849
+ </body>
850
+
851
  </html>
templates/index.html CHANGED
@@ -1,13 +1,13 @@
1
-
2
- <!DOCTYPE html>
3
- <html lang="en">
4
- <head>
5
- <meta charset="UTF-8">
6
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
- <title>FastAPI HTML Example</title>
8
- </head>
9
- <body>
10
- <h1>Hello, FastAPI!</h1>
11
- <p>This is an example of serving HTML content with FastAPI.</p>
12
- </body>
13
- </html>
 
1
+
2
+ <!DOCTYPE html>
3
+ <html lang="en">
4
+ <head>
5
+ <meta charset="UTF-8">
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+ <title>FastAPI HTML Example</title>
8
+ </head>
9
+ <body>
10
+ <h1>Hello, FastAPI!</h1>
11
+ <p>This is an example of serving HTML content with FastAPI.</p>
12
+ </body>
13
+ </html>
tools.py CHANGED
@@ -1,350 +1,350 @@
1
- import os
2
- from langchain.agents import tool
3
- from langchain_community.chat_models import ChatOpenAI
4
- import pandas as pd
5
-
6
- from config import settings
7
-
8
-
9
-
10
- def get_embeddings(text_list):
11
- encoded_input = settings.tokenizer(
12
- text_list, padding=True, truncation=True, return_tensors="pt"
13
- )
14
- # encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
15
- encoded_input = {k: v for k, v in encoded_input.items()}
16
- model_output = settings.model(**encoded_input)
17
-
18
- cls_pool = model_output.last_hidden_state[:, 0]
19
- return cls_pool
20
-
21
- def reg(chat):
22
- question_embedding = get_embeddings([chat]).cpu().detach().numpy()
23
- scores, samples = settings.dataset.get_nearest_examples(
24
- "embeddings", question_embedding, k=5
25
- )
26
- samples_df = pd.DataFrame.from_dict(samples)
27
- print(samples_df.columns)
28
- samples_df["scores"] = scores
29
- samples_df.sort_values("scores", ascending=False, inplace=True)
30
- return samples_df[['title', 'cover_image', 'referral_link', 'category_id']]
31
-
32
-
33
- @tool("MOXICASTS-questions", return_direct=True)
34
- def moxicast(prompt: str) -> str:
35
- """this function is used when user wants to know about MOXICASTS feature.MOXICASTS is a feature of BMoxi for Advice and guidance on life topics.
36
- Args:
37
- prompt (string): user query
38
-
39
- Returns:
40
- string: answer of the query
41
- """
42
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MOXICASTS is a feature of BMoxi for Advice and guidance on life topics."
43
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
44
- # Define the system prompt
45
- system_template = """ you are going to make answer only using this context not use any other information
46
- context : {context}
47
- Input: {input}
48
- """
49
- response = llm.invoke(system_template.format(context=context, input=prompt))
50
-
51
- return response.content
52
-
53
- @tool("PEP-TALKPODS-questions", return_direct=True)
54
- def peptalks(prompt: str) -> str:
55
- """this function is used when user wants to know about PEP TALK PODS feature.PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
56
- Args:
57
- prompt (string): user query
58
-
59
- Returns:
60
- string: answer of the query
61
- """
62
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PEP TALK PODS: Quick audio pep talks for boosting mood and motivation."
63
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
64
- # Define the system prompt
65
- system_template = """ you are going to make answer only using this context not use any other information
66
- context : {context}
67
- Input: {input}
68
- """
69
- response = llm.invoke(system_template.format(context=context, input=prompt))
70
-
71
- return response.content
72
-
73
-
74
-
75
- @tool("SOCIAL-SANCTUARY-questions", return_direct=True)
76
- def sactury(prompt: str) -> str:
77
- """this function is used when user wants to know about SOCIAL SANCTUARY feature.THE SOCIAL SANCTUARY Anonymous community forum for support and sharing.
78
- Args:
79
- prompt (string): user query
80
-
81
- Returns:
82
- string: answer of the query
83
- """
84
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. THE SOCIAL SANCTUARY Anonymous community forum for support and sharing."
85
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
86
- # Define the system prompt
87
- system_template = """ you are going to make answer only using this context not use any other information
88
- context : {context}
89
- Input: {input}
90
- """
91
- response = llm.invoke(system_template.format(context=context, input=prompt))
92
-
93
- return response.content
94
-
95
-
96
- @tool("POWER-ZENS-questions", return_direct=True)
97
- def power_zens(prompt: str) -> str:
98
- """this function is used when user wants to know about POWER ZENS feature. POWER ZENS Mini meditations for emotional control.
99
-
100
- Args:
101
- prompt (string): user query
102
-
103
- Returns:
104
- string: answer of the query
105
- """
106
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. POWER ZENS Mini meditations for emotional control."
107
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
108
- # Define the system prompt
109
- system_template = """ you are going to make answer only using this context not use any other information
110
- context : {context}
111
- Input: {input}
112
- """
113
- response = llm.invoke(system_template.format(context=context, input=prompt))
114
-
115
- return response.content
116
-
117
-
118
-
119
- @tool("MY-CALENDAR-questions", return_direct=True)
120
- def my_calender(prompt: str) -> str:
121
- """this function is used when user wants to know about MY CALENDAR feature.MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
122
- Args:
123
- prompt (string): user query
124
-
125
- Returns:
126
- string: answer of the query
127
- """
128
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY CALENDAR: Visual calendar for tracking self-care rituals and moods."
129
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
130
- # Define the system prompt
131
- system_template = """ you are going to make answer only using this context not use any other information
132
- context : {context}
133
- Input: {input}
134
- """
135
- response = llm.invoke(system_template.format(context=context, input=prompt))
136
-
137
- return response.content
138
-
139
-
140
-
141
-
142
- @tool("PUSH-AFFIRMATIONS-questions", return_direct=True)
143
- def affirmations(prompt: str) -> str:
144
- """this function is used when user wants to know about PUSH AFFIRMATIONS feature.PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
145
- Args:
146
- prompt (string): user query
147
-
148
- Returns:
149
- string: answer of the query
150
- """
151
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PUSH AFFIRMATIONS: Daily text affirmations for positive thinking."
152
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
153
- # Define the system prompt
154
- system_template = """ you are going to make answer only using this context not use any other information
155
- context : {context}
156
- Input: {input}
157
- """
158
- response = llm.invoke(system_template.format(context=context, input=prompt))
159
-
160
- return response.content
161
-
162
- @tool("HOROSCOPE-questions", return_direct=True)
163
- def horoscope(prompt: str) -> str:
164
- """this function is used when user wants to know about HOROSCOPE feature.SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings.
165
- Args:
166
- prompt (string): user query
167
-
168
- Returns:
169
- string: answer of the query
170
- """
171
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings."
172
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
173
- # Define the system prompt
174
- system_template = """ you are going to make answer only using this context not use any other information
175
- context : {context}
176
- Input: {input}
177
- """
178
- response = llm.invoke(system_template.format(context=context, input=prompt))
179
-
180
- return response.content
181
-
182
-
183
-
184
- @tool("INFLUENCER-POSTS-questions", return_direct=True)
185
- def influencer_post(prompt: str) -> str:
186
- """this function is used when user wants to know about INFLUENCER POSTS feature.INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
187
- Args:
188
- prompt (string): user query
189
-
190
- Returns:
191
- string: answer of the query
192
- """
193
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon)."
194
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
195
- # Define the system prompt
196
- system_template = """ you are going to make answer only using this context not use any other information
197
- context : {context}
198
- Input: {input}
199
- """
200
- response = llm.invoke(system_template.format(context=context, input=prompt))
201
-
202
- return response.content
203
-
204
-
205
- @tool("MY-VIBECHECK-questions", return_direct=True)
206
- def my_vibecheck(prompt: str) -> str:
207
- """this function is used when user wants to know about MY VIBECHECK feature. MY VIBECHECK: Monitor and understand emotional patterns.
208
-
209
- Args:
210
- prompt (string): user query
211
-
212
- Returns:
213
- string: answer of the query
214
- """
215
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY VIBECHECK: Monitor and understand emotional patterns."
216
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
217
- # Define the system prompt
218
- system_template = """ you are going to make answer only using this context not use any other information
219
- context : {context}
220
- Input: {input}
221
- """
222
- response = llm.invoke(system_template.format(context=context, input=prompt))
223
-
224
- return response.content
225
-
226
-
227
-
228
- @tool("MY-RITUALS-questions", return_direct=True)
229
- def my_rituals(prompt: str) -> str:
230
- """this function is used when user wants to know about MY RITUALS feature.MY RITUALS: Create personalized self-care routines.
231
- Args:
232
- prompt (string): user query
233
-
234
- Returns:
235
- string: answer of the query
236
- """
237
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY RITUALS: Create personalized self-care routines."
238
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
239
- # Define the system prompt
240
- system_template = """ you are going to make answer only using this context not use any other information
241
- context : {context}
242
- Input: {input}
243
- """
244
- response = llm.invoke(system_template.format(context=context, input=prompt))
245
-
246
- return response.content
247
-
248
-
249
-
250
-
251
- @tool("MY-REWARDS-questions", return_direct=True)
252
- def my_rewards(prompt: str) -> str:
253
- """this function is used when user wants to know about MY REWARDS feature.MY REWARDS: Earn points for self-care, redeemable for gift cards.
254
- Args:
255
- prompt (string): user query
256
-
257
- Returns:
258
- string: answer of the query
259
- """
260
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY REWARDS: Earn points for self-care, redeemable for gift cards."
261
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
262
- # Define the system prompt
263
- system_template = """ you are going to make answer only using this context not use any other information
264
- context : {context}
265
- Input: {input}
266
- """
267
- response = llm.invoke(system_template.format(context=context, input=prompt))
268
-
269
- return response.content
270
-
271
-
272
- @tool("mentoring-questions", return_direct=True)
273
- def mentoring(prompt: str) -> str:
274
- """this function is used when user wants to know about 1-1 mentoring feature. 1:1 MENTORING: Personalized mentoring (coming soon).
275
-
276
- Args:
277
- prompt (string): user query
278
-
279
- Returns:
280
- string: answer of the query
281
- """
282
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. 1:1 MENTORING: Personalized mentoring (coming soon)."
283
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
284
- # Define the system prompt
285
- system_template = """ you are going to make answer only using this context not use any other information
286
- context : {context}
287
- Input: {input}
288
- """
289
- response = llm.invoke(system_template.format(context=context, input=prompt))
290
-
291
- return response.content
292
-
293
-
294
-
295
- @tool("MY-JOURNAL-questions", return_direct=True)
296
- def my_journal(prompt: str) -> str:
297
- """this function is used when user wants to know about MY JOURNAL feature.MY JOURNAL: Guided journaling exercises for self-reflection.
298
- Args:
299
- prompt (string): user query
300
-
301
- Returns:
302
- string: answer of the query
303
- """
304
- context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY JOURNAL: Guided journaling exercises for self-reflection."
305
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
306
- # Define the system prompt
307
- system_template = """ you are going to make answer only using this context not use any other information
308
- context : {context}
309
- Input: {input}
310
- """
311
- response = llm.invoke(system_template.format(context=context, input=prompt))
312
-
313
- return response.content
314
-
315
- @tool("recommandation_tool", return_direct=True)
316
- def recommand_podcast(prompt: str) -> str:
317
- """ this function is used when your best friend want any recommandation and tips. also you feel that this is the best time for any recommandation or your friend.
318
- Args:
319
- prompt (string): user query
320
-
321
- Returns:
322
- string: answer of the query
323
- """
324
- df = reg(prompt)
325
- context = """"""
326
- for index, row in df.iterrows():
327
- 'title', 'cover_image', 'referral_link', 'category_id'
328
- context+= f"Row {index + 1}: Title: {row['title']} image: {row['cover_image']} referral_link: {row['referral_link']} category_id: {row['category_id']}"
329
- llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
330
- # Define the system prompt
331
- system_template = """ you have to give the recommandation of podcast for: {input}. also you are giving referal link of podcast.
332
- you must use the context only not any other information.
333
- context : {context}
334
- """
335
- print(system_template.format(context=context, input=prompt))
336
- response = llm.invoke(system_template.format(context=context, input=prompt))
337
-
338
- return response.content
339
-
340
- @tool("set-chat-bot-name", return_direct=True)
341
- def set_chatbot_name(name: str) -> str:
342
- """ this function is used when your best friend want to give you new name.
343
- Args:
344
- name (string): new name of you.
345
-
346
- Returns:
347
- string: response after setting new name.
348
- """
349
-
350
- return "Okay, from now my name will be "+ name
 
1
+ import os
2
+ from langchain.agents import tool
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ import pandas as pd
5
+
6
+ from config import settings
7
+
8
+
9
+
10
+ def get_embeddings(text_list):
11
+ encoded_input = settings.tokenizer(
12
+ text_list, padding=True, truncation=True, return_tensors="pt"
13
+ )
14
+ # encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
15
+ encoded_input = {k: v for k, v in encoded_input.items()}
16
+ model_output = settings.model(**encoded_input)
17
+
18
+ cls_pool = model_output.last_hidden_state[:, 0]
19
+ return cls_pool
20
+
21
+ def reg(chat):
22
+ question_embedding = get_embeddings([chat]).cpu().detach().numpy()
23
+ scores, samples = settings.dataset.get_nearest_examples(
24
+ "embeddings", question_embedding, k=5
25
+ )
26
+ samples_df = pd.DataFrame.from_dict(samples)
27
+ # print(samples_df.columns)
28
+ samples_df["scores"] = scores
29
+ samples_df.sort_values("scores", ascending=False, inplace=True)
30
+ return samples_df[['title', 'cover_image', 'referral_link', 'category_id']]
31
+
32
+
33
+ @tool("MOXICASTS-questions", return_direct=True)
34
+ def moxicast(prompt: str) -> str:
35
+ """this function is used when user wants to know about MOXICASTS feature.MOXICASTS is a feature of BMoxi for Advice and guidance on life topics.
36
+ Args:
37
+ prompt (string): user query
38
+
39
+ Returns:
40
+ string: answer of the query
41
+ """
42
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MOXICASTS is a feature of BMoxi for Advice and guidance on life topics."
43
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
44
+ # Define the system prompt
45
+ system_template = """ you are going to make answer only using this context not use any other information
46
+ context : {context}
47
+ Input: {input}
48
+ """
49
+ response = llm.invoke(system_template.format(context=context, input=prompt))
50
+
51
+ return response.content
52
+
53
+ @tool("PEP-TALKPODS-questions", return_direct=True)
54
+ def peptalks(prompt: str) -> str:
55
+ """this function is used when user wants to know about PEP TALK PODS feature.PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
56
+ Args:
57
+ prompt (string): user query
58
+
59
+ Returns:
60
+ string: answer of the query
61
+ """
62
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PEP TALK PODS: Quick audio pep talks for boosting mood and motivation."
63
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
64
+ # Define the system prompt
65
+ system_template = """ you are going to make answer only using this context not use any other information
66
+ context : {context}
67
+ Input: {input}
68
+ """
69
+ response = llm.invoke(system_template.format(context=context, input=prompt))
70
+
71
+ return response.content
72
+
73
+
74
+
75
+ @tool("SOCIAL-SANCTUARY-questions", return_direct=True)
76
+ def sactury(prompt: str) -> str:
77
+ """this function is used when user wants to know about SOCIAL SANCTUARY feature.THE SOCIAL SANCTUARY Anonymous community forum for support and sharing.
78
+ Args:
79
+ prompt (string): user query
80
+
81
+ Returns:
82
+ string: answer of the query
83
+ """
84
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. THE SOCIAL SANCTUARY Anonymous community forum for support and sharing."
85
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
86
+ # Define the system prompt
87
+ system_template = """ you are going to make answer only using this context not use any other information
88
+ context : {context}
89
+ Input: {input}
90
+ """
91
+ response = llm.invoke(system_template.format(context=context, input=prompt))
92
+
93
+ return response.content
94
+
95
+
96
+ @tool("POWER-ZENS-questions", return_direct=True)
97
+ def power_zens(prompt: str) -> str:
98
+ """this function is used when user wants to know about POWER ZENS feature. POWER ZENS Mini meditations for emotional control.
99
+
100
+ Args:
101
+ prompt (string): user query
102
+
103
+ Returns:
104
+ string: answer of the query
105
+ """
106
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. POWER ZENS Mini meditations for emotional control."
107
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
108
+ # Define the system prompt
109
+ system_template = """ you are going to make answer only using this context not use any other information
110
+ context : {context}
111
+ Input: {input}
112
+ """
113
+ response = llm.invoke(system_template.format(context=context, input=prompt))
114
+
115
+ return response.content
116
+
117
+
118
+
119
+ @tool("MY-CALENDAR-questions", return_direct=True)
120
+ def my_calender(prompt: str) -> str:
121
+ """this function is used when user wants to know about MY CALENDAR feature.MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
122
+ Args:
123
+ prompt (string): user query
124
+
125
+ Returns:
126
+ string: answer of the query
127
+ """
128
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY CALENDAR: Visual calendar for tracking self-care rituals and moods."
129
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
130
+ # Define the system prompt
131
+ system_template = """ you are going to make answer only using this context not use any other information
132
+ context : {context}
133
+ Input: {input}
134
+ """
135
+ response = llm.invoke(system_template.format(context=context, input=prompt))
136
+
137
+ return response.content
138
+
139
+
140
+
141
+
142
+ @tool("PUSH-AFFIRMATIONS-questions", return_direct=True)
143
+ def affirmations(prompt: str) -> str:
144
+ """this function is used when user wants to know about PUSH AFFIRMATIONS feature.PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
145
+ Args:
146
+ prompt (string): user query
147
+
148
+ Returns:
149
+ string: answer of the query
150
+ """
151
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PUSH AFFIRMATIONS: Daily text affirmations for positive thinking."
152
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
153
+ # Define the system prompt
154
+ system_template = """ you are going to make answer only using this context not use any other information
155
+ context : {context}
156
+ Input: {input}
157
+ """
158
+ response = llm.invoke(system_template.format(context=context, input=prompt))
159
+
160
+ return response.content
161
+
162
+ @tool("HOROSCOPE-questions", return_direct=True)
163
+ def horoscope(prompt: str) -> str:
164
+ """this function is used when user wants to know about HOROSCOPE feature.SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings.
165
+ Args:
166
+ prompt (string): user query
167
+
168
+ Returns:
169
+ string: answer of the query
170
+ """
171
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings."
172
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
173
+ # Define the system prompt
174
+ system_template = """ you are going to make answer only using this context not use any other information
175
+ context : {context}
176
+ Input: {input}
177
+ """
178
+ response = llm.invoke(system_template.format(context=context, input=prompt))
179
+
180
+ return response.content
181
+
182
+
183
+
184
+ @tool("INFLUENCER-POSTS-questions", return_direct=True)
185
+ def influencer_post(prompt: str) -> str:
186
+ """this function is used when user wants to know about INFLUENCER POSTS feature.INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
187
+ Args:
188
+ prompt (string): user query
189
+
190
+ Returns:
191
+ string: answer of the query
192
+ """
193
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon)."
194
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
195
+ # Define the system prompt
196
+ system_template = """ you are going to make answer only using this context not use any other information
197
+ context : {context}
198
+ Input: {input}
199
+ """
200
+ response = llm.invoke(system_template.format(context=context, input=prompt))
201
+
202
+ return response.content
203
+
204
+
205
+ @tool("MY-VIBECHECK-questions", return_direct=True)
206
+ def my_vibecheck(prompt: str) -> str:
207
+ """this function is used when user wants to know about MY VIBECHECK feature. MY VIBECHECK: Monitor and understand emotional patterns.
208
+
209
+ Args:
210
+ prompt (string): user query
211
+
212
+ Returns:
213
+ string: answer of the query
214
+ """
215
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY VIBECHECK: Monitor and understand emotional patterns."
216
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
217
+ # Define the system prompt
218
+ system_template = """ you are going to make answer only using this context not use any other information
219
+ context : {context}
220
+ Input: {input}
221
+ """
222
+ response = llm.invoke(system_template.format(context=context, input=prompt))
223
+
224
+ return response.content
225
+
226
+
227
+
228
+ @tool("MY-RITUALS-questions", return_direct=True)
229
+ def my_rituals(prompt: str) -> str:
230
+ """this function is used when user wants to know about MY RITUALS feature.MY RITUALS: Create personalized self-care routines.
231
+ Args:
232
+ prompt (string): user query
233
+
234
+ Returns:
235
+ string: answer of the query
236
+ """
237
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY RITUALS: Create personalized self-care routines."
238
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
239
+ # Define the system prompt
240
+ system_template = """ you are going to make answer only using this context not use any other information
241
+ context : {context}
242
+ Input: {input}
243
+ """
244
+ response = llm.invoke(system_template.format(context=context, input=prompt))
245
+
246
+ return response.content
247
+
248
+
249
+
250
+
251
+ @tool("MY-REWARDS-questions", return_direct=True)
252
+ def my_rewards(prompt: str) -> str:
253
+ """this function is used when user wants to know about MY REWARDS feature.MY REWARDS: Earn points for self-care, redeemable for gift cards.
254
+ Args:
255
+ prompt (string): user query
256
+
257
+ Returns:
258
+ string: answer of the query
259
+ """
260
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY REWARDS: Earn points for self-care, redeemable for gift cards."
261
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
262
+ # Define the system prompt
263
+ system_template = """ you are going to make answer only using this context not use any other information
264
+ context : {context}
265
+ Input: {input}
266
+ """
267
+ response = llm.invoke(system_template.format(context=context, input=prompt))
268
+
269
+ return response.content
270
+
271
+
272
+ @tool("mentoring-questions", return_direct=True)
273
+ def mentoring(prompt: str) -> str:
274
+ """this function is used when user wants to know about 1-1 mentoring feature. 1:1 MENTORING: Personalized mentoring (coming soon).
275
+
276
+ Args:
277
+ prompt (string): user query
278
+
279
+ Returns:
280
+ string: answer of the query
281
+ """
282
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. 1:1 MENTORING: Personalized mentoring (coming soon)."
283
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
284
+ # Define the system prompt
285
+ system_template = """ you are going to make answer only using this context not use any other information
286
+ context : {context}
287
+ Input: {input}
288
+ """
289
+ response = llm.invoke(system_template.format(context=context, input=prompt))
290
+
291
+ return response.content
292
+
293
+
294
+
295
+ @tool("MY-JOURNAL-questions", return_direct=True)
296
+ def my_journal(prompt: str) -> str:
297
+ """this function is used when user wants to know about MY JOURNAL feature.MY JOURNAL: Guided journaling exercises for self-reflection.
298
+ Args:
299
+ prompt (string): user query
300
+
301
+ Returns:
302
+ string: answer of the query
303
+ """
304
+ context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY JOURNAL: Guided journaling exercises for self-reflection."
305
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
306
+ # Define the system prompt
307
+ system_template = """ you are going to make answer only using this context not use any other information
308
+ context : {context}
309
+ Input: {input}
310
+ """
311
+ response = llm.invoke(system_template.format(context=context, input=prompt))
312
+
313
+ return response.content
314
+
315
+ @tool("recommandation_tool", return_direct=True)
316
+ def recommand_podcast(prompt: str) -> str:
317
+ """ this function is used when your best friend want any recommandation and tips. also you feel that this is the best time for any recommandation or your friend.
318
+ Args:
319
+ prompt (string): user query
320
+
321
+ Returns:
322
+ string: answer of the query
323
+ """
324
+ df = reg(prompt)
325
+ context = """"""
326
+ for index, row in df.iterrows():
327
+ # columns used below: 'title', 'cover_image', 'referral_link', 'category_id'
328
+ context+= f"Row {index + 1}: Title: {row['title']} image: {row['cover_image']} referral_link: {row['referral_link']} category_id: {row['category_id']}"
329
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
330
+ # Define the system prompt
331
+ system_template = """ you have to give the recommandation of podcast for: {input}. also you are giving referal link of podcast.
332
+ you must use the context only not any other information.
333
+ context : {context}
334
+ """
335
+ # print(system_template.format(context=context, input=prompt))
336
+ response = llm.invoke(system_template.format(context=context, input=prompt))
337
+
338
+ return response.content
339
+
340
+ @tool("set-chat-bot-name", return_direct=True)
341
+ def set_chatbot_name(name: str) -> str:
342
+ """ this function is used when your best friend want to give you new name.
343
+ Args:
344
+ name (string): new name of you.
345
+
346
+ Returns:
347
+ string: response after setting new name.
348
+ """
349
+
350
+ return "Okay, from now my name will be "+ name
utils.py CHANGED
@@ -1,174 +1,103 @@
1
- import json
2
- from transformers import AutoTokenizer, AutoModel
3
- from langchain_community.chat_models import ChatOpenAI
4
- import pandas as pd
5
- from config import settings
6
- from langchain_core.utils.function_calling import convert_to_openai_function
7
- from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
8
- from langchain.memory import ConversationBufferWindowMemory
9
- from langchain.schema.runnable import RunnablePassthrough
10
- from langchain.agents.format_scratchpad import format_to_openai_functions
11
- from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
12
- from langchain.agents import AgentExecutor
13
- from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
14
-
15
- from tools import moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name
16
-
17
-
18
- def get_last_session(user_id="user_1"):
19
- mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
20
- settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
21
-
22
- sessions_cursor = mongodb_chatbot_message_collection.find_one(
23
- {"user_id": user_id})
24
-
25
- print(sessions_cursor)
26
- sessions_list = sessions_cursor['session_id']
27
-
28
- second_last_session_id = None
29
- if len(sessions_list) >= 2:
30
- second_last_session_id = sessions_list[-2]
31
-
32
- return {"last_session_id": sessions_list[-1], "second_last_session_id": second_last_session_id if second_last_session_id else None}
33
-
34
-
35
- def get_mood_summary(data='''"35","27","mood_tracker","[{""question_id"":1,""question"":""my vibe rn is\u2026"",""answer"":[""Sad""],""time"":""5:12 PM""},{""question_id"":2,""question"":""I feel this way bc of\u2026 "",""answer"":[""SCHOOL""],""time"":""5:12 PM""}]","2022-11-02 17:12:42","2024-03-28 07:27:13"'''):
36
- system_prompt = """You are an descripting assistant that provides the breif description of the user data which is related to their mood tracking activity. Properly descibe the reason for their mood.Avoid times and dates in description
37
- Here is the user data: {data}"""
38
-
39
- llm = ChatOpenAI(model=settings.OPENAI_MODEL,
40
- openai_api_key=settings.OPENAI_KEY, temperature=0.7)
41
- return llm.invoke(system_prompt.format(data=data)).content
42
-
43
-
44
- def get_chat_history(session_id="bmoxinew"):
45
- # Set up MongoDB for storing chat history
46
- chat_history = MongoDBChatMessageHistory(
47
- connection_string=settings.MONGODB_CONNECTION_STRING,
48
- database_name=settings.MONGODB_DB_NAME, # Specify the database name here
49
- collection_name=settings.MONGODB_DB_CHAT_COLLECTION_NAME,
50
- session_id=session_id,
51
- )
52
-
53
- return chat_history
54
-
55
-
56
- def deanonymizer(input, anonymizer):
57
- input = anonymizer.deanonymize(input)
58
- map = anonymizer.deanonymizer_mapping
59
- if map:
60
- for k in map["PERSON"]:
61
- names = k.split(" ")
62
- for i in names:
63
- input = input.replace(i, map["PERSON"][k])
64
- return input
65
-
66
-
67
- def get_chat_bot_name(user_id="user_1"):
68
- print(settings.MONGODB_CONNECTION_STRING)
69
- print(settings.mongodb_chatbot_name_collection)
70
- result = settings.mongodb_chatbot_name_collection.find_one(
71
- {"user_id": user_id})
72
-
73
- print("CHATBOT RESULT", result, type(result))
74
- if result:
75
- print(result)
76
- return result['chat_bot_name']
77
- return settings.CHATBOT_NAME
78
-
79
-
80
- def get_last_session_summary(last_session_id, second_last_session_id):
81
-
82
- mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
83
- settings.MONGODB_DB_CHAT_COLLECTION_NAME)
84
-
85
- collection_count = mongodb_chatbot_message_collection.count_documents({"SessionId": last_session_id})
86
- print("******************************** data********************888")
87
- print(collection_count)
88
- print(last_session_id)
89
- print("*********************************")
90
- if collection_count <=2:
91
- sessions_cursor = mongodb_chatbot_message_collection.find({"SessionId": second_last_session_id}) # Sort by timestamp descending and limit to 2 results
92
-
93
- print(sessions_cursor)
94
- sessions_list = list(sessions_cursor)
95
- print(sessions_list)
96
-
97
- conversation = """"""
98
- for document in sessions_list:
99
- print("MY document")
100
- print(document)
101
- if "History" in document:
102
- history = json.loads(document['History'])
103
- print(history)
104
- print(history['type'])
105
- print(history['data'])
106
- print(history['data']['content'])
107
- conversation += f"""{history['type']}: {history['data']['content']}\n"""
108
-
109
- print(conversation)
110
- system_prompt = """You are an descripting assistant that provides that analyze user conversation with AI bot and gives problem user was facing and weather that problem was solved or not and give response in below format.
111
-
112
- conversation: {conversation}
113
-
114
- problem:
115
- is_problem_solved: YES/NO
116
- """
117
-
118
- llm = ChatOpenAI(model=settings.OPENAI_MODEL,
119
- openai_api_key=settings.OPENAI_KEY, temperature=0.7)
120
-
121
- response = llm.invoke(system_prompt.format(conversation=conversation)).content
122
- print("********************************* PREVIOUS PROBLEM *******************************************")
123
- print(response)
124
- return response
125
-
126
- else:
127
- return ""
128
-
129
- def set_chat_bot_name(name, user_id):
130
- # Insert document into collection
131
- insert_result = settings.mongodb_chatbot_name_collection.update_one({"user_id": user_id}, { "$set": { "chat_bot_name": name } }, upsert=True)
132
- print("done successfully...")
133
- return name
134
-
135
- def create_agent(user_id):
136
- print("get user Id**********************",user_id)
137
- tools = [moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name]
138
- # tools = [moxicast]
139
-
140
- functions = [convert_to_openai_function(f) for f in tools]
141
- model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
142
- openai_api_key=settings.OPENAI_KEY, frequency_penalty= 1, temperature=settings.TEMPERATURE).bind(functions=functions)
143
-
144
- chat_bot_name = get_chat_bot_name(user_id)
145
-
146
- print("CHABT NAME", chat_bot_name)
147
- mood_summary = get_mood_summary()
148
-
149
- previous_session_id = get_last_session(user_id)
150
- print(previous_session_id)
151
- prevous_problem_summary = None
152
- if previous_session_id['second_last_session_id']:
153
- prevous_problem_summary = get_last_session_summary(previous_session_id['last_session_id'], previous_session_id['second_last_session_id'])
154
-
155
- print("**************************************** SUMMARY ***********************************************")
156
- print(prevous_problem_summary)
157
-
158
- prompt = ChatPromptTemplate.from_messages([("system", settings.SYSTEM_PROMPT.format(name = chat_bot_name, mood="", previous_summary=prevous_problem_summary)),
159
- MessagesPlaceholder(variable_name="chat_history"), ("user", "{input}"),
160
- MessagesPlaceholder(variable_name="agent_scratchpad")])
161
-
162
-
163
-
164
- memory = ConversationBufferWindowMemory(memory_key="chat_history", chat_memory=get_chat_history(
165
- previous_session_id['last_session_id']), return_messages=True, k=5)
166
-
167
- print("memory created")
168
-
169
- chain = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])) | prompt | model | OpenAIFunctionsAgentOutputParser()
170
-
171
- agent_executor = AgentExecutor(
172
- agent=chain, tools=tools, memory=memory, verbose=True)
173
-
174
- return agent_executor
 
1
+ import json
2
+ import time
3
+ from transformers import AutoTokenizer, AutoModel
4
+ from langchain_community.chat_models import ChatOpenAI
5
+ import pandas as pd
6
+ from config import settings
7
+ from langchain_core.utils.function_calling import convert_to_openai_function
8
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
9
+ from langchain.memory import ConversationBufferWindowMemory
10
+ from langchain.schema.runnable import RunnablePassthrough
11
+ from langchain.agents.format_scratchpad import format_to_openai_functions
12
+ from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
13
+ from langchain.agents import AgentExecutor
14
+
15
+ from tools import moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name
16
+
17
+ from database_functions import get_chat_bot_name,get_chat_history, get_last_conversion, get_last_session, get_mood_data,use_tools
18
+
19
+ def get_mood_summary(user_id):
20
+
21
+ data = get_mood_data(user_id)
22
+ system_prompt = """You are an descripting assistant that provides the breif description of the user data which is related to their mood tracking activity. Properly descibe the reason for their mood.Avoid times and dates in description
23
+ Here is the user data: {data}"""
24
+
25
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL,
26
+ openai_api_key=settings.OPENAI_KEY, temperature=0.7)
27
+ return llm.invoke(system_prompt.format(data=data)).content
28
+
29
+
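A brief usage sketch for the helper above (the user id is a placeholder; get_mood_data, imported from database_functions, supplies the raw mood-tracker rows):

mood_text = get_mood_summary("user_1")   # "user_1" is a placeholder id
print(mood_text)                         # short prose summary of the tracked moods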
30
+ def deanonymizer(input, anonymizer):
31
+ input = anonymizer.deanonymize(input)
32
+ map = anonymizer.deanonymizer_mapping
33
+ if map:
34
+ for k in map["PERSON"]:
35
+ names = k.split(" ")
36
+ for i in names:
37
+ input = input.replace(i, map["PERSON"][k])
38
+ return input
39
+
40
+
41
+
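A hedged sketch of the anonymizer this helper is written against (assumed to be the reversible Presidio anonymizer from langchain_experimental, which provides the deanonymize() method and deanonymizer_mapping dict used above):

from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
masked = anonymizer.anonymize("My friend Jane Doe had a rough day.")
# The masked text is what goes to the LLM; its reply may echo the placeholder name,
# so the reply is passed back through deanonymizer to restore the real one.
restored = deanonymizer(masked, anonymizer)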
42
+ def get_last_session_summary(last_session_id, second_last_session_id):
43
+
44
+ conversation = get_last_conversion(last_session_id,second_last_session_id)
45
+ if conversation:
46
+ system_prompt = """ summarize whole conversation in at max 2 sentence.
47
+ conversation: {conversation}
48
+ summary:
49
+ """
50
+
51
+ llm = ChatOpenAI(model=settings.OPENAI_MODEL,
52
+ openai_api_key=settings.OPENAI_KEY, temperature=0.7)
53
+
54
+ response = llm.invoke(system_prompt.format(conversation=conversation)).content
55
+ # print("********************************* PREVIOUS PROBLEM *******************************************")
56
+ # print(response)
57
+ return response
58
+ else:
59
+ return ""
60
+
61
+ def create_agent(user_id):
62
+ # print("get user Id**********************",user_id)
63
+
64
+ previous_session_id = get_last_session(user_id)
65
+ # print(previous_session_id)
66
+ if use_tools(previous_session_id["last_session_id"]):
67
+ tools = [moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name]
68
+ else:
69
+ tools = [set_chatbot_name]
70
+
71
+ functions = [convert_to_openai_function(f) for f in tools]
72
+ model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
73
+ openai_api_key=settings.OPENAI_KEY, frequency_penalty= 1, temperature=settings.TEMPERATURE).bind(functions=functions)
74
+
75
+ chat_bot_name = get_chat_bot_name(user_id)
76
+
77
+ # print("CHABT NAME", chat_bot_name)
78
+ start = time.time()
79
+ mood_summary = get_mood_summary(user_id)
80
+ prevous_problem_summary = None
81
+ if previous_session_id['second_last_session_id']:
82
+ prevous_problem_summary = get_last_session_summary(previous_session_id['last_session_id'], previous_session_id['second_last_session_id'])
83
+
84
+ # print("**************************************** SUMMARY ***********************************************")
85
+ # print(prevous_problem_summary)
86
+ print("time require for mood summary: ",time.time()-start)
87
+ prompt = ChatPromptTemplate.from_messages([("system", settings.SYSTEM_PROMPT.format(name = chat_bot_name, mood="", previous_summary=prevous_problem_summary)),
88
+ MessagesPlaceholder(variable_name="chat_history"), ("user", "{input}"),
89
+ MessagesPlaceholder(variable_name="agent_scratchpad")])
90
+
91
+
92
+
93
+ memory = ConversationBufferWindowMemory(memory_key="chat_history", chat_memory=get_chat_history(
94
+ previous_session_id['last_session_id']), return_messages=True, k=5)
95
+
96
+ # print("memory created")
97
+
98
+ chain = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])) | prompt | model | OpenAIFunctionsAgentOutputParser()
99
+
100
+ agent_executor = AgentExecutor(
101
+ agent=chain, tools=tools, memory=memory, verbose=True)
102
+
103
+ return agent_executor
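
A hedged sketch of the assumed calling pattern for the executor returned above (user id and message are placeholders):

agent = create_agent("user_1")                                    # placeholder user id
result = agent.invoke({"input": "I had a rough day at school."})  # one chat turn
print(result["output"])  # reply text; history is persisted by the Mongo-backed memory from get_chat_history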