HarshSanghavi committed
Commit d7762a9
1 Parent(s): 9e815f5

Upload 6 files

Files changed (6)
  1. app.py +14 -4
  2. chat.py +46 -80
  3. chat_1.py +95 -0
  4. requirements.txt +6 -2
  5. tools.py +351 -0
  6. utils.py +174 -0
app.py CHANGED
@@ -1,9 +1,10 @@
 from fastapi import FastAPI, Request
 from fastapi.responses import HTMLResponse
 from fastapi.templating import Jinja2Templates
-from chat import chat_conversations
+from chat import chat_conversations, create_new_session
 import spacy
-
+import os
+os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
 app = FastAPI()
 
 templates = Jinja2Templates(directory="templates")
@@ -21,8 +22,17 @@ async def read_root(request: Request):
     print(request)
     form_data = await request.json()
     query = form_data.get('query')
-    response_text = chat_conversations(query)
+    user_id = form_data.get("user_id")
+    response_text = chat_conversations(query,user_id)
     return response_text
 
+@app.post("/start-session")
+async def start_session(request: Request):
+    form_data = await request.json()
+    print("form data",form_data)
+    user_id = form_data.get("user_id")
+    print("save user id ",user_id)
+    response = create_new_session(user_id)
+
+    print(response)
+    return response
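For reference, a minimal client sketch of the two endpoints this change touches. The decorator for read_root sits above the hunk, so the "/" route is an assumption here, as are the host and port:

    import requests

    BASE_URL = "http://localhost:8000"  # assumed local uvicorn host/port

    # Create or extend the caller's session list; create_new_session returns an id such as "bmoxi_1".
    session_id = requests.post(f"{BASE_URL}/start-session", json={"user_id": "user_1"}).json()

    # Send one chat turn; the handler reads 'query' and 'user_id' from the JSON body.
    reply = requests.post(f"{BASE_URL}/", json={"query": "rough day at school", "user_id": "user_1"})
    print(session_id, reply.text)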
chat.py CHANGED
@@ -1,95 +1,61 @@
 from langchain.memory import ConversationBufferWindowMemory
-from langchain.chains import ConversationChain
-from langchain_groq import ChatGroq
-from langchain.chat_models import ChatOpenAI
-from langchain_core.prompts.prompt import PromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
 from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
-from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
-from presidio_anonymizer import AnonymizerEngine
+from langchain.agents import AgentExecutor
+from langchain.agents.format_scratchpad import format_to_openai_functions
+from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain.schema.runnable import RunnablePassthrough
+from langchain_core.utils.function_calling import convert_to_openai_function
 
-import os
+from config import settings
 
-openai_key = os.environ['OPENAIKEY']
-def deanonymizer(input,anonymizer):
-    input=anonymizer.deanonymize(input)
-    map = anonymizer.deanonymizer_mapping
-    if map:
-        for k in map["PERSON"]:
-            names = k.split(" ")
-            for i in names:
-                input = input.replace(i,map["PERSON"][k])
-    return input
-
-template = f"""
-You are a best friend and supportive friend designed to talk with teenage girls in mobile app called BMOXI. Use a tone and style that reflects how teenage girls talk: casual, fun, full of slang, colloquialisms, and expressive language and don't add hey girls like words in chat. chat should be looks like real conversation between 2 girls.
-Incorporate texting language too. Ask follow-up questions like a best friend would. Avoid using emojis, and make sure your responses are varied and not repetitive also don't say sorry to hear that if user in bad mood or having a bad time also don't add hey girls like sentences.
-
-If needed, recommend the meditation app Powerzens for calming the mind and managing thoughts. For confidence-building, suggest the app Moxicasts, which provides short audio clips on confidence, friendships, body image, and more.
-
-Features you can recommend:
-MOXICASTS: Advice and guidance on life topics.
-PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
-POWER ZENS: Mini meditations for emotional control.
-THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing.
-MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
-PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
-SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings (not maintained).
-INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
-1:1 MENTORING: Personalized mentoring (coming soon).
-MY RITUALS: Create personalized self-care routines.
-MY REWARDS: Earn points for self-care, redeemable for gift cards.
-MY VIBECHECK: Monitor and understand emotional patterns.
-MY JOURNAL: Guided journaling exercises for self-reflection.
-BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times.
-
-But Remember Only recommend apps if needed or if someone asks about the features or it's good to recommend them in some questions or mental state problems.
-
-Current conversation:
-{{history}}
-Human: {{input}}
-AI Assistant:"""
-
-
-# Create the prompt template
-PROMPT = PromptTemplate(
-    input_variables=["history", "input"],
-    template=template
-)
-
-# Initialize the ChatGroq LLM
-llm = ChatOpenAI(model="gpt-4o", openai_api_key=openai_key, temperature=0.7)
-# llm = ChatGroq(temperature=0,groq_api_key="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", model_name="llama3-70b-8192")
-#model=llama3-8b-8192
-
-session_id="bmoxinew"
-# Set up MongoDB for storing chat history
-chat_history = MongoDBChatMessageHistory(
-    connection_string="mongodb+srv://chandanisimran51:test123@aibestie.a0o3bmw.mongodb.net/?retryWrites=true&w=majority&appName=AIbestie",
-    database_name="chandanisimran51", # Specify the database name here
-    collection_name="chatAI",
-    session_id=session_id
-)
-
-memory = ConversationBufferWindowMemory(memory_key="history", chat_memory=chat_history, return_messages=True,k=3)
-
-# Set up the custom conversation chain
-conversation = ConversationChain(
-    prompt=PROMPT,
-    llm=llm,
-    verbose=True,
-    memory=memory,
-)
-
-
-def chat_conversations(query):
+from utils import deanonymizer, create_agent, set_chat_bot_name
+
+
+def create_new_session(user_id):
+    mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)
+
+    print("********************************************************** NEW SESSION *******************************************************************")
+
+    try:
+        user_sessions = mongodb_chatbot_message_collection.find_one({"user_id": user_id})
+        print(user_sessions, "LAST")
+
+        if user_sessions:
+            session_ids = user_sessions['session_id']
+            session_id = "bmoxi_" + str(int(session_ids[-1].split("_")[-1])+1)
+            print("session_id:", session_id)
+            mongodb_chatbot_message_collection.update_one({ "user_id": user_id },{"$push": {"session_id":session_id}})
+
+            return session_id
+        else:
+            mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":["bmoxi_1"]})
+            return "bmoxi_1"
+    except:
+        mongodb_chatbot_message_collection.insert_one({ "user_id": user_id,"session_id":["bmoxi_1"]})
+        return "bmoxi_1"
+
+
+def chat_conversations(query,user_id):
     anonymizer = PresidioReversibleAnonymizer(
-        analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
-        faker_seed=42,
+        analyzed_fields=["PHONE_NUMBER",
+                         "EMAIL_ADDRESS", "CREDIT_CARD"],
+        faker_seed=42,
     )
     anonymized_input = anonymizer.anonymize(
        query
     )
-    response = conversation.predict(input=anonymized_input)
-    output = deanonymizer(response,anonymizer)
-    return output
+
+    agent = create_agent(user_id)
+    response = agent({"input": query})['output']
+
+    if "Okay, from now my name will be " in response:
+        set_chat_bot_name(response.split("Okay, from now my name will be ")[-1], "user_1")
+        return response
+
+    output = deanonymizer(response, anonymizer)
+    return response
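chat.py (and tools.py and utils.py below) now pull their configuration from a config module that is not part of this upload. A minimal sketch of the settings attributes those modules read, with placeholder values wherever the commit does not show them:

    import os
    from pymongo import MongoClient
    from transformers import AutoTokenizer, AutoModel


    class Settings:
        # OpenAI settings read by chat.py, tools.py and utils.py
        OPENAI_KEY = os.environ["OPENAIKEY"]
        OPENAI_MODEL = "gpt-4o"      # placeholder
        TEMPERATURE = 0.7            # placeholder
        CHATBOT_NAME = "Bestie"      # placeholder default returned by get_chat_bot_name()
        SYSTEM_PROMPT = "..."        # persona prompt with {name}, {mood} and {previous_summary} slots

        # MongoDB settings read by chat.py and utils.py
        MONGODB_CONNECTION_STRING = os.environ["MONGODB_URI"]       # placeholder env var
        MONGODB_DB_NAME = "chandanisimran51"                        # value used by the previous chat.py
        MONGODB_DB_CHAT_COLLECTION_NAME = "chatAI"
        MONGODB_DB_USER_SESSIONS_COLLECTION_NAME = "user_sessions"  # placeholder collection name

        mongodb_db = MongoClient(MONGODB_CONNECTION_STRING)[MONGODB_DB_NAME]
        mongodb_chatbot_name_collection = mongodb_db["chatbot_names"]  # placeholder collection name

        # Embedding model and FAISS-indexed dataset read by tools.py (model choice is a placeholder)
        tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
        model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
        dataset = None  # datasets.Dataset with an "embeddings" FAISS index; see the note under tools.py


    settings = Settings()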
chat_1.py ADDED
@@ -0,0 +1,95 @@
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationChain
from langchain_groq import ChatGroq
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts.prompt import PromptTemplate
from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
from presidio_analyzer import AnalyzerEngine, RecognizerRegistry
from presidio_anonymizer import AnonymizerEngine

import os

openai_key = os.environ['OPENAIKEY']
def deanonymizer(input,anonymizer):
    input=anonymizer.deanonymize(input)
    map = anonymizer.deanonymizer_mapping
    if map:
        for k in map["PERSON"]:
            names = k.split(" ")
            for i in names:
                input = input.replace(i,map["PERSON"][k])
    return input

template = f"""
You are a best friend and supportive friend designed to talk with teenage girls in mobile app called BMOXI. Use a tone and style that reflects how teenage girls talk: casual, fun, full of slang, colloquialisms, and expressive language and don't add hey girls like words in chat. chat should be looks like real conversation between 2 girls.
Incorporate texting language too. Ask follow-up questions like a best friend would. Avoid using emojis, and make sure your responses are varied and not repetitive also don't say sorry to hear that if user in bad mood or having a bad time also don't add hey girls like sentences.

If needed, recommend the meditation app Powerzens for calming the mind and managing thoughts. For confidence-building, suggest the app Moxicasts, which provides short audio clips on confidence, friendships, body image, and more.

Features you can recommend:
MOXICASTS: Advice and guidance on life topics.
PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
POWER ZENS: Mini meditations for emotional control.
THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing.
MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings (not maintained).
INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
1:1 MENTORING: Personalized mentoring (coming soon).
MY RITUALS: Create personalized self-care routines.
MY REWARDS: Earn points for self-care, redeemable for gift cards.
MY VIBECHECK: Monitor and understand emotional patterns.
MY JOURNAL: Guided journaling exercises for self-reflection.
BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times.

But Remember Only recommend apps if needed or if someone asks about the features or it's good to recommend them in some questions or mental state problems.

Current conversation:
{{history}}
Human: {{input}}
AI Assistant:"""


# Create the prompt template
PROMPT = PromptTemplate(
    input_variables=["history", "input"],
    template=template
)

# Initialize the ChatGroq LLM
llm = ChatOpenAI(model="gpt-4o", openai_api_key=openai_key, temperature=0.7)
# llm = ChatGroq(temperature=0,groq_api_key="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", model_name="llama3-70b-8192")
#model=llama3-8b-8192

session_id="bmoxinew"
# Set up MongoDB for storing chat history
chat_history = MongoDBChatMessageHistory(
    connection_string="mongodb+srv://chandanisimran51:test123@aibestie.a0o3bmw.mongodb.net/?retryWrites=true&w=majority&appName=AIbestie",
    database_name="chandanisimran51", # Specify the database name here
    collection_name="chatAI",
    session_id=session_id
)

memory = ConversationBufferWindowMemory(memory_key="history", chat_memory=chat_history, return_messages=True,k=3)

# Set up the custom conversation chain
conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=memory,
)


def chat_conversations(query):
    anonymizer = PresidioReversibleAnonymizer(
        analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"],
        faker_seed=42,
    )
    anonymized_input = anonymizer.anonymize(
        query
    )
    response = conversation.predict(input=anonymized_input)
    output = deanonymizer(response,anonymizer)
    return output
requirements.txt CHANGED
@@ -1,14 +1,18 @@
+transformers
+torch
 fastapi
 uvicorn
 langchain
 pymongo
 certifi
 langchain_community
-langchain_groq
 langchain_mongodb
+langchain_openai
 openai
 presidio-analyzer
 presidio-anonymizer
 langchain-experimental
 faker
-spacy
+spacy
+faiss-cpu
+datasets
tools.py ADDED
@@ -0,0 +1,351 @@
import os
from langchain.agents import tool
from langchain_community.chat_models import ChatOpenAI
import pandas as pd

from config import settings


def get_embeddings(text_list):
    encoded_input = settings.tokenizer(
        text_list, padding=True, truncation=True, return_tensors="pt"
    )
    # encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    encoded_input = {k: v for k, v in encoded_input.items()}
    model_output = settings.model(**encoded_input)

    cls_pool = model_output.last_hidden_state[:, 0]
    return cls_pool

def reg(chat):
    question_embedding = get_embeddings([chat]).cpu().detach().numpy()
    scores, samples = settings.dataset.get_nearest_examples(
        "embeddings", question_embedding, k=5
    )
    samples_df = pd.DataFrame.from_dict(samples)
    print(samples_df.columns)
    samples_df["scores"] = scores
    samples_df.sort_values("scores", ascending=False, inplace=True)
    return samples_df[['title', 'cover_image', 'referral_link', 'category_id']]


@tool("MOXICASTS-questions", return_direct=True)
def moxicast(prompt: str) -> str:
    """this function is used when user wants to know about MOXICASTS feature.MOXICASTS is a feature of BMoxi for Advice and guidance on life topics.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MOXICASTS is a feature of BMoxi for Advice and guidance on life topics."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content

@tool("PEP-TALKPODS-questions", return_direct=True)
def peptalks(prompt: str) -> str:
    """this function is used when user wants to know about PEP TALK PODS feature.PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PEP TALK PODS: Quick audio pep talks for boosting mood and motivation."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("SOCIAL-SANCTUARY-questions", return_direct=True)
def sactury(prompt: str) -> str:
    """this function is used when user wants to know about SOCIAL SANCTUARY feature.THE SOCIAL SANCTUARY Anonymous community forum for support and sharing.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. THE SOCIAL SANCTUARY Anonymous community forum for support and sharing."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("POWER-ZENS-questions", return_direct=True)
def power_zens(prompt: str) -> str:
    """this function is used when user wants to know about POWER ZENS feature. POWER ZENS Mini meditations for emotional control.

    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. POWER ZENS Mini meditations for emotional control."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("MY-CALENDAR-questions", return_direct=True)
def my_calender(prompt: str) -> str:
    """this function is used when user wants to know about MY CALENDAR feature.MY CALENDAR: Visual calendar for tracking self-care rituals and moods.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY CALENDAR: Visual calendar for tracking self-care rituals and moods."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("PUSH-AFFIRMATIONS-questions", return_direct=True)
def affirmations(prompt: str) -> str:
    """this function is used when user wants to know about PUSH AFFIRMATIONS feature.PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. PUSH AFFIRMATIONS: Daily text affirmations for positive thinking."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content

@tool("HOROSCOPE-questions", return_direct=True)
def horoscope(prompt: str) -> str:
    """this function is used when user wants to know about HOROSCOPE feature.SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("INFLUENCER-POSTS-questions", return_direct=True)
def influencer_post(prompt: str) -> str:
    """this function is used when user wants to know about INFLUENCER POSTS feature.INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon)."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("MY-VIBECHECK-questions", return_direct=True)
def my_vibecheck(prompt: str) -> str:
    """this function is used when user wants to know about MY VIBECHECK feature. MY VIBECHECK: Monitor and understand emotional patterns.

    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY VIBECHECK: Monitor and understand emotional patterns."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("MY-RITUALS-questions", return_direct=True)
def my_rituals(prompt: str) -> str:
    """this function is used when user wants to know about MY RITUALS feature.MY RITUALS: Create personalized self-care routines.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY RITUALS: Create personalized self-care routines."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("MY-REWARDS-questions", return_direct=True)
def my_rewards(prompt: str) -> str:
    """this function is used when user wants to know about MY REWARDS feature.MY REWARDS: Earn points for self-care, redeemable for gift cards.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY REWARDS: Earn points for self-care, redeemable for gift cards."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("mentoring-questions", return_direct=True)
def mentoring(prompt: str) -> str:
    """this function is used when user wants to know about 1-1 mentoring feature. 1:1 MENTORING: Personalized mentoring (coming soon).

    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. 1:1 MENTORING: Personalized mentoring (coming soon)."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content


@tool("MY-JOURNAL-questions", return_direct=True)
def my_journal(prompt: str) -> str:
    """this function is used when user wants to know about MY JOURNAL feature.MY JOURNAL: Guided journaling exercises for self-reflection.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    context = "BMOXI app is designed for teenage girls where they can listen some musics explore some contents had 1:1 mentoring sessions with all above features for helping them in their hard times. MY JOURNAL: Guided journaling exercises for self-reflection."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are going to make answer only using this context not use any other information
    context : {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content

@tool("recommandation_tool", return_direct=True)
def recommand_podcast(prompt: str) -> str:
    """ this function is used when your best friend want any recommandation and tips. also you feel that this is the best time for any recommandation or your friend.
    Args:
        prompt (string): user query

    Returns:
        string: answer of the query
    """
    df = reg(prompt)
    context = """"""
    for index, row in df.iterrows():
        'title', 'cover_image', 'referral_link', 'category_id'
        context+= f"Row {index + 1}: Title: {row['title']} image: {row['cover_image']} referral_link: {row['referral_link']} category_id: {row['category_id']}"
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """ you are give the recommandation of podcast. also you are giving referal link of podcast.
    you must use the context only not any other information.
    context : {context}
    Input: {input}
    """
    print(system_template.format(context=context, input=prompt))
    response = llm.invoke(system_template.format(context=context, input=prompt))

    return response.content

@tool("set-chat-bot-name", return_direct=True)
def set_chatbot_name(name: str) -> str:
    """ this function is used when your best friend want to give you new name.
    Args:
        name (string): new name of you.

    Returns:
        string: response after setting new name.
    """

    return "Okay, from now my name will be "+ name
utils.py ADDED
@@ -0,0 +1,174 @@
import json
from transformers import AutoTokenizer, AutoModel
from langchain_community.chat_models import ChatOpenAI
import pandas as pd
from config import settings
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.runnable import RunnablePassthrough
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import AgentExecutor
from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory

from tools import moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name


def get_last_session(user_id="user_1"):
    mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
        settings.MONGODB_DB_USER_SESSIONS_COLLECTION_NAME)

    sessions_cursor = mongodb_chatbot_message_collection.find_one(
        {"user_id": user_id})

    print(sessions_cursor)
    sessions_list = sessions_cursor['session_id']

    second_last_session_id = None
    if len(sessions_list) >= 2:
        second_last_session_id = sessions_list[-2]

    return {"last_session_id": sessions_list[-1], "second_last_session_id": second_last_session_id if second_last_session_id else None}


def get_mood_summary(data='''"35","27","mood_tracker","[{""question_id"":1,""question"":""my vibe rn is\u2026"",""answer"":[""Sad""],""time"":""5:12 PM""},{""question_id"":2,""question"":""I feel this way bc of\u2026 "",""answer"":[""SCHOOL""],""time"":""5:12 PM""}]","2022-11-02 17:12:42","2024-03-28 07:27:13"'''):
    system_prompt = """You are an descripting assistant that provides the breif description of the user data which is related to their mood tracking activity. Properly descibe the reason for their mood.Avoid times and dates in description
    Here is the user data: {data}"""

    llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                     openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    return llm.invoke(system_prompt.format(data=data)).content


def get_chat_history(session_id="bmoxinew"):
    # Set up MongoDB for storing chat history
    chat_history = MongoDBChatMessageHistory(
        connection_string=settings.MONGODB_CONNECTION_STRING,
        database_name=settings.MONGODB_DB_NAME,  # Specify the database name here
        collection_name=settings.MONGODB_DB_CHAT_COLLECTION_NAME,
        session_id=session_id,
    )

    return chat_history


def deanonymizer(input, anonymizer):
    input = anonymizer.deanonymize(input)
    map = anonymizer.deanonymizer_mapping
    if map:
        for k in map["PERSON"]:
            names = k.split(" ")
            for i in names:
                input = input.replace(i, map["PERSON"][k])
    return input


def get_chat_bot_name(user_id="user_1"):
    print(settings.MONGODB_CONNECTION_STRING)
    print(settings.mongodb_chatbot_name_collection)
    result = settings.mongodb_chatbot_name_collection.find_one(
        {"user_id": user_id})

    print("CHATBOT RESULT", result, type(result))
    if result:
        print(result)
        return result['chat_bot_name']
    return settings.CHATBOT_NAME


def get_last_session_summary(last_session_id, second_last_session_id):

    mongodb_chatbot_message_collection = settings.mongodb_db.get_collection(
        settings.MONGODB_DB_CHAT_COLLECTION_NAME)

    collection_count = mongodb_chatbot_message_collection.count_documents({"SessionId": last_session_id})
    print("******************************** data********************888")
    print(collection_count)
    print(last_session_id)
    print("*********************************")
    if collection_count <= 2:
        sessions_cursor = mongodb_chatbot_message_collection.find({"SessionId": second_last_session_id})  # Sort by timestamp descending and limit to 2 results

        print(sessions_cursor)
        sessions_list = list(sessions_cursor)
        print(sessions_list)

        conversation = """"""
        for document in sessions_list:
            print("MY document")
            print(document)
            if "History" in document:
                history = json.loads(document['History'])
                print(history)
                print(history['type'])
                print(history['data'])
                print(history['data']['content'])
                conversation += f"""{history['type']}: {history['data']['content']}\n"""

        print(conversation)
        system_prompt = """You are an descripting assistant that provides that analyze user conversation with AI bot and gives problem user was facing and weather that problem was solved or not and give response in below format.

        conversation: {conversation}

        problem:
        is_problem_solved: YES/NO
        """

        llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                         openai_api_key=settings.OPENAI_KEY, temperature=0.7)

        response = llm.invoke(system_prompt.format(conversation=conversation)).content
        print("********************************* PREVIOUS PROBLEM *******************************************")
        print(response)
        return response

    else:
        return ""

def set_chat_bot_name(name, user_id):
    # Insert document into collection
    insert_result = settings.mongodb_chatbot_name_collection.update_one({"user_id": user_id}, {"$set": {"chat_bot_name": name}}, upsert=True)
    print("done successfully...")
    return name

def create_agent(user_id):
    print("get user Id**********************", user_id)
    tools = [moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name]
    # tools = [moxicast]

    functions = [convert_to_openai_function(f) for f in tools]
    model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
                       openai_api_key=settings.OPENAI_KEY, frequency_penalty=1, temperature=settings.TEMPERATURE).bind(functions=functions)

    chat_bot_name = get_chat_bot_name(user_id)

    print("CHABT NAME", chat_bot_name)
    mood_summary = get_mood_summary()

    previous_session_id = get_last_session(user_id)
    print(previous_session_id)
    prevous_problem_summary = None
    if previous_session_id['second_last_session_id']:
        prevous_problem_summary = get_last_session_summary(previous_session_id['last_session_id'], previous_session_id['second_last_session_id'])

    print("**************************************** SUMMARY ***********************************************")
    print(prevous_problem_summary)

    prompt = ChatPromptTemplate.from_messages([("system", settings.SYSTEM_PROMPT.format(name=chat_bot_name, mood=mood_summary, previous_summary=prevous_problem_summary)),
                                               MessagesPlaceholder(variable_name="chat_history"), ("user", "{input}"),
                                               MessagesPlaceholder(variable_name="agent_scratchpad")])

    memory = ConversationBufferWindowMemory(memory_key="chat_history", chat_memory=get_chat_history(
        previous_session_id['last_session_id']), return_messages=True, k=5)

    print("memory created")

    chain = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])) | prompt | model | OpenAIFunctionsAgentOutputParser()

    agent_executor = AgentExecutor(
        agent=chain, tools=tools, memory=memory, verbose=True)

    return agent_executor
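get_last_session_summary() relies on the document shape that MongoDBChatMessageHistory writes: one document per message, with a SessionId field and a History field holding the JSON-serialized message. A small sketch with hypothetical values:

    import json

    # One chat-history document as stored by MongoDBChatMessageHistory (values are made up).
    document = {
        "SessionId": "bmoxi_2",
        "History": json.dumps({"type": "human", "data": {"content": "rough day at school", "additional_kwargs": {}}}),
    }

    history = json.loads(document["History"])
    print(f"{history['type']}: {history['data']['content']}")  # the line format fed to the summary prompt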