import os

import pandas as pd
from langchain.agents import tool
from langchain_community.chat_models import ChatOpenAI

from config import settings
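# Assumptions about `config.settings` (not shown in this file): it is expected to
# expose a Hugging Face tokenizer/model pair for embeddings, a `datasets.Dataset`
# with a FAISS index on its "embeddings" column, and the OpenAI credentials used
# below. A hypothetical config.py could look like this:
#
#   from transformers import AutoModel, AutoTokenizer
#   from datasets import load_from_disk
#
#   class Settings:
#       OPENAI_MODEL = "gpt-3.5-turbo"   # assumed model name
#       OPENAI_KEY = "sk-..."            # read from an env var in practice
#       tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/multi-qa-mpnet-base-dot-v1")
#       model = AutoModel.from_pretrained("sentence-transformers/multi-qa-mpnet-base-dot-v1")
#       dataset = load_from_disk("podcasts")  # hypothetical path; must carry a FAISS index
#
#   settings = Settings()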
def get_embeddings(text_list):
    """Embed a list of strings with the model from settings, using CLS pooling."""
    encoded_input = settings.tokenizer(
        text_list, padding=True, truncation=True, return_tensors="pt"
    )
    # Running on CPU; move tensors to a device here if a GPU is available, e.g.:
    # encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    model_output = settings.model(**encoded_input)
    cls_pool = model_output.last_hidden_state[:, 0]
    return cls_pool
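# Sanity-check sketch (assumes `settings.model` is a standard Hugging Face AutoModel,
# so the returned tensor has shape [batch_size, hidden_size]):
#
#   emb = get_embeddings(["hello world"])
#   print(emb.shape)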
def reg(chat):
    """Retrieve the 5 podcasts closest to the query via the FAISS index on `settings.dataset`."""
    question_embedding = get_embeddings([chat]).cpu().detach().numpy()
    scores, samples = settings.dataset.get_nearest_examples(
        "embeddings", question_embedding, k=5
    )
    samples_df = pd.DataFrame.from_dict(samples)
    samples_df["scores"] = scores
    samples_df.sort_values("scores", ascending=False, inplace=True)
    return samples_df[["title", "cover_image", "referral_link", "category_id"]]
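# Usage sketch (assumes the dataset was indexed with
# `settings.dataset.add_faiss_index(column="embeddings")` and that its rows carry
# the columns selected above):
#
#   df = reg("I need a confidence boost before my exam")
#   print(df[["title", "referral_link"]])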
def moxicast(prompt: str) -> str:
    """Use this function when the user asks about the MOXICASTS feature.
    MOXICASTS: Advice and guidance on life topics.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MOXICASTS is a BMoxi feature for advice and guidance on life topics."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content
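# Hypothetical usage: moxicast("What is MOXICASTS?") returns a short answer grounded
# only in the context string above. The feature helpers below (peptalks, sactury,
# power_zens, my_calender, affirmations, horoscope, influencer_post, my_vibecheck,
# my_rituals, my_rewards, mentoring, my_journal) follow the same pattern, each with
# its own one-line feature description appended to the shared app context.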
def peptalks(prompt: str) -> str:
    """Use this function when the user asks about the PEP TALK PODS feature.
    PEP TALK PODS: Quick audio pep talks for boosting mood and motivation.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. PEP TALK PODS: Quick audio pep talks for boosting mood and motivation."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def sactury(prompt: str) -> str:
    """Use this function when the user asks about the SOCIAL SANCTUARY feature.
    THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. THE SOCIAL SANCTUARY: Anonymous community forum for support and sharing."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def power_zens(prompt: str) -> str:
    """Use this function when the user asks about the POWER ZENS feature.
    POWER ZENS: Mini meditations for emotional control.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. POWER ZENS: Mini meditations for emotional control."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def my_calender(prompt: str) -> str:
    """Use this function when the user asks about the MY CALENDAR feature.
    MY CALENDAR: Visual calendar for tracking self-care rituals and moods.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MY CALENDAR: Visual calendar for tracking self-care rituals and moods."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def affirmations(prompt: str) -> str:
    """Use this function when the user asks about the PUSH AFFIRMATIONS feature.
    PUSH AFFIRMATIONS: Daily text affirmations for positive thinking.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. PUSH AFFIRMATIONS: Daily text affirmations for positive thinking."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def horoscope(prompt: str) -> str:
    """Use this function when the user asks about the HOROSCOPE feature.
    SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. SELF-LOVE HOROSCOPE: Weekly personalized horoscope readings."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def influencer_post(prompt: str) -> str:
    """Use this function when the user asks about the INFLUENCER POSTS feature.
    INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon).

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. INFLUENCER POSTS: Exclusive access to social media influencer advice (coming soon)."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def my_vibecheck(prompt: str) -> str:
    """Use this function when the user asks about the MY VIBECHECK feature.
    MY VIBECHECK: Monitor and understand emotional patterns.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MY VIBECHECK: Monitor and understand emotional patterns."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def my_rituals(prompt: str) -> str:
    """Use this function when the user asks about the MY RITUALS feature.
    MY RITUALS: Create personalized self-care routines.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MY RITUALS: Create personalized self-care routines."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def my_rewards(prompt: str) -> str:
    """Use this function when the user asks about the MY REWARDS feature.
    MY REWARDS: Earn points for self-care, redeemable for gift cards.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MY REWARDS: Earn points for self-care, redeemable for gift cards."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def mentoring(prompt: str) -> str:
    """Use this function when the user asks about the 1:1 MENTORING feature.
    1:1 MENTORING: Personalized mentoring (coming soon).

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. 1:1 MENTORING: Personalized mentoring (coming soon)."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content


def my_journal(prompt: str) -> str:
    """Use this function when the user asks about the MY JOURNAL feature.
    MY JOURNAL: Guided journaling exercises for self-reflection.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    context = "The BMOXI app is designed for teenage girls. It offers music, curated content, and 1:1 mentoring sessions to support them through hard times. MY JOURNAL: Guided journaling exercises for self-reflection."
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Answer using only the context below; do not use any other information.
    context: {context}
    Input: {input}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content
def recommand_podcast(prompt: str) -> str:
    """Use this function when your best friend wants a recommendation or tips, or when
    you feel it is a good moment to recommend a podcast to her.

    Args:
        prompt (str): user query
    Returns:
        str: answer to the query
    """
    df = reg(prompt)
    # Build the context from the retrieved podcast rows
    context = ""
    for index, row in df.iterrows():
        context += (
            f"Row {index + 1}: Title: {row['title']} image: {row['cover_image']} "
            f"referral_link: {row['referral_link']} category_id: {row['category_id']}\n"
        )
    llm = ChatOpenAI(model=settings.OPENAI_MODEL, openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    # Define the system prompt
    system_template = """Recommend a podcast for: {input}, and include the podcast's referral link.
    You must use only the context below, not any other information.
    context: {context}
    """
    response = llm.invoke(system_template.format(context=context, input=prompt))
    return response.content
def set_chatbot_name(name: str) -> str:
    """Use this function when your best friend wants to give you a new name.

    Args:
        name (str): your new name.
    Returns:
        str: response after setting the new name.
    """
    return "Okay, from now on my name will be " + name