StudyMama / app.py
RithikaChalam's picture
Update app.py
69024e9 verified
import gradio as gr
from huggingface_hub import InferenceClient
# STEP 1 FROM SEMANTIC SEARCH
from sentence_transformers import SentenceTransformer
import torch
# STEP 2 FROM SEMANTIC SEARCH
# Open the water_cycle.txt file in read mode with UTF-8 encoding
with open("cool_mom_phrases.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
cool_mom_text = file.read()
with open("tutor_mom_phrases.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
tutor_mom_text = file.read()
with open("strict_mom_phrases.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
strict_mom_text = file.read()
with open("study_techniques.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
study_techniques_text = file.read()
# STEP 3 FROM SEMANTIC SEARCH
def preprocess_text(text):
# Strip extra whitespace from the beginning and the end of the text
cleaned_text = text.strip()
# Split the cleaned_text by every newline character (\n)
chunks = cleaned_text.split("\n")
# Create an empty list to store cleaned chunks
cleaned_chunks = []
# Write your for-in loop below to clean each chunk and add it to the cleaned_chunks list
for chunk in chunks:
chunk = chunk.strip()
if chunk != "":
cleaned_chunks.append(chunk)
# Return the cleaned_chunks
return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_cool_chunks = preprocess_text(cool_mom_text) # Complete this line
cleaned_tutor_chunks = preprocess_text(tutor_mom_text)
cleaned_strict_chunks = preprocess_text(strict_mom_text)
#STEP 4 FROM SEMANTIC SEARCH
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
def create_embeddings(text_chunks):
# Convert each text chunk into a vector embedding and store as a tensor
chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True) # Replace ... with the text_chunks list
# Return the chunk_embeddings
return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
cool_chunk_embeddings = create_embeddings(cleaned_cool_chunks) # Complete this line
tutor_chunk_embeddings = create_embeddings(cleaned_tutor_chunks)
strict_chunk_embeddings = create_embeddings(cleaned_strict_chunks)
#STEP 5 FROM SEMANTIC SEARCH
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
# Convert the query text into a vector embedding
query_embedding = model.encode(query, convert_to_tensor=True) # Complete this line
# Normalize the query embedding to unit length for accurate similarity comparison
query_embedding_normalized = query_embedding / query_embedding.norm()
# Normalize all chunk embeddings to unit length for consistent comparison
chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
# Calculate cosine similarity between query and all chunks using matrix multiplication
similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized) # Complete this line
# Print the similarities
print(similarities)
# Find the indices of the 3 chunks with highest similarity scores
top_indices = torch.topk(similarities, k=3).indices
# Print the top indices
print(top_indices)
# Create an empty list to store the most relevant chunks
top_chunks = []
# Loop through the top indices and retrieve the corresponding text chunks
for i in top_indices:
top_chunks.append(text_chunks[i])
# Return the list of most relevant chunks
return top_chunks
# STEP 6 FROM SEMANTIC SEARCH
# Call the get_top_chunks function with the original query
#top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks) # Complete this line
#top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
#top_tiger_results = get_top_chunks(message, tiger_chunk_embeddings, cleaned_tiger_chunks)
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
# NN commented out both
# mom_type = []
def respond(message, history, mom_type) :
# mom_type = []
if not mom_type:
return "Please choose atleast one mom"
selected = mom_type[0]
# NN changed from mom_type to selected in each if statement
if selected == "Cool Mom" :
top_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks)
messages = [{"role": "system", "content": f"You are a chatbot that plays the role of the user's cool, friendly, extremely nice and supportive mom. Respond in full sentences and use really nice and sweet language, don't cut yourself off. Base your response on the provided context: {top_results}"},
{"role": "user",
"content": (
f"Question{message}"
)}]
elif selected == "Tutor Mom" :
top_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
messages = [{"role": "system", "content": f"You are a chatbot that plays the role of the user's tutor-like mom who knows how to help and teaches her kid everything. Respond in full sentences, speak very knowledgeable and don't cut yourself off. Base your response on the provided context: {top_results}"},
{"role": "user",
"content": (
f"Question{message}"
)}]
elif selected == "Strict Mom":
top_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
messages = [{"role": "system", "content": f"You are a chatbot that plays the role of the user's extremely strict mom who is focused on doing well in school, studies, and academics. Respond in action-oriented and stern full sentences, don't cut yourself off. Base your response on the provided context: {top_results}"},
{"role": "user",
"content": (
f"Question{message}"
)}]
#updated JC
if history:
for user_msg, assistant_msg in history:
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
#messages.append({"role": "user", "content": message})
response = client.chat_completion(
messages,
temperature = 0.2
)
return response['choices'][0]['message']['content'].strip()
# NN commented out
# chatbot = gr.ChatInterface(respond, type="messages")
'''custom_theme = gr.themes.Soft().set(
primary_hue="purple",
secondary_hue="fuchsia",
neutral_hue="gray",
spacing_size="lg",
radius_size="lg",
text_size="lg",
font=[gr.themes.GoogleFont("IBM Plex Sans"), "sans-serif"],
font_mono=[gr.themes.GoogleFont("IBM Plex Mono"), "monospace"]
)'''
custom_theme = gr.themes.Soft(
primary_hue="yellow",
secondary_hue="violet",
neutral_hue="purple",
spacing_size="md",
radius_size="md",
text_size="md",
font=[gr.themes.GoogleFont("IBM Plex Sans"), "sans-serif"],
font_mono=[gr.themes.GoogleFont("IBM Plex Mono"), "monospace"]
)
with gr.Blocks(theme=custom_theme) as chatbot:
with gr.Row():
mom_type = gr.CheckboxGroup(["Cool Mom", "Tutor Mom", "Strict Mom"],label = "Choose Your Mom")
gr.ChatInterface(
fn=respond,
additional_inputs=[mom_type],
title="StudyMama"
)
# with gr.Blocks() as chatbot:
# gr.Image(value="ezgif.com-webp-to-gif-converter (1).gif")
# gr.ChatInterface(respond, type="messages")
# gr.ChatInterface(respond, type="messages")
#def respond_tutor(message, history, mom_type):
# top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
# #str_chunks = "\n".join(best_chunks)
# messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely studious, tutor-like mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {mom_type}"},
# {"role": "user",
# "content": (
# f"Context:\n{top_tutor_results}\n\n"
# f"Question{message}"
# )}]
# if history:
# messages.extend(history)
# messages.append({"role": "user", "content": message})
# response = client.chat_completion(
# messages,
# temperature = 0.2
# )
# return response['choices'][0]['message']['content'].strip()
#def respond_strict(message, history):
# top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
#str_chunks = "\n".join(best_chunks)
# messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely strict mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {top_strict_results}"},
# {"role": "user",
# "content": (
# f"Context:\n{top_strict_results}\n\n"
# f"Question{message}"
# )}]
# if history:
# messages.extend(history)
#
# messages.append({"role": "user", "content": message})
# response = client.chat_completion(
# messages,
# temperature = 0.2
# )
# return response['choices'][0]['message']['content'].strip()
chatbot.launch(ssr_mode=False)