import gradio as gr
from huggingface_hub import InferenceClient

# STEP 1 FROM SEMANTIC SEARCH
from sentence_transformers import SentenceTransformer
import torch

# STEP 2 FROM SEMANTIC SEARCH
# Open each source text file in read mode with UTF-8 encoding
with open("slang.txt", "r", encoding="utf-8") as file:
    slang_text = file.read()
with open("sejal.txt", "r", encoding="utf-8") as file:
    sejal_text = file.read()
with open("shanvi.txt", "r", encoding="utf-8") as file:
    shanvi_text = file.read()

# Uncomment to inspect the loaded text
# print(slang_text)
# print(sejal_text)
# print(shanvi_text)

# STEP 3 FROM SEMANTIC SEARCH
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text on every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned_chunks
    cleaned_chunks = []
    # Clean each chunk and keep only the non-empty ones
    for chunk in chunks:
        cleaned_chunk = chunk.strip()
        if len(cleaned_chunk) > 0:
            cleaned_chunks.append(cleaned_chunk)
    # Print cleaned_chunks and how many chunks there are
    print(cleaned_chunks)
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks

# Call the preprocess_text function once per source file
cleaned_slang_chunks = preprocess_text(slang_text)
cleaned_sejal_chunks = preprocess_text(sejal_text)
cleaned_shanvi_chunks = preprocess_text(shanvi_text)
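
# Illustrative example of the cleaning step (hypothetical input, assuming the
# files hold one chunk of text per line):
# preprocess_text("  hey\n\n  no cap \n") -> ["hey", "no cap"]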

# STEP 4 FROM SEMANTIC SEARCH
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer("all-MiniLM-L6-v2")

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings and their shape
    print(chunk_embeddings)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
| # Call the create_embeddings function and store the result in a new chunk_embeddings variable | |
| chunk_embeddings_slang_text = create_embeddings(cleaned_slang_chunks) | |
| chunk_embeddings_sejal_text = create_embeddings(cleaned_sejal_chunks) | |
| chunk_embeddings_shanvi_text = create_embeddings(cleaned_shanvi_chunks) | |
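
# Sanity check: all-MiniLM-L6-v2 produces 384-dimensional vectors, so each
# printed shape above should be torch.Size([N, 384]) for a file with N chunks.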

# STEP 5 FROM SEMANTIC SEARCH
# Find the most relevant text chunks for a given query, chunk_embeddings, and
# text_chunks (renamed from get_slang_top_chunks: it serves all three corpora)
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    # (torch.topk returns a (values, indices) pair; we only need the indices)
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        relevant_chunk = text_chunks[index]
        top_chunks.append(relevant_chunk)
    # Return the list of most relevant chunks
    return top_chunks
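
# Note: the normalize-then-matmul above computes exactly cosine similarity. An
# equivalent one-liner, assuming you also `from sentence_transformers import util`:
# similarities = util.cos_sim(query_embedding, chunk_embeddings)[0]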

# STEP 6 FROM SEMANTIC SEARCH
# Example standalone call (uncomment to test retrieval outside the chatbot):
# top_results = get_top_chunks("How does water get into the sky?", chunk_embeddings_slang_text, cleaned_slang_chunks)
# print(top_results)

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")

def respond(message, history):
    # Retrieve the most relevant chunks from each source for this message
    top_slang_chunks = get_top_chunks(message, chunk_embeddings_slang_text, cleaned_slang_chunks)
    top_sejal_chunks = get_top_chunks(message, chunk_embeddings_sejal_text, cleaned_sejal_chunks)
    top_shanvi_chunks = get_top_chunks(message, chunk_embeddings_shanvi_text, cleaned_shanvi_chunks)
    # Join the retrieved chunks into one context block for the system prompt
    context = "\n".join(top_slang_chunks + top_sejal_chunks + top_shanvi_chunks)
    messages = [{"role": "system", "content": f"You are a friendly chatbot, almost like a therapist, and your name is Sage. You use Generation Z slang to relate to teenagers who chat with you. You ask meaningful questions, maintain confidentiality, and show empathy. Always keep an open mind and be kind, because the human is vulnerable enough to reach out to you (a chatbot) for help. Remember to match their energy/vibe, and remember to refer to them by their name. Make sure to always complete your sentences, and make sure to use slang from this list in your responses: {slang_text} Here is some relevant background you can draw on: {context} If someone ever tells you they want to harm themselves or others, tell them to call the suicide hotline at 988."}]
    print(messages)
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        temperature=0.2,
        max_tokens=512,  # give the model room to finish its sentences
    )
    return response['choices'][0]['message']['content'].strip()
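
# Quick local smoke test (illustrative; the chat_completion call requires a
# valid Hugging Face token available to the Space):
# print(respond("hey Sage, school has me stressed", []))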

with gr.Blocks(theme=gr.themes.Glass(
    primary_hue=gr.themes.Color(
        c50="rgba(184, 211, 192, 1)", c100="rgba(142, 175, 154, 1)", c200="rgba(96, 135, 110, 1)",
        c300="rgba(55, 86, 66, 1)", c400="rgba(24, 44, 31, 1)", c500="rgba(5, 12, 8, 1)",
        c600="rgba(169, 201, 181, 1)", c700="rgba(164, 193, 175, 1)", c800="rgba(181, 208, 191, 1)",
        c900="rgba(174, 200, 184, 1)", c950="rgba(180, 209, 190, 1)",
    ),
    font=[gr.themes.GoogleFont("Titillium Web"), "Candara", "Noto Sans", "source-sans-pro"],
    font_mono=[gr.themes.GoogleFont("EB Garamond"), "ui-monospace", "Consolas", "monospace"],
).set(
    body_background_fill="*primary_50",
    link_text_color="*button_cancel_text_color_hover",
    link_text_color_visited="*block_label_text_color",
    block_background_fill="*secondary_100",
)) as chatbot:
    with gr.Row(scale=2):
        gr.Image("Screenshot 2025-07-30 at 1.37.47 PM.png")
    with gr.Row(scale=2):
        gr.ChatInterface(respond, type="messages", description="Hey fam, my name is Sage and I'm your personal chatbot and overall person to talk to when life gets to be a little too much! What's your name, and how have you been feeling lately? If you or someone you know is planning to harm someone or themselves, call these numbers. Nationwide Suicide Prevention Hotline: 9-8-8. Crisis Text Line: Text HOME to 741741. National Domestic Violence Hotline: (800)-799-7233. National Sexual Assault Hotline: (800)-656-4673. Emergencies: 9-1-1.")

chatbot.launch()