DreamStream-1 committed on
Commit
4525308
·
verified ·
1 Parent(s): ef41952

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -87
app.py CHANGED
@@ -1,23 +1,23 @@
1
  import gradio as gr
2
- import requests
3
- import time
4
- import re
5
- import csv
6
- import json
7
- import random
8
  import nltk
9
  import numpy as np
10
  import tflearn
11
- import os
 
12
  import pickle
13
- from selenium import webdriver
14
- from selenium.webdriver.chrome.options import Options
15
- from bs4 import BeautifulSoup
16
- import chromedriver_autoinstaller
17
- import pandas as pd
18
  from nltk.tokenize import word_tokenize
19
  from nltk.stem.lancaster import LancasterStemmer
20
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 
 
 
 
 
 
 
 
 
21
 
22
  # Ensure necessary NLTK resources are downloaded
23
  nltk.download('punkt')
@@ -25,14 +25,14 @@ nltk.download('punkt')
25
  # Initialize the stemmer
26
  stemmer = LancasterStemmer()
27
 
28
- # Load intents.json (directly in the app directory)
29
  try:
30
  with open("intents.json") as file:
31
  data = json.load(file)
32
  except FileNotFoundError:
33
- raise FileNotFoundError("Error: 'intents.json' file not found in the app directory.")
34
 
35
- # Load preprocessed data from pickle (directly in the app directory)
36
  try:
37
  with open("data.pickle", "rb") as f:
38
  words, labels, training, output = pickle.load(f)
@@ -46,12 +46,12 @@ net = tflearn.fully_connected(net, 8)
46
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
47
  net = tflearn.regression(net)
48
 
49
- # Load the trained model (directly in the app directory)
50
  model = tflearn.DNN(net)
51
  try:
52
  model.load("MentalHealthChatBotmodel.tflearn")
53
  except FileNotFoundError:
54
- raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found in the app directory.")
55
 
56
  # Function to process user input into a bag-of-words format
57
  def bag_of_words(s, words):
@@ -68,7 +68,7 @@ def bag_of_words(s, words):
68
  def chat(message, history):
69
  history = history or []
70
  message = message.lower()
71
-
72
  try:
73
  # Predict the tag
74
  results = model.predict([bag_of_words(message, words)])
@@ -85,7 +85,7 @@ def chat(message, history):
85
  response = "I'm sorry, I didn't understand that. Could you please rephrase?"
86
  except Exception as e:
87
  response = f"An error occurred: {str(e)}"
88
-
89
  history.append((message, response))
90
  return history, history
91
 
@@ -153,26 +153,13 @@ def provide_suggestions(emotion):
153
  "Article URL": "https://www.health.harvard.edu/health-a-to-z",
154
  "Video URL": "https://youtu.be/m1vaUGtyo-A"
155
  }, ignore_index=True)
156
-
157
  return suggestions
158
 
159
  # Google Places API to get nearby wellness professionals
160
- api_key = "YOUR_GOOGLE_API_KEY" # Replace with your actual API key
161
-
162
- def install_chrome_and_driver():
163
- os.system("apt-get update")
164
- os.system("apt-get install -y wget curl")
165
- os.system("wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb")
166
- os.system("dpkg -i google-chrome-stable_current_amd64.deb")
167
- os.system("apt-get install -y -f")
168
- os.system("google-chrome-stable --version")
169
- chromedriver_autoinstaller.install()
170
-
171
- # Install Chrome and Chromedriver
172
- install_chrome_and_driver()
173
-
174
- # Fetch places data using Google Places API
175
- def get_places_data(query, location, radius, api_key):
176
  url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
177
  params = {
178
  "query": query,
@@ -180,48 +167,62 @@ def get_places_data(query, location, radius, api_key):
180
  "radius": radius,
181
  "key": api_key
182
  }
 
 
183
  response = requests.get(url, params=params)
184
- if response.status_code == 200:
185
- return response.json()
186
- return None
187
-
188
- # Scrape website URL from Google Maps results (using Selenium)
189
- def scrape_website_from_google_maps(place_name):
190
- chrome_options = Options()
191
- chrome_options.add_argument("--headless")
192
- chrome_options.add_argument("--no-sandbox")
193
- chrome_options.add_argument("--disable-dev-shm-usage")
194
- driver = webdriver.Chrome(options=chrome_options)
195
- search_url = f"https://www.google.com/maps/search/{place_name.replace(' ', '+')}"
196
- driver.get(search_url)
197
- time.sleep(5)
198
- try:
199
- website_element = driver.find_element_by_xpath('//a[contains(@aria-label, "Visit") and contains(@aria-label, "website")]')
200
- website_url = website_element.get_attribute('href')
201
- except:
202
- website_url = "Not available"
203
- driver.quit()
204
- return website_url
205
-
206
- # Get all wellness professionals based on the location
207
- def get_wellness_professionals(location):
208
- query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner"
209
- radius = 50000 # 50 km radius
210
- data = get_places_data(query, location, radius, api_key)
211
- if data:
212
- results = data.get('results', [])
213
- wellness_data = []
214
- for place in results:
215
- name = place.get('name')
216
- address = place.get('formatted_address')
217
- website = place.get('website', 'Not available')
218
- if website == 'Not available':
219
- website = scrape_website_from_google_maps(name)
220
- wellness_data.append([name, address, website])
221
- return pd.DataFrame(wellness_data, columns=["Name", "Address", "Website"])
222
- return pd.DataFrame()
223
-
224
- # Gradio Interface Setup
 
 
 
 
 
 
 
 
 
 
 
 
225
  iface = gr.Interface(
226
  fn=gradio_interface,
227
  inputs=[
@@ -230,16 +231,17 @@ iface = gr.Interface(
230
  gr.State() # One state input
231
  ],
232
  outputs=[
233
- gr.Chatbot(label="Chat History", type="messages"), # Set type="messages"
234
- gr.Textbox(label="Sentiment Analysis"),
235
- gr.Textbox(label="Detected Emotion"),
236
- gr.Dataframe(label="Suggestions & Resources"),
237
- gr.Dataframe(label="Nearby Wellness Professionals"),
238
- gr.State() # One state output
239
  ],
240
- allow_flagging="never",
241
- title="Mental Wellbeing App with AI Assistance",
242
- description="This app provides a mental health chatbot, sentiment analysis, emotion detection, and wellness professional search functionality.",
243
  )
244
 
245
- iface.launch(debug=True, share=True) # Launch with share=True to create a public link
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
2
  import nltk
3
  import numpy as np
4
  import tflearn
5
+ import random
6
+ import json
7
  import pickle
8
+ import torch
 
 
 
 
9
  from nltk.tokenize import word_tokenize
10
  from nltk.stem.lancaster import LancasterStemmer
11
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
12
+ import requests
13
+ import pandas as pd
14
+ from selenium import webdriver
15
+ from selenium.webdriver.chrome.options import Options
16
+ import chromedriver_autoinstaller
17
+ import os
18
+ import time
19
+ import re
20
+ from bs4 import BeautifulSoup
21
 
22
  # Ensure necessary NLTK resources are downloaded
23
  nltk.download('punkt')
 
25
  # Initialize the stemmer
26
  stemmer = LancasterStemmer()
27
 
28
# Load intents.json
# Chatbot intent definitions loaded into `data` (presumably tag/pattern/response
# entries consumed by chat() — confirm against intents.json's schema).
try:
    with open("intents.json") as file:
        data = json.load(file)
except FileNotFoundError:
    # Fail fast at startup with an explicit message instead of a bare load error.
    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")
34
 
35
+ # Load preprocessed data from pickle
36
  try:
37
  with open("data.pickle", "rb") as f:
38
  words, labels, training, output = pickle.load(f)
 
46
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
47
  net = tflearn.regression(net)
48
 
49
+ # Load the trained model
50
  model = tflearn.DNN(net)
51
  try:
52
  model.load("MentalHealthChatBotmodel.tflearn")
53
  except FileNotFoundError:
54
+ raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")
55
 
56
  # Function to process user input into a bag-of-words format
57
  def bag_of_words(s, words):
 
68
  def chat(message, history):
69
  history = history or []
70
  message = message.lower()
71
+
72
  try:
73
  # Predict the tag
74
  results = model.predict([bag_of_words(message, words)])
 
85
  response = "I'm sorry, I didn't understand that. Could you please rephrase?"
86
  except Exception as e:
87
  response = f"An error occurred: {str(e)}"
88
+
89
  history.append((message, response))
90
  return history, history
91
 
 
153
  "Article URL": "https://www.health.harvard.edu/health-a-to-z",
154
  "Video URL": "https://youtu.be/m1vaUGtyo-A"
155
  }, ignore_index=True)
156
+
157
  return suggestions
158
 
159
  # Google Places API to get nearby wellness professionals
160
+ api_key = "GOOGLE_API_KEY" # Replace with your API key
161
+
162
+ def get_places_data(query, location, radius, api_key, next_page_token=None):
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
164
  params = {
165
  "query": query,
 
167
  "radius": radius,
168
  "key": api_key
169
  }
170
+ if next_page_token:
171
+ params["pagetoken"] = next_page_token
172
  response = requests.get(url, params=params)
173
+ return response.json() if response.status_code == 200 else None
174
+
175
def get_all_places(query, location, radius, api_key):
    """Collect every Places text-search result for *query*, following pagination.

    Args:
        query: Free-text search string (e.g. "therapist OR counselor").
        location: Location string forwarded to the Places API request.
        radius: Search radius in meters.
        api_key: Google Places API key.

    Returns:
        list[list]: rows of ``[name, formatted_address, website]``; website
        falls back to "Not available" when absent from the API response.
    """
    all_results = []
    next_page_token = None
    while True:
        data = get_places_data(query, location, radius, api_key, next_page_token)
        if not data:
            # Request failed (non-200) — return whatever was gathered so far.
            break
        for place in data.get('results', []):
            all_results.append([
                place.get("name"),
                place.get("formatted_address"),
                place.get("website", "Not available"),
            ])
        next_page_token = data.get('next_page_token')
        if not next_page_token:
            break
        # Google issues next_page_token shortly BEFORE it becomes valid;
        # without this pause the follow-up request returns INVALID_REQUEST
        # and pagination silently stops after the first page.
        time.sleep(2)
    return all_results
194
+
195
def search_wellness_professionals(location):
    """Search the Places API for wellness professionals near *location*.

    Returns:
        pandas.DataFrame: columns Name/Address/Website; when the search
        yields no rows, a single placeholder row ("No data found.") instead.
    """
    columns = ["Name", "Address", "Website"]
    rows = get_all_places(
        "therapist OR counselor OR mental health professional",
        location,
        50000,  # 50 km search radius
        api_key,
    )
    if not rows:
        rows = [["No data found.", "", ""]]
    return pd.DataFrame(rows, columns=columns)
204
+
205
# Gradio Interface
def gradio_interface(message, location, state):
    """Run one full pipeline pass for a single user turn.

    Args:
        message: The user's chat message (free text).
        location: Location string used for the wellness-professional search.
        state: Prior conversation history from gr.State (list of
            (message, response) pairs), or None on the first call.

    Returns:
        (history, sentiment, emotion, suggestions, wellness_results, history) —
        the trailing history duplicates the first so the final value can feed
        the gr.State output.
        NOTE(review): this returns 6 values, but the visible `iface` outputs
        list declares only 5 components (the suggestions Dataframe appears to
        have been dropped) — confirm the return tuple and outputs line up.
    """
    history = state or []  # If state is None, initialize it as an empty list

    # Stage 1: Mental Health Chatbot
    history, _ = chat(message, history)

    # Stage 2: Sentiment Analysis
    sentiment = analyze_sentiment(message)

    # Stage 3: Emotion Detection and Suggestions
    emotion = detect_emotion(message)
    suggestions = provide_suggestions(emotion)

    # Stage 4: Search for Wellness Professionals
    wellness_results = search_wellness_professionals(location)

    # Return the results in a tabular form within the Gradio interface
    return history, sentiment, emotion, suggestions, wellness_results, history  # Last 'history' is for state
224
+
225
+ # Gradio interface setup
226
  iface = gr.Interface(
227
  fn=gradio_interface,
228
  inputs=[
 
231
  gr.State() # One state input
232
  ],
233
  outputs=[
234
+ gr.Chatbot(label="Chatbot History"),
235
+ gr.Textbox(label="Sentiment"),
236
+ gr.Textbox(label="Emotion"),
237
+ gr.Dataframe(label="Wellness Professionals"),
238
+ gr.State() # State output (maintains conversation history)
 
239
  ],
240
+ title="Mental Health Chatbot",
241
+ description="This chatbot helps with mental health inquiries and provides suggestions for wellness professionals.",
242
+ live=True
243
  )
244
 
245
# Run the interface
# Script entry point — debug=True surfaces tracebacks in the console while developing.
if __name__ == "__main__":
    iface.launch(debug=True)