cindyy3 committed
Commit 756a809 · verified · 1 Parent(s): d8be181

Update app.py

Files changed (1)
  1. app.py +86 -94
app.py CHANGED
@@ -2,142 +2,134 @@ import gradio as gr
  from sentence_transformers import SentenceTransformer, util
  import openai
  import os
+ import pandas as pd

  os.environ["TOKENIZERS_PARALLELISM"] = "false"

- # Initialize paths and model identifiers for easy configuration and maintenance
- filename = "output_topic_details.txt" # Path to the file storing chess-specific details
+ # Initialize paths and model identifiers
+ filename = "output_topic_details.txt"
  retrieval_model_name = 'output/sentence-transformer-finetuned/'

  openai.api_key = os.environ["OPENAI_API_KEY"]

- system_message = "You are a restaurant recommending chatbot that suggests one restaurant based on the criteria the user provides."
- # Initial system message to set the behavior of the assistant
+ system_message = (
+     "You are a restaurant recommending chatbot that takes details about a restaurant including type of restaurant, "
+     "dietary restrictions, and budget and chooses a restaurant in Seattle which best fits the user's criteria. "
+     "Then you output the restaurant name and website link."
+ )
  messages = [{"role": "system", "content": system_message}]

- # Attempt to load the necessary models and provide feedback on success or failure
- try:
-     retrieval_model = SentenceTransformer(retrieval_model_name)
-     print("Models loaded successfully.")
- except Exception as e:
-     print(f"Failed to load models: {e}")
-
- def load_and_preprocess_text(filename):
-     """
-     Load and preprocess text from a file, removing empty lines and stripping whitespace.
-     """
+ # Load the data into a DataFrame for easier querying
+ def load_and_preprocess_data(filename):
      try:
          with open(filename, 'r', encoding='utf-8') as file:
-             segments = [line.strip() for line in file if line.strip()]
-         print("Text loaded and preprocessed successfully.")
-         return segments
-     except Exception as e:
-         print(f"Failed to load or preprocess text: {e}")
-         return []
-
- segments = load_and_preprocess_text(filename)
-
- def find_relevant_segment(user_query, segments):
-     """
-     Find the most relevant text segment for a user's query using cosine similarity among sentence embeddings.
-     This version finds the best match based on the content of the query.
-     """
-     try:
-         # Lowercase the query for better matching
-         lower_query = user_query.lower()
+             data = file.read()
+         # Split into sections based on "Topic:" and then split into lines
+         sections = data.split("Topic: ")
+         restaurant_data = []

-         # Encode the query and the segments
-         query_embedding = retrieval_model.encode(lower_query)
-         segment_embeddings = retrieval_model.encode(segments)
+         for section in sections[1:]:
+             lines = section.strip().split("\n")
+             topic = lines[0]
+             description = "\n".join(lines[1:])
+             if topic == "Details about Restaurants":
+                 lines = description.split("\n")
+                 # Convert to a DataFrame
+                 df = pd.DataFrame([line.split(",") for line in lines[1:]], columns=lines[0].split(","))
+                 restaurant_data.append(df)

-         # Compute cosine similarities between the query and the segments
-         similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
-
-         # Find the index of the most similar segment
-         best_idx = similarities.argmax()
-
-         # Return the most relevant segment
-         return segments[best_idx]
+         # Concatenate all DataFrames into one
+         full_df = pd.concat(restaurant_data, ignore_index=True)
+         full_df.columns = full_df.columns.str.strip() # Strip any extra whitespace from column names
+         print("Data loaded and preprocessed successfully.")
+         return full_df
      except Exception as e:
-         print(f"Error in finding relevant segment: {e}")
-         return ""
+         print(f"Failed to load or preprocess data: {e}")
+         return pd.DataFrame()

- def generate_response(user_query, relevant_segment):
-     """
-     Generate a response emphasizing the bot's capability in suggesting a restaurant.
-     """
-     try:
-         user_message = f"Here is a local restaurant based on your information: {relevant_segment}"
+ data_df = load_and_preprocess_data(filename)

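Note: load_and_preprocess_data expects output_topic_details.txt to contain a section headed "Topic: Details about Restaurants" whose first line is a comma-separated header row and whose remaining lines hold one restaurant per row. The data file is not part of this commit, so the layout below is only an illustrative sketch: the column names are taken from the code (filter_restaurants and generate_response reference 'Restaurant', 'Type of Restaurant', 'Price', 'Website', and the dietary "... Options?" columns), and the sample row is hypothetical. Because rows are split on every comma, field values themselves cannot contain commas.

Topic: Details about Restaurants
Restaurant,Type of Restaurant,Price,Gluten-free Options?,Vegan Options?,Lactose-Intolerant Options?,Pescatarian Options?,Website
Example Bistro,Italian,Moderate,Yes,No,Yes,Yes,https://example.com
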
-         # Append user's message to messages list
-         messages.append({"role": "user", "content": user_message})
-
-         response = openai.ChatCompletion.create(
-             model="gpt-4o",
-             messages=messages,
-             max_tokens=150,
-             temperature=0.2,
-             top_p=1,
-             frequency_penalty=0,
-             presence_penalty=0
-         )
-
-         # Extract the response text
-         output_text = response['choices'][0]['message']['content'].strip()
-
-         # Append assistant's message to messages list for context
-         messages.append({"role": "assistant", "content": output_text})
-
-         return output_text
-
-     except Exception as e:
-         print(f"Error in generating response: {e}")
-         return f"Error in generating response: {e}"
+ def filter_restaurants(cuisine=None, dietary_restrictions=None, budget=None):
+     df_filtered = data_df
+
+     if cuisine:
+         df_filtered = df_filtered[df_filtered['Type of Restaurant'].str.contains(cuisine, case=False, na=False)]
+     if dietary_restrictions:
+         for restriction in dietary_restrictions:
+             df_filtered = df_filtered[df_filtered[restriction].str.contains('Yes', case=False, na=False)]
+     if budget:
+         df_filtered = df_filtered[df_filtered['Price'].str.contains(budget, case=False, na=False)]
+
+     if df_filtered.empty:
+         return "No matching restaurants found."
+
+     # Convert DataFrame to a list of dictionaries for easier handling
+     restaurants = df_filtered[['Restaurant', 'Website']].to_dict(orient='records')
+     return restaurants
+
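With a DataFrame in that shape, filter_restaurants can be exercised directly. The call below is an illustrative sanity check, not part of the commit; the argument values mirror the column names and budget labels used elsewhere in the file.

# Illustrative only: exercise filter_restaurants() with the kind of filters generate_response builds.
matches = filter_restaurants(
    cuisine="Italian",
    dietary_restrictions=["Vegan Options?"],
    budget="Moderate",
)
if isinstance(matches, str):
    print(matches)  # "No matching restaurants found."
else:
    for r in matches:
        print(r["Restaurant"], r["Website"])
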
+ def generate_response(user_query):
+     # Example of parsing the query for simplicity
+     # You might want to use more sophisticated parsing and NLP for better results
+     # Dummy parsing based on example query format
+     cuisine = None
+     dietary_restrictions = []
+     budget = None
+
+     if 'gluten-free' in user_query.lower():
+         dietary_restrictions.append('Gluten-free Options?')
+     if 'vegan' in user_query.lower():
+         dietary_restrictions.append('Vegan Options?')
+     if 'lactose-intolerant' in user_query.lower():
+         dietary_restrictions.append('Lactose-Intolerant Options?')
+     if 'pescatarian' in user_query.lower():
+         dietary_restrictions.append('Pescatarian Options?')
+
+     if 'low' in user_query.lower():
+         budget = 'Low'
+     elif 'moderate' in user_query.lower():
+         budget = 'Moderate'
+     elif 'high' in user_query.lower():
+         budget = 'High'
+
+     # Handle cuisine extraction if needed
+
+     results = filter_restaurants(cuisine=cuisine, dietary_restrictions=dietary_restrictions, budget=budget)
+     if isinstance(results, str): # If no restaurants found
+         return results
+
+     response = "\n".join([f"{r['Restaurant']}: {r['Website']}" for r in results])
+     return response

  def query_model(question):
-     """
-     Process a question, find relevant information, and generate a response.
-     """
      if question == "":
-         return "Give me your preferences..."
-     relevant_segment = find_relevant_segment(question, segments)
-     if not relevant_segment:
-         return "Could not find specific information. Please refine your question."
-     response = generate_response(question, relevant_segment)
+         return "Please provide your restaurant preferences."
+     response = generate_response(question)
      return response

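generate_response currently leaves cuisine as None (see the "# Handle cuisine extraction if needed" placeholder). A minimal sketch of that step, assuming simple substring matching against the cuisine list in the topics string below; extract_cuisine is a hypothetical helper and is not part of this commit.

# Hypothetical helper (not in the commit): pick out a cuisine named in the query.
CUISINES = [
    "American", "Indian", "Middle Eastern", "Chinese", "Italian", "Thai",
    "Hawaiian-Korean", "Japanese", "Ethiopian", "Pakistani", "Mexican",
    "Ghanaian", "Vietnamese", "Filipino", "Spanish", "Turkish",
]

def extract_cuisine(user_query):
    """Return the first cuisine mentioned in the query, or None."""
    query = user_query.lower()
    for cuisine in CUISINES:
        if cuisine.lower() in query:
            return cuisine
    return None

Inside generate_response, `cuisine = extract_cuisine(user_query)` would then feed the 'Type of Restaurant' filter in filter_restaurants.
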
- # Define the welcome message and specific topics the chatbot can provide information about
  welcome_message = """
  # Welcome to Ethical Eats Explorer!
-
  ## Your AI-driven assistant for restaurant recs in Seattle. Created by Saranya, Cindy, and Liana of the 2024 Kode With Klossy Seattle Camp.
  """

  topics = """
  ### Please give me your restaurant preferences:
-
  - Dietary Restrictions
  - Cuisine Preferences (optional)
    - Cuisines: American, Indian, Middle Eastern, Chinese, Italian, Thai, Hawaiian-Korean, Japanese, Ethiopian, Pakistani, Mexican, Ghanaian, Vietnamese, Filipino, Spanish, Turkish
  - Budget Preferences (Low: $0 - $20, Moderate: $20 - $30, High: $30+ - per person)
-
  Please send your message in the format: "Could you give me a (cuisine) restaurant with (dietary restriction) options that is (budget) budget?"
-
  """

- # Setup the Gradio Blocks interface with custom layout components
  with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo:
-     gr.Markdown(welcome_message) # Display the formatted welcome message
+     gr.Markdown(welcome_message)
      with gr.Row():
          with gr.Column():
-             gr.Markdown(topics) # Show the topics on the left side
+             gr.Markdown(topics)
      with gr.Row():
          with gr.Column():
              question = gr.Textbox(label="Your question", placeholder="Give me your information...")
              answer = gr.Textbox(label="Explorer's Response", placeholder="Explorer will respond here...", interactive=False, lines=10)
              submit_button = gr.Button("Submit")
              submit_button.click(fn=query_model, inputs=question, outputs=answer)
-

- # Launch the Gradio app to allow user interaction
- demo.launch(share=True)
+ demo.launch(share=True)
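
Before relying on the Gradio UI, query_model can be smoke-tested directly with a message in the documented format. The call below is illustrative only; with the keyword matching above it resolves to budget='Moderate' and dietary_restrictions=['Vegan Options?'], while cuisine stays None unless extraction is added.

# Illustrative only: call the pipeline without the UI.
print(query_model(
    "Could you give me an Italian restaurant with vegan options that is moderate budget?"
))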