lokesh341 committed (verified)
Commit: d82403f
Parent(s): 7f928e3

Update app.py

Files changed (1):
  1. app.py +80 -106
app.py CHANGED
@@ -1,45 +1,37 @@
 import os
-import time
-import torch
-from flask import Flask, render_template, request, jsonify
 from simple_salesforce import Salesforce
-from transformers import pipeline
+from flask import Flask, render_template, request, jsonify
+import json
+import time
 from gtts import gTTS
 from pydub import AudioSegment
 from pydub.silence import detect_nonsilent
+from transformers import pipeline
 from transformers import AutoConfig # Import AutoConfig for the config object
 from waitress import serve
 
-# Flask setup
+# Initialize Flask App
 app = Flask(__name__, template_folder="templates")
 app.secret_key = os.urandom(24)
 
-# Check for available GPU
+# Use whisper-small for faster processing and better speed
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Set up Whisper model config
+# Create config object to set timeout and other parameters
 config = AutoConfig.from_pretrained("openai/whisper-small")
 config.update({"timeout": 60}) # Set timeout to 60 seconds
 
-# Salesforce connection
-try:
-    print("Attempting to connect to Salesforce...")
-    sf = Salesforce(username='diggavalli98@gmail.com', password='Sati@1020', security_token='sSSjyhInIsUohKpG8sHzty2q')
-    print("Connected to Salesforce successfully!")
-except Exception as e:
-    print(f"Failed to connect to Salesforce: {str(e)}")
-
-# Function to generate audio prompt using gTTS
+# Function to generate audio prompts
 def generate_audio_prompt(text, filename):
     try:
         tts = gTTS(text)
         tts.save(os.path.join("static", filename))
     except Exception as e:
-        print(f"Error generating audio prompt: {e}")
-        time.sleep(5)
+        print(f"Error: {e}")
+        time.sleep(5) # Wait before retrying
         generate_audio_prompt(text, filename)
 
-# Example prompts for voice interaction
+# Generate required voice prompts
 prompts = {
     "welcome": "Welcome to Biryani Hub.",
     "ask_name": "Tell me your name.",
@@ -47,95 +39,52 @@ prompts = {
     "thank_you": "Thank you for registration."
 }
 
-# Generate audio prompts
 for key, text in prompts.items():
     generate_audio_prompt(text, f"{key}.mp3")
 
-# Function to check if the audio is silent
+# Function to convert audio to WAV format
+def convert_to_wav(input_path, output_path):
+    try:
+        audio = AudioSegment.from_file(input_path)
+        audio = audio.set_frame_rate(16000).set_channels(1) # Convert to 16kHz, mono
+        audio.export(output_path, format="wav")
+    except Exception as e:
+        raise Exception(f"Audio conversion failed: {str(e)}")
+
+# Function to check if audio contains actual speech
 def is_silent_audio(audio_path):
     audio = AudioSegment.from_wav(audio_path)
     nonsilent_parts = detect_nonsilent(audio, min_silence_len=500, silence_thresh=audio.dBFS-16)
     return len(nonsilent_parts) == 0
 
-# Function to fetch menu items from Salesforce
-def get_menu_items():
-    try:
-        # Salesforce query to fetch all menu items
-        query = """
-        SELECT Name, Price__c, Ingredients__c, Category__c
-        FROM Menu_Item__c
-        """
-        result = sf.query(query)
-        menu_items = []
-        for item in result["records"]:
-            menu_items.append({
-                "name": item["Name"],
-                "price": item["Price__c"],
-                "ingredients": item["Ingredients__c"],
-                "category": item["Category__c"]
-            })
-        return menu_items
-    except Exception as e:
-        print(f"Error fetching menu items: {str(e)}")
-        return []
-
-# Function to check if the customer exists in Salesforce (login check)
-def get_customer_login(name, email, phone_number):
-    try:
-        # Salesforce query to fetch customer based on Name, Email, and Phone Number
-        query = f"""
-        SELECT Id, Name, Email__c, Phone_Number__c
-        FROM Customer_Login__c
-        WHERE Name = '{name}'
-        AND Email__c = '{email}'
-        AND Phone_Number__c = '{phone_number}'
-        """
-        result = sf.query(query)
-        if result["records"]:
-            customer = result["records"][0]
-            return {
-                "id": customer["Id"],
-                "name": customer["Name"],
-                "email": customer["Email__c"],
-                "phone": customer["Phone_Number__c"]
-            }
-        else:
-            return None
-    except Exception as e:
-        print(f"Error fetching customer login details: {str(e)}")
-        return None
+# Salesforce connection details (hardcoded)
+username = 'diggavalli98@gmail.com'
+password = 'Sati@1020'
+security_token = 'sSSjyhInIsUohKpG8sHzty2q'
 
-# Function to create a new customer login in Salesforce
-def create_customer_login(name, email, phone):
-    try:
-        # Create a new customer login record in Salesforce
-        customer_login = sf.Customer_Login__c.create({
-            'Name': name,
-            'Email__c': email,
-            'Phone_Number__c': phone
-        })
-        return customer_login
-    except Exception as e:
-        print(f"Error creating customer login: {str(e)}")
-        return None
+try:
+    print("Attempting to connect to Salesforce...")
+    sf = Salesforce(username=username, password=password, security_token=security_token)
+    print("Connected to Salesforce successfully!")
+except Exception as e:
+    print(f"Failed to connect to Salesforce: {str(e)}")
 
-# Home Route (loads index.html)
+# ✅ HOME ROUTE (Loads `index.html`)
 @app.route("/", methods=["GET"])
 def index():
     return render_template("index.html")
 
-# Dashboard Route
+# ✅ DASHBOARD ROUTE
 @app.route("/dashboard", methods=["GET"])
 def dashboard():
     return render_template("dashboard.html")
 
-# Menu Page Route
+# ✅ MENU PAGE ROUTE
 @app.route("/menu_page", methods=["GET"])
 def menu_page():
-    menu_items = get_menu_items()
-    return render_template("menu_page.html", menu=menu_items)
+    return render_template("menu_page.html")
 
-# Login API
+# ✅ LOGIN API
 @app.route('/login', methods=['POST'])
 def login():
     data = request.json
@@ -146,14 +95,17 @@ def login():
     if not name or not email or not phone_number:
         return jsonify({'error': 'Missing required fields'}), 400
 
-    # Check if the customer exists in Salesforce
-    customer = get_customer_login(name, email, phone_number)
-    if customer:
-        return jsonify({'success': True, 'customer': customer}), 200
-    else:
-        return jsonify({'error': 'Customer not found'}), 404
+    try:
+        customer_login = sf.Customer_Login__c.create({
+            'Name': name,
+            'Email__c': email,
+            'Phone_Number__c': phone_number
+        })
+        return jsonify({'success': True, 'id': customer_login['id']}), 200
+    except Exception as e:
+        return jsonify({'error': f'Failed to create record in Salesforce: {str(e)}'}), 500
 
-# Register API (Create customer login)
+# ✅ REGISTER API
 @app.route("/submit", methods=["POST"])
 def submit():
     data = request.json
@@ -164,14 +116,17 @@ def submit():
     if not name or not email or not phone:
         return jsonify({'error': 'Missing data'}), 400
 
-    # Create customer login record in Salesforce
-    customer_login = create_customer_login(name, email, phone)
-    if customer_login:
+    try:
+        customer_login = sf.Customer_Login__c.create({
+            'Name': name,
+            'Email__c': email,
+            'Phone_Number__c': phone
+        })
         return jsonify({'success': True}), 200
-    else:
-        return jsonify({'error': 'Failed to create customer record'}), 500
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
 
-# Transcribe Audio API
+# ✅ TRANSCRIBE AUDIO API
 @app.route("/transcribe", methods=["POST"])
 def transcribe():
     if "audio" not in request.files:
@@ -183,18 +138,15 @@ def transcribe():
     audio_file.save(input_audio_path)
 
     try:
-        # Convert the audio to WAV format and check if it contains speech
         convert_to_wav(input_audio_path, output_audio_path)
         if is_silent_audio(output_audio_path):
             return jsonify({"error": "No speech detected. Please try again."}), 400
 
-        # Transcribe the audio using Whisper model
         asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1, config=config)
         result = asr_pipeline(output_audio_path)
 
         transcribed_text = result["text"].strip().capitalize()
 
-        # Extract details from transcribed text (Assumed format: Name Email Phone)
         parts = transcribed_text.split()
         name = parts[0] if len(parts) > 0 else "Unknown Name"
         email = parts[1] if '@' in parts[1] else "unknown@domain.com"
@@ -203,7 +155,6 @@ def transcribe():
         confirmation = f"Is this correct? Name: {name}, Email: {email}, Phone: {phone_number}"
         generate_audio_prompt(confirmation, "confirmation.mp3")
 
-        # Create a customer login record
        salesforce_response = sf.Customer_Login__c.create({
             'Name': name,
             'Email__c': email,
@@ -215,7 +166,30 @@ def transcribe():
     except Exception as e:
         return jsonify({"error": f"Speech recognition error: {str(e)}"}), 500
 
-# Start the Flask server
+# ✅ MENU API
+@app.route("/menu", methods=["GET"])
+def get_menu():
+    try:
+        # Fetch menu items from Salesforce
+        query = "SELECT Name, Price__c, Ingredients__c, Category__c FROM Menu_Item__c"
+        result = sf.query(query)
+
+        menu_items = []
+        for item in result["records"]:
+            menu_items.append({
+                "name": item["Name"],
+                "price": item["Price__c"],
+                "ingredients": item["Ingredients__c"],
+                "category": item["Category__c"]
+            })
+
+        # Pass the menu items to the template
+        return render_template("menu_page.html", menu=menu_items)
+
+    except Exception as e:
+        return jsonify({"error": f"Failed to fetch menu: {str(e)}"}), 500
+
+# ✅ START PRODUCTION SERVER
 if __name__ == "__main__":
-    print("Starting Flask API Server on port 7860...")
+    print("✅ Starting Flask API Server on port 7860...")
     serve(app, host="0.0.0.0", port=7860)
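
A quick way to review the new behaviour is to hit the JSON endpoints against a locally running copy of this Space (the diff serves the app on port 7860 via waitress). The sketch below is a minimal smoke test, not part of the commit: the base URL is assumed, and the JSON keys (name, email, phone_number for /login; name, email, phone for /submit) are inferred from the validation checks visible in the hunks, since the lines that read them from request.json fall outside the changed ranges.

import requests

BASE_URL = "http://localhost:7860"  # assumed local deployment of this Space

# /login now creates a Customer_Login__c record directly instead of looking one up
resp = requests.post(f"{BASE_URL}/login", json={
    "name": "Test User",          # keys inferred from the validation checks in the diff
    "email": "test@example.com",
    "phone_number": "9999999999",
})
print(resp.status_code, resp.json())

# /submit creates the same kind of record but reports only success or failure
resp = requests.post(f"{BASE_URL}/submit", json={
    "name": "Test User",
    "email": "test@example.com",
    "phone": "9999999999",
})
print(resp.status_code, resp.json())

# /menu renders menu_page.html with items queried from Menu_Item__c,
# so check the content type rather than parsing JSON
resp = requests.get(f"{BASE_URL}/menu")
print(resp.status_code, resp.headers.get("Content-Type"))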
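
The /transcribe route expects a multipart upload under the form field "audio" (that name is visible in the diff); on success it transcribes the file with whisper-small and writes a Customer_Login__c record from the recognised text. A minimal sketch, assuming a short local recording; the file path and MIME type are illustrative, not part of the commit.

import requests

# Hypothetical local recording; any short speech clip that pydub can read should work.
AUDIO_PATH = "sample_recording.wav"

with open(AUDIO_PATH, "rb") as f:
    resp = requests.post(
        "http://localhost:7860/transcribe",             # assumed local deployment
        files={"audio": (AUDIO_PATH, f, "audio/wav")},  # field name "audio" comes from the diff
    )
print(resp.status_code, resp.json())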