Update routes/input_handler.py
Browse files — routes/input_handler.py (+2 −2)
routes/input_handler.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
from fastapi import APIRouter, status, HTTPException
|
2 |
from models.input import Input
|
3 |
from routes import search_products, purchase, order_management, account_management, customer_support
|
4 |
-
from services.nlp import recognize_intent, generate_response,
|
5 |
from services.utils import extract_order_id_from_query, generate_image_embedding, generate_text_embedding
|
6 |
|
7 |
|
@@ -39,7 +39,7 @@ def query_processing(input: Input):
|
|
39 |
if input.files:
|
40 |
for file in input.files:
|
41 |
if file.endswith(audio_extensions):
|
42 |
-
text_from_audio =
|
43 |
print(f'Transcription: {text_from_audio}')
|
44 |
# history_openai_format.append({"role": "user", "content": message})
|
45 |
elif file.endswith(image_extensions):
|
|
|
1 |
from fastapi import APIRouter, status, HTTPException
|
2 |
from models.input import Input
|
3 |
from routes import search_products, purchase, order_management, account_management, customer_support
|
4 |
+
from services.nlp import recognize_intent, generate_response, transcribe
|
5 |
from services.utils import extract_order_id_from_query, generate_image_embedding, generate_text_embedding
|
6 |
|
7 |
|
|
|
39 |
if input.files:
|
40 |
for file in input.files:
|
41 |
if file.endswith(audio_extensions):
|
42 |
+
text_from_audio = transcribe(file)
|
43 |
print(f'Transcription: {text_from_audio}')
|
44 |
# history_openai_format.append({"role": "user", "content": message})
|
45 |
elif file.endswith(image_extensions):
|