import json
# Remove direct model/util imports if calling analyze_text
# from model_setup import zero_shot, ner
# from utils import parse_entities
# from config import CATEGORY_KEYWORDS
# Import the centralized analysis function
from nlp_service import analyze_text
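
# Lambda entry point: parses the JSON "body" from the incoming event, delegates
# analysis to analyze_text from nlp_service, and maps the returned status
# ("success", "fallback_required", "failed") to an HTTP response.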
def lambda_handler(event, context):
    # ... (Keep body parsing logic) ...
    body_str = event.get("body", "{}")
    try:
        body = json.loads(body_str)
    except json.JSONDecodeError:
        print(f"Error decoding JSON body: {body_str}")
        return {
            "statusCode": 400,
            "body": json.dumps({"error": "Invalid JSON in request body"})
        }

    text = body.get("text", "")
    if not text:
        return {
            "statusCode": 400,
            "body": json.dumps({"error": "Missing 'text' field in request body"})
        }

    print(f"Processing text via nlp_service: {text}")  # Log input

    # Call the centralized NLP service function
    try:
        analysis_result = analyze_text(text)
        status = analysis_result.get("status")

        if status == "failed":
            print(f"NLP analysis failed: {analysis_result.get('message')}")
            # Return 400 for input errors, 500 for internal NLP errors?
            # Let's return 400 if it's a known failure from analyze_text
            return {
                "statusCode": 400,
                "body": json.dumps(analysis_result)
            }
        elif status == "fallback_required":
            print(f"NLP analysis requires fallback: {analysis_result.get('message')}")
            # Return 200 but indicate fallback needed
            return {
                "statusCode": 200,
                "body": json.dumps(analysis_result)
            }
        elif status == "success":
            print(f"NLP analysis successful: {analysis_result}")
            # Return the successful analysis result
            return {
                "statusCode": 200,
                "body": json.dumps(analysis_result)  # Already contains status
            }
        else:
            # Should not happen if analyze_text always returns a status
            print(f"Error: Unknown status from analyze_text: {status}")
            return {
                "statusCode": 500,
                "body": json.dumps({"error": "Internal server error: Unexpected NLP response"})
            }
    except Exception as e:
        print(f"Error calling analyze_text from handler: {e}")
        import traceback
        traceback.print_exc()
        return {
            "statusCode": 500,
            "body": json.dumps({"error": "Internal server error during NLP processing", "details": str(e)})
        }
# Example event structure (for local testing if needed)
if __name__ == '__main__':
    # ... (Keep example test cases, they should still work) ...
    example_event = {
        "body": json.dumps({
            "text": "spent 5 eur on coffee"
        })
    }
    context = {}
    response = lambda_handler(example_event, context)
    print("\n--- Lambda Response ---")
    # The body is already a JSON string containing the result from analyze_text
    print(json.dumps(json.loads(response['body']), indent=2))

    example_event_query = {
        "body": json.dumps({
            "text": "how much did I spend last month"
        })
    }
    response_query = lambda_handler(example_event_query, context)
    print("\n--- Lambda Response (Query) ---")
    print(json.dumps(json.loads(response_query['body']), indent=2))

    example_event_income = {
        "body": json.dumps({
            "text": "salary credited 50000"
        })
    }
    response_income = lambda_handler(example_event_income, context)
    print("\n--- Lambda Response (Income) ---")
    print(json.dumps(json.loads(response_income['body']), indent=2))
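
    # Illustrative extra check (not in the original test set): an event whose
    # "text" field is empty should exercise the 400 error path of lambda_handler.
    example_event_missing_text = {
        "body": json.dumps({
            "text": ""
        })
    }
    response_missing = lambda_handler(example_event_missing_text, context)
    print("\n--- Lambda Response (Missing text) ---")
    print(json.dumps(json.loads(response_missing['body']), indent=2))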