"""
Intent Recognition Agent

Specialized in understanding user goals using Chain of Thought reasoning.
"""

import json
import logging
import re
import time
from typing import Any, Dict, List, Optional, Tuple

logger = logging.getLogger(__name__)

class IntentRecognitionAgent:
    """Agent specialized in multi-class intent classification with context awareness."""

    def __init__(self, llm_router=None):
        self.llm_router = llm_router
        self.agent_id = "INTENT_REC_001"
        self.specialization = "Multi-class intent classification with context awareness"

        self.intent_categories = [
            "information_request",
            "task_execution",
            "creative_generation",
            "analysis_research",
            "casual_conversation",
            "troubleshooting",
            "education_learning",
            "technical_support",
        ]

    async def execute(self, user_input: str, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
        """Execute intent recognition with Chain of Thought reasoning."""
        try:
            logger.info(f"{self.agent_id} processing user input: {user_input[:100]}...")

            if self.llm_router:
                intent_result = await self._llm_based_intent_recognition(user_input, context)
            else:
                intent_result = await self._rule_based_intent_recognition(user_input, context)

            intent_result.update({
                "agent_id": self.agent_id,
                # Ensure the key exists even if a branch failed to set it.
                "processing_time": intent_result.get("processing_time", 0),
                "confidence_calibration": self._calibrate_confidence(intent_result, user_input),
            })

            logger.info(f"{self.agent_id} completed with intent: {intent_result.get('primary_intent', 'unknown')}")
            return intent_result

        except Exception as e:
            logger.error(f"{self.agent_id} error: {str(e)}")
            return self._get_fallback_intent(user_input, context)

    async def _llm_based_intent_recognition(self, user_input: str, context: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """Use the LLM for sophisticated intent classification with Chain of Thought."""
        try:
            cot_prompt = self._build_chain_of_thought_prompt(user_input, context)

            logger.info(f"{self.agent_id} calling LLM for intent recognition")
            start_time = time.perf_counter()
            llm_response = await self.llm_router.route_inference(
                task_type="intent_classification",
                prompt=cot_prompt,
                max_tokens=1000,
                temperature=0.3,
            )

            if llm_response and isinstance(llm_response, str) and llm_response.strip():
                parsed_result = self._parse_llm_intent_response(llm_response)
                # Report measured wall-clock time rather than a hardcoded estimate.
                parsed_result["processing_time"] = time.perf_counter() - start_time
                parsed_result["method"] = "llm_enhanced"
                return parsed_result

        except Exception as e:
            logger.error(f"{self.agent_id} LLM intent recognition failed: {e}")

        logger.info(f"{self.agent_id} falling back to rule-based classification")
        return await self._rule_based_intent_recognition(user_input, context)

    async def _rule_based_intent_recognition(self, user_input: str, context: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """Rule-based fallback intent classification."""
        primary_intent, confidence = self._analyze_intent_patterns(user_input)
        secondary_intents = self._get_secondary_intents(user_input, primary_intent)

        return {
            "primary_intent": primary_intent,
            "secondary_intents": secondary_intents,
            "confidence_scores": {primary_intent: confidence},
            "reasoning_chain": ["Rule-based pattern matching applied"],
            "context_tags": self._extract_context_tags(user_input, context),
            "processing_time": 0.02,  # nominal estimate; pattern matching is near-instantaneous
        }

    def _build_chain_of_thought_prompt(self, user_input: str, context: Optional[Dict[str, Any]]) -> str:
        """Build a Chain of Thought prompt for intent recognition."""
        context_info = ""
        if context:
            combined_context = context.get('combined_context', '')
            if combined_context:
                context_info = f"\n\nAvailable Context:\n{combined_context[:1000]}..."
            else:
                session_context = context.get('session_context', {})
                session_summary = session_context.get('summary', '') if isinstance(session_context, dict) else ""
                interaction_contexts = context.get('interaction_contexts', [])
                user_context = context.get('user_context', '')

                context_parts = []
                if session_summary:
                    context_parts.append(f"Session Context: {session_summary[:300]}...")
                if user_context:
                    context_parts.append(f"User Context: {user_context[:300]}...")

                if interaction_contexts:
                    recent_contexts = interaction_contexts[-2:]
                    context_parts.append("Recent Interactions:")
                    for idx, ic in enumerate(recent_contexts, 1):
                        summary = ic.get('summary', '')
                        if summary:
                            context_parts.append(f"  {idx}. {summary}")

                if context_parts:
                    context_info = "\n\nAvailable Context:\n" + "\n".join(context_parts)

        if not context_info:
            context_info = "\n\nAvailable Context: No previous context available (first interaction in session)."

        return f"""
Analyze the user's intent step by step:

User Input: "{user_input}"
{context_info}

Step 1: Identify key entities, actions, and questions in the input
Step 2: Map to intent categories: {', '.join(self.intent_categories)}
Step 3: Consider the conversation flow and user's likely goals (if context available)
Step 4: Assign confidence scores (0.0-1.0) for each relevant intent
Step 5: Provide reasoning for the classification

Respond with JSON format containing primary_intent, secondary_intents, confidence_scores, and reasoning_chain.
"""

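    # Illustrative shape of the JSON the prompt above requests (an assumption
    # about well-behaved model output, not a guaranteed format):
    #
    #   {
    #       "primary_intent": "information_request",
    #       "secondary_intents": ["education_learning"],
    #       "confidence_scores": {"information_request": 0.85, "education_learning": 0.4},
    #       "reasoning_chain": ["Step 1: the input opens with a 'what is' question", "..."]
    #   }
    #
    # _parse_llm_intent_response() below extracts exactly this structure from the raw text.
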
    def _analyze_intent_patterns(self, user_input: str) -> Tuple[str, float]:
        """Analyze user input patterns to determine intent."""
        user_input_lower = user_input.lower()

        patterns = {
            "information_request": [
                "what is", "how to", "explain", "tell me about", "what are",
                "define", "meaning of", "information about",
            ],
            "task_execution": [
                "do this", "make a", "create", "build", "generate", "automate",
                "set up", "configure", "execute", "run",
            ],
            "creative_generation": [
                "write a", "compose", "create content", "make a story",
                "generate poem", "creative", "artistic",
            ],
            "analysis_research": [
                "analyze", "research", "compare", "study", "investigate",
                "data analysis", "find patterns", "statistics",
            ],
            "troubleshooting": [
                "error", "problem", "fix", "debug", "not working",
                "help with", "issue", "broken",
            ],
            "technical_support": [
                "how do i", "help me", "guide me", "tutorial", "step by step",
            ],
        }

        for intent, pattern_list in patterns.items():
            for pattern in pattern_list:
                if pattern in user_input_lower:
                    # Longer (multi-word) patterns are more specific, so they earn
                    # higher confidence, capped at 0.9. Scaling by word count keeps
                    # the cap meaningful; scaling by character count saturated it.
                    confidence = min(0.9, 0.6 + (len(pattern.split()) * 0.1))
                    return intent, confidence

        return "casual_conversation", 0.7

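    # Worked example for _analyze_intent_patterns (hypothetical input): the query
    # "explain how transformers work" contains the one-word pattern "explain",
    # so it classifies as information_request with confidence 0.6 + 1 * 0.1 = 0.7.
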
    def _get_secondary_intents(self, user_input: str, primary_intent: str) -> List[str]:
        """Get secondary intents based on input complexity."""
        user_input_lower = user_input.lower()
        secondary = []

        if "research" in user_input_lower and primary_intent != "analysis_research":
            secondary.append("analysis_research")
        if "help" in user_input_lower and primary_intent != "technical_support":
            secondary.append("technical_support")

        return secondary[:2]

    def _extract_context_tags(self, user_input: str, context: Optional[Dict[str, Any]]) -> List[str]:
        """Extract relevant context tags from user input."""
        # Note: `context` is currently unused; tags are derived from the input text alone.
        tags = []
        user_input_lower = user_input.lower()

        if "research" in user_input_lower:
            tags.append("research")
        if "technical" in user_input_lower or "code" in user_input_lower:
            tags.append("technical")
        if "academic" in user_input_lower or "study" in user_input_lower:
            tags.append("academic")
        if "quick" in user_input_lower or "simple" in user_input_lower:
            tags.append("quick_request")

        return tags

    def _calibrate_confidence(self, intent_result: Dict[str, Any], user_input: str = "") -> Dict[str, Any]:
        """Calibrate confidence scores based on various factors."""
        primary_intent = intent_result["primary_intent"]
        confidence = intent_result["confidence_scores"].get(primary_intent, 0.5)

        # The raw user input is passed in explicitly; the intent result does not
        # carry it, so reading it from the dict would always yield an empty string.
        calibration_factors = {
            "input_length_impact": min(1.0, len(user_input) / 100),
            "context_enhancement": 0.1 if intent_result.get('context_tags') else 0.0,
            "reasoning_depth_bonus": 0.05 if len(intent_result.get('reasoning_chain', [])) > 2 else 0.0,
        }

        calibrated_confidence = min(0.95, confidence + sum(calibration_factors.values()))

        return {
            "original_confidence": confidence,
            "calibrated_confidence": calibrated_confidence,
            "calibration_factors": calibration_factors,
        }

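    # Worked example for _calibrate_confidence (hypothetical numbers): a base
    # confidence of 0.7 with a 50-character input (+0.5), one context tag (+0.1),
    # and a single-step reasoning chain (+0.0) calibrates to min(0.95, 1.3) = 0.95.
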
    def _parse_llm_intent_response(self, response: str) -> Dict[str, Any]:
        """Parse the LLM response for intent classification."""
        try:
            # Extract the first JSON object embedded in the response.
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                parsed = json.loads(json_match.group())
                # Only trust the parsed result if it carries the fields downstream code relies on.
                if "primary_intent" in parsed and isinstance(parsed.get("confidence_scores"), dict):
                    return parsed
        except json.JSONDecodeError:
            logger.warning(f"{self.agent_id} Failed to parse LLM intent JSON")

        # Fallback: keyword heuristics over the raw response text.
        response_lower = response.lower()
        primary_intent = "casual_conversation"
        confidence = 0.7

        if any(word in response_lower for word in ['question', 'ask', 'what', 'how', 'why']):
            primary_intent = "information_request"
            confidence = 0.8
        elif any(word in response_lower for word in ['task', 'action', 'do', 'help', 'assist']):
            primary_intent = "task_execution"
            confidence = 0.8
        elif any(word in response_lower for word in ['create', 'generate', 'write', 'make']):
            primary_intent = "creative_generation"
            confidence = 0.8

        return {
            "primary_intent": primary_intent,
            "secondary_intents": [],
            "confidence_scores": {primary_intent: confidence},
            "reasoning_chain": [f"LLM response parsed: {response[:100]}..."],
            "context_tags": ["llm_parsed"],
            "method": "llm_parsed",
        }

    def _get_fallback_intent(self, user_input: str, context: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """Provide a fallback intent when processing fails."""
        return {
            "primary_intent": "casual_conversation",
            "secondary_intents": [],
            "confidence_scores": {"casual_conversation": 0.5},
            "reasoning_chain": ["Fallback: Default to casual conversation"],
            "context_tags": ["fallback"],
            "processing_time": 0.01,
            "agent_id": self.agent_id,
            "error_handled": True,
        }

def create_intent_agent(llm_router=None) -> IntentRecognitionAgent:
    """Factory helper for constructing an IntentRecognitionAgent."""
    return IntentRecognitionAgent(llm_router)
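
if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the module's public API):
    # with no llm_router supplied, execute() takes the rule-based path, so the
    # demo runs standalone without any external model service.
    import asyncio

    async def _demo() -> None:
        agent = create_intent_agent()
        result = await agent.execute("What is retrieval-augmented generation?")
        print(json.dumps(result, indent=2))

    asyncio.run(_demo())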