| """
|
| SocialAgent - AI agent behavior for ad reaction simulation.
|
|
|
| No longer a Ray actor — agents are plain Python objects.
|
| LLM calls go through a shared QwenLLM actor pool (Ray-backed, Ollama API).
|
| This follows AgentSociety's pattern where agents are regular objects
|
| and only LLM calls are distributed via Ray actors.
|
| """
|
| import random
|
| import logging
|
| import json
|
| import re
|
| from typing import List, Dict, Any, Optional
|
|
|
| logger = logging.getLogger(__name__)
|
|
|
|
|
class SocialAgent:
    """
    Simulated person reacting to advertisements.

    Implements:
    - Profile & Memory
    - Emotional state
    - Social interactions
    - Mind-Behavior Coupling for decision making

    NOTE: No @ray.remote — this is a plain object. LLM calls are dispatched
    through the QwenLLM actor pool passed in via `llm_pool`.
    """

    # Closed vocabularies the reaction prompt asks the LLM to choose from;
    # anything outside these sets is normalized during parsing.
    VALID_OPINIONS = ("POSITIVE", "NEUTRAL", "NEGATIVE")
    VALID_EMOTIONS = ("HAPPY", "ANGRY", "SAD", "NEUTRAL")

    def __init__(
        self,
        agent_id: str,
        profile: Dict[str, Any],
        experiment_id: str,
        llm_pool=None,
        friends: Optional[List[str]] = None,
        memory_store=None,
    ):
        """
        Initialize agent with profile.

        Args:
            agent_id: Unique identifier
            profile: Demographics and values dict
            experiment_id: Experiment this agent belongs to
            llm_pool: QwenLLM actor pool for LLM calls
            friends: List of friend agent IDs
            memory_store: Optional AgentMemoryStore instance (shared)
        """
        self.agent_id = agent_id
        self.profile = profile
        self.experiment_id = experiment_id
        self.llm_pool = llm_pool
        self.friends = friends or []

        # Reaction state, populated by perceive_ad()/social_deliberation().
        self.emotion = "neutral"
        self.emotion_intensity = 0.0
        self.opinion_on_ad: Optional[str] = None
        self.has_seen_ad = False
        self.reasoning = ""
        self.inbox: List[Dict] = []
        self.opinion_history: List[Dict] = []
        self.day: int = 0

        # Local audit trail of notable actions (see _log_event).
        self.event_log: List[Dict[str, Any]] = []

        # Long-term memory is strictly best-effort: a failing store must
        # never prevent the agent from being created.
        self.memory = None
        if memory_store:
            try:
                memory_store.create_agent_profile(agent_id, profile)
                self.memory = memory_store
            except Exception as e:
                logger.debug(f"Agent {agent_id}: Memory init skipped: {e}")

        logger.debug(
            f"Agent {agent_id} initialized: "
            f"{profile.get('age')}yo {profile.get('gender')} "
            f"from {profile.get('location')}"
        )

    async def perceive_ad(self, ad_content: str) -> Dict[str, Any]:
        """
        Main decision-making: React to advertisement content (async).

        Implements Mind-Behavior Coupling:
        1. Retrieve relevant memories (RAG)
        2. Generate emotional response via LLM
        3. Form opinion
        4. Take action

        Args:
            ad_content: Raw advertisement text shown to the agent.

        Returns:
            The agent's state dict (see get_state()).
        """
        self.has_seen_ad = True

        # Step 1: pull relevant past experiences (best-effort RAG context).
        memory_context = []
        if self.memory:
            try:
                logger.info(f"[{self.agent_id}] Querying memory for context...")
                memory_context = self.memory.query_relevant_context(
                    self.agent_id, ad_content, n_results=3
                )
                logger.info(f"[{self.agent_id}] Memory returned {len(memory_context)} items")
            except Exception as me:
                logger.warning(f"[{self.agent_id}] Memory query failed: {me}")

        prompt = self._build_reaction_prompt(ad_content, memory_context)

        # Step 2: ask the LLM for an emotional/opinion reaction.
        try:
            if self.llm_pool:
                import time as _time
                _t0 = _time.time()
                logger.info(f"[{self.agent_id}] Sending LLM request...")
                response = await self.llm_pool.atext_request(
                    prompt=prompt, max_tokens=500
                )
                _elapsed = _time.time() - _t0
                logger.info(f"[{self.agent_id}] LLM responded in {_elapsed:.1f}s ({len(response)} chars)")
            else:
                logger.warning(f"[{self.agent_id}] No LLM pool available!")
                response = ""

            parsed = self._parse_llm_response(response)
            logger.info(f"[{self.agent_id}] Result: {parsed['emotion']} / {parsed['opinion']} (intensity: {parsed['intensity']})")

            # Step 3: adopt the parsed reaction as current state.
            self.opinion_on_ad = parsed["opinion"]
            self.emotion = parsed["emotion"]
            self.emotion_intensity = parsed["intensity"]
            self.reasoning = parsed["reasoning"]

            # Step 4: behavior. NOTE(review): a previous version rolled
            # random() < _get_sharing_probability() here, but BOTH branches
            # logged the same ENDORSEMENT event, so the roll was a no-op;
            # collapsed to a single deterministic log.
            if parsed["opinion"] == "NEGATIVE":
                self._log_event("BOYCOTT", parsed["reasoning"])
            elif parsed["opinion"] == "POSITIVE":
                self._log_event("ENDORSEMENT", parsed["reasoning"])
            else:
                self._log_event("IGNORE", "No strong reaction")

            # Persist the reaction as a new experience (best-effort).
            if self.memory:
                try:
                    self.memory.add_experience(
                        self.agent_id,
                        f"Reacted {parsed['opinion']} to ad: {parsed['reasoning'][:100]}",
                        experience_type=parsed["opinion"].lower(),
                    )
                except Exception as e:
                    logger.debug(f"[{self.agent_id}] Failed to store experience: {e}")

        except Exception as e:
            # Fail safe: a broken LLM call leaves the agent neutral rather
            # than crashing the simulation.
            logger.error(f"Agent {self.agent_id} failed to process ad: {e}")
            self.opinion_on_ad = "NEUTRAL"
            self._log_event("ERROR", str(e))

        # Record today's opinion (self.day, not a hardcoded 0, so repeated
        # exposures on later days are attributed correctly).
        self.opinion_history.append({"day": self.day, "opinion": self.opinion_on_ad})

        return self.get_state()

    def _build_reaction_prompt(
        self, ad_content: str, memory_context: List[str]
    ) -> str:
        """Build the roleplay prompt combining profile, memories, and the ad."""
        values = self.profile.get("values", [])
        values_str = ", ".join(values) if values else "Not specified"

        memory_str = (
            "\n".join(memory_context) if memory_context else "No past experiences"
        )

        bio = self.profile.get('bio', '')
        name = self.profile.get('name', 'a person')
        personality_str = ", ".join(self.profile.get('personality_traits', [])) if self.profile.get('personality_traits') else 'Not specified'

        return f"""You are roleplaying as {name}, a real person with this profile:
- Age: {self.profile.get('age', 'Unknown')}, Gender: {self.profile.get('gender', 'Unknown')}
- Location: {self.profile.get('location', 'Unknown')}, Sri Lanka
- Occupation: {self.profile.get('occupation', 'Not specified')}
- Education: {self.profile.get('education', 'Not specified')}
- Income Level: {self.profile.get('income_level', 'Not specified')}
- Religion: {self.profile.get('religion', 'Not specified')}
- Ethnicity: {self.profile.get('ethnicity', 'Not specified')}
- Political Leaning: {self.profile.get('political_leaning', 'Not specified')}
- Social Media Usage: {self.profile.get('social_media_usage', 'Not specified')}
- Core Values: {values_str}
- Personality: {personality_str}
- Background: {bio if bio else 'No specific background provided'}

Your past experiences relevant to this ad:
{memory_str}

You just saw this advertisement:
{ad_content}

React authentically as {name}. Consider your background,
values, religion, and social context when forming your opinion.
Strong reactions should reflect genuine conflicts or alignments
with your identity and values.

Analyze your reaction as this person:
1. How does this ad make you FEEL? (Choose one: HAPPY, ANGRY, SAD, NEUTRAL)
2. What is your OPINION? (Choose one: POSITIVE, NEUTRAL, NEGATIVE)
3. WHY do you feel this way? (2-3 sentences from YOUR perspective as this person)

You MUST respond in this JSON format:
{{"emotion": "ANGRY", "opinion": "NEGATIVE", "reasoning": "This ad shows something I disagree with because..."}}"""

    def _parse_llm_response(self, response: str) -> Dict[str, Any]:
        """Extract structured reaction data from the LLM reply.

        Returns a dict with keys: emotion (lowercase), opinion (one of
        VALID_OPINIONS), reasoning, intensity. Falls back to a neutral
        default when the reply is empty or unparseable.
        """
        default = {
            "emotion": "neutral",
            "opinion": "NEUTRAL",
            "reasoning": "",
            "intensity": 0.3,
        }

        if not response:
            return default

        try:
            # Grab the first flat JSON object; models often wrap it in
            # prose or markdown fences.
            json_match = re.search(r"\{[^{}]*\}", response, re.DOTALL)
            if json_match:
                data = json.loads(json_match.group())

                emotion = data.get("emotion", "NEUTRAL").upper()
                opinion = data.get("opinion", "NEUTRAL").upper()

                # Normalize out-of-vocabulary labels (emotion was previously
                # unvalidated, letting arbitrary model output leak into state).
                if opinion not in self.VALID_OPINIONS:
                    opinion = "NEUTRAL"
                if emotion not in self.VALID_EMOTIONS:
                    emotion = "NEUTRAL"

                # Polarized opinions count as strong reactions.
                intensity = 0.8 if opinion in ("POSITIVE", "NEGATIVE") else 0.3

                return {
                    "emotion": emotion.lower(),
                    "opinion": opinion,
                    "reasoning": data.get("reasoning", ""),
                    "intensity": intensity,
                }
        except Exception as e:
            logger.warning(f"Failed to parse LLM response: {e}")

        return default

    def receive_peer_message(self, from_agent_id: str, opinion: str, message: str):
        """Receive a message from a peer and store it in inbox."""
        self.inbox.append({
            "from": from_agent_id,
            "opinion": opinion,
            "message": message
        })

    async def generate_social_message(self) -> str:
        """Generate a short in-character message about the ad for a friend.

        Returns an empty string if the agent has not formed an opinion yet;
        falls back to a canned sentence when no LLM pool is available or the
        request fails.
        """
        if self.opinion_on_ad is None:
            return ""

        fallback = f"I thought the ad was {self.opinion_on_ad.lower()}, what did you think?"

        prompt = f"""You are Roleplaying as this person:
- Age: {self.profile.get('age', 'Unknown')}
- Gender: {self.profile.get('gender', 'Unknown')}
- Location: {self.profile.get('location', 'Unknown')}
- Core values: {", ".join(self.profile.get('values', []))}

Your current opinion on the ad is {self.opinion_on_ad}.
Write 1-2 casual sentences that you would send to a friend about the ad, in character. Do not include quotes."""

        try:
            if self.llm_pool:
                response = await self.llm_pool.atext_request(prompt=prompt, max_tokens=100)
                if response:
                    # Strip stray wrapping quotes/whitespace from the model.
                    return response.strip(' "')
        except Exception as e:
            logger.debug(f"[{self.agent_id}] Social message generation failed: {e}")

        return fallback

    async def social_deliberation(self, ad_content: str) -> Dict[str, Any]:
        """Review inbox messages from friends and reconsider opinion.

        Advances the agent's day counter, records the (possibly updated)
        opinion in opinion_history, and clears the inbox.

        Returns:
            The agent's state dict (see get_state()).
        """
        # No peer input: nothing to deliberate on, state is unchanged.
        if not self.inbox:
            return self.get_state()

        friends_summary = "\n".join(
            f"- A friend thinks {msg['opinion']}: {msg['message']}"
            for msg in self.inbox
        )

        prompt = f"""You are Roleplaying as this person:
- Age: {self.profile.get('age', 'Unknown')}
- Gender: {self.profile.get('gender', 'Unknown')}
- Location: {self.profile.get('location', 'Unknown')}
- Core values: {", ".join(self.profile.get('values', []))}

Your current opinion on the ad is {self.opinion_on_ad}.

Here is a summary of what your friends said:
{friends_summary}

Given your values and what your friends think, do you change your opinion?
Respond in JSON format:
{{"new_opinion": "POSITIVE/NEUTRAL/NEGATIVE", "changed": true/false, "reasoning": "..."}}"""

        try:
            if self.llm_pool:
                response = await self.llm_pool.atext_request(prompt=prompt, max_tokens=200)
                json_match = re.search(r"\{[^{}]*\}", response, re.DOTALL)

                if json_match:
                    data = json.loads(json_match.group())
                    new_opinion = data.get("new_opinion", self.opinion_on_ad).upper()

                    # Reject out-of-vocabulary answers; keep current opinion.
                    if new_opinion not in self.VALID_OPINIONS:
                        new_opinion = self.opinion_on_ad

                    if new_opinion != self.opinion_on_ad:
                        self._log_event("OPINION_CHANGE", data.get("reasoning", "Influenced by friends"))
                        self.opinion_on_ad = new_opinion

        except Exception as e:
            logger.warning(f"[{self.agent_id}] Failed social deliberation: {e}")

        self.opinion_history.append({"day": self.day, "opinion": self.opinion_on_ad})
        self.inbox.clear()
        self.day += 1

        return self.get_state()

    def _get_sharing_probability(self) -> float:
        """Return the likelihood of sharing, boosted for younger agents.

        Base 0.3, +0.2 under 25, +0.1 under 35, capped at 0.6.
        """
        base_prob = 0.3
        age = self.profile.get("age", 35)
        if age < 25:
            base_prob += 0.2
        elif age < 35:
            base_prob += 0.1
        return min(base_prob, 0.6)

    def _log_event(self, event_type: str, details: str):
        """Append an event (with current opinion/emotion) to the local log."""
        self.event_log.append(
            {
                "agent_id": self.agent_id,
                "event_type": event_type,
                "details": details,
                "opinion": self.opinion_on_ad,
                "emotion": self.emotion,
            }
        )

    def get_state(self) -> Dict[str, Any]:
        """Get current agent state as a plain serializable dict."""
        return {
            "agent_id": self.agent_id,
            "opinion": self.opinion_on_ad,
            "emotion": self.emotion,
            "emotion_intensity": self.emotion_intensity,
            "reasoning": self.reasoning,
            "has_seen_ad": self.has_seen_ad,
            "profile": self.profile,
            "opinion_history": self.opinion_history,
            "day": self.day,
        }

    def get_event_log(self) -> List[Dict[str, Any]]:
        """Get all logged events"""
        return self.event_log
|
|
|