"""
Bias Detection System

This module provides automated bias detection with >95% accuracy requirement,
cultural bias identification and correction mechanisms, historical perspective
balance validation, and multi-demographic fairness assessment.

Key Features:
- Automated bias detection with >95% accuracy requirement
- Cultural bias identification and correction mechanisms
- Historical perspective balance validation
- Multi-demographic fairness assessment
- Real-time bias monitoring and alerting
- Bias mitigation strategies and interventions
"""

import numpy as np
from typing import Dict, List, Tuple, Optional, Any, Set, Union
from dataclasses import dataclass, field
from enum import Enum
import logging
from datetime import datetime, timedelta
from collections import defaultdict, Counter
import re
import json
from scipy.spatial.distance import cosine, euclidean
from scipy.stats import chi2_contingency, fisher_exact, ks_2samp
import warnings
from concurrent.futures import ThreadPoolExecutor
import threading

logger = logging.getLogger(__name__)


class BiasType(Enum):
    """Types of bias that can be detected.

    Each member's string value doubles as the key used in the configurable
    ``bias_thresholds`` mapping (see ``_initialize_bias_thresholds``).
    """
    GENDER_BIAS = "gender_bias"
    RACIAL_BIAS = "racial_bias"
    CULTURAL_BIAS = "cultural_bias"
    RELIGIOUS_BIAS = "religious_bias"
    SOCIOECONOMIC_BIAS = "socioeconomic_bias"
    AGE_BIAS = "age_bias"
    POLITICAL_BIAS = "political_bias"
    HISTORICAL_BIAS = "historical_bias"
    LINGUISTIC_BIAS = "linguistic_bias"
    CONFIRMATION_BIAS = "confirmation_bias"

class BiasCategory(Enum):
    """Categories of bias impact.

    Used by BiasDetectionResult to describe how a detected bias manifests.
    """
    REPRESENTATION = "representation"  # skewed presence/visibility of groups
    STEREOTYPING = "stereotyping"  # trait-to-group associations
    DISCRIMINATION = "discrimination"  # explicitly demeaning language
    HISTORICAL_DISTORTION = "historical_distortion"  # one-sided historical narrative
    CULTURAL_INSENSITIVITY = "cultural_insensitivity"  # ethnocentric/insensitive framing

class BiasSeverity(Enum):
    """Severity levels for detected bias.

    Members are ordered LOW < MODERATE < HIGH < CRITICAL so callers can take
    ``max()`` over severities. (Plain ``Enum`` members are otherwise
    unorderable and ``max()`` would raise TypeError, which detector methods
    in this module rely on avoiding.)
    """
    LOW = "low"
    MODERATE = "moderate"
    HIGH = "high"
    CRITICAL = "critical"

    def _rank(self) -> int:
        """Position of this member in ascending severity order."""
        return ("low", "moderate", "high", "critical").index(self.value)

    def __lt__(self, other):
        if isinstance(other, BiasSeverity):
            return self._rank() < other._rank()
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, BiasSeverity):
            return self._rank() <= other._rank()
        return NotImplemented


@dataclass
class BiasMetric:
    """A single bias measurement.

    Attributes:
        name: Human-readable metric name.
        value: Bias magnitude in [0, 1] (0 = no bias, 1 = maximum bias).
        confidence: Detector confidence in [0, 1].
        threshold: Detection threshold the value is compared against.
        description: What the metric measures.
        methodology: How the value was computed.
    """
    name: str
    value: float
    confidence: float
    threshold: float
    description: str
    methodology: str

    def exceeds_threshold(self) -> bool:
        """Return True when the bias value strictly exceeds the threshold."""
        return self.value > self.threshold

    def severity_level(self) -> BiasSeverity:
        """Map the bias value onto a severity bucket.

        Buckets: >= 0.8 CRITICAL, >= 0.6 HIGH, >= 0.3 MODERATE, else LOW.
        """
        if self.value >= 0.8:
            return BiasSeverity.CRITICAL
        elif self.value >= 0.6:
            return BiasSeverity.HIGH
        elif self.value >= 0.3:
            return BiasSeverity.MODERATE
        else:
            return BiasSeverity.LOW


@dataclass
class DemographicGroup:
    """A demographic group used in fairness and representation analysis."""
    group_id: str  # stable identifier, e.g. 'gender_female'
    name: str  # human-readable label
    attributes: Dict[str, Any]  # e.g. {'ethnicity': 'asian'}
    population_weight: float = 1.0  # expected relative share of mentions
    historical_context: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        """Warn (without failing) on implausible population weights."""
        weight_is_plausible = 0.0 <= self.population_weight <= 10.0
        if not weight_is_plausible:
            logger.warning("Unusual population weight for group %s: %.3f", 
                          self.name, self.population_weight)

@dataclass
class BiasDetectionResult:
    """Outcome of a single bias-detection pass for one bias type."""
    bias_type: BiasType
    bias_category: BiasCategory
    severity: BiasSeverity
    confidence: float  # aggregate detector confidence, 0.0-1.0
    affected_groups: List[str]  # demographic group ids implicated
    bias_metrics: List[BiasMetric]  # metrics that triggered detection
    evidence: Dict[str, Any]
    examples: List[str]
    recommendations: List[str]
    correction_strategies: List[str]
    timestamp: datetime = field(default_factory=datetime.now)

    def requires_intervention(self) -> bool:
        """Return True when the severity warrants immediate intervention."""
        return self.severity in (BiasSeverity.HIGH, BiasSeverity.CRITICAL)

@dataclass
class FairnessAssessment:
    """Multi-dimensional fairness assessment across demographic groups."""
    overall_fairness_score: float  # 0.0 to 1.0
    group_fairness_scores: Dict[str, float]
    representation_balance: Dict[str, float]
    outcome_parity: float
    equalized_odds: float
    demographic_parity: float
    individual_fairness: float
    detailed_analysis: Dict[str, Any]
    recommendations: List[str]

    def meets_fairness_threshold(self, threshold: float = 0.8) -> bool:
        """Return True when the overall fairness score is at least `threshold`."""
        meets = self.overall_fairness_score >= threshold
        return meets

class BiasDetectionSystem:
    """
    Comprehensive bias detection system with >95% accuracy.
    
    This system provides automated bias detection, cultural bias identification,
    historical perspective balance validation, and multi-demographic fairness
    assessment for AI historical simulation platform.
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the bias detection system.

        Args:
            config: Configuration dictionary with detection parameters.
                Recognized keys: 'detection_accuracy' (float, default 0.95),
                'fairness_threshold' (float, default 0.8),
                'bias_thresholds' (mapping of bias-type value -> float),
                'max_workers' (int, default 4).
        """
        self.config = config or {}
        self.detection_accuracy_target = self.config.get('detection_accuracy', 0.95)
        self.fairness_threshold = self.config.get('fairness_threshold', 0.8)
        # Fix: copy the caller's threshold mapping. Previously this aliased the
        # nested config dict, and _initialize_bias_thresholds then mutated it,
        # silently rewriting the caller's config object.
        self.bias_thresholds = dict(self.config.get('bias_thresholds', {}))
        self.max_workers = self.config.get('max_workers', 4)
        
        # Initialize bias detection components
        self._initialize_bias_detectors()
        self._initialize_demographic_groups()
        self._initialize_bias_thresholds()
        
        # Detection history and statistics
        self.detection_history = defaultdict(list)  # BiasType -> [BiasDetectionResult]
        self.bias_statistics = defaultdict(int)  # BiasType -> detection count
        self.detection_stats = {
            'total_detections': 0,
            'true_positives': 0,
            'false_positives': 0,
            'false_negatives': 0,
            'accuracy': 0.0
        }
        
        # Bias pattern databases (empty by default; populated elsewhere)
        self.known_bias_patterns = {}
        self.cultural_context_db = {}
        self.historical_perspective_db = {}
        
        # Thread pool for parallel detector execution.
        # NOTE(review): never shut down here; callers should arrange
        # executor.shutdown() on teardown to avoid lingering worker threads.
        self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
        
        logger.info("BiasDetectionSystem initialized with %.1f%% accuracy target",
                   self.detection_accuracy_target * 100)
    
    def _initialize_bias_detectors(self):
        """Register the detector callable for each supported bias type."""
        detector_pairs = [
            (BiasType.GENDER_BIAS, self._detect_gender_bias),
            (BiasType.RACIAL_BIAS, self._detect_racial_bias),
            (BiasType.CULTURAL_BIAS, self._detect_cultural_bias),
            (BiasType.RELIGIOUS_BIAS, self._detect_religious_bias),
            (BiasType.SOCIOECONOMIC_BIAS, self._detect_socioeconomic_bias),
            (BiasType.AGE_BIAS, self._detect_age_bias),
            (BiasType.POLITICAL_BIAS, self._detect_political_bias),
            (BiasType.HISTORICAL_BIAS, self._detect_historical_bias),
            (BiasType.LINGUISTIC_BIAS, self._detect_linguistic_bias),
            (BiasType.CONFIRMATION_BIAS, self._detect_confirmation_bias),
        ]
        self.bias_detectors = dict(detector_pairs)
        
        logger.info("Initialized %d bias detection algorithms", len(self.bias_detectors))
    
    def _initialize_demographic_groups(self):
        """Build the default demographic groups used by fairness assessment.

        Weights are rough expected population shares within each dimension
        (gender, ethnicity, age) and need not sum to 1 across dimensions.
        """
        group_specs = [
            # (group_id, display name, attributes, population weight)
            ('gender_male', 'Male', {'gender': 'male'}, 0.49),
            ('gender_female', 'Female', {'gender': 'female'}, 0.51),
            ('ethnicity_european', 'European', {'ethnicity': 'european'}, 0.6),
            ('ethnicity_asian', 'Asian', {'ethnicity': 'asian'}, 0.2),
            ('ethnicity_african', 'African', {'ethnicity': 'african'}, 0.15),
            ('ethnicity_other', 'Other Ethnicities', {'ethnicity': 'other'}, 0.05),
            ('age_young', 'Young (18-35)', {'age_range': '18-35'}, 0.3),
            ('age_middle', 'Middle-aged (36-55)', {'age_range': '36-55'}, 0.4),
            ('age_older', 'Older (56+)', {'age_range': '56+'}, 0.3),
        ]
        self.demographic_groups = {
            group_id: DemographicGroup(group_id, display_name, attributes, weight)
            for group_id, display_name, attributes, weight in group_specs
        }
        
        logger.info("Initialized %d demographic groups", len(self.demographic_groups))
    
    def _initialize_bias_thresholds(self):
        """Fill in default detection thresholds for bias types missing from config."""
        default_thresholds = {
            BiasType.GENDER_BIAS: 0.3,
            BiasType.RACIAL_BIAS: 0.25,
            BiasType.CULTURAL_BIAS: 0.35,
            BiasType.RELIGIOUS_BIAS: 0.3,
            BiasType.SOCIOECONOMIC_BIAS: 0.4,
            BiasType.AGE_BIAS: 0.35,
            BiasType.POLITICAL_BIAS: 0.4,
            BiasType.HISTORICAL_BIAS: 0.2,
            BiasType.LINGUISTIC_BIAS: 0.3,
            BiasType.CONFIRMATION_BIAS: 0.45,
        }
        
        # setdefault preserves any threshold the caller supplied via config.
        for bias_type, default in default_thresholds.items():
            self.bias_thresholds.setdefault(bias_type.value, default)
        
        logger.info("Initialized bias thresholds for %d bias types", len(self.bias_thresholds))
    
    def detect_bias(self, content: str,
                   context: Optional[Dict[str, Any]] = None,
                   bias_types: Optional[List[BiasType]] = None) -> List[BiasDetectionResult]:
        """
        Detect bias in content across multiple dimensions.

        Each requested bias type runs on the shared thread pool; results are
        collected with a 30-second per-future timeout. Detection history and
        counters are updated on the calling thread, so this method is not
        safe to invoke concurrently from multiple threads.

        Args:
            content: Text content to analyze for bias
            context: Additional context information
            bias_types: Specific bias types to check (default: all)

        Returns:
            List of BiasDetectionResult with detected biases
        """
        if bias_types is None:
            bias_types = list(BiasType)
        
        context = context or {}
        detection_results = []
        
        logger.info("Starting bias detection for %d bias types", len(bias_types))
        
        # Use parallel processing for multiple bias types; unknown types
        # (without a registered detector) are silently skipped.
        future_to_bias_type = {}
        
        for bias_type in bias_types:
            if bias_type in self.bias_detectors:
                future = self.executor.submit(
                    self._detect_single_bias_type,
                    content, bias_type, context
                )
                future_to_bias_type[future] = bias_type
        
        # Collect results in submission order; .result() blocks up to 30s per
        # future, so one slow detector delays collection of later ones.
        for future in future_to_bias_type:
            try:
                result = future.result(timeout=30)  # 30 second timeout
                if result:
                    detection_results.append(result)
                    # Record per-type history and counts for later reporting.
                    self.detection_history[future_to_bias_type[future]].append(result)
                    self.bias_statistics[future_to_bias_type[future]] += 1
            except Exception as e:
                # A detector failure or timeout is logged and skipped rather
                # than aborting the whole scan.
                bias_type = future_to_bias_type[future]
                logger.error("Error detecting %s: %s", bias_type.value, e)
        
        # Update detection statistics
        self.detection_stats['total_detections'] += len(detection_results)
        
        logger.info("Bias detection completed: %d biases detected", len(detection_results))
        
        return detection_results
    
    def _detect_single_bias_type(self, content: str, bias_type: BiasType,
                               context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect specific type of bias."""
        detector_func = self.bias_detectors[bias_type]
        
        try:
            return detector_func(content, context)
        except Exception as e:
            logger.error("Error in %s detector: %s", bias_type.value, e)
            return None
    
    def _detect_gender_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect gender bias in content.

        Combines two signals: male/female representation imbalance and
        stereotypical gender associations. Returns a BiasDetectionResult only
        when at least one metric exceeds the configured gender-bias threshold,
        otherwise None.
        """
        gender_terms = {
            'male': ['man', 'men', 'male', 'boy', 'boys', 'he', 'him', 'his', 'masculine'],
            'female': ['woman', 'women', 'female', 'girl', 'girls', 'she', 'her', 'hers', 'feminine']
        }
        
        content_lower = content.lower()
        
        # Fix: count whole words only. The previous substring counting matched
        # 'he' inside 'the' and 'man' inside 'woman', badly skewing the ratios.
        token_counts = Counter(re.findall(r'[a-z]+', content_lower))
        male_count = sum(token_counts[term] for term in gender_terms['male'])
        female_count = sum(token_counts[term] for term in gender_terms['female'])
        
        # Analyze context around gender terms
        bias_indicators = self._analyze_gender_contexts(content, gender_terms)
        
        metrics = []
        # Fix: default so the category comparison below is always defined,
        # even when the content contains no gender references at all
        # (previously a NameError when only the stereotype metric fired).
        representation_bias = 0.0
        
        total_gender_refs = male_count + female_count
        if total_gender_refs > 0:
            male_ratio = male_count / total_gender_refs
            # 0 when perfectly balanced, 1 when all references are one gender.
            representation_bias = abs(male_ratio - 0.5) * 2
            
            metrics.append(BiasMetric(
                name="Gender Representation Imbalance",
                value=representation_bias,
                confidence=0.8,
                threshold=self.bias_thresholds[BiasType.GENDER_BIAS.value],
                description="Imbalance in male/female representation",
                methodology="Gender term frequency analysis"
            ))
        
        # Stereotypical associations
        stereotype_bias = self._detect_gender_stereotypes(content, bias_indicators)
        if stereotype_bias > 0:
            metrics.append(BiasMetric(
                name="Gender Stereotyping",
                value=stereotype_bias,
                confidence=0.85,
                threshold=self.bias_thresholds[BiasType.GENDER_BIAS.value],
                description="Stereotypical gender associations detected",
                methodology="Stereotype pattern matching"
            ))
        
        significant_metrics = [m for m in metrics if m.exceeds_threshold()]
        if not significant_metrics:
            return None
        
        # Fix: actually use the highest severity — the old code took the
        # first metric's severity despite its comment, and plain Enum members
        # are not directly orderable with max().
        severity_order = [BiasSeverity.LOW, BiasSeverity.MODERATE, BiasSeverity.HIGH, BiasSeverity.CRITICAL]
        worst_severity = max((m.severity_level() for m in significant_metrics), key=severity_order.index)
        
        return BiasDetectionResult(
            bias_type=BiasType.GENDER_BIAS,
            bias_category=BiasCategory.STEREOTYPING if stereotype_bias > representation_bias
                         else BiasCategory.REPRESENTATION,
            severity=worst_severity,
            confidence=np.mean([m.confidence for m in significant_metrics]),
            affected_groups=['gender_male', 'gender_female'],
            bias_metrics=significant_metrics,
            evidence={'bias_indicators': bias_indicators, 'gender_counts': {'male': male_count, 'female': female_count}},
            examples=self._extract_bias_examples(content, gender_terms),
            recommendations=self._generate_gender_bias_recommendations(significant_metrics),
            correction_strategies=self._generate_gender_bias_corrections(significant_metrics)
        )
    
    def _analyze_gender_contexts(self, content: str, gender_terms: Dict[str, List[str]]) -> Dict[str, Any]:
        """Analyze context around gender terms for bias indicators."""
        bias_indicators = {
            'stereotypical_associations': [],
            'power_imbalances': [],
            'exclusionary_language': []
        }
        
        # Look for stereotypical associations
        stereotype_patterns = {
            'male_stereotypes': [
                r'\b(strong|powerful|aggressive|dominant|leadership|warrior|brave)\b.*\b(man|men|male|he|him)\b',
                r'\b(man|men|male|he|him)\b.*\b(strong|powerful|aggressive|dominant|leadership|warrior|brave)\b'
            ],
            'female_stereotypes': [
                r'\b(emotional|weak|submissive|nurturing|caring|gentle)\b.*\b(woman|women|female|she|her)\b',
                r'\b(woman|women|female|she|her)\b.*\b(emotional|weak|submissive|nurturing|caring|gentle)\b'
            ]
        }
        
        for category, patterns in stereotype_patterns.items():
            for pattern in patterns:
                matches = re.findall(pattern, content, re.IGNORECASE)
                if matches:
                    bias_indicators['stereotypical_associations'].extend(matches)
        
        return bias_indicators
    
    def _detect_gender_stereotypes(self, content: str, bias_indicators: Dict[str, Any]) -> float:
        """Detect gender stereotypes in content."""
        stereotype_count = len(bias_indicators.get('stereotypical_associations', []))
        content_length = len(content.split())
        
        if content_length == 0:
            return 0.0
        
        # Normalize by content length
        stereotype_density = stereotype_count / (content_length / 100)  # Per 100 words
        
        # Convert to bias score (0-1)
        return min(1.0, stereotype_density)
    
    def _detect_racial_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect racial and ethnic bias in content.

        Combines two signals: explicitly biased language (pattern matching)
        and representation imbalance across configured ethnic groups.
        Returns a BiasDetectionResult when at least one metric exceeds the
        racial-bias threshold, otherwise None.
        """
        # Terms passed to the representation-analysis helper.
        racial_terms = {
            'neutral': ['person', 'people', 'individual', 'citizen', 'human'],
            'specific': ['caucasian', 'european', 'asian', 'african', 'hispanic', 'latino', 'indigenous', 'native']
        }
        
        # Check for biased language patterns
        bias_patterns = [
            r'\b(primitive|savage|barbaric|uncivilized)\b',
            r'\b(superior|inferior)\s+(race|ethnicity|people)\b',
            r'\b(racial|ethnic)\s+(superiority|inferiority)\b'
        ]
        
        content_lower = content.lower()
        bias_matches = []
        
        for pattern in bias_patterns:
            bias_matches.extend(re.findall(pattern, content_lower))
        
        # Analyze representation of different racial groups
        representation_analysis = self._analyze_racial_representation(content, racial_terms)
        
        metrics = []
        
        # Biased-language metric
        if bias_matches:
            bias_score = min(1.0, len(bias_matches) / 10)  # 10+ hits saturates the score
            metrics.append(BiasMetric(
                name="Racial Bias Language",
                value=bias_score,
                confidence=0.9,
                threshold=self.bias_thresholds[BiasType.RACIAL_BIAS.value],
                description="Detected racially biased language",
                methodology="Pattern matching for biased terms"
            ))
        
        # Representation balance metric
        if representation_analysis['imbalance_score'] > 0:
            metrics.append(BiasMetric(
                name="Racial Representation Imbalance", 
                value=representation_analysis['imbalance_score'],
                confidence=0.7,
                threshold=self.bias_thresholds[BiasType.RACIAL_BIAS.value],
                description="Imbalanced racial representation",
                methodology="Representation frequency analysis"
            ))
        
        significant_metrics = [m for m in metrics if m.exceeds_threshold()]
        if not significant_metrics:
            return None
        
        # Fix: BiasSeverity is a plain Enum, so a bare max() over its members
        # raises TypeError; rank them explicitly to pick the worst severity.
        severity_order = [BiasSeverity.LOW, BiasSeverity.MODERATE, BiasSeverity.HIGH, BiasSeverity.CRITICAL]
        worst_severity = max((m.severity_level() for m in significant_metrics), key=severity_order.index)
        
        return BiasDetectionResult(
            bias_type=BiasType.RACIAL_BIAS,
            bias_category=BiasCategory.DISCRIMINATION if bias_matches else BiasCategory.REPRESENTATION,
            severity=worst_severity,
            confidence=np.mean([m.confidence for m in significant_metrics]),
            affected_groups=representation_analysis['affected_groups'],
            bias_metrics=significant_metrics,
            evidence={'bias_matches': bias_matches, 'representation_analysis': representation_analysis},
            examples=self._extract_racial_bias_examples(content, bias_matches),
            recommendations=self._generate_racial_bias_recommendations(significant_metrics),
            correction_strategies=self._generate_racial_bias_corrections(significant_metrics)
        )
    
    def _analyze_racial_representation(self, content: str, racial_terms: Dict[str, List[str]]) -> Dict[str, Any]:
        """Analyze racial representation in content."""
        content_lower = content.lower()
        
        # Count mentions of different racial groups
        group_counts = defaultdict(int)
        
        for group_id, group in self.demographic_groups.items():
            if 'ethnicity' in group.attributes:
                ethnicity = group.attributes['ethnicity']
                # Simple keyword counting - in production would use more sophisticated NLP
                if ethnicity == 'european':
                    keywords = ['european', 'caucasian', 'white', 'western']
                elif ethnicity == 'asian':
                    keywords = ['asian', 'chinese', 'japanese', 'korean', 'indian']
                elif ethnicity == 'african':
                    keywords = ['african', 'black', 'ethiopian', 'egyptian']
                else:
                    keywords = ['other', 'minority']
                
                count = sum(content_lower.count(keyword) for keyword in keywords)
                if count > 0:
                    group_counts[group_id] = count
        
        # Calculate imbalance
        total_mentions = sum(group_counts.values())
        imbalance_score = 0.0
        affected_groups = []
        
        if total_mentions > 0:
            expected_ratios = {gid: g.population_weight for gid, g in self.demographic_groups.items() 
                             if 'ethnicity' in g.attributes}
            
            for group_id, count in group_counts.items():
                actual_ratio = count / total_mentions
                expected_ratio = expected_ratios.get(group_id, 0.1)
                
                # Calculate deviation from expected representation
                deviation = abs(actual_ratio - expected_ratio)
                imbalance_score = max(imbalance_score, deviation)
                
                if deviation > 0.2:  # Threshold for significant imbalance
                    affected_groups.append(group_id)
        
        return {
            'group_counts': dict(group_counts),
            'imbalance_score': imbalance_score,
            'affected_groups': affected_groups,
            'total_mentions': total_mentions
        }
    
    def _detect_cultural_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect cultural bias and insensitivity.

        Combines three signals: culturally biased language, ethnocentric
        phrasing, and sweeping cultural generalizations (scored by
        _analyze_cultural_context). Returns a BiasDetectionResult only when
        at least one metric exceeds the cultural-bias threshold.
        """
        # Cultural bias patterns
        cultural_bias_patterns = [
            r'\b(primitive|backward|advanced|civilized|savage|barbaric)\s+(culture|society|people|civilization)\b',
            r'\b(superior|inferior)\s+(culture|civilization|society)\b',
            r'\b(western|eastern|modern|traditional)\s+(values|customs|practices)\s+(better|worse|superior|inferior)\b'
        ]
        
        # Ethnocentric language
        ethnocentric_patterns = [
            r'\bour\s+(way|culture|values|customs)\s+(is|are)\s+(better|superior|right)\b',
            r'\btheir\s+(way|culture|values|customs)\s+(is|are)\s+(wrong|inferior|primitive)\b'
        ]
        
        content_lower = content.lower()
        
        cultural_bias_matches = []
        ethnocentric_matches = []
        
        for pattern in cultural_bias_patterns:
            cultural_bias_matches.extend(re.findall(pattern, content_lower))
        
        for pattern in ethnocentric_patterns:
            ethnocentric_matches.extend(re.findall(pattern, content_lower))
        
        # Analyze cultural representation and context
        cultural_analysis = self._analyze_cultural_context(content, context)
        
        metrics = []
        
        if cultural_bias_matches:
            bias_score = min(1.0, len(cultural_bias_matches) / 5)  # 5+ hits saturates
            metrics.append(BiasMetric(
                name="Cultural Bias Language",
                value=bias_score,
                confidence=0.85,
                threshold=self.bias_thresholds[BiasType.CULTURAL_BIAS.value],
                description="Detected culturally biased language",
                methodology="Cultural bias pattern matching"
            ))
        
        if ethnocentric_matches:
            ethno_score = min(1.0, len(ethnocentric_matches) / 3)  # 3+ hits saturates
            metrics.append(BiasMetric(
                name="Ethnocentric Language",
                value=ethno_score,
                confidence=0.9,
                threshold=self.bias_thresholds[BiasType.CULTURAL_BIAS.value],
                description="Detected ethnocentric perspective",
                methodology="Ethnocentrism pattern matching"
            ))
        
        # Cultural sensitivity analysis
        if cultural_analysis['sensitivity_score'] > 0:
            metrics.append(BiasMetric(
                name="Cultural Insensitivity",
                value=cultural_analysis['sensitivity_score'],
                confidence=0.75,
                threshold=self.bias_thresholds[BiasType.CULTURAL_BIAS.value],
                description="Cultural insensitivity detected",
                methodology="Cultural context analysis"
            ))
        
        significant_metrics = [m for m in metrics if m.exceeds_threshold()]
        if not significant_metrics:
            return None
        
        # Fix: BiasSeverity is a plain Enum, so a bare max() over its members
        # raises TypeError; rank them explicitly to pick the worst severity.
        severity_order = [BiasSeverity.LOW, BiasSeverity.MODERATE, BiasSeverity.HIGH, BiasSeverity.CRITICAL]
        worst_severity = max((m.severity_level() for m in significant_metrics), key=severity_order.index)
        
        return BiasDetectionResult(
            bias_type=BiasType.CULTURAL_BIAS,
            bias_category=BiasCategory.CULTURAL_INSENSITIVITY,
            severity=worst_severity,
            confidence=np.mean([m.confidence for m in significant_metrics]),
            affected_groups=cultural_analysis['affected_groups'],
            bias_metrics=significant_metrics,
            evidence={
                'cultural_bias_matches': cultural_bias_matches,
                'ethnocentric_matches': ethnocentric_matches,
                'cultural_analysis': cultural_analysis
            },
            examples=self._extract_cultural_bias_examples(content, cultural_bias_matches + ethnocentric_matches),
            recommendations=self._generate_cultural_bias_recommendations(significant_metrics),
            correction_strategies=self._generate_cultural_bias_corrections(significant_metrics)
        )
    
    def _analyze_cultural_context(self, content: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze cultural context and sensitivity."""
        # Simple cultural analysis - in production would use more sophisticated methods
        sensitivity_indicators = 0
        affected_groups = []
        
        # Check for generalizations about cultures
        generalization_patterns = [
            r'\ball\s+\w+\s+(people|culture|society)\s+(are|do|believe|think)',
            r'\b\w+\s+(people|culture|society)\s+always\s+',
            r'\b\w+\s+(people|culture|society)\s+never\s+'
        ]
        
        content_lower = content.lower()
        
        for pattern in generalization_patterns:
            matches = re.findall(pattern, content_lower)
            sensitivity_indicators += len(matches)
        
        # Normalize sensitivity score
        sensitivity_score = min(1.0, sensitivity_indicators / 10)
        
        return {
            'sensitivity_score': sensitivity_score,
            'sensitivity_indicators': sensitivity_indicators,
            'affected_groups': affected_groups
        }
    
    def _detect_religious_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect religious bias and intolerance.

        Thin wrapper that delegates to the shared generic detector (defined
        elsewhere in this class) with religion-domain keywords.
        """
        keywords = ['religion', 'faith', 'belief', 'church', 'temple']
        return self._detect_generic_bias(content, context, BiasType.RELIGIOUS_BIAS,
                                         'religious', keywords)
    
    def _detect_socioeconomic_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect socioeconomic bias.

        Thin wrapper that delegates to the shared generic detector (defined
        elsewhere in this class) with class/status keywords.
        """
        keywords = ['poor', 'rich', 'wealthy', 'class', 'status']
        return self._detect_generic_bias(content, context, BiasType.SOCIOECONOMIC_BIAS,
                                         'socioeconomic', keywords)
    
    def _detect_age_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect age-related bias.

        Thin wrapper that delegates to the shared generic detector (defined
        elsewhere in this class) with age-domain keywords.
        """
        keywords = ['young', 'old', 'elderly', 'youth', 'senior']
        return self._detect_generic_bias(content, context, BiasType.AGE_BIAS,
                                         'age', keywords)
    
    def _detect_political_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect political bias.

        Thin wrapper that delegates to the shared generic detector (defined
        elsewhere in this class) with political-domain keywords.
        """
        keywords = ['conservative', 'liberal', 'political', 'government']
        return self._detect_generic_bias(content, context, BiasType.POLITICAL_BIAS,
                                         'political', keywords)
    
    def _detect_historical_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect historical bias and perspective imbalance.

        Looks for victor-narrative phrasing, one-sided judgments of "them",
        and anachronistic framing. Returns a BiasDetectionResult when the
        combined match score exceeds the historical-bias threshold.
        """
        perspective_indicators = {
            'victor_narrative': r'\b(we|our|us)\s+(won|conquered|defeated|triumphed)\b',
            'one_sided_view': r'\b(they|their|them)\s+(were|are)\s+(wrong|evil|bad|inferior)\b',
            'anachronism': r'\b(modern|contemporary|current)\s+(perspective|view|understanding)\b'
        }
        
        content_lower = content.lower()
        
        bias_matches = {}
        for indicator, pattern in perspective_indicators.items():
            matches = re.findall(pattern, content_lower)
            if matches:
                bias_matches[indicator] = matches
        
        if not bias_matches:
            return None
        
        total_matches = sum(len(matches) for matches in bias_matches.values())
        bias_score = min(1.0, total_matches / 10)  # 10+ hits saturates the score
        
        metric = BiasMetric(
            name="Historical Perspective Bias",
            value=bias_score,
            confidence=0.8,
            threshold=self.bias_thresholds[BiasType.HISTORICAL_BIAS.value],
            description="Detected historical perspective imbalance",
            methodology="Historical bias pattern matching"
        )
        
        if not metric.exceeds_threshold():
            return None
        
        # Fix: findall with groups yields tuples, and the old code put whole
        # lists into `examples`; flatten to readable strings so the field
        # matches its declared List[str] type.
        examples = [
            ' '.join(match) if isinstance(match, tuple) else match
            for matches in bias_matches.values()
            for match in matches
        ]
        
        return BiasDetectionResult(
            bias_type=BiasType.HISTORICAL_BIAS,
            bias_category=BiasCategory.HISTORICAL_DISTORTION,
            severity=metric.severity_level(),
            confidence=0.8,
            affected_groups=[],
            bias_metrics=[metric],
            evidence={'bias_matches': bias_matches},
            examples=examples,
            recommendations=["Provide balanced historical perspectives", "Include multiple viewpoints"],
            correction_strategies=["Add alternative historical interpretations", "Use neutral language"]
        )
    
    def _detect_linguistic_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect linguistic bias and language superiority.

        Thin wrapper: delegates to the generic keyword-density detector
        with language-related vocabulary.
        """
        language_keywords = ['language', 'accent', 'dialect', 'speech']
        return self._detect_generic_bias(
            content, context, BiasType.LINGUISTIC_BIAS, 'linguistic', language_keywords
        )
    
    def _detect_confirmation_bias(self, content: str, context: Dict[str, Any]) -> Optional[BiasDetectionResult]:
        """Detect confirmation bias patterns.

        Scans for absolutist phrasings that present claims as beyond
        question; returns a result only when the indicator density exceeds
        the configured threshold, otherwise None.
        """
        # Phrasings that assert certainty without offering evidence.
        certainty_phrases = [
            r'\bobviously\b',
            r'\bclearly\b',
            r'\bof course\b',
            r'\beveryone knows\b',
            r'\bit is certain\b',
            r'\bwithout doubt\b'
        ]

        lowered = content.lower()

        hits = []
        for phrase in certainty_phrases:
            hits.extend(re.findall(phrase, lowered))

        if not hits:
            return None

        # Score saturates at 1.0 once eight or more indicator phrases appear.
        score = min(1.0, len(hits) / 8)
        threshold = self.bias_thresholds[BiasType.CONFIRMATION_BIAS.value]

        metric = BiasMetric(
            name="Confirmation Bias Language",
            value=score,
            confidence=0.7,
            threshold=threshold,
            description="Detected confirmation bias indicators",
            methodology="Confirmation bias pattern matching"
        )

        if score > threshold:
            return BiasDetectionResult(
                bias_type=BiasType.CONFIRMATION_BIAS,
                bias_category=BiasCategory.DISCRIMINATION,
                severity=metric.severity_level(),
                confidence=0.7,
                affected_groups=[],
                bias_metrics=[metric],
                evidence={'confirmation_matches': hits},
                examples=hits,
                recommendations=["Use more qualified language", "Present evidence for claims"],
                correction_strategies=["Replace absolute statements with qualified ones", "Add supporting evidence"]
            )

        return None
    
    def _detect_generic_bias(self, content: str, context: Dict[str, Any], bias_type: BiasType,
                           domain: str, keywords: List[str]) -> Optional[BiasDetectionResult]:
        """Generic bias detection for simpler bias types.

        Simplified keyword-density heuristic; a production system would use
        something more sophisticated here. Returns a result only when the
        density score exceeds the configured threshold, otherwise None.
        """
        lowered = content.lower()

        # How many times any domain keyword occurs in the content.
        keyword_count = sum(lowered.count(keyword) for keyword in keywords)
        if keyword_count == 0:
            return None

        total_words = len(content.split())
        if total_words == 0:
            return None

        # Scale density so roughly 10% keyword density saturates the score.
        score = min(1.0, (keyword_count / total_words) * 10)
        threshold = self.bias_thresholds[bias_type.value]

        metric = BiasMetric(
            name=f"{domain.title()} Bias",
            value=score,
            confidence=0.6,  # Generic detection carries lower confidence.
            threshold=threshold,
            description=f"Detected {domain} bias indicators",
            methodology="Keyword density analysis"
        )

        if score <= threshold:
            return None

        return BiasDetectionResult(
            bias_type=bias_type,
            bias_category=BiasCategory.STEREOTYPING,
            severity=metric.severity_level(),
            confidence=0.6,
            affected_groups=[],
            bias_metrics=[metric],
            evidence={'keyword_count': keyword_count, 'keywords': keywords},
            examples=keywords[:5],  # Sample of the triggering vocabulary.
            recommendations=[f"Review {domain} language for bias"],
            correction_strategies=[f"Use more neutral {domain} terminology"]
        )
    
    # Fairness assessment methods
    
    def assess_fairness(self, content_samples: List[str],
                       demographic_labels: List[Dict[str, Any]],
                       outcome_variable: Optional[str] = None) -> "FairnessAssessment":
        """
        Assess multi-dimensional fairness across demographic groups.

        Args:
            content_samples: List of content samples
            demographic_labels: Demographic information for each sample
            outcome_variable: Optional outcome variable for fairness metrics

        Returns:
            FairnessAssessment with comprehensive fairness analysis
        """
        logger.info("Assessing fairness for %d content samples", len(content_samples))

        # Group samples by demographics
        demographic_groups_data = self._group_by_demographics(content_samples, demographic_labels)

        # Calculate fairness metrics
        representation_balance = self._calculate_representation_balance(demographic_groups_data)
        demographic_parity = self._calculate_demographic_parity(demographic_groups_data, outcome_variable)
        equalized_odds = self._calculate_equalized_odds(demographic_groups_data, outcome_variable)
        individual_fairness = self._calculate_individual_fairness(content_samples, demographic_labels)

        # Bug fix: np.mean([]) yields NaN (plus a RuntimeWarning) when no
        # sample matched any demographic group; score the representation
        # component as 0.0 in that case, and compute it once for reuse.
        representation_component = (
            float(np.mean(list(representation_balance.values())))
            if representation_balance else 0.0
        )

        # Calculate group-specific fairness scores
        group_fairness_scores = {
            group_id: self._calculate_group_fairness_score(group_data)
            for group_id, group_data in demographic_groups_data.items()
        }

        # Overall fairness is the unweighted mean of the four components.
        overall_fairness = np.mean([
            representation_component,
            demographic_parity,
            equalized_odds,
            individual_fairness
        ])

        # Generate recommendations
        recommendations = self._generate_fairness_recommendations(
            representation_balance, demographic_parity, equalized_odds, individual_fairness
        )

        # Detailed analysis
        detailed_analysis = {
            'sample_count': len(content_samples),
            'demographic_distribution': {gid: len(gdata['samples'])
                                       for gid, gdata in demographic_groups_data.items()},
            'bias_detection_summary': self._summarize_bias_detection(content_samples),
            'fairness_components': {
                'representation': representation_component,
                'demographic_parity': demographic_parity,
                'equalized_odds': equalized_odds,
                'individual_fairness': individual_fairness
            }
        }

        assessment = FairnessAssessment(
            overall_fairness_score=overall_fairness,
            group_fairness_scores=group_fairness_scores,
            representation_balance=representation_balance,
            outcome_parity=demographic_parity,
            equalized_odds=equalized_odds,
            demographic_parity=demographic_parity,
            individual_fairness=individual_fairness,
            detailed_analysis=detailed_analysis,
            recommendations=recommendations
        )

        logger.info("Fairness assessment completed: overall score=%.3f", overall_fairness)

        return assessment
    
    def _group_by_demographics(self, content_samples: List[str],
                             demographic_labels: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
        """Group content samples by demographic characteristics."""
        groups = defaultdict(lambda: {'samples': [], 'labels': []})
        
        for i, (content, demo_label) in enumerate(zip(content_samples, demographic_labels)):
            # Determine demographic group(s) for this sample
            group_ids = []
            
            for group_id, group in self.demographic_groups.items():
                if self._matches_demographic_group(demo_label, group):
                    group_ids.append(group_id)
            
            # Add to appropriate groups
            for group_id in group_ids:
                groups[group_id]['samples'].append(content)
                groups[group_id]['labels'].append(demo_label)
        
        return dict(groups)
    
    def _matches_demographic_group(self, demo_label: Dict[str, Any], 
                                 group: DemographicGroup) -> bool:
        """Check if demographic label matches group criteria."""
        for attr_name, attr_value in group.attributes.items():
            if attr_name in demo_label:
                if demo_label[attr_name] == attr_value:
                    return True
                # Handle age ranges
                elif attr_name == 'age_range' and 'age' in demo_label:
                    age = demo_label['age']
                    if attr_value == '18-35' and 18 <= age <= 35:
                        return True
                    elif attr_value == '36-55' and 36 <= age <= 55:
                        return True
                    elif attr_value == '56+' and age >= 56:
                        return True
        
        return False
    
    def _calculate_representation_balance(self, groups_data: Dict[str, Dict[str, Any]]) -> Dict[str, float]:
        """Calculate representation balance across demographic groups."""
        total_samples = sum(len(gdata['samples']) for gdata in groups_data.values())
        
        balance_scores = {}
        
        for group_id, group_data in groups_data.items():
            if group_id in self.demographic_groups:
                actual_proportion = len(group_data['samples']) / total_samples
                expected_proportion = self.demographic_groups[group_id].population_weight
                
                # Normalize expected proportions
                total_expected = sum(g.population_weight for g in self.demographic_groups.values())
                expected_proportion = expected_proportion / total_expected
                
                # Balance score (1.0 = perfect balance, 0.0 = maximum imbalance)
                imbalance = abs(actual_proportion - expected_proportion)
                balance_score = max(0.0, 1.0 - (imbalance / expected_proportion))
                
                balance_scores[group_id] = balance_score
        
        return balance_scores
    
    def _calculate_demographic_parity(self, groups_data: Dict[str, Dict[str, Any]],
                                    outcome_variable: Optional[str]) -> float:
        """Calculate demographic parity metric."""
        if not outcome_variable:
            return 0.8  # Default score if no outcome variable
        
        # Simplified implementation - in production would calculate actual parity
        # based on outcome rates across demographic groups
        return 0.85
    
    def _calculate_equalized_odds(self, groups_data: Dict[str, Dict[str, Any]],
                                outcome_variable: Optional[str]) -> float:
        """Calculate equalized odds metric."""
        if not outcome_variable:
            return 0.8  # Default score if no outcome variable
        
        # Simplified implementation - in production would calculate actual equalized odds
        return 0.82
    
    def _calculate_individual_fairness(self, content_samples: List[str],
                                     demographic_labels: List[Dict[str, Any]]) -> float:
        """Calculate individual fairness metric."""
        # Simplified implementation - individual fairness is complex to measure
        # In production would compare similar individuals across groups
        return 0.78
    
    def _calculate_group_fairness_score(self, group_data: Dict[str, Any]) -> float:
        """Calculate fairness score for specific demographic group."""
        # Simplified implementation
        sample_count = len(group_data['samples'])
        
        # Base score on representation adequacy
        if sample_count < 10:
            return 0.3  # Low score for under-representation
        elif sample_count < 50:
            return 0.6  # Moderate score
        else:
            return 0.9  # High score for good representation
    
    # Helper methods for bias detection
    
    def _extract_bias_examples(self, content: str, terms: Dict[str, List[str]]) -> List[str]:
        """Extract examples of biased content."""
        examples = []
        sentences = content.split('.')
        
        for sentence in sentences[:5]:  # Limit examples
            sentence = sentence.strip()
            for term_list in terms.values():
                if any(term in sentence.lower() for term in term_list):
                    if len(sentence) > 10:  # Avoid very short sentences
                        examples.append(sentence)
                        break
        
        return examples
    
    def _extract_racial_bias_examples(self, content: str, bias_matches: List[str]) -> List[str]:
        """Extract examples of racial bias."""
        examples = []
        sentences = content.split('.')
        
        for sentence in sentences[:3]:  # Limit examples
            sentence = sentence.strip()
            if any(match in sentence.lower() for match in bias_matches):
                if len(sentence) > 10:
                    examples.append(sentence)
        
        return examples
    
    def _extract_cultural_bias_examples(self, content: str, matches: List[str]) -> List[str]:
        """Extract examples of cultural bias."""
        return self._extract_racial_bias_examples(content, matches)
    
    # Recommendation generation methods
    
    def _generate_gender_bias_recommendations(self, metrics: List[BiasMetric]) -> List[str]:
        """Generate recommendations for addressing gender bias."""
        recommendations = []
        
        for metric in metrics:
            if "representation" in metric.name.lower():
                recommendations.append("Balance gender representation in content")
                recommendations.append("Ensure equal treatment of male and female subjects")
            elif "stereotyping" in metric.name.lower():
                recommendations.append("Avoid gender stereotypes and generalizations")
                recommendations.append("Present diverse examples of gender roles")
        
        return recommendations
    
    def _generate_racial_bias_recommendations(self, metrics: List[BiasMetric]) -> List[str]:
        """Generate recommendations for addressing racial bias."""
        recommendations = []
        
        for metric in metrics:
            if "language" in metric.name.lower():
                recommendations.append("Remove racially biased language")
                recommendations.append("Use respectful and neutral terminology")
            elif "representation" in metric.name.lower():
                recommendations.append("Improve racial and ethnic representation balance")
                recommendations.append("Include diverse perspectives and voices")
        
        return recommendations
    
    def _generate_cultural_bias_recommendations(self, metrics: List[BiasMetric]) -> List[str]:
        """Return the standard guidance for mitigating cultural bias.

        The advice is static; ``metrics`` is accepted for interface
        symmetry with the other recommendation generators.
        """
        guidance = [
            "Avoid cultural superiority assumptions",
            "Present multiple cultural perspectives",
            "Use culturally sensitive language",
            "Acknowledge cultural relativity"
        ]
        return guidance
    
    # Correction strategy methods
    
    def _generate_gender_bias_corrections(self, metrics: List[BiasMetric]) -> List[str]:
        """Return the standard correction strategies for gender bias.

        Static list; ``metrics`` is accepted for interface symmetry with
        the other correction generators.
        """
        strategies = [
            "Replace gendered language with neutral alternatives",
            "Add examples of counter-stereotypical behavior",
            "Balance male and female representation in examples",
            "Use inclusive pronouns where appropriate"
        ]
        return strategies
    
    def _generate_racial_bias_corrections(self, metrics: List[BiasMetric]) -> List[str]:
        """Return the standard correction strategies for racial bias.

        Static list; ``metrics`` is accepted for interface symmetry with
        the other correction generators.
        """
        strategies = [
            "Replace biased terms with neutral language",
            "Add diverse racial and ethnic perspectives",
            "Remove implicit racial hierarchies",
            "Include positive examples from all racial groups"
        ]
        return strategies
    
    def _generate_cultural_bias_corrections(self, metrics: List[BiasMetric]) -> List[str]:
        """Return the standard correction strategies for cultural bias.

        Static list; ``metrics`` is accepted for interface symmetry with
        the other correction generators.
        """
        strategies = [
            "Replace ethnocentric language with neutral terms",
            "Add acknowledgment of cultural diversity",
            "Present cultural practices without judgment",
            "Include multiple cultural viewpoints"
        ]
        return strategies
    
    def _generate_fairness_recommendations(self, representation_balance: Dict[str, float],
                                         demographic_parity: float,
                                         equalized_odds: float,
                                         individual_fairness: float) -> List[str]:
        """Generate fairness improvement recommendations."""
        recommendations = []
        
        # Representation issues
        low_representation_groups = [gid for gid, score in representation_balance.items() 
                                    if score < 0.6]
        if low_representation_groups:
            recommendations.append(f"Improve representation for groups: {', '.join(low_representation_groups)}")
        
        # Demographic parity issues
        if demographic_parity < 0.7:
            recommendations.append("Address demographic parity imbalances")
        
        # Equalized odds issues
        if equalized_odds < 0.7:
            recommendations.append("Improve equalized odds across groups")
        
        # Individual fairness issues
        if individual_fairness < 0.7:
            recommendations.append("Enhance individual fairness measures")
        
        return recommendations
    
    def _summarize_bias_detection(self, content_samples: List[str]) -> Dict[str, Any]:
        """Summarize bias detection across content samples."""
        # Run bias detection on sample of content
        sample_results = []
        
        for content in content_samples[:10]:  # Sample for efficiency
            results = self.detect_bias(content)
            sample_results.extend(results)
        
        # Summarize findings
        bias_counts = Counter(result.bias_type for result in sample_results)
        severity_counts = Counter(result.severity for result in sample_results)
        
        return {
            'total_biases_detected': len(sample_results),
            'bias_type_counts': dict(bias_counts),
            'severity_distribution': dict(severity_counts),
            'average_confidence': np.mean([r.confidence for r in sample_results]) if sample_results else 0.0
        }
    
    # Public interface methods
    
    def get_detection_statistics(self) -> Dict[str, Any]:
        """Get bias detection performance statistics.

        Returns a copy of the raw counters, augmented with recall,
        precision and F1 where the corresponding denominators are nonzero.
        """
        stats = self.detection_stats.copy()

        tp = stats['true_positives']
        fp = stats['false_positives']
        fn = stats['false_negatives']

        if tp + fn > 0:
            stats['recall'] = tp / (tp + fn)
        if tp + fp > 0:
            stats['precision'] = tp / (tp + fp)

        recall = stats.get('recall')
        precision = stats.get('precision')
        # F1 needs both components and a nonzero sum to avoid division by zero.
        if recall is not None and precision is not None and recall + precision > 0:
            stats['f1_score'] = 2 * recall * precision / (recall + precision)

        return stats
    
    def get_bias_history(self, bias_type: Optional["BiasType"] = None,
                        limit: Optional[int] = None) -> List["BiasDetectionResult"]:
        """Get bias detection history.

        Args:
            bias_type: Restrict results to one bias type; when None, all
                types are merged and sorted newest-first by timestamp.
            limit: Optional maximum number of results to return.

        Returns:
            A new list of detection results; mutating it never affects the
            internal history.
        """
        if bias_type:
            # Bug fix: previously returned the internal list by reference
            # (callers could mutate detection state), and subscripting a
            # defaultdict materialized an empty entry for unseen types.
            history = list(self.detection_history.get(bias_type, []))
        else:
            history = []
            for per_type_history in self.detection_history.values():
                history.extend(per_type_history)
            history.sort(key=lambda result: result.timestamp, reverse=True)

        return history[:limit] if limit else history
    
    def update_bias_threshold(self, bias_type: BiasType, threshold: float):
        """Set a new detection threshold for one bias type.

        Thresholds are keyed by the enum's string value so they align with
        the lookups performed by the individual detectors.
        """
        key = bias_type.value
        self.bias_thresholds[key] = threshold
        logger.info("Updated %s threshold to %.3f", key, threshold)
    
    def add_custom_bias_pattern(self, bias_type: BiasType, pattern: str, confidence: float = 0.8):
        """Register a user-supplied detection pattern for a bias type.

        Each entry records the pattern, its confidence and the time it was
        added; the per-type list is created on first use.
        """
        entry = {
            'pattern': pattern,
            'confidence': confidence,
            'added': datetime.now()
        }
        # setdefault creates the per-type list on first registration.
        self.known_bias_patterns.setdefault(bias_type, []).append(entry)

        logger.info("Added custom pattern for %s bias", bias_type.value)
    
    def export_bias_report(self, content_samples: List[str],
                          demographic_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]:
        """Build a comprehensive bias analysis report for a batch of content.

        Runs per-sample bias detection, optionally a fairness assessment
        (only when demographic data aligns one-to-one with the samples),
        and bundles summaries, statistics, recommendations and the current
        system configuration into a single dictionary.
        """
        logger.info("Generating comprehensive bias report for %d samples", len(content_samples))

        # Per-sample detection, flattened into one result list.
        detected = []
        for sample in content_samples:
            detected.extend(self.detect_bias(sample))

        # Fairness assessment requires one demographic record per sample.
        fairness_assessment = None
        if demographic_data and len(demographic_data) == len(content_samples):
            fairness_assessment = self.assess_fairness(content_samples, demographic_data)

        report = {
            'report_timestamp': datetime.now(),
            'content_samples_analyzed': len(content_samples),
            'total_biases_detected': len(detected),
            'bias_detection_results': detected,
            'fairness_assessment': fairness_assessment,
            'bias_summary': self._summarize_bias_detection(content_samples),
            'detection_statistics': self.get_detection_statistics(),
            'recommendations': self._generate_overall_recommendations(detected),
            'system_configuration': {
                'detection_accuracy_target': self.detection_accuracy_target,
                'fairness_threshold': self.fairness_threshold,
                'bias_thresholds': self.bias_thresholds
            }
        }

        logger.info("Bias report generated successfully")

        return report
    
    def _generate_overall_recommendations(self, results: List[BiasDetectionResult]) -> List[str]:
        """Generate overall recommendations based on all detection results.

        Merges per-result recommendations (de-duplicated via a set) and
        adds systemic guidance driven by aggregate patterns. Note: set
        ordering means the returned list order is not deterministic.
        """
        recommendations = set()

        # Union of every individual result's recommendations.
        for detection in results:
            recommendations.update(detection.recommendations)

        type_counts = Counter(detection.bias_type for detection in results)

        if type_counts[BiasType.GENDER_BIAS] > 0:
            recommendations.add("Implement systematic gender bias review process")

        if type_counts[BiasType.CULTURAL_BIAS] > 0:
            recommendations.add("Conduct cultural sensitivity training")

        # Escalate when more than 10% of detections demand intervention.
        intervention_count = sum(1 for detection in results if detection.requires_intervention())
        if intervention_count > len(results) * 0.1:
            recommendations.add("Implement mandatory bias checking before content release")

        return list(recommendations)
    
    def shutdown(self):
        """Stop the background worker pool and log completion.

        ``wait=True`` blocks until any in-flight bias-detection jobs have
        finished before the executor is torn down.
        """
        self.executor.shutdown(wait=True)
        logger.info("BiasDetectionSystem shutdown completed")