"""
Multi-perspective analyzer for extracting diverse viewpoints
"""
import logging
import re
from collections import Counter, defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import List, Dict, Any, Optional, Tuple

logger = logging.getLogger(__name__)

class ViewpointStance(Enum):
    """Stance of a viewpoint toward the analyzed topic."""
    SUPPORTIVE = "supportive"  # endorses the topic/position
    OPPOSING = "opposing"  # argues against the topic/position
    NEUTRAL = "neutral"  # objective / takes no side (also the fallback when no indicators match)
    MIXED = "mixed"  # contains both supportive and opposing signals

class SourceCredibility(Enum):
    """Source credibility levels inferred from textual markers."""
    HIGH = "high"  # high credibility (expert/official/data-backed markers)
    MEDIUM = "medium"  # medium credibility (analysis/case/survey markers)
    LOW = "low"  # low credibility (hearsay/speculation markers)
    UNKNOWN = "unknown"  # no credibility markers found

@dataclass
class Viewpoint:
    """Represents a single viewpoint extracted from a content segment."""
    content: str  # the text segment expressing the viewpoint
    stance: ViewpointStance  # stance detected for the segment
    key_arguments: List[str]  # supporting arguments extracted from the segment
    source: Optional[str] = None  # identifier of the originating source, if any
    credibility: SourceCredibility = SourceCredibility.UNKNOWN  # assessed credibility level
    support_count: int = 0  # number of similar viewpoints merged into this one
    weight: float = 1.0  # credibility-derived weight used for ranking

@dataclass
class PerspectiveAnalysis:
    """Complete perspective analysis result returned by PerspectiveAnalyzer."""
    topic: str  # the main topic that was analyzed
    viewpoints: List[Viewpoint]  # aggregated, deduplicated viewpoints (sorted by weight * support)
    key_arguments: Dict[ViewpointStance, List[str]]  # top arguments grouped by stance
    consensus_points: List[str]  # arguments shared across multiple stances
    controversy_points: List[str]  # directly conflicting argument pairs
    perspective_matrix: Dict[str, Any]  # distribution/summary statistics

class PerspectiveAnalyzer:
    """Analyze and extract multiple perspectives from content.

    Pipeline: segment text -> detect stance -> extract arguments ->
    assess credibility -> classify/aggregate -> derive consensus,
    controversy and a summary matrix.
    """

    # Explicit ranking for credibility levels.  SourceCredibility is a plain
    # Enum whose members do not support ``<``/``>``; calling max() on them
    # raises TypeError, so any comparison must go through this mapping.
    _CREDIBILITY_ORDER = {
        SourceCredibility.UNKNOWN: 0,
        SourceCredibility.LOW: 1,
        SourceCredibility.MEDIUM: 2,
        SourceCredibility.HIGH: 3,
    }

    def __init__(self):
        """Initialize perspective analyzer with keyword tables."""
        self.stance_indicators = self._init_stance_indicators()
        self.credibility_markers = self._init_credibility_markers()

    def _init_stance_indicators(self) -> Dict[ViewpointStance, List[str]]:
        """Initialize stance indicator keywords (Chinese)."""
        return {
            ViewpointStance.SUPPORTIVE: [
                "支持", "赞同", "认为", "应该", "必须", "有利于",
                "积极", "正面", "好处", "优势", "重要", "关键"
            ],
            ViewpointStance.OPPOSING: [
                "反对", "不同意", "不应该", "否定", "质疑", "批评",
                "消极", "负面", "坏处", "劣势", "问题", "风险"
            ],
            ViewpointStance.NEUTRAL: [
                "客观", "中立", "两面", "既有", "一方面", "另一方面",
                "需要考虑", "值得讨论", "有待观察"
            ]
        }

    def _init_credibility_markers(self) -> Dict[SourceCredibility, List[str]]:
        """Initialize source credibility markers (Chinese)."""
        return {
            SourceCredibility.HIGH: [
                "专家", "教授", "研究", "数据", "报告", "官方",
                "权威", "学术", "科学", "统计"
            ],
            SourceCredibility.MEDIUM: [
                "分析", "观察", "经验", "案例", "实践", "调查"
            ],
            SourceCredibility.LOW: [
                "据说", "可能", "听说", "网传", "疑似", "猜测"
            ]
        }

    def analyze_perspectives(
        self,
        content_sources: List[Dict[str, Any]],
        topic: str
    ) -> PerspectiveAnalysis:
        """
        Analyze multiple perspectives from various content sources

        Args:
            content_sources: List of content sources (articles, comments, etc.);
                each dict may carry "content" and "source" keys
            topic: The main topic being analyzed

        Returns:
            Comprehensive perspective analysis
        """
        # Extract viewpoints from all sources
        all_viewpoints = []

        for source in content_sources:
            viewpoints = self.extract_viewpoints(
                source.get("content", ""),
                source.get("source", "")
            )
            all_viewpoints.extend(viewpoints)

        # Classify viewpoints by stance
        classified = self.classify_viewpoints(all_viewpoints)

        # Aggregate and deduplicate viewpoints
        aggregated = self.aggregate_viewpoints(classified)

        # Extract key arguments for each stance
        key_arguments = self.extract_key_arguments(aggregated)

        # Find consensus and controversy points
        consensus = self.find_consensus_points(aggregated)
        controversy = self.find_controversy_points(aggregated)

        # Generate perspective matrix
        matrix = self.generate_perspective_matrix(aggregated)

        return PerspectiveAnalysis(
            topic=topic,
            viewpoints=aggregated,
            key_arguments=key_arguments,
            consensus_points=consensus,
            controversy_points=controversy,
            perspective_matrix=matrix
        )

    def extract_viewpoints(
        self,
        content: str,
        source: Optional[str] = None
    ) -> List[Viewpoint]:
        """
        Extract individual viewpoints from content

        Args:
            content: Text content to analyze
            source: Source identifier

        Returns:
            List of extracted viewpoints (segments with no detectable
            arguments are skipped)
        """
        if not content:
            return []

        viewpoints = []

        # Split content into sentences or paragraphs
        segments = self._segment_content(content)

        for segment in segments:
            # Detect stance
            stance = self.detect_stance(segment)

            # Extract arguments
            arguments = self.extract_arguments(segment)

            if arguments:  # Only create viewpoint if arguments found
                # Assess credibility
                credibility = self.assess_credibility(segment)

                viewpoint = Viewpoint(
                    content=segment,
                    stance=stance,
                    key_arguments=arguments,
                    source=source,
                    credibility=credibility,
                    support_count=1,
                    weight=self._calculate_weight(credibility)
                )

                viewpoints.append(viewpoint)

        return viewpoints

    def _segment_content(self, content: str) -> List[str]:
        """Segment content into analyzable units."""
        # Split by sentences (simplified Chinese sentence splitting)
        sentences = re.split(r'[。！？\n]+', content)

        # Filter out very short segments (< 11 chars after stripping)
        segments = [s.strip() for s in sentences if len(s.strip()) > 10]

        # Group related sentences (simple heuristic: a sentence starting with
        # a connector word continues the previous group)
        grouped = []
        current_group = []

        for segment in segments:
            if current_group and self._should_group(current_group[-1], segment):
                current_group.append(segment)
            else:
                if current_group:
                    grouped.append("。".join(current_group))
                current_group = [segment]

        if current_group:
            grouped.append("。".join(current_group))

        return grouped

    def _should_group(self, prev: str, curr: str) -> bool:
        """Check if two segments should be grouped."""
        # Simple heuristic: group if current starts with connector words
        connectors = ["而且", "并且", "另外", "此外", "同时", "因此", "所以"]
        return any(curr.startswith(c) for c in connectors)

    def detect_stance(self, text: str) -> ViewpointStance:
        """
        Detect the stance expressed in text

        Args:
            text: Text to analyze

        Returns:
            Detected stance (NEUTRAL when no indicator matches)
        """
        text_lower = text.lower()

        stance_scores = {
            ViewpointStance.SUPPORTIVE: 0,
            ViewpointStance.OPPOSING: 0,
            ViewpointStance.NEUTRAL: 0
        }

        # Count stance indicators
        for stance, indicators in self.stance_indicators.items():
            for indicator in indicators:
                if indicator in text_lower:
                    stance_scores[stance] += 1

        # Determine dominant stance
        max_score = max(stance_scores.values())

        if max_score == 0:
            return ViewpointStance.NEUTRAL

        # Check for mixed stance.  Only stances that actually matched count:
        # the previous `score >= max_score - 1` test admitted zero-score
        # stances whenever max_score == 1, so NEUTRAL (score 0) was always
        # present and MIXED could never be detected for short texts with one
        # supportive plus one opposing indicator.
        high_scores = [
            s for s, score in stance_scores.items()
            if score > 0 and score >= max_score - 1
        ]

        if len(high_scores) > 1 and ViewpointStance.NEUTRAL not in high_scores:
            return ViewpointStance.MIXED

        # Return stance with highest score; dict iteration order makes
        # SUPPORTIVE win ties, matching the original explicit loop
        return max(stance_scores, key=stance_scores.get)

    def extract_arguments(self, text: str) -> List[str]:
        """
        Extract key arguments from text

        Args:
            text: Text to analyze

        Returns:
            List of up to 5 key arguments
        """
        arguments = []

        # Pattern for argument extraction (common Chinese rhetorical frames)
        argument_patterns = [
            r'因为(.+?)所以',
            r'由于(.+?)因此',
            r'首先(.+?)其次',
            r'第一(.+?)第二',
            r'一方面(.+?)另一方面',
            r'不仅(.+?)而且',
            r'关键.{0,2}是(.+?)[，。]',
            r'重要.{0,2}是(.+?)[，。]',
            r'原因.{0,2}是(.+?)[，。]'
        ]

        for pattern in argument_patterns:
            matches = re.findall(pattern, text)
            arguments.extend(matches)

        # Also extract statements with strong assertion words
        if "必须" in text or "应该" in text or "不能" in text:
            # Extract the clause containing these words
            clauses = re.split(r'[，。；]', text)
            for clause in clauses:
                if any(word in clause for word in ["必须", "应该", "不能"]):
                    arguments.append(clause.strip())

        # Clean and deduplicate.  dict.fromkeys preserves first-seen order,
        # making the [:5] slice deterministic — a plain set() made the result
        # depend on per-run string-hash randomization.
        cleaned = [arg.strip() for arg in arguments if len(arg.strip()) > 5]
        arguments = list(dict.fromkeys(cleaned))

        return arguments[:5]  # Limit to top 5 arguments

    def assess_credibility(self, text: str) -> SourceCredibility:
        """
        Assess credibility based on content markers

        Args:
            text: Text to assess

        Returns:
            Credibility level (UNKNOWN when no marker matches)
        """
        text_lower = text.lower()

        credibility_scores = {
            SourceCredibility.HIGH: 0,
            SourceCredibility.MEDIUM: 0,
            SourceCredibility.LOW: 0
        }

        # Check for credibility markers
        for level, markers in self.credibility_markers.items():
            for marker in markers:
                if marker in text_lower:
                    credibility_scores[level] += 1

        # Check for citations or data (percentages, "research shows")
        if re.search(r'\d+%', text) or re.search(r'研究.{0,10}表明', text):
            credibility_scores[SourceCredibility.HIGH] += 2

        # Determine credibility
        max_score = max(credibility_scores.values())

        if max_score == 0:
            return SourceCredibility.UNKNOWN

        # Ties resolve toward HIGH (first key in insertion order), matching
        # the original explicit loop
        return max(credibility_scores, key=credibility_scores.get)

    def _calculate_weight(self, credibility: SourceCredibility) -> float:
        """Calculate ranking weight based on credibility."""
        weights = {
            SourceCredibility.HIGH: 1.5,
            SourceCredibility.MEDIUM: 1.0,
            SourceCredibility.LOW: 0.5,
            SourceCredibility.UNKNOWN: 0.7
        }
        return weights.get(credibility, 0.7)

    def classify_viewpoints(
        self,
        viewpoints: List[Viewpoint]
    ) -> Dict[ViewpointStance, List[Viewpoint]]:
        """
        Classify viewpoints by stance

        Args:
            viewpoints: List of viewpoints

        Returns:
            Dictionary mapping stance to viewpoints
        """
        classified = defaultdict(list)

        for viewpoint in viewpoints:
            classified[viewpoint.stance].append(viewpoint)

        return dict(classified)

    def aggregate_viewpoints(
        self,
        classified_viewpoints: Dict[ViewpointStance, List[Viewpoint]]
    ) -> List[Viewpoint]:
        """
        Aggregate and deduplicate similar viewpoints

        Args:
            classified_viewpoints: Viewpoints classified by stance

        Returns:
            Aggregated list of unique viewpoints, sorted by
            weight * support_count (descending)
        """
        aggregated = []

        for stance, viewpoints in classified_viewpoints.items():
            # Group similar viewpoints
            groups = self._group_similar_viewpoints(viewpoints)

            for group in groups:
                # Merge group into single viewpoint
                merged = self._merge_viewpoint_group(group)
                aggregated.append(merged)

        # Sort by weight and support count
        aggregated.sort(key=lambda v: v.weight * v.support_count, reverse=True)

        return aggregated

    def _group_similar_viewpoints(
        self,
        viewpoints: List[Viewpoint]
    ) -> List[List[Viewpoint]]:
        """Group similar viewpoints together (greedy single-pass clustering)."""
        if not viewpoints:
            return []

        groups = []
        used = set()

        for i, vp1 in enumerate(viewpoints):
            if i in used:
                continue

            group = [vp1]
            used.add(i)

            for j, vp2 in enumerate(viewpoints[i+1:], i+1):
                if j in used:
                    continue

                # Check similarity of arguments
                if self._are_similar_viewpoints(vp1, vp2):
                    group.append(vp2)
                    used.add(j)

            groups.append(group)

        return groups

    def _are_similar_viewpoints(
        self,
        vp1: Viewpoint,
        vp2: Viewpoint
    ) -> bool:
        """Check if two viewpoints are similar (argument overlap > 50%)."""
        # Check argument overlap
        args1 = set(vp1.key_arguments)
        args2 = set(vp2.key_arguments)

        if not args1 or not args2:
            return False

        overlap = len(args1 & args2) / min(len(args1), len(args2))

        return overlap > 0.5

    def _merge_viewpoint_group(
        self,
        group: List[Viewpoint]
    ) -> Viewpoint:
        """Merge a group of similar viewpoints into one representative."""
        if len(group) == 1:
            return group[0]

        # Combine arguments from every member of the group
        all_arguments = []
        for vp in group:
            all_arguments.extend(vp.key_arguments)

        # Deduplicate and rank by frequency so the most common arguments are
        # kept — the previous list(set(...)) was unordered and did not
        # actually "take most common" as its comment claimed
        unique_args = [arg for arg, _ in Counter(all_arguments).most_common()]

        # Use highest credibility.  Calling max() on the enum members
        # directly raised TypeError (plain Enum members are unorderable), so
        # rank them through the explicit _CREDIBILITY_ORDER mapping.
        max_credibility = max(
            (vp.credibility for vp in group),
            key=self._CREDIBILITY_ORDER.__getitem__,
        )

        # Sum support counts
        total_support = sum(vp.support_count for vp in group)

        # Average weights
        avg_weight = sum(vp.weight for vp in group) / len(group)

        return Viewpoint(
            content=group[0].content,  # Use first as representative
            stance=group[0].stance,
            key_arguments=unique_args[:5],
            source="; ".join(filter(None, [vp.source for vp in group])),
            credibility=max_credibility,
            support_count=total_support,
            weight=avg_weight
        )

    def extract_key_arguments(
        self,
        viewpoints: List[Viewpoint]
    ) -> Dict[ViewpointStance, List[str]]:
        """
        Extract key arguments for each stance

        Args:
            viewpoints: List of viewpoints

        Returns:
            Up to 10 arguments per stance, ranked by frequency
        """
        arguments_by_stance = defaultdict(list)

        for viewpoint in viewpoints:
            arguments_by_stance[viewpoint.stance].extend(
                viewpoint.key_arguments
            )

        # Deduplicate and rank by frequency (Counter.most_common resolves
        # ties by first occurrence, same as the previous stable sort)
        result = {}
        for stance, args in arguments_by_stance.items():
            result[stance] = [
                arg for arg, _ in Counter(args).most_common(10)
            ]

        return result

    def find_consensus_points(
        self,
        viewpoints: List[Viewpoint]
    ) -> List[str]:
        """
        Find points of consensus across viewpoints

        Args:
            viewpoints: List of viewpoints

        Returns:
            List of up to 5 arguments shared by at least two stances
        """
        if len(viewpoints) < 2:
            return []

        # Collect per-stance argument sets.  dict keys serve as an ordered
        # set so the final [:5] slice is deterministic across runs (plain
        # sets iterated in hash order).
        stance_arguments = defaultdict(dict)

        for vp in viewpoints:
            for arg in vp.key_arguments:
                stance_arguments[vp.stance][arg] = None

        # Find overlaps
        consensus = []

        if len(stance_arguments) >= 2:
            # Each stance contributes an argument at most once, so a count
            # of >= 2 means the argument appears in multiple stances
            arg_counts = Counter()
            for args in stance_arguments.values():
                arg_counts.update(args)

            consensus = [
                arg for arg, count in arg_counts.items()
                if count >= 2
            ]

        return consensus[:5]

    def find_controversy_points(
        self,
        viewpoints: List[Viewpoint]
    ) -> List[str]:
        """
        Find points of controversy

        Args:
            viewpoints: List of viewpoints

        Returns:
            List of up to 5 formatted conflicting argument pairs
        """
        controversies = []

        # Find opposing viewpoints
        supportive = [v for v in viewpoints if v.stance == ViewpointStance.SUPPORTIVE]
        opposing = [v for v in viewpoints if v.stance == ViewpointStance.OPPOSING]

        if supportive and opposing:
            # Extract main controversy points; dict keys act as ordered sets
            # so the [:5] slice below is deterministic
            support_args = {}
            for vp in supportive:
                support_args.update(dict.fromkeys(vp.key_arguments))

            oppose_args = {}
            for vp in opposing:
                oppose_args.update(dict.fromkeys(vp.key_arguments))

            # Find directly conflicting arguments
            for s_arg in support_args:
                for o_arg in oppose_args:
                    if self._are_conflicting(s_arg, o_arg):
                        controversy = f"支持方: {s_arg} vs 反对方: {o_arg}"
                        controversies.append(controversy)

        return controversies[:5]

    def _are_conflicting(self, arg1: str, arg2: str) -> bool:
        """Check if two arguments are conflicting (opposite keyword pairs)."""
        # Simple heuristic: check for opposite keywords
        opposites = [
            ("好", "坏"), ("积极", "消极"), ("有利", "有害"),
            ("增加", "减少"), ("提高", "降低"), ("支持", "反对")
        ]

        for pos, neg in opposites:
            if (pos in arg1 and neg in arg2) or (neg in arg1 and pos in arg2):
                return True

        return False

    def generate_perspective_matrix(
        self,
        viewpoints: List[Viewpoint]
    ) -> Dict[str, Any]:
        """
        Generate a perspective comparison matrix

        Args:
            viewpoints: List of viewpoints

        Returns:
            Perspective matrix with counts, percentage distributions,
            weighted support, and top arguments per stance
        """
        matrix = {
            "total_viewpoints": len(viewpoints),
            "stance_distribution": {},
            "credibility_distribution": {},
            "weighted_support": {},
            "top_arguments_by_stance": {}
        }

        # Calculate distributions
        stance_counts = defaultdict(int)
        credibility_counts = defaultdict(int)
        weighted_support = defaultdict(float)

        for vp in viewpoints:
            stance_counts[vp.stance.value] += 1
            credibility_counts[vp.credibility.value] += 1
            weighted_support[vp.stance.value] += vp.weight * vp.support_count

        # Convert to percentages
        total = len(viewpoints)
        if total > 0:
            matrix["stance_distribution"] = {
                stance: (count / total * 100)
                for stance, count in stance_counts.items()
            }

            matrix["credibility_distribution"] = {
                cred: (count / total * 100)
                for cred, count in credibility_counts.items()
            }

        matrix["weighted_support"] = dict(weighted_support)

        # Get top arguments by stance (first 3 in encounter order)
        for stance in ViewpointStance:
            stance_vps = [v for v in viewpoints if v.stance == stance]
            if stance_vps:
                all_args = []
                for vp in stance_vps:
                    all_args.extend(vp.key_arguments)

                # Get most common
                if all_args:
                    matrix["top_arguments_by_stance"][stance.value] = all_args[:3]

        return matrix