"""
情感分析服务
基于情感分析结果进行信访数据的情感倾向统计和分析
"""
import json
from collections import Counter
from datetime import datetime
from typing import Dict, Any, List, Optional

from sqlalchemy import func, desc, and_, or_
from sqlalchemy.orm import Session

from .base_analyzer import BaseAnalyzer, AnalysisFilters, AnalysisResult
from models.petition_record import SentimentAnalysisResult
from models.analysis_task import AnalysisTask
from core.logging_config import get_logger

# Module-level logger for this analyzer service
logger = get_logger("sentiment_analyzer")


class SentimentAnalysisFilters(AnalysisFilters):
    """Filter options specific to sentiment analysis queries.

    Extends the generic ``AnalysisFilters`` with three optional,
    sentiment-only criteria; all default to ``None`` (no filtering).
    """

    def __init__(self):
        super().__init__()
        # Sentiment category to match: positive, negative, neutral
        self.sentiment_type: Optional[str] = None
        # Lower bound on the model's confidence score
        self.min_confidence: Optional[float] = None
        # Substring to match against summary/keyword fields
        self.keyword_filter: Optional[str] = None


class SentimentAnalyzer(BaseAnalyzer):
    """Analyzer producing sentiment statistics over petition records.

    Operates on ``SentimentAnalysisResult`` rows (fields: ``qglx_yj`` /
    ``qglx_ej`` / ``qglx_sj`` = level 1/2/3 sentiment type, ``qgzy`` =
    sentiment summary, ``qggjc`` = keywords) and returns plain dict/list
    structures ready for JSON serialization.
    """

    def get_model_class(self):
        """Return the ORM model class this analyzer queries."""
        return SentimentAnalysisResult

    def analyze_data(self, filters: SentimentAnalysisFilters) -> AnalysisResult:
        """Run the full sentiment analysis pipeline for the given filters.

        Args:
            filters: Sentiment-specific filter options.

        Returns:
            AnalysisResult: ``success=True`` with a ``data`` dict bundling
            distribution, trend, keyword, confidence, time-series and
            sample statistics; ``success=False`` with ``data=None`` on any
            failure.
        """
        try:
            query = self.build_base_query(filters)

            # Apply analyzer-specific filters on top of the base query.
            # NOTE(review): this matches only the level-1 type (qglx_yj);
            # _apply_sentiment_filter also matches levels 2/3 — kept as-is
            # to preserve existing behavior.
            if filters.sentiment_type:
                query = query.filter(
                    SentimentAnalysisResult.qglx_yj == filters.sentiment_type
                )

            if filters.min_confidence is not None:
                query = self._apply_confidence_filter(query, filters.min_confidence)

            if filters.keyword_filter:
                query = self._apply_keyword_filter(query, filters.keyword_filter)

            # Collect each statistics section.
            basic_stats = self.get_basic_statistics(filters)
            sentiment_distribution = self.get_sentiment_distribution(query, filters)
            sentiment_trends = self.get_sentiment_trends(query, filters)
            keyword_analysis = self.get_keyword_analysis(query, filters)
            confidence_analysis = self.get_confidence_analysis(query, filters)
            time_series = self.get_time_series_data(filters, 'day')
            popular_sentiments = self.get_popular_sentiments(query, filters)

            # Assemble the combined result payload.
            result_data = {
                "basic_statistics": basic_stats,
                "sentiment_distribution": sentiment_distribution,
                "sentiment_trends": sentiment_trends,
                "keyword_analysis": keyword_analysis,
                "confidence_analysis": confidence_analysis,
                "time_series": time_series,
                "popular_sentiments": popular_sentiments,
                "analysis_metadata": {
                    "total_records": query.count(),
                    "unique_sentiments": len(sentiment_distribution.get('types', [])),
                    "average_confidence": confidence_analysis.get('average_confidence', 0),
                    "analysis_time": datetime.now().isoformat()
                }
            }

            return AnalysisResult(
                success=True,
                data=result_data,
                message="情感分析完成"
            )

        except Exception as e:
            # Top-level boundary: log with traceback, report failure to caller.
            logger.exception("情感分析失败: %s", e)
            return AnalysisResult(
                success=False,
                data=None,
                message=f"情感分析失败: {str(e)}"
            )

    def get_sentiment_distribution(self, query, filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Count records per level-1 sentiment type, with percentages.

        Returns type names, counts, percentages (2 decimals), and the
        dominant (most frequent) sentiment.
        """
        # Group by the level-1 sentiment type, excluding NULL/empty values.
        results = query.with_entities(
            SentimentAnalysisResult.qglx_yj.label('sentiment_type'),
            func.count().label('count')
        ).filter(
            SentimentAnalysisResult.qglx_yj.isnot(None),
            SentimentAnalysisResult.qglx_yj != ''
        ).group_by(
            SentimentAnalysisResult.qglx_yj
        ).order_by(desc('count')).all()

        sentiment_types: List[str] = []
        counts: List[int] = []
        for result in results:
            if result.sentiment_type:
                sentiment_types.append(result.sentiment_type)
                counts.append(result.count)

        # Convert raw counts to percentages of the filtered total.
        total = sum(counts)
        percentages = [round((count / total) * 100, 2) if total > 0 else 0 for count in counts]

        return {
            "types": sentiment_types,
            "counts": counts,
            "percentages": percentages,
            "total_types": len(sentiment_types),
            # Rows are ordered by count desc, so index 0 is the dominant one.
            "dominant_sentiment": sentiment_types[0] if sentiment_types else None,
            "dominant_percentage": percentages[0] if percentages else 0
        }

    def get_sentiment_trends(self, query, filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Build a per-day time series of counts per sentiment type."""
        # Group counts by (day, level-1 sentiment type).
        daily_sentiment = query.with_entities(
            func.date(SentimentAnalysisResult.created_at).label('date'),
            SentimentAnalysisResult.qglx_yj.label('sentiment_type'),
            func.count().label('count')
        ).filter(
            SentimentAnalysisResult.qglx_yj.isnot(None),
            SentimentAnalysisResult.qglx_yj != ''
        ).group_by(
            func.date(SentimentAnalysisResult.created_at),
            SentimentAnalysisResult.qglx_yj
        ).order_by('date').all()

        # date string -> {sentiment type -> count}
        trends: Dict[str, Dict[str, int]] = {}
        for result in daily_sentiment:
            date_str = result.date.isoformat()
            trends.setdefault(date_str, {})[result.sentiment_type] = result.count

        # Order-preserving dedup: set() made the output order nondeterministic.
        sentiment_types = list(dict.fromkeys(
            result.sentiment_type for result in daily_sentiment
        ))

        # Densify: every date gets an entry for every type (0 when absent).
        time_series = []
        for date in sorted(trends.keys()):
            series_data: Dict[str, Any] = {"date": date}
            for sentiment_type in sentiment_types:
                series_data[sentiment_type] = trends[date].get(sentiment_type, 0)
            time_series.append(series_data)

        return {
            "time_series": time_series,
            "sentiment_types": sentiment_types,
            "trend_summary": self._calculate_sentiment_trend_summary(time_series, sentiment_types)
        }

    def get_keyword_analysis(self, query, filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Aggregate keyword frequencies and the sentiment mix per keyword.

        Returns the 20 most frequent keywords with their per-sentiment
        occurrence counts and dominant sentiment.
        """
        all_keywords: List[str] = []
        # keyword -> {sentiment label -> occurrence count}
        keyword_sentiment_map: Dict[str, Dict[str, int]] = {}

        for row in query.all():
            try:
                keywords = self._extract_keywords(row)
                sentiment = row.qglx_yj or "未知"

                for keyword in keywords:
                    if not keyword:
                        continue
                    all_keywords.append(keyword)
                    per_sentiment = keyword_sentiment_map.setdefault(keyword, {})
                    per_sentiment[sentiment] = per_sentiment.get(sentiment, 0) + 1
            except Exception:
                # Best effort: skip malformed rows rather than failing the run.
                continue

        # Counter replaces the manual dict-increment + sorted() frequency count.
        keyword_counts = Counter(all_keywords)
        top_keywords = keyword_counts.most_common(20)

        keyword_analysis = []
        for keyword, count in top_keywords:
            sentiment_distribution = keyword_sentiment_map.get(keyword, {})
            keyword_analysis.append({
                "keyword": keyword,
                "frequency": count,
                "sentiment_distribution": sentiment_distribution,
                "dominant_sentiment": max(sentiment_distribution.items(), key=lambda x: x[1])[0] if sentiment_distribution else "未知"
            })

        return {
            "top_keywords": keyword_analysis,
            "total_keywords": len(all_keywords),
            "unique_keywords": len(keyword_counts)
        }

    def get_confidence_analysis(self, query, filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Compute average/min/max confidence and a 4-bucket distribution."""
        confidence_scores: List[float] = []

        for result in query.all():
            try:
                confidence_scores.append(result.get_confidence_score())
            except Exception:
                # Rows without a usable confidence score are skipped.
                continue

        if not confidence_scores:
            return {
                "average_confidence": 0,
                "min_confidence": 0,
                "max_confidence": 0,
                "distribution": {}
            }

        avg_confidence = sum(confidence_scores) / len(confidence_scores)
        min_confidence = min(confidence_scores)
        max_confidence = max(confidence_scores)

        # Bucket labels are part of the returned payload — do not rename.
        distribution = {
            "高置信度 (0.8-1.0)": 0,
            "中置信度 (0.6-0.8)": 0,
            "低置信度 (0.4-0.6)": 0,
            "很低置信度 (0-0.4)": 0
        }

        for score in confidence_scores:
            if score >= 0.8:
                distribution["高置信度 (0.8-1.0)"] += 1
            elif score >= 0.6:
                distribution["中置信度 (0.6-0.8)"] += 1
            elif score >= 0.4:
                distribution["低置信度 (0.4-0.6)"] += 1
            else:
                distribution["很低置信度 (0-0.4)"] += 1

        # Express each bucket as a percentage of all scored samples.
        total = len(confidence_scores)
        distribution_percentage = {
            k: round((v / total) * 100, 2) for k, v in distribution.items()
        }

        return {
            "average_confidence": round(avg_confidence, 3),
            "min_confidence": round(min_confidence, 3),
            "max_confidence": round(max_confidence, 3),
            "distribution": distribution,
            "distribution_percentage": distribution_percentage,
            "total_samples": total
        }

    def get_popular_sentiments(self, query, filters: SentimentAnalysisFilters, limit: int = 10) -> List[Dict[str, Any]]:
        """Return the most recent classified sentiment records.

        Ordered by ``created_at`` desc (recency, not confidence), up to
        ``limit`` entries; rows that raise while being serialized are
        skipped.
        """
        results = query.filter(
            SentimentAnalysisResult.qglx_yj.isnot(None),
            SentimentAnalysisResult.qglx_yj != ''
        ).order_by(desc(SentimentAnalysisResult.created_at)).limit(limit).all()

        popular_sentiments: List[Dict[str, Any]] = []
        for result in results:
            try:
                confidence = result.get_confidence_score()
                popular_sentiments.append({
                    "id": result.petition_record_id,
                    "sentiment_type": result.qglx_yj,
                    "sentiment_subtype": result.qglx_ej or "未细分",
                    "summary": result.qgzy or "",
                    "keywords": self._extract_keywords(result),
                    "confidence": round(confidence, 3),
                    "created_at": result.created_at.isoformat()
                })
            except Exception:
                continue

        return popular_sentiments

    def _apply_sentiment_filter(self, query, sentiment_type: str):
        """Filter to rows matching the type at any of the three levels."""
        return query.filter(
            or_(
                SentimentAnalysisResult.qglx_yj == sentiment_type,
                SentimentAnalysisResult.qglx_ej == sentiment_type,
                SentimentAnalysisResult.qglx_sj == sentiment_type
            )
        )

    def _apply_confidence_filter(self, query, min_confidence: float):
        """Filter by minimum confidence.

        TODO: no confidence column is queryable yet, so this is a no-op
        pass-through until the schema exposes one.
        """
        return query

    def _apply_keyword_filter(self, query, keyword: str):
        """Filter to rows whose summary or keyword field contains ``keyword``."""
        return query.filter(
            or_(
                SentimentAnalysisResult.qgzy.contains(keyword),
                SentimentAnalysisResult.qggjc.contains(keyword)
            )
        )

    def _extract_keywords(self, result) -> List[str]:
        """Collect up to 10 unique keywords from a result row.

        Reads the ``qggjc`` keyword field (comma-separated string or list)
        and additionally splits words (> 2 chars) out of the ``qgzy``
        summary. Dedup preserves first-seen order so the truncated output
        is deterministic (the original ``set()``-based dedup was not).
        """
        keywords: List[str] = []

        if result.qggjc:
            if isinstance(result.qggjc, str):
                keywords.extend(kw.strip() for kw in result.qggjc.split(','))
            elif isinstance(result.qggjc, list):
                keywords.extend(result.qggjc)

        # Naive word-split extraction from the summary; could be replaced
        # by a proper NLP tokenizer.
        if result.qgzy and isinstance(result.qgzy, str):
            keywords.extend(
                word.strip() for word in result.qgzy.split() if len(word.strip()) > 2
            )

        # Order-preserving dedup, dropping empty/whitespace-only entries.
        unique_keywords = list(dict.fromkeys(kw for kw in keywords if kw and kw.strip()))
        return unique_keywords[:10]

    def _calculate_sentiment_trend_summary(self, time_series: List[Dict], sentiment_types: List[str]) -> Dict[str, Any]:
        """Summarize recent vs earlier sentiment volume as a trend label.

        Compares the last 7 days against the preceding 7 (or the halves of
        a shorter series) and classifies the average change rate as rising
        (> 10%), falling (< -10%) or flat.
        """
        if not time_series:
            return {"trend": "无数据", "change_rate": 0}

        # Recent window vs the window just before it.
        recent_period = time_series[-7:] if len(time_series) >= 7 else time_series
        earlier_period = time_series[-14:-7] if len(time_series) >= 14 else time_series[:len(time_series)//2]

        recent_totals = {sentiment: 0 for sentiment in sentiment_types}
        earlier_totals = {sentiment: 0 for sentiment in sentiment_types}

        for day in recent_period:
            for sentiment in sentiment_types:
                recent_totals[sentiment] += day.get(sentiment, 0)

        for day in earlier_period:
            for sentiment in sentiment_types:
                earlier_totals[sentiment] += day.get(sentiment, 0)

        # Percentage change per sentiment; 100% when appearing from zero.
        changes: Dict[str, float] = {}
        for sentiment in sentiment_types:
            if earlier_totals[sentiment] > 0:
                change_rate = ((recent_totals[sentiment] - earlier_totals[sentiment]) / earlier_totals[sentiment]) * 100
                changes[sentiment] = round(change_rate, 2)
            else:
                changes[sentiment] = 100.0 if recent_totals[sentiment] > 0 else 0.0

        # Overall trend from the mean change across all types.
        avg_change = sum(changes.values()) / len(changes) if changes else 0
        if avg_change > 10:
            trend = "上升趋势"
        elif avg_change < -10:
            trend = "下降趋势"
        else:
            trend = "平稳趋势"

        return {
            "trend": trend,
            "change_rates": changes,
            "average_change": round(avg_change, 2)
        }

    def get_sentiment_comparison(self, sentiment_types: List[str], filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Compare record counts and percentages across sentiment types."""
        # Hoisted out of the loop: the original rebuilt and counted the
        # base query up to twice per sentiment type.
        total = self.build_base_query(filters).count()

        comparison_data: Dict[str, Any] = {}
        for sentiment_type in sentiment_types:
            count = self._apply_sentiment_filter(
                self.build_base_query(filters), sentiment_type
            ).count()
            comparison_data[sentiment_type] = {
                "count": count,
                "percentage": round((count / total) * 100, 2) if total > 0 else 0
            }

        return comparison_data

    def get_sentiment_time_series(self, sentiment_type: str, filters: SentimentAnalysisFilters) -> Dict[str, Any]:
        """Per-day counts for one sentiment type (matched at any level)."""
        query = self.build_base_query(filters)
        sentiment_query = self._apply_sentiment_filter(query, sentiment_type)

        daily_stats = sentiment_query.with_entities(
            func.date(SentimentAnalysisResult.created_at).label('date'),
            func.count().label('count')
        ).group_by(
            func.date(SentimentAnalysisResult.created_at)
        ).order_by('date').all()

        return {
            "sentiment_type": sentiment_type,
            "daily_trends": [
                {
                    "date": stat.date.isoformat(),
                    "count": stat.count
                }
                for stat in daily_stats
            ]
        }