"""
重复分析服务
基于重复分析结果进行信访记录的重复性识别和统计
"""
import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from sqlalchemy import Float, and_, cast, desc, func, or_
from sqlalchemy.orm import Session

from core.logging_config import get_logger
from models.analysis_task import AnalysisTask
from models.petition_record import DuplicateAnalysisResult

from .base_analyzer import AnalysisFilters, AnalysisResult, BaseAnalyzer

# 日志记录器
logger = get_logger("duplicate_analyzer")


class DuplicateAnalysisFilters(AnalysisFilters):
    """Filter options specific to duplicate analysis queries."""

    def __init__(self):
        super().__init__()
        # Duplicate status to match: "duplicate", "unique" or "suspected".
        self.duplicate_status: Optional[str] = None
        # Minimum similarity to keep, as a 0-1 fraction.
        self.similarity_threshold: Optional[float] = None
        # Look-back window, in days.
        self.time_window_days: Optional[int] = None
        # Restrict results to a single duplicate group.
        self.group_id: Optional[str] = None


class DuplicateAnalyzer(BaseAnalyzer):
    """重复分析器"""

    def get_model_class(self):
        """Return the ORM model this analyzer operates on."""
        return DuplicateAnalysisResult

    def analyze_data(self, filters: DuplicateAnalysisFilters) -> AnalysisResult:
        """Run the full duplicate analysis and return the aggregated result.

        Builds a filtered query, then collects every analysis section
        (distribution, groups, similarity, time series, patterns, details)
        into a single payload. Any exception is logged and turned into a
        failed AnalysisResult.
        """
        try:
            # Base query plus the duplicate-specific filters.
            query = self.build_base_query(filters)
            if filters.duplicate_status:
                query = self._apply_duplicate_status_filter(query, filters.duplicate_status)
            if filters.similarity_threshold is not None:
                query = self._apply_similarity_threshold_filter(query, filters.similarity_threshold)
            if filters.time_window_days is not None:
                query = self._apply_time_window_filter(query, filters.time_window_days)
            if filters.group_id:
                query = self._apply_group_filter(query, filters.group_id)

            # Distribution is computed up front because the metadata section
            # reuses its duplicate_rate / unique_count figures.
            distribution = self.get_duplicate_distribution(query, filters)

            payload = {
                "basic_statistics": self.get_basic_statistics(filters),
                "duplicate_distribution": distribution,
                "group_analysis": self.get_group_analysis(query, filters),
                "similarity_analysis": self.get_similarity_analysis(query, filters),
                "time_series": self.get_time_series_data(filters, 'day'),
                "pattern_analysis": self.get_pattern_analysis(query, filters),
                "duplicate_details": self.get_duplicate_details(query, filters),
                "analysis_metadata": {
                    "total_records": query.count(),
                    "duplicate_rate": distribution.get('duplicate_rate', 0),
                    "unique_records": distribution.get('unique_count', 0),
                    "analysis_time": datetime.now().isoformat()
                },
            }

            return AnalysisResult(
                success=True,
                data=payload,
                message="重复分析完成"
            )

        except Exception as e:
            logger.error(f"重复分析失败: {str(e)}")
            return AnalysisResult(
                success=False,
                data=None,
                message=f"重复分析失败: {str(e)}"
            )

    def get_duplicate_distribution(self, query, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Aggregate record counts per duplicate status (cf_zt).

        "duplicate" and "suspected" rows together form the duplicate count;
        the rate is expressed as a percentage of all counted rows.
        """
        rows = (
            query.with_entities(
                DuplicateAnalysisResult.cf_zt.label('duplicate_status'),
                func.count().label('count')
            )
            .filter(
                DuplicateAnalysisResult.cf_zt.isnot(None),
                DuplicateAnalysisResult.cf_zt != ''
            )
            .group_by(DuplicateAnalysisResult.cf_zt)
            .order_by(desc('count'))
            .all()
        )

        # group_by guarantees unique statuses, so a comprehension suffices.
        status_counts = {row.duplicate_status: row.count for row in rows if row.duplicate_status}

        total = sum(status_counts.values())
        duplicates = status_counts.get('duplicate', 0) + status_counts.get('suspected', 0)

        return {
            "status_distribution": status_counts,
            "total_count": total,
            "duplicate_count": duplicates,
            "unique_count": status_counts.get('unique', 0),
            "suspected_count": status_counts.get('suspected', 0),
            "duplicate_rate": round((duplicates / total) * 100, 2) if total > 0 else 0,
            "status_percentages": {
                status: round((count / total) * 100, 2) if total > 0 else 0
                for status, count in status_counts.items()
            },
        }

    def get_group_analysis(self, query, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Aggregate statistics per duplicate group (cf_z_id).

        Returns per-group record counts, a breakdown by duplicate status,
        the average similarity and summary figures across all groups.

        Fixes over the previous version:
        - the Python ``float`` builtin was passed to ``func.cast``, which does
          not render a valid SQL CAST; ``sqlalchemy.cast(..., Float)`` is used
          instead;
        - ``group_size`` counted distinct statuses rather than records, which
          made the size-based sorting and average meaningless;
        - ``avg_similarity`` kept only the last status row's value; it is now
          a count-weighted mean over the group's status rows.
        """
        # cf_xsd stores the similarity as a percent string such as "85%";
        # strip the sign and CAST to a float before averaging.
        avg_similarity_expr = func.avg(
            cast(func.replace(DuplicateAnalysisResult.cf_xsd, '%', ''), Float) / 100
        ).label('avg_similarity')

        results = query.with_entities(
            DuplicateAnalysisResult.cf_z_id.label('group_id'),
            DuplicateAnalysisResult.cf_zt.label('duplicate_status'),
            func.count().label('count'),
            avg_similarity_expr
        ).filter(
            DuplicateAnalysisResult.cf_z_id.isnot(None),
            DuplicateAnalysisResult.cf_z_id != ''
        ).group_by(
            DuplicateAnalysisResult.cf_z_id,
            DuplicateAnalysisResult.cf_zt
        ).order_by(desc('count')).all()

        group_analysis: Dict[str, Dict[str, Any]] = {}
        # group_id -> [weighted similarity sum, weight] for the weighted mean.
        sim_totals: Dict[str, List[float]] = {}
        for row in results:
            entry = group_analysis.setdefault(row.group_id, {
                "total_count": 0,
                "status_breakdown": {},
                "avg_similarity": 0,
                "group_size": 0
            })
            entry["total_count"] += row.count
            entry["status_breakdown"][row.duplicate_status] = row.count
            if row.avg_similarity is not None:
                sums = sim_totals.setdefault(row.group_id, [0.0, 0])
                sums[0] += float(row.avg_similarity) * row.count
                sums[1] += row.count

        for group_id, entry in group_analysis.items():
            # group_size is the number of records in the group.
            entry["group_size"] = entry["total_count"]
            weighted_sum, weight = sim_totals.get(group_id, (0.0, 0))
            entry["avg_similarity"] = weighted_sum / weight if weight else 0

        # Largest groups first.
        sorted_groups = sorted(group_analysis.items(), key=lambda kv: kv[1]["group_size"], reverse=True)

        total_groups = len(group_analysis)
        return {
            "group_analysis": dict(sorted_groups),
            "total_groups": total_groups,
            "largest_groups": [group_id for group_id, _ in sorted_groups[:10]],
            "average_group_size": round(
                sum(data["group_size"] for data in group_analysis.values()) / total_groups, 2
            ) if total_groups else 0
        }

    def get_similarity_analysis(self, query, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Compute summary statistics and a banded distribution of similarity scores.

        Scores are parsed from each row via _extract_similarity_score; rows
        without a parseable score are skipped.
        """
        scores = []
        for row in query.all():
            try:
                value = self._extract_similarity_score(row)
            except Exception:
                continue
            if value is not None:
                scores.append(value)

        if not scores:
            return {
                "average_similarity": 0,
                "min_similarity": 0,
                "max_similarity": 0,
                "distribution": {}
            }

        # Band lower bounds, highest first; a score lands in the first band
        # whose bound it reaches.
        bands = [
            (0.9, "极高相似度 (0.9-1.0)"),
            (0.7, "高相似度 (0.7-0.9)"),
            (0.5, "中等相似度 (0.5-0.7)"),
            (0.3, "低相似度 (0.3-0.5)"),
            (float('-inf'), "极低相似度 (0-0.3)"),
        ]
        distribution = {label: 0 for _, label in bands}
        for score in scores:
            for lower, label in bands:
                if score >= lower:
                    distribution[label] += 1
                    break

        total = len(scores)
        return {
            "average_similarity": round(sum(scores) / total, 3),
            "min_similarity": round(min(scores), 3),
            "max_similarity": round(max(scores), 3),
            "distribution": distribution,
            "distribution_percentage": {
                label: round((count / total) * 100, 2)
                for label, count in distribution.items()
            },
            "total_samples": total
        }

    def get_pattern_analysis(self, query, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Analyse how the duplicate rate varies across weekday/hour buckets.

        Buckets records by "<weekday>_<hour>" of created_at, computes the
        duplicate rate per bucket, and highlights the highest-rate periods.
        """
        buckets: Dict[str, Dict[str, int]] = {}

        for row in query.all():
            try:
                # Bucket key derived from created_at: "<weekday>_<hour>".
                key = f"{row.created_at.weekday()}_{row.created_at.hour}"
                bucket = buckets.setdefault(key, {"total": 0, "duplicate": 0, "unique": 0})
                bucket["total"] += 1
                if row.cf_zt == 'duplicate':
                    bucket["duplicate"] += 1
                else:
                    bucket["unique"] += 1
            except Exception:
                # Rows without a usable timestamp are skipped.
                continue

        pattern_analysis = {
            key: {
                "duplicate_rate": round((data["duplicate"] / data["total"]) * 100, 2),
                "total_count": data["total"],
                "duplicate_count": data["duplicate"],
            }
            for key, data in buckets.items()
            if data["total"] > 0
        }

        # Top-10 periods by duplicate rate.
        high_rate_periods = sorted(
            pattern_analysis.items(), key=lambda kv: kv[1]["duplicate_rate"], reverse=True
        )[:10]

        return {
            "time_patterns": pattern_analysis,
            "high_rate_periods": high_rate_periods,
            "peak_duplicate_times": [period for period, _ in high_rate_periods],
            "pattern_summary": self._summarize_patterns(pattern_analysis)
        }

    def get_duplicate_details(self, query, filters: DuplicateAnalysisFilters, limit: int = 10) -> List[Dict[str, Any]]:
        """Return the most recent duplicate records, up to `limit` entries.

        Rows that fail to serialise (e.g. missing created_at) are skipped.
        """
        latest = query.order_by(desc(DuplicateAnalysisResult.created_at)).limit(limit).all()

        details: List[Dict[str, Any]] = []
        for row in latest:
            try:
                details.append({
                    "id": row.petition_record_id,
                    "duplicate_status": row.cf_zt,
                    "group_id": row.cf_z_id,
                    "similarity_score": self._extract_similarity_score(row),
                    "duplicate_reason": row.cf_yy or "",
                    "reference_record_id": row.cf_yj_id,
                    "time_difference": self._calculate_time_difference(row),
                    "created_at": row.created_at.isoformat()
                })
            except Exception:
                continue

        return details

    def _apply_duplicate_status_filter(self, query, duplicate_status: str):
        """Restrict the query to rows whose cf_zt equals the given status."""
        same_status = DuplicateAnalysisResult.cf_zt == duplicate_status
        return query.filter(same_status)

    def _apply_similarity_threshold_filter(self, query, threshold: float):
        """Keep rows whose similarity (cf_xsd) is at least `threshold`.

        `threshold` is a 0-1 fraction; cf_xsd is stored as a percent string
        such as "85%", so the '%' sign is stripped and the value CAST to a
        float before comparing.

        Fix: the previous code passed the Python ``float`` builtin to
        ``func.cast``, which renders a generic function call rather than a
        valid SQL CAST; ``sqlalchemy.cast(..., Float)`` is the correct form.
        """
        similarity = cast(
            func.replace(DuplicateAnalysisResult.cf_xsd, '%', ''),
            Float
        ) / 100
        return query.filter(similarity >= threshold)

    def _apply_time_window_filter(self, query, days: int):
        """Keep only rows created within the last `days` days."""
        earliest = datetime.now() - timedelta(days=days)
        return query.filter(DuplicateAnalysisResult.created_at >= earliest)

    def _apply_group_filter(self, query, group_id: str):
        """Restrict the query to a single duplicate group (cf_z_id)."""
        same_group = DuplicateAnalysisResult.cf_z_id == group_id
        return query.filter(same_group)

    def _extract_similarity_score(self, result) -> Optional[float]:
        """提取相似度分数"""
        try:
            if result.cf_xsd:
                # 假设相似度是字符串形式的百分比，如 "85%"
                score_str = result.cf_xsd.replace('%', '')
                return float(score_str) / 100
        except (ValueError, TypeError):
            pass
        return None

    def _calculate_time_difference(self, result) -> Optional[str]:
        """计算时间差异"""
        try:
            if result.cf_yj_id and result.created_at:
                # 这里简化处理，实际需要查询参考记录的时间
                return "需要查询参考记录"
        except Exception:
            pass
        return None

    def _summarize_patterns(self, pattern_analysis: Dict[str, Any]) -> Dict[str, Any]:
        """总结模式分析"""
        if not pattern_analysis:
            return {"total_periods": 0, "peak_period": None, "low_period": None}

        # 按重复率排序
        sorted_patterns = sorted(pattern_analysis.items(), key=lambda x: x[1]["duplicate_rate"], reverse=True)

        return {
            "total_periods": len(pattern_analysis),
            "peak_period": sorted_patterns[0] if sorted_patterns else None,
            "low_period": sorted_patterns[-1] if sorted_patterns else None,
            "average_duplicate_rate": round(sum(data["duplicate_rate"] for data in pattern_analysis.values()) / len(pattern_analysis), 2)
        }

    def get_duplicate_trends(self, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Build per-day duplicate rates plus a rising/falling trend summary."""
        query = self.build_base_query(filters)

        # Daily counts per duplicate status.
        rows = query.with_entities(
            func.date(DuplicateAnalysisResult.created_at).label('date'),
            DuplicateAnalysisResult.cf_zt.label('duplicate_status'),
            func.count().label('count')
        ).filter(
            DuplicateAnalysisResult.cf_zt.isnot(None),
            DuplicateAnalysisResult.cf_zt != ''
        ).group_by(
            func.date(DuplicateAnalysisResult.created_at),
            DuplicateAnalysisResult.cf_zt
        ).order_by('date').all()

        # date string -> {status: count}
        per_day: Dict[str, Dict[str, int]] = {}
        for row in rows:
            day = row.date.isoformat()
            per_day.setdefault(day, {})[row.duplicate_status] = row.count

        # Daily duplicate rate ("duplicate" + "suspected" over all rows).
        daily_rates = []
        for day in sorted(per_day):
            counts = per_day[day]
            total = sum(counts.values())
            dupes = counts.get('duplicate', 0) + counts.get('suspected', 0)
            daily_rates.append({
                "date": day,
                "duplicate_rate": round((dupes / total) * 100, 2) if total > 0 else 0,
                "total_count": total
            })

        return {
            "daily_duplicate_rates": daily_rates,
            "trend_summary": self._calculate_duplicate_trend_summary(daily_rates)
        }

    def _calculate_duplicate_trend_summary(self, daily_rates: List[Dict]) -> Dict[str, Any]:
        """计算重复趋势摘要"""
        if not daily_rates:
            return {"trend": "无数据", "change_rate": 0}

        # 计算最近时期和之前时期的平均重复率
        recent_period = daily_rates[-7:] if len(daily_rates) >= 7 else daily_rates
        earlier_period = daily_rates[-14:-7] if len(daily_rates) >= 14 else daily_rates[:len(daily_rates)//2]

        recent_avg = sum(day["duplicate_rate"] for day in recent_period) / len(recent_period)
        earlier_avg = sum(day["duplicate_rate"] for day in earlier_period) / len(earlier_period) if earlier_period else recent_avg

        change_rate = recent_avg - earlier_avg

        # 判断趋势
        if change_rate > 5:
            trend = "上升趋势"
        elif change_rate < -5:
            trend = "下降趋势"
        else:
            trend = "稳定趋势"

        return {
            "trend": trend,
            "current_rate": round(recent_avg, 2),
            "previous_rate": round(earlier_avg, 2),
            "change_rate": round(change_rate, 2)
        }

    def get_duplicate_by_region(self, filters: DuplicateAnalysisFilters) -> Dict[str, Any]:
        """Break duplicate statistics down by administrative region (dzxx_xzq)."""
        query = self.build_base_query(filters)

        # Counts per (region, duplicate status).
        rows = query.with_entities(
            DuplicateAnalysisResult.dzxx_xzq.label('region'),
            DuplicateAnalysisResult.cf_zt.label('duplicate_status'),
            func.count().label('count')
        ).filter(
            DuplicateAnalysisResult.dzxx_xzq.isnot(None),
            DuplicateAnalysisResult.dzxx_xzq != '',
            DuplicateAnalysisResult.cf_zt.isnot(None),
            DuplicateAnalysisResult.cf_zt != ''
        ).group_by(
            DuplicateAnalysisResult.dzxx_xzq,
            DuplicateAnalysisResult.cf_zt
        ).order_by(desc('count')).all()

        regional: Dict[str, Dict[str, Any]] = {}
        for row in rows:
            entry = regional.setdefault(row.region, {
                "total_count": 0,
                "status_breakdown": {},
                "duplicate_rate": 0
            })
            entry["total_count"] += row.count
            entry["status_breakdown"][row.duplicate_status] = row.count

        # Per-region rate: ("duplicate" + "suspected") over the region total.
        for entry in regional.values():
            total = entry["total_count"]
            dupes = entry["status_breakdown"].get('duplicate', 0) + entry["status_breakdown"].get('suspected', 0)
            entry["duplicate_rate"] = round((dupes / total) * 100, 2) if total > 0 else 0

        # Highest duplicate rate first.
        ranked = sorted(regional.items(), key=lambda kv: kv[1]["duplicate_rate"], reverse=True)

        return {
            "regional_analysis": dict(ranked),
            "highest_duplicate_regions": [region for region, _ in ranked[:5]],
            "lowest_duplicate_regions": [region for region, _ in ranked[-5:]],
            "average_regional_duplicate_rate": round(
                sum(data["duplicate_rate"] for data in regional.values()) / len(regional), 2
            ) if regional else 0
        }

    def get_duplicate_clusters(self, filters: DuplicateAnalysisFilters, min_size: int = 2) -> List[Dict[str, Any]]:
        """List duplicate clusters (records sharing cf_z_id) with at least `min_size` members.

        For each cluster reports its size, time span, average similarity and
        repetition frequency (records per day), largest clusters first.

        Fix: the previous code passed the Python ``float`` builtin to
        ``func.cast``, which does not render a valid SQL CAST; it now uses
        ``sqlalchemy.cast(..., Float)``.
        """
        query = self.build_base_query(filters)

        # cf_xsd stores the similarity as a percent string ("85%"); strip the
        # sign and CAST to a float before averaging.
        avg_similarity_expr = func.avg(
            cast(func.replace(DuplicateAnalysisResult.cf_xsd, '%', ''), Float) / 100
        ).label('avg_similarity')

        clusters = query.with_entities(
            DuplicateAnalysisResult.cf_z_id.label('cluster_id'),
            func.count().label('cluster_size'),
            func.min(DuplicateAnalysisResult.created_at).label('first_occurrence'),
            func.max(DuplicateAnalysisResult.created_at).label('last_occurrence'),
            avg_similarity_expr
        ).filter(
            DuplicateAnalysisResult.cf_z_id.isnot(None),
            DuplicateAnalysisResult.cf_z_id != ''
        ).group_by(
            DuplicateAnalysisResult.cf_z_id
        ).having(
            func.count() >= min_size
        ).order_by(desc('cluster_size')).all()

        cluster_analysis = []
        for cluster in clusters:
            # Days between the first and last occurrence in the cluster.
            time_span = (cluster.last_occurrence - cluster.first_occurrence).days

            cluster_analysis.append({
                "cluster_id": cluster.cluster_id,
                "cluster_size": cluster.cluster_size,
                "time_span_days": time_span,
                "first_occurrence": cluster.first_occurrence.isoformat(),
                "last_occurrence": cluster.last_occurrence.isoformat(),
                "average_similarity": round(cluster.avg_similarity or 0, 3),
                # Occurrences per day; a same-day cluster counts as one day.
                "frequency": round(cluster.cluster_size / max(time_span, 1), 2)
            })

        return cluster_analysis