"""
A/B测试数据收集和分析模块
用于跟踪用户模型选择偏好和行为模式
"""

from typing import Dict, List, Any, Optional
from datetime import datetime, timedelta
from dataclasses import dataclass
import numpy as np
from scipy import stats
import pandas as pd
import logging
from motor.motor_asyncio import AsyncIOMotorDatabase

logger = logging.getLogger(__name__)


@dataclass
class ABTestMetrics:
    """Aggregated A/B-test metrics for a single model."""
    model_name: str             # model identifier
    selection_count: int        # number of times users picked this model
    selection_rate: float       # share of all selections, in [0, 1]
    avg_score: float            # mean score users gave this model when selected
    avg_time: float             # mean time metric (not yet computed from sessions)
    user_satisfaction: float    # mean satisfaction (not yet computed from feedback)
    confidence_interval: tuple  # (lower, upper) 95% Wilson CI of selection_rate
    p_value: float              # significance vs. the most-selected baseline model
    is_significant: bool        # True when p_value < 0.05


class ABTestingAnalyzer:
    """
    A/B-test analyzer.

    Collects users' model-selection and interaction events into MongoDB and
    turns them into aggregate metrics, pairwise significance tests, and a
    human-readable report.
    """

    def __init__(self, db: "AsyncIOMotorDatabase"):
        """
        Initialize the analyzer.

        Args:
            db: MongoDB database instance (motor async driver). The type is a
                string annotation so it is not evaluated at class-definition
                time when motor is unavailable.
        """
        self.db = db
        # Raw user events: model selections and UI interactions.
        self.events_collection = db.comparison_events
        # Comparison sessions created by the UI.
        self.sessions_collection = db.model_comparisons

    async def track_selection(self,
                             user_id: str,
                             session_id: str,
                             selected_model: str,
                             alternatives: List[str],
                             scores: Dict[str, float],
                             context: Dict[str, Any]) -> str:
        """
        Record a model-selection event.

        Args:
            user_id: user identifier
            session_id: comparison session identifier
            selected_model: the model the user chose
            alternatives: the other models that were offered
            scores: per-model score mapping
            context: free-form context payload

        Returns:
            The inserted event's id as a string.

        Raises:
            Exception: re-raised after logging if the insert fails.
        """
        try:
            event = {
                "user_id": user_id,
                "session_id": session_id,
                "selected_model": selected_model,
                "alternatives": alternatives,
                "scores": scores,
                "context": context,
                # NOTE: naive UTC timestamp; utcnow() is deprecated in 3.12
                # but kept so stored values stay comparable with existing data.
                "timestamp": datetime.utcnow(),
                "event_type": "model_selection"
            }

            result = await self.events_collection.insert_one(event)
            logger.info(f"跟踪选择事件: {user_id} -> {selected_model}")
            return str(result.inserted_id)

        except Exception as e:
            logger.error(f"跟踪选择事件失败: {e}")
            raise

    async def track_interaction(self,
                              user_id: str,
                              session_id: str,
                              action: str,
                              target: str,
                              value: Any = None) -> str:
        """
        Record a user interaction event.

        Args:
            user_id: user identifier
            session_id: comparison session identifier
            action: action type (view, edit, copy, ...)
            target: object the action applied to
            value: optional associated value

        Returns:
            The inserted event's id as a string.

        Raises:
            Exception: re-raised after logging if the insert fails.
        """
        try:
            event = {
                "user_id": user_id,
                "session_id": session_id,
                "action": action,
                "target": target,
                "value": value,
                "timestamp": datetime.utcnow(),
                "event_type": "interaction"
            }

            result = await self.events_collection.insert_one(event)
            return str(result.inserted_id)

        except Exception as e:
            logger.error(f"跟踪交互事件失败: {e}")
            raise

    async def analyze_model_performance(self,
                                       start_date: datetime,
                                       end_date: datetime,
                                       min_samples: int = 30) -> List[ABTestMetrics]:
        """
        Aggregate per-model performance over a time window.

        Args:
            start_date: window start (inclusive)
            end_date: window end (inclusive)
            min_samples: models with fewer selections are dropped

        Returns:
            One ABTestMetrics per model with at least min_samples selections;
            p-values are filled in when two or more models qualify.

        Raises:
            Exception: re-raised after logging on aggregation failure.
        """
        try:
            # "$scores.$selected_model" is NOT valid aggregation syntax (the
            # path would be taken literally), so the score of the *selected*
            # model is extracted first: turn the scores sub-document into a
            # k/v array, keep the entry whose key equals selected_model, and
            # take its value.
            pipeline = [
                {
                    "$match": {
                        "event_type": "model_selection",
                        "timestamp": {"$gte": start_date, "$lte": end_date}
                    }
                },
                {
                    "$addFields": {
                        "selected_score": {
                            "$arrayElemAt": [
                                {
                                    "$map": {
                                        "input": {
                                            "$filter": {
                                                "input": {"$objectToArray": "$scores"},
                                                "as": "kv",
                                                "cond": {"$eq": ["$$kv.k", "$selected_model"]}
                                            }
                                        },
                                        "as": "kv",
                                        "in": "$$kv.v"
                                    }
                                },
                                0
                            ]
                        }
                    }
                },
                {
                    "$group": {
                        "_id": "$selected_model",
                        "count": {"$sum": 1},
                        "avg_score": {"$avg": "$selected_score"},
                        "users": {"$addToSet": "$user_id"}
                    }
                },
                {
                    "$project": {
                        "model": "$_id",
                        "count": 1,
                        "avg_score": 1,
                        "unique_users": {"$size": "$users"}
                    }
                }
            ]

            cursor = self.events_collection.aggregate(pipeline)
            results = []

            async for doc in cursor:
                results.append(doc)

            # Total selections across ALL models (including those later
            # filtered out by min_samples) so rates sum to 1.
            total_selections = sum(r["count"] for r in results)

            metrics = []
            for result in results:
                if result["count"] < min_samples:
                    continue

                selection_rate = result["count"] / total_selections if total_selections > 0 else 0

                # 95% Wilson confidence interval on the selection rate.
                ci = self._calculate_confidence_interval(
                    selection_rate,
                    result["count"]
                )

                metric = ABTestMetrics(
                    model_name=result["model"],
                    selection_count=result["count"],
                    selection_rate=selection_rate,
                    # $avg yields None when every score was missing.
                    avg_score=result.get("avg_score") or 0.0,
                    avg_time=0,  # TODO: derive from session data
                    user_satisfaction=0,  # TODO: derive from user feedback
                    confidence_interval=ci,
                    p_value=0,  # filled in by the pairwise tests below
                    is_significant=False
                )

                metrics.append(metric)

            # Pairwise significance tests need at least two models.
            if len(metrics) >= 2:
                metrics = await self._perform_pairwise_tests(metrics)

            return metrics

        except Exception as e:
            logger.error(f"分析模型性能失败: {e}")
            raise

    async def generate_ab_report(self,
                                start_date: datetime,
                                end_date: datetime) -> Dict[str, Any]:
        """
        Build a full A/B-test report for a time window.

        Args:
            start_date: window start (inclusive)
            end_date: window end (inclusive)

        Returns:
            Report dict with period, summary counts, per-model metrics,
            behavior patterns, conversion funnel, temporal trends and
            recommendations.

        Raises:
            Exception: re-raised after logging on any sub-query failure.
        """
        try:
            metrics = await self.analyze_model_performance(start_date, end_date)

            behavior_patterns = await self._analyze_behavior_patterns(start_date, end_date)

            funnel = await self._analyze_conversion_funnel(start_date, end_date)

            trends = await self._analyze_temporal_trends(start_date, end_date)

            report = {
                "period": {
                    "start": start_date.isoformat(),
                    "end": end_date.isoformat()
                },
                "summary": {
                    "total_comparisons": await self._count_comparisons(start_date, end_date),
                    "total_users": await self._count_unique_users(start_date, end_date),
                    "avg_models_per_comparison": await self._avg_models_per_comparison(start_date, end_date)
                },
                "model_metrics": [
                    {
                        "model": m.model_name,
                        "selection_count": m.selection_count,
                        "selection_rate": f"{m.selection_rate * 100:.2f}%",
                        "avg_score": round(m.avg_score, 2),
                        "confidence_interval": f"[{m.confidence_interval[0]:.2%}, {m.confidence_interval[1]:.2%}]",
                        # Winner = statistically significant AND majority choice.
                        "is_winner": m.is_significant and m.selection_rate > 0.5
                    }
                    for m in metrics
                ],
                "behavior_patterns": behavior_patterns,
                "conversion_funnel": funnel,
                "temporal_trends": trends,
                "recommendations": self._generate_recommendations(metrics, behavior_patterns)
            }

            return report

        except Exception as e:
            logger.error(f"生成A/B测试报告失败: {e}")
            raise

    async def export_raw_data(self,
                            start_date: datetime,
                            end_date: datetime,
                            format: str = "csv") -> Any:
        """
        Export raw event data for a time window.

        Args:
            start_date: window start (inclusive)
            end_date: window end (inclusive)
            format: "csv" for a CSV string, anything else for a list of dicts
                (parameter name shadows the builtin but is kept for
                backward compatibility with keyword callers)

        Returns:
            CSV string or list of JSON-serializable event dicts.

        Raises:
            Exception: re-raised after logging on query failure.
        """
        try:
            cursor = self.events_collection.find({
                "timestamp": {"$gte": start_date, "$lte": end_date}
            })

            events = []
            async for event in cursor:
                # Make the document JSON/CSV friendly.
                event["_id"] = str(event["_id"])
                event["timestamp"] = event["timestamp"].isoformat()
                events.append(event)

            if format == "csv":
                df = pd.DataFrame(events)
                return df.to_csv(index=False)
            else:
                return events

        except Exception as e:
            logger.error(f"导出数据失败: {e}")
            raise

    def _calculate_confidence_interval(self, p: float, n: int, confidence: float = 0.95) -> tuple:
        """
        Wilson score confidence interval for a binomial proportion.

        Args:
            p: observed proportion
            n: sample size
            confidence: confidence level (default 0.95)

        Returns:
            (lower, upper), clamped to [0, 1]; (0, 0) when n == 0.
        """
        if n == 0:
            return (0, 0)

        # Wilson score interval — better behaved than the normal
        # approximation for proportions near 0 or 1.
        z = stats.norm.ppf((1 + confidence) / 2)
        denominator = 1 + z**2 / n
        centre_adjusted_probability = p + z**2 / (2 * n)
        adjusted_standard_error = np.sqrt((p * (1 - p) + z**2 / (4 * n)) / n)

        lower = (centre_adjusted_probability - z * adjusted_standard_error) / denominator
        upper = (centre_adjusted_probability + z * adjusted_standard_error) / denominator

        return (max(0, lower), min(1, upper))

    async def _perform_pairwise_tests(self, metrics: List[ABTestMetrics]) -> List[ABTestMetrics]:
        """
        Test every model against the most-selected baseline.

        Args:
            metrics: per-model metrics (mutated in place)

        Returns:
            The same list with p_value / is_significant filled in for every
            non-baseline model.
        """
        # The model with the highest selection rate is the baseline.
        baseline = max(metrics, key=lambda m: m.selection_rate)

        for metric in metrics:
            if metric.model_name == baseline.model_name:
                continue

            # One-way chi-square goodness-of-fit against the null hypothesis
            # that users pick this model and the baseline equally often.
            # (A 1x2 "contingency table" passed to chi2_contingency has zero
            # degrees of freedom and raises, so chisquare is used instead.)
            _, p_value = stats.chisquare(
                [metric.selection_count, baseline.selection_count]
            )

            metric.p_value = p_value
            metric.is_significant = p_value < 0.05

        return metrics

    async def _analyze_behavior_patterns(self, start_date: datetime, end_date: datetime) -> Dict[str, Any]:
        """Count interaction events per action type within the window."""
        pipeline = [
            {
                "$match": {
                    "event_type": "interaction",
                    "timestamp": {"$gte": start_date, "$lte": end_date}
                }
            },
            {
                "$group": {
                    "_id": "$action",
                    "count": {"$sum": 1}
                }
            }
        ]

        cursor = self.events_collection.aggregate(pipeline)
        patterns = {}

        async for doc in cursor:
            patterns[doc["_id"]] = doc["count"]

        return patterns

    async def _analyze_conversion_funnel(self, start_date: datetime, end_date: datetime) -> List[Dict[str, Any]]:
        """Conversion-funnel skeleton (counts not yet implemented)."""
        funnel = [
            {"stage": "创建对比", "count": 0},
            {"stage": "查看结果", "count": 0},
            {"stage": "选择模型", "count": 0},
            {"stage": "使用结果", "count": 0}
        ]

        # TODO: populate counts from the corresponding event types.
        return funnel

    async def _analyze_temporal_trends(self, start_date: datetime, end_date: datetime) -> List[Dict[str, Any]]:
        """Daily selection counts within the window, sorted by date."""
        pipeline = [
            {
                "$match": {
                    "event_type": "model_selection",
                    "timestamp": {"$gte": start_date, "$lte": end_date}
                }
            },
            {
                "$group": {
                    # Bucket by calendar day.
                    "_id": {
                        "$dateToString": {
                            "format": "%Y-%m-%d",
                            "date": "$timestamp"
                        }
                    },
                    "count": {"$sum": 1}
                }
            },
            {"$sort": {"_id": 1}}
        ]

        cursor = self.events_collection.aggregate(pipeline)
        trends = []

        async for doc in cursor:
            trends.append({
                "date": doc["_id"],
                "selections": doc["count"]
            })

        return trends

    async def _count_comparisons(self, start_date: datetime, end_date: datetime) -> int:
        """Number of comparison sessions created within the window."""
        return await self.sessions_collection.count_documents({
            "created_at": {"$gte": start_date, "$lte": end_date}
        })

    async def _count_unique_users(self, start_date: datetime, end_date: datetime) -> int:
        """Number of distinct users with any event within the window."""
        pipeline = [
            {
                "$match": {
                    "timestamp": {"$gte": start_date, "$lte": end_date}
                }
            },
            {
                "$group": {
                    "_id": "$user_id"
                }
            },
            {
                "$count": "unique_users"
            }
        ]

        cursor = self.events_collection.aggregate(pipeline)
        async for doc in cursor:
            # $count emits exactly one document (or none when no matches).
            return doc["unique_users"]
        return 0

    async def _avg_models_per_comparison(self, start_date: datetime, end_date: datetime) -> float:
        """Average number of models per comparison session in the window."""
        pipeline = [
            {
                "$match": {
                    "created_at": {"$gte": start_date, "$lte": end_date}
                }
            },
            {
                "$project": {
                    "model_count": {"$size": "$models"}
                }
            },
            {
                "$group": {
                    "_id": None,
                    "avg": {"$avg": "$model_count"}
                }
            }
        ]

        cursor = self.sessions_collection.aggregate(pipeline)
        async for doc in cursor:
            return round(doc["avg"], 1)
        # Float to match the annotated return type.
        return 0.0

    def _generate_recommendations(self, metrics: List[ABTestMetrics], patterns: Dict[str, Any]) -> List[str]:
        """Derive optimization suggestions from metrics and behavior patterns."""
        recommendations = []

        # Selection-rate based suggestions.
        if metrics:
            best_model = max(metrics, key=lambda m: m.selection_rate)
            worst_model = min(metrics, key=lambda m: m.selection_rate)

            if best_model.selection_rate > 0.6:
                recommendations.append(
                    f"模型 {best_model.model_name} 表现优异，建议作为默认选项"
                )

            if worst_model.selection_rate < 0.1:
                recommendations.append(
                    f"模型 {worst_model.model_name} 选择率较低，考虑优化或移除"
                )

        # Behavior-pattern based suggestions.
        if "edit" in patterns and patterns["edit"] > patterns.get("copy", 0):
            recommendations.append("用户编辑行为频繁，建议增强编辑功能")

        return recommendations