#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
增强版抖音爬虫

扩展原有爬虫功能，添加舆论监控所需的深度数据获取能力。
"""

import asyncio
import logging
import time
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from .web_crawler import DouyinWebCrawler
from ...base_crawler import BaseCrawler
from anti_crawler.delays import DelayManager, DelayType
from sentiment.analyzer import SentimentAnalyzer

class EnhancedDouyinCrawler(DouyinWebCrawler):
    """Enhanced Douyin crawler for public-opinion monitoring.

    Extends the base web crawler with paginated comment collection, reply
    expansion, optional per-text sentiment analysis, user influence /
    credibility scoring, per-comment risk assessment and aggregate
    sentiment / risk summaries.
    """

    def __init__(self, enable_sentiment_analysis: bool = True,
                 enable_smart_delay: bool = True):
        """Set up optional analysis components and lifetime statistics.

        Args:
            enable_sentiment_analysis: when True, construct a
                ``SentimentAnalyzer`` and run it over fetched texts.
            enable_smart_delay: when True, pace requests through a
                ``DelayManager``.
        """
        super().__init__()
        self.logger = logging.getLogger(__name__)

        # Optional components — a value of None disables the feature and is
        # checked before every use.
        self.sentiment_analyzer = SentimentAnalyzer() if enable_sentiment_analysis else None
        self.delay_manager = DelayManager() if enable_smart_delay else None

        # Lifetime counters, updated as videos/comments are processed.
        self.stats = {
            "total_videos_processed": 0,
            "total_comments_processed": 0,
            "negative_comments_found": 0,
            "high_threat_comments": 0,
            "sentiment_analysis_enabled": enable_sentiment_analysis,
            "smart_delay_enabled": enable_smart_delay,
            "last_update": datetime.now().isoformat()
        }

        self.logger.info("增强版抖音爬虫初始化完成")

    async def fetch_enhanced_video_comments(self, aweme_id: str,
                                          max_comments: int = 100,
                                          include_replies: bool = True,
                                          sentiment_analysis: bool = True) -> Dict:
        """Fetch and enrich the comments of a single video.

        Args:
            aweme_id: video id.
            max_comments: upper bound on the number of comments collected.
            include_replies: also fetch each comment's reply thread.
            sentiment_analysis: run sentiment analysis on the texts
                (effective only if the analyzer was enabled at construction).

        Returns:
            Dict with the enriched comments, a sentiment summary and
            fetch/processing timing metadata.

        Raises:
            Exception: re-raised after being logged and recorded as a
                failure sample in the delay manager.
        """
        start_time = time.time()

        try:
            # Human-like pause before hitting the comment endpoint.
            if self.delay_manager:
                await self.delay_manager.wait(DelayType.HUMAN_LIKE)

            # Raw paginated comments (plus replies, when requested).
            comments_data = await self._fetch_comments_with_pagination(
                aweme_id, max_comments, include_replies
            )

            # Attach sentiment / risk data where enabled.
            enhanced_comments = await self._enhance_comments_data(
                comments_data, sentiment_analysis
            )

            # Feed the adaptive delay model a success sample.
            if self.delay_manager:
                response_time = time.time() - start_time
                self.delay_manager.record_request(
                    success=True,
                    response_time=response_time
                )

            self._update_comment_stats(enhanced_comments)
            # Fix: this counter was declared in __init__ but never updated.
            self.stats["total_videos_processed"] += 1

            return {
                "aweme_id": aweme_id,
                "total_comments": len(enhanced_comments),
                "comments": enhanced_comments,
                "sentiment_summary": self._generate_sentiment_summary(enhanced_comments),
                "fetch_time": datetime.now().isoformat(),
                "processing_time": time.time() - start_time
            }

        except Exception as e:
            self.logger.error(f"获取增强评论数据失败: {e}")

            # Feed the adaptive delay model a failure sample.
            if self.delay_manager:
                response_time = time.time() - start_time
                self.delay_manager.record_request(
                    success=False,
                    response_time=response_time,
                    error=str(e)
                )

            raise

    async def _fetch_comments_with_pagination(self, aweme_id: str,
                                            max_comments: int,
                                            include_replies: bool) -> List[Dict]:
        """Page through the comment endpoint until max_comments is reached.

        Stops early when a page is empty, malformed, errors out, or the API
        reports no further data.
        """
        all_comments = []
        cursor = 0
        page_size = 20  # comments requested per page

        while len(all_comments) < max_comments:
            try:
                # Adaptive pause between pages (not before the first one).
                if self.delay_manager and cursor > 0:
                    await self.delay_manager.wait(DelayType.ADAPTIVE)

                # Never request more than we still need.
                remaining = max_comments - len(all_comments)
                current_count = min(page_size, remaining)

                comments_result = await self.fetch_video_comments(
                    aweme_id, cursor=cursor, count=current_count
                )

                if not comments_result or 'comments' not in comments_result:
                    break

                page_comments = comments_result['comments']
                if not page_comments:
                    break

                # Enrich each comment on this page.
                for comment in page_comments:
                    enhanced_comment = await self._process_single_comment(
                        comment, aweme_id, include_replies
                    )
                    all_comments.append(enhanced_comment)

                    if len(all_comments) >= max_comments:
                        break

                # Advance the cursor; fall back to arithmetic when the API
                # omits it.
                cursor = comments_result.get('cursor', cursor + current_count)

                if not comments_result.get('has_more', False):
                    break

            except Exception as e:
                # Best-effort: keep what we already have on a page failure.
                self.logger.error(f"获取评论页面失败 (cursor={cursor}): {e}")
                break

        return all_comments[:max_comments]

    async def _process_single_comment(self, comment: Dict, aweme_id: str,
                                    include_replies: bool) -> Dict:
        """Normalize one raw comment into the enriched schema.

        Sentiment and risk fields are placeholders here; they are filled in
        later by _enhance_comments_data.
        """
        enhanced_comment = {
            "cid": comment.get('cid', ''),
            "text": comment.get('text', ''),
            "digg_count": comment.get('digg_count', 0),
            "reply_comment_total": comment.get('reply_comment_total', 0),
            "create_time": comment.get('create_time', 0),
            "create_time_str": self._format_timestamp(comment.get('create_time', 0)),

            # Enriched author profile.
            "user": self._enhance_user_info(comment.get('user', {})),

            # Reply thread (filled below when requested).
            "replies": [],

            # Filled later by _enhance_comments_data.
            "sentiment_analysis": None,

            # Default (no-risk) assessment; replaced after sentiment analysis.
            "risk_assessment": {
                "threat_level": 0,
                "risk_factors": [],
                "requires_attention": False
            }
        }

        # Fetch replies only when the comment actually has some.
        if (include_replies and
            enhanced_comment["reply_comment_total"] > 0):
            try:
                replies = await self._fetch_comment_replies(
                    aweme_id, enhanced_comment["cid"]
                )
                enhanced_comment["replies"] = replies
            except Exception as e:
                # Replies are optional; degrade gracefully.
                self.logger.warning(f"获取评论回复失败: {e}")

        return enhanced_comment

    async def _fetch_comment_replies(self, aweme_id: str, comment_id: str,
                                   max_replies: int = 10) -> List[Dict]:
        """Fetch up to max_replies replies for one comment.

        Returns an empty list on any failure or malformed response.
        """
        try:
            # Randomized pause before the reply endpoint.
            if self.delay_manager:
                await self.delay_manager.wait(DelayType.RANDOM)

            replies_result = await self.fetch_video_comments_reply(
                aweme_id, comment_id, count=max_replies
            )

            if not replies_result or 'comments' not in replies_result:
                return []

            enhanced_replies = []
            for reply in replies_result['comments']:
                enhanced_replies.append({
                    "cid": reply.get('cid', ''),
                    "text": reply.get('text', ''),
                    "digg_count": reply.get('digg_count', 0),
                    "create_time": reply.get('create_time', 0),
                    "create_time_str": self._format_timestamp(reply.get('create_time', 0)),
                    "user": self._enhance_user_info(reply.get('user', {})),
                    "sentiment_analysis": None  # filled later
                })

            return enhanced_replies

        except Exception as e:
            self.logger.error(f"获取评论回复失败: {e}")
            return []

    def _enhance_user_info(self, user_data: Dict) -> Dict:
        """Project the raw user payload into the enriched profile schema."""
        # Fix: guard against avatar_larger being None and url_list being
        # present but empty (original indexed [0] unconditionally).
        avatar_urls = (user_data.get('avatar_larger') or {}).get('url_list') or ['']
        return {
            "uid": user_data.get('uid', ''),
            "nickname": user_data.get('nickname', ''),
            "signature": user_data.get('signature', ''),
            "avatar_larger": avatar_urls[0],
            "follower_count": user_data.get('follower_count', 0),
            "following_count": user_data.get('following_count', 0),
            "total_favorited": user_data.get('total_favorited', 0),
            "verification_type": user_data.get('verification_type', 0),
            "is_verified": user_data.get('verification_type', 0) > 0,
            "account_region": user_data.get('region', ''),

            # Derived reach / trust metrics.
            "influence_score": self._calculate_user_influence(user_data),
            "credibility_level": self._assess_user_credibility(user_data)
        }

    def _calculate_user_influence(self, user_data: Dict) -> float:
        """Score a user's reach on a 0-10 scale.

        Additive buckets: followers (up to 4), total likes received (up to 3)
        and a flat verification bonus (2), capped at 10.
        """
        follower_count = user_data.get('follower_count', 0)
        total_favorited = user_data.get('total_favorited', 0)
        verification_type = user_data.get('verification_type', 0)

        influence_score = 0.0

        # Follower-count bucket.
        if follower_count > 1000000:    # 1M+ followers
            influence_score += 4.0
        elif follower_count > 100000:   # 100k+
            influence_score += 3.0
        elif follower_count > 10000:    # 10k+
            influence_score += 2.0
        elif follower_count > 1000:     # 1k+
            influence_score += 1.0

        # Total-likes bucket.
        if total_favorited > 10000000:    # 10M+ likes
            influence_score += 3.0
        elif total_favorited > 1000000:   # 1M+
            influence_score += 2.0
        elif total_favorited > 100000:    # 100k+
            influence_score += 1.0

        # Verified accounts get a flat bonus.
        if verification_type > 0:
            influence_score += 2.0

        return min(10.0, influence_score)

    def _assess_user_credibility(self, user_data: Dict) -> str:
        """Classify a user's credibility as "high" / "medium" / "low"."""
        verification_type = user_data.get('verification_type', 0)
        follower_count = user_data.get('follower_count', 0)
        following_count = user_data.get('following_count', 0)

        # Verified accounts are trusted outright.
        if verification_type > 0:
            return "high"

        # Follower/following ratio: organic accounts are followed more than
        # they follow; the inverse pattern suggests low credibility.
        if following_count > 0:
            follow_ratio = follower_count / following_count
            if follow_ratio > 10:
                return "high"
            elif follow_ratio < 0.1:
                return "low"

        # Fallback on raw follower count.
        if follower_count > 50000:
            return "high"
        elif follower_count > 5000:
            return "medium"
        else:
            return "low"

    async def _enhance_comments_data(self, comments: List[Dict],
                                   sentiment_analysis: bool) -> List[Dict]:
        """Run batched sentiment analysis over comments and their replies.

        Mutates the comment dicts in place (sentiment_analysis and
        risk_assessment fields) and returns the same list. No-op when the
        analyzer is absent or analysis was not requested.
        """
        if not self.sentiment_analyzer or not sentiment_analysis:
            return comments

        # Gather every text with its (comment, reply) position so one batch
        # call covers both top-level comments and replies.
        comment_texts = []
        reply_texts = []
        comment_indices = []
        reply_indices = []

        for i, comment in enumerate(comments):
            comment_texts.append(comment["text"])
            comment_indices.append((i, -1))  # reply index -1 == top-level

            for j, reply in enumerate(comment.get("replies", [])):
                reply_texts.append(reply["text"])
                reply_indices.append((i, j))

        all_texts = comment_texts + reply_texts
        all_indices = comment_indices + reply_indices

        if all_texts:
            sentiment_results = self.sentiment_analyzer.batch_analyze(all_texts)

            # Scatter results back to their source comment/reply.
            for idx, result in enumerate(sentiment_results):
                comment_idx, reply_idx = all_indices[idx]

                if reply_idx == -1:
                    # Top-level comment: also derive its risk assessment.
                    comments[comment_idx]["sentiment_analysis"] = result
                    comments[comment_idx]["risk_assessment"] = self._assess_comment_risk(
                        comments[comment_idx], result
                    )
                else:
                    comments[comment_idx]["replies"][reply_idx]["sentiment_analysis"] = result

        return comments

    def _assess_comment_risk(self, comment: Dict, sentiment_result: Dict) -> Dict:
        """Combine sentiment, author influence and engagement into a risk verdict."""
        risk_factors = []
        threat_level = sentiment_result.get("threat_level", 0)

        # Threat level from the sentiment lexicon.
        if threat_level >= 4:
            risk_factors.append("高威胁情感词汇")
        elif threat_level >= 2:
            risk_factors.append("中等威胁情感词汇")

        # Negative comments by high-reach authors escalate.
        user_influence = comment["user"].get("influence_score", 0)
        if user_influence >= 7 and sentiment_result.get("sentiment") == "negative":
            risk_factors.append("高影响力用户负面评论")
            threat_level += 1

        # Highly-liked negative comments escalate.
        if comment["digg_count"] > 100 and sentiment_result.get("sentiment") == "negative":
            risk_factors.append("高点赞负面评论")
            threat_level += 1

        # Heavy reply threads are flagged but do not escalate.
        if comment["reply_comment_total"] > 20:
            risk_factors.append("高回复数评论")

        # A comment needs attention if it is clearly threatening, has
        # multiple risk factors, or pairs negativity with real reach.
        requires_attention = (
            threat_level >= 3 or
            len(risk_factors) >= 2 or
            (user_influence >= 5 and sentiment_result.get("sentiment") == "negative")
        )

        return {
            "threat_level": min(5, threat_level),  # capped at level 5
            "risk_factors": risk_factors,
            "requires_attention": requires_attention
        }

    def _generate_sentiment_summary(self, comments: List[Dict]) -> Dict:
        """Aggregate per-comment sentiment into distribution / risk rollups."""
        if not self.sentiment_analyzer:
            return {"enabled": False}

        total_comments = len(comments)
        if total_comments == 0:
            return {"enabled": True, "total": 0}

        sentiment_counts = {"positive": 0, "negative": 0, "neutral": 0}
        threat_levels = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
        high_risk_comments = []

        for comment in comments:
            # Fix: the field is initialized to None, so dict.get's default
            # never applies — `or {}` is required to avoid AttributeError
            # when per-call analysis was skipped.
            sentiment_result = comment.get("sentiment_analysis") or {}
            sentiment = sentiment_result.get("sentiment", "neutral")
            if sentiment not in sentiment_counts:
                # Fix: unknown labels previously raised KeyError.
                sentiment = "neutral"
            sentiment_counts[sentiment] += 1

            # Fix: clamp into the 0-5 histogram instead of KeyError-ing.
            threat_level = sentiment_result.get("threat_level", 0)
            threat_levels[min(5, max(0, threat_level))] += 1

            # Collect attention-worthy comments for the report.
            if comment.get("risk_assessment", {}).get("requires_attention", False):
                high_risk_comments.append({
                    "text": comment["text"][:100] + "..." if len(comment["text"]) > 100 else comment["text"],
                    "user": comment["user"]["nickname"],
                    "threat_level": threat_level,
                    "digg_count": comment["digg_count"]
                })

        return {
            "enabled": True,
            "total": total_comments,
            "sentiment_distribution": sentiment_counts,
            "sentiment_rates": {
                "positive_rate": sentiment_counts["positive"] / total_comments,
                "negative_rate": sentiment_counts["negative"] / total_comments,
                "neutral_rate": sentiment_counts["neutral"] / total_comments
            },
            "threat_distribution": threat_levels,
            "high_risk_count": len(high_risk_comments),
            "high_risk_comments": high_risk_comments[:10],  # report at most 10
            "overall_sentiment": self._determine_overall_sentiment(sentiment_counts),
            "risk_level": self._determine_overall_risk(threat_levels, high_risk_comments)
        }

    def _determine_overall_sentiment(self, sentiment_counts: Dict) -> str:
        """Collapse a sentiment distribution into positive/negative/mixed/neutral."""
        total = sum(sentiment_counts.values())
        if total == 0:
            return "neutral"

        positive_rate = sentiment_counts["positive"] / total
        negative_rate = sentiment_counts["negative"] / total

        # Negative dominates at a lower threshold than positive: bad news
        # is weighted more heavily for monitoring purposes.
        if positive_rate >= 0.6:
            return "positive"
        elif negative_rate >= 0.4:
            return "negative"
        else:
            return "mixed"

    def _determine_overall_risk(self, threat_levels: Dict, high_risk_comments: List) -> str:
        """Map threat-level histogram + flagged comments to low/medium/high/critical."""
        total_comments = sum(threat_levels.values())
        if total_comments == 0:
            return "low"

        high_threat = threat_levels.get(4, 0) + threat_levels.get(5, 0)
        medium_threat = threat_levels.get(2, 0) + threat_levels.get(3, 0)

        high_threat_rate = high_threat / total_comments
        medium_threat_rate = medium_threat / total_comments

        if high_threat_rate >= 0.1 or len(high_risk_comments) >= 5:
            return "critical"
        elif high_threat_rate >= 0.05 or medium_threat_rate >= 0.2:
            return "high"
        elif medium_threat_rate >= 0.1:
            return "medium"
        else:
            return "low"

    def _format_timestamp(self, timestamp: int) -> str:
        """Render a unix timestamp as "YYYY-MM-DD HH:MM:SS"; "" for 0.

        Falls back to the raw value as a string when the timestamp is out
        of range for the platform.
        """
        if timestamp == 0:
            return ""

        try:
            dt = datetime.fromtimestamp(timestamp)
            return dt.strftime("%Y-%m-%d %H:%M:%S")
        except (ValueError, OverflowError, OSError, TypeError):
            # Fix: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return str(timestamp)

    def _update_comment_stats(self, comments: List[Dict]):
        """Fold a processed batch of comments into the lifetime counters."""
        self.stats["total_comments_processed"] += len(comments)

        for comment in comments:
            # Fix: the field may legitimately be None (analysis disabled),
            # and dict.get's default does not apply then — use `or {}`.
            analysis = comment.get("sentiment_analysis") or {}

            if analysis.get("sentiment", "neutral") == "negative":
                self.stats["negative_comments_found"] += 1

            if analysis.get("threat_level", 0) >= 4:
                self.stats["high_threat_comments"] += 1

        self.stats["last_update"] = datetime.now().isoformat()

    async def batch_monitor_videos(self, video_urls: List[str],
                                 max_comments_per_video: int = 50) -> Dict:
        """Monitor a list of video URLs and aggregate sentiment/risk results.

        Per-video failures are recorded but do not abort the batch.

        Args:
            video_urls: video page URLs to process.
            max_comments_per_video: comment cap passed to each fetch.

        Returns:
            Batch report: totals, per-video results, overall sentiment
            distribution and the list of high-risk videos.
        """
        # Project-local import kept function-scoped (as in the original),
        # but hoisted out of the loop — it was re-executed per video.
        from crawlers.douyin.web.utils import AwemeIdFetcher

        results = {
            "total_videos": len(video_urls),
            "processed_videos": 0,
            "failed_videos": 0,
            "overall_sentiment": {"positive": 0, "negative": 0, "neutral": 0},
            "high_risk_videos": [],
            "videos": []
        }

        for i, url in enumerate(video_urls):
            try:
                self.logger.info(f"处理视频 {i+1}/{len(video_urls)}: {url}")

                aweme_id = AwemeIdFetcher.get_aweme_id(url)

                if not aweme_id:
                    self.logger.warning(f"无法提取视频ID: {url}")
                    results["failed_videos"] += 1
                    continue

                comment_data = await self.fetch_enhanced_video_comments(
                    aweme_id,
                    max_comments=max_comments_per_video,
                    sentiment_analysis=True
                )

                # Accumulate the batch-wide sentiment distribution.
                sentiment_summary = comment_data["sentiment_summary"]
                if sentiment_summary.get("enabled", False):
                    for sentiment, count in sentiment_summary["sentiment_distribution"].items():
                        results["overall_sentiment"][sentiment] += count

                # Flag videos whose rollup crosses the risk thresholds.
                if (sentiment_summary.get("risk_level") in ["high", "critical"] or
                    sentiment_summary.get("high_risk_count", 0) >= 3):
                    results["high_risk_videos"].append({
                        "url": url,
                        "aweme_id": aweme_id,
                        "risk_level": sentiment_summary.get("risk_level", "unknown"),
                        "negative_rate": sentiment_summary.get("sentiment_rates", {}).get("negative_rate", 0),
                        "high_risk_count": sentiment_summary.get("high_risk_count", 0)
                    })

                results["videos"].append({
                    "url": url,
                    "aweme_id": aweme_id,
                    "status": "success",
                    "comment_data": comment_data
                })

                results["processed_videos"] += 1

                # Adaptive pause between videos (skip after the last one).
                if self.delay_manager and i < len(video_urls) - 1:
                    await self.delay_manager.wait(DelayType.ADAPTIVE)

            except Exception as e:
                self.logger.error(f"处理视频失败 {url}: {e}")
                results["failed_videos"] += 1
                results["videos"].append({
                    "url": url,
                    "status": "failed",
                    "error": str(e)
                })

        return results

    def get_crawler_stats(self) -> Dict:
        """Return a snapshot of lifetime stats plus delay-manager metrics."""
        stats = self.stats.copy()

        if self.delay_manager:
            stats["delay_stats"] = self.delay_manager.get_statistics()

        return stats