"""
视频评论数据采集工具
提供高效的网页数据抓取和处理功能
"""

import csv
import datetime
import hashlib
import os
import random
import time
import urllib.parse

from DrissionPage import ChromiumPage


class VideoCommentCrawler:
    """Collects video comments by driving a Chromium browser, capturing the
    comment-list API responses via a network listener, filtering them by
    like/reply thresholds, and saving the result to a CSV file."""

    def __init__(self, video_url, min_likes=0, min_replies=0):
        """
        Initialize the crawler.
        :param video_url: URL of the target video
        :param min_likes: minimum like count a comment must have to be kept
        :param min_replies: minimum reply count a comment must have to be kept
        """
        self.video_url = video_url
        self.video_id = self._extract_video_id(video_url)
        self.comments_data = []     # comments that passed the filter
        self.processed_ids = set()  # comment ids already seen (de-duplication)
        self.browser = None

        # Filter thresholds; negative inputs are clamped to 0.
        self.min_likes = max(0, min_likes)
        self.min_replies = max(0, min_replies)

        # Statistics.
        self.total_crawled = 0  # total comments processed
        self.filtered_out = 0   # comments rejected by the filter

        # Build the output path; encode the filter settings in the file name
        # only when a filter is actually active.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filter_suffix = (
            f"_likes{self.min_likes}_replies{self.min_replies}"
            if (self.min_likes > 0 or self.min_replies > 0)
            else ""
        )
        self.output_file = f"comments_data_{self.video_id}_{timestamp}{filter_suffix}.csv"

        print(f"🎯 数据采集器已初始化")
        print(f"📺 目标视频ID: {self.video_id}")
        print(f"🔍 过滤条件: 点赞数≥{self.min_likes}, 回复数≥{self.min_replies}")
        print(f"📁 输出文件: {self.output_file}")

    def _url_fallback_id(self, url):
        """Derive a stable fallback id from the URL.

        Uses MD5 instead of the builtin hash(), whose output is randomized
        per process (PYTHONHASHSEED) and would change the output filename
        on every run for the same URL.
        """
        digest = hashlib.md5(url.encode('utf-8')).hexdigest()
        return f"video_{int(digest, 16) % 10000000}"

    def _extract_video_id(self, url):
        """Extract a video identifier from the URL.

        Handles short links (v.douyin.com), standard links with a numeric
        path segment, and falls back to a stable hash-based id otherwise.
        :raises ValueError: if url is empty.
        """
        if not url:
            raise ValueError("视频URL不能为空")

        # Normalize: replace full-width colons and ensure a scheme is present.
        url = url.replace("：", ":").strip()
        if not url.startswith("http"):
            url = "https://" + url.lstrip(":/")

        # Decode percent-encoded characters (e.g. encoded Chinese text).
        try:
            url = urllib.parse.unquote(url, encoding='utf-8')
        except (UnicodeDecodeError, ValueError):
            pass

        # Short-link form: the id is the last non-empty path segment.
        if "v.douyin.com" in url:
            for part in reversed(url.split("/")):
                part = part.strip()
                if part:
                    return part[:50]  # cap the length defensively

        # Standard form: prefer a purely numeric path segment.
        try:
            parts = url.split("/")
            for part in parts:
                if part.strip().isdigit():
                    return part.strip()

            # Otherwise take the last segment, minus any query string.
            video_id = parts[-1].split("?")[0]
            # Too long or containing odd characters -> stable fallback id.
            if len(video_id) > 30 or not video_id.replace('-', '').replace('_', '').isalnum():
                return self._url_fallback_id(url)
            return video_id
        except Exception:
            return self._url_fallback_id(url)

    def start_collection(self):
        """Run the full collection pipeline and return a result dict.

        Returns a dict with 'success', counts, the output file path and the
        collected data; on failure 'success' is False and 'error' holds the
        message. The browser is always closed in the finally clause.
        """
        print(f"\n🚀 开始采集视频 {self.video_id} 的评论数据...")

        try:
            # Initialize the browser environment.
            print("正在初始化浏览器环境...")
            try:
                self.browser = ChromiumPage()
                print("浏览器初始化成功")
            except Exception as e:
                print(f"浏览器初始化失败: {str(e)}")
                # Retry with a more basic configuration.
                from DrissionPage import ChromiumOptions
                options = ChromiumOptions()
                options.add_argument('--no-sandbox')
                options.add_argument('--disable-dev-shm-usage')
                self.browser = ChromiumPage(chromium_options=options)
                print("使用备用配置初始化浏览器成功")

            # Start the network listener to capture comment API responses.
            try:
                self.browser.listen.start('aweme/v1/web/comment/list/')
                print("网络监听器启动成功")
            except Exception as e:
                print(f"网络监听器启动失败: {str(e)}")

            # Open the target page.
            print(f"正在访问目标页面: {self.video_url}")
            try:
                self.browser.get(self.video_url)
                print("页面访问成功")
            except Exception as e:
                print(f"页面访问失败: {str(e)}")
                raise Exception(f"无法加载页面: {str(e)}")

            time.sleep(5)

            # Sanity-check the page; a failed check is logged but non-fatal.
            try:
                page_html = self.browser.html
                page_title = self.browser.title
                if "页面不存在" in page_html or "404" in page_title:
                    raise Exception(f"无法访问目标页面: {self.video_url}")
            except Exception as e:
                print(f"页面检查异常: {str(e)}, 继续执行...")

            # Navigate to the comment section.
            self._navigate_to_comments()

            # Run the collection loop.
            self._collect_comments()

            # Persist the data.
            self._save_data()

            print(f"\n🎉 采集完成！")
            print(f"📊 总采集数: {self.total_crawled} 条")
            print(f"🔍 过滤保留: {len(self.comments_data)} 条")
            print(f"❌ 过滤掉: {self.filtered_out} 条")
            print(f"📁 数据已保存到: {self.output_file}")

            return {
                'success': True,
                'total_comments': len(self.comments_data),
                'filtered_comments': self.filtered_out,
                'total_crawled': self.total_crawled,
                'output_file': self.output_file,
                'data': self.comments_data
            }

        except Exception as e:
            print(f"❌ 采集失败: {str(e)}")
            return {
                'success': False,
                'error': str(e),
                'total_comments': len(self.comments_data),
                'data': self.comments_data
            }

        finally:
            if self.browser:
                self.browser.quit()

    def _navigate_to_comments(self):
        """Scroll towards the comment section and expand it if possible."""
        try:
            # Scroll to the middle of the page where comments usually start.
            self.browser.run_js("window.scrollTo(0, document.body.scrollHeight/2);")
            time.sleep(2)

            # Best-effort: click the "view more comments" expander if present.
            try:
                expand_btn = self.browser.find_element('xpath://div[contains(text(), "查看更多评论")]')
                if expand_btn:
                    expand_btn.click()
                    time.sleep(2)
            except Exception:
                pass

        except Exception as e:
            print(f"导航到评论区域失败: {str(e)}")

    def _collect_comments(self):
        """Scroll the page and consume captured API responses until no new
        data arrives for several rounds or the kept-comment cap is reached."""
        no_new_data_count = 0
        max_no_new = 5  # stop after this many consecutive empty rounds

        print("📥 开始采集评论数据...")

        # Cap kept comments at 500 to bound runtime and memory.
        while no_new_data_count < max_no_new and len(self.comments_data) < 500:
            try:
                # Pick a random scroll strategy to look less robotic.
                scroll_strategy = random.randint(1, 3)
                self._execute_scroll(scroll_strategy)

                # Wait for a captured network response.
                time.sleep(2)
                response = self.browser.listen.wait(timeout=8)

                if not response:
                    no_new_data_count += 1
                    print(f"⏳ 等待新数据中... ({no_new_data_count}/{max_no_new})")
                    continue

                # Parse the response body (expected to be decoded JSON).
                json_data = response.response.body

                # Debug: dump the API response structure for the first items.
                if len(self.comments_data) < 3:
                    print(f"\n🔍 API响应调试:")
                    if json_data:
                        print(f"响应字段: {list(json_data.keys())}")
                        if 'comments' in json_data:
                            print(f"评论数量: {len(json_data['comments'])}")
                            if json_data['comments']:
                                first_comment = json_data['comments'][0]
                                print(f"第一条评论字段: {list(first_comment.keys())}")
                    print("=" * 50)

                if not json_data or 'comments' not in json_data:
                    no_new_data_count += 1
                    print(f"⚠️ 无效响应数据结构: {json_data}")
                    continue

                comments_list = json_data['comments']
                if not comments_list:
                    no_new_data_count += 1
                    print(f"⚠️ 评论列表为空")
                    continue

                # Process the newly received comments.
                new_data_count = 0
                for comment_item in comments_list:
                    processed_data = self._process_comment(comment_item)
                    if processed_data:
                        self.total_crawled += 1
                        comment_id = processed_data['id']
                        if comment_id not in self.processed_ids:
                            self.processed_ids.add(comment_id)

                            # Apply the like/reply filter.
                            if self._passes_filter(processed_data):
                                self.comments_data.append(processed_data)
                                new_data_count += 1
                            else:
                                self.filtered_out += 1

                if new_data_count > 0:
                    print(f"✅ 新增 {new_data_count} 条数据，累计 {len(self.comments_data)} 条")
                    no_new_data_count = 0
                else:
                    no_new_data_count += 1

            except Exception as e:
                print(f"⚠️ 采集过程异常: {str(e)}")
                no_new_data_count += 1

        print(f"📋 数据采集结束，共获取 {len(self.comments_data)} 条有效数据")

    def _execute_scroll(self, strategy):
        """Execute one of three scrolling patterns (1=jump to bottom,
        2=step then bottom, 3=bounce up then bottom)."""
        try:
            if strategy == 1:
                # Jump straight to the bottom of the page.
                self.browser.run_js("window.scrollTo(0, document.body.scrollHeight);")
            elif strategy == 2:
                # Small step first, then to the bottom.
                self.browser.run_js("window.scrollBy(0, 300);")
                time.sleep(0.5)
                self.browser.run_js("window.scrollTo(0, document.body.scrollHeight);")
            else:
                # Bounce up a little, then to the bottom.
                self.browser.run_js("window.scrollBy(0, -200);")
                time.sleep(0.5)
                self.browser.run_js("window.scrollTo(0, document.body.scrollHeight);")

            time.sleep(1)

        except Exception as e:
            print(f"滚动操作失败: {str(e)}")

    def _process_comment(self, comment_item):
        """Normalize one raw comment dict into {'id', 'content', 'likes',
        'replies'}, or return None for invalid/too-short comments."""
        try:
            # Debug: print the raw structure for the first few comments only.
            if len(self.comments_data) < 3:
                print(f"\n🔍 调试 - 原始评论数据结构:")
                print(f"Keys: {list(comment_item.keys())}")
                if 'text' in comment_item:
                    print(f"Text字段: {comment_item['text']}")
                if 'aweme_id' in comment_item:
                    print(f"视频ID: {comment_item['aweme_id']}")
                print("=" * 50)

            comment_id = comment_item.get('cid', '') or str(comment_item.get('id', ''))

            # Try several possible content fields and keep the LONGEST
            # candidate (the original code compared a value against itself,
            # so the last field silently won regardless of length).
            content = ''
            possible_content_fields = ['text', 'content', 'comment_text', 'full_text', 'rich_content']

            for field in possible_content_fields:
                candidate = comment_item.get(field)
                if candidate:
                    candidate = str(candidate).strip()
                    if len(candidate) > len(content):
                        content = candidate

            # Fall back to the nested rich_content structure.
            if not content and 'rich_content' in comment_item:
                rich_content = comment_item['rich_content']
                if isinstance(rich_content, dict):
                    content = rich_content.get('text', '').strip()

            # Last resort: the plain 'text' field (guarding a None value).
            if not content:
                content = str(comment_item.get('text') or '').strip()

            # Coerce None counts to 0 so the filter comparison cannot fail.
            likes_count = comment_item.get('digg_count', 0) or 0      # like count
            replies_count = comment_item.get('reply_comment_total', 0) or 0  # reply count

            # Debug: show the processed content for the first few comments.
            if len(self.comments_data) < 5:
                print(f"📝 处理后评论: {content[:100]}..." if len(content) > 100 else f"📝 处理后评论: {content}")
                print(f"💝 点赞: {likes_count}, 💬 回复: {replies_count}")

            # Drop empty or trivially short comments.
            if not content or len(content) < 2:
                return None

            return {
                'id': comment_id,
                'content': content,
                'likes': likes_count,
                'replies': replies_count
            }

        except Exception as e:
            print(f"数据处理失败: {str(e)}")
            import traceback
            print(f"详细错误: {traceback.format_exc()}")
            return None

    def _save_data(self):
        """Write the kept comments to a UTF-8-BOM CSV (Excel friendly)."""
        if not self.comments_data:
            print("⚠️ 没有数据需要保存")
            return

        try:
            field_names = ['content', 'likes', 'replies']

            # utf-8-sig adds a BOM so Excel detects the encoding correctly.
            with open(self.output_file, 'w', newline='', encoding='utf-8-sig') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=field_names)
                writer.writeheader()

                for item in self.comments_data:
                    writer.writerow({
                        'content': item['content'],
                        'likes': item['likes'],
                        'replies': item['replies']
                    })

            print(f"💾 数据已成功保存到文件: {self.output_file}")

        except Exception as e:
            print(f"❌ 数据保存失败: {str(e)}")

    def _passes_filter(self, comment_data):
        """Return True when the comment meets both the minimum like count
        and the minimum reply count thresholds."""
        likes = comment_data.get('likes', 0)
        replies = comment_data.get('replies', 0)

        return likes >= self.min_likes and replies >= self.min_replies


# Standalone smoke-test entry point.
if __name__ == "__main__":
    banner = "=" * 60
    print(banner)
    print("视频评论数据采集工具 - 测试模式")
    print(banner)

    # Ask the user for the target URL.
    target_url = input("请输入视频URL: ").strip()

    if not target_url:
        # Guard clause: reject an empty URL up front.
        print("❌ 请输入有效的视频URL")
    else:
        # Build the crawler and run the full pipeline.
        outcome = VideoCommentCrawler(target_url).start_collection()

        if outcome['success']:
            print(f"\n🎉 采集成功!")
            print(f"📊 数据量: {outcome['total_comments']} 条")
            print(f"📁 文件: {outcome['output_file']}")
        else:
            print(f"\n❌ 采集失败: {outcome['error']}")