import time
import csv
import pandas as pd
import os
import sys
import threading

# Placeholders for the DrissionPage classes; they stay None when the
# library is missing so the module still imports and the GUI can report it.
ChromiumPage = None
Actions = None

# Try to import DrissionPage; the crawler cannot run without it.
try:
    from DrissionPage import ChromiumPage
    from DrissionPage.common import Actions
except ImportError as e:
    print(f"警告: 未安装DrissionPage库或版本不兼容，将无法正常运行爬虫: {e}")

# Resolve the project logger, falling back to a simple console logger.
try:
    from logger import logger
except ImportError:
    # Direct import failed; retry with the project root on sys.path.
    try:
        sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        from logger import logger
    except ImportError:
        # Last resort: build a basic console logger.
        import logging
        logger = logging.getLogger('douyin_crawler')
        logger.setLevel(logging.INFO)

        # Only attach a handler when none exists yet; without this guard a
        # re-import of the module would add a second StreamHandler and every
        # log line would be emitted multiple times.
        if not logger.handlers:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)
class DouyinCrawler:
    def __init__(self, gui_callback=None):
        """Initialize crawler state.

        Args:
            gui_callback: optional callable invoked with a list of newly
                collected row dicts so a GUI can update in real time.
        """
        self.page = None  # ChromiumPage instance; created lazily by init_browser()
        self.comments_data = []  # accumulated row dicts (comments or followings)
        self.extract_type = "pinglun"  # extraction mode label ("pinglun" = comments)
        self.gui_callback = gui_callback  # GUI callback for real-time data updates
        self.stop_scroll = False  # flag polled by the scroll loops to abort early
        self.previous_comment_count = 0  # row count seen on the previous extraction pass
        self.no_increase_count = 0  # consecutive extraction passes with no new rows
        self.existing_user_links = set()  # user links already collected (dedup across passes)
        self.browser_close_callback = None  # notified after the browser is closed
    
    def set_gui_callback(self, callback):
        """Register the GUI callback used for real-time data updates."""
        self.gui_callback = callback
    
    def set_browser_close_callback(self, callback):
        """Register a callback fired after the browser closes, so the GUI can reset its state."""
        self.browser_close_callback = callback
    
    def init_browser(self):
        """Create the ChromiumPage browser instance.

        Returns:
            bool: True on success, False when DrissionPage is unavailable
            or the browser fails to start (the error is logged, not raised).
        """
        try:
            logger.info("正在初始化浏览器...")
            if ChromiumPage is None:
                # Specific exception type instead of bare Exception; it is
                # handled locally so callers still just get False.
                raise RuntimeError("DrissionPage库未安装")
            self.page = ChromiumPage()
            logger.info("浏览器初始化成功")
            return True
        except Exception as e:
            logger.error(f"浏览器初始化失败: {e}")
            return False
    def get_following_comments(self, video_url=None, scroll_times=5):
        """Collect the followings list by repeatedly scrolling the page.

        Runs up to ``scroll_times`` scroll/extract rounds and stops early
        when the row count fails to grow three rounds in a row or when
        stop_scrolling() has been called.

        Args:
            video_url: URL to open when no page is open yet.
            scroll_times: maximum number of scroll/extract rounds.

        Returns:
            bool: True when collection finished (even with zero rows),
            False on any error.
        """
        try:
            # Allow a fresh run after a previous one was stopped manually.
            self.stop_scroll = False
            if self.page:
                # A page is already open; operate on it directly.
                logger.info("网页已经打开，开始操作")
            else:
                # Bug fix: the original called self.page.get() while
                # self.page was still None; initialize the browser first.
                logger.info(f"开始获取关注链接: {video_url}")
                if not self.init_browser():
                    return False
                self.page.get(video_url)
                time.sleep(2)  # wait for the page to load
            # NOTE(review): `actions` is created but never used below; kept
            # in case Actions() has side effects in DrissionPage — confirm.
            actions = Actions(self.page)
            # Reset state for a fresh collection run.
            self.comments_data = []
            self.existing_user_links.clear()

            previous_count = 0
            no_increase_count = 0  # consecutive rounds without growth

            logger.info(f"开始滚动加载评论，共 {scroll_times} 次")
            for i in range(scroll_times):
                if self.stop_scroll:
                    logger.info("用户停止了滚动操作")
                    break

                logger.info(f"第 {i+1} 次滚动加载关注列表·...")

                # Prefer scrolling the followings-list container; fall back
                # to scrolling the whole page if it cannot be located.
                try:
                    scroll_container = self.page.ele('xpath://div[contains(@class, "DivUserListContainer")]')
                    for _ in range(5):
                        scroll_container.scroll.down(1000)
                        time.sleep(0.5)
                except Exception:
                    for _ in range(5):
                        self.page.scroll.down(1000)
                        time.sleep(0.5)

                self.extract_following_info()
                current_count = len(self.comments_data)

                if current_count <= previous_count:
                    no_increase_count += 1
                    logger.warning(f"关注数量未增加，连续无增长次数: {no_increase_count}")

                    # Three stagnant rounds in a row means the list is exhausted.
                    if no_increase_count >= 3:
                        logger.warning("评论数量连续3次无增长，停止采集")
                        logger.info("采集结束：连续三次新增关注为0")
                        break
                else:
                    no_increase_count = 0
                    logger.info(f"关注数量从 {previous_count} 增加到 {current_count}")

                previous_count = current_count

            logger.info(f"最终获取 {len(self.comments_data)} 条关注")
            return True
        except Exception as e:
            logger.error(f"获取关注失败: {e}")
            return False
    def get_comments(self, video_url=None, scroll_times=5):
        """Collect video comments by repeatedly scrolling to the bottom.

        Runs up to ``scroll_times`` scroll/extract rounds and stops early
        when the comment count fails to grow three rounds in a row or when
        stop_scrolling() has been called.

        Args:
            video_url: URL to open when no page is open yet.
            scroll_times: maximum number of scroll/extract rounds.

        Returns:
            bool: True when collection finished (even with zero rows),
            False on any error.
        """
        try:
            # Allow a fresh run after a previous one was stopped manually.
            self.stop_scroll = False
            if self.page:
                # A page is already open; operate on it directly.
                logger.info("网页已经打开，开始操作")
            else:
                # Bug fix: the original called self.page.get() while
                # self.page was still None; initialize the browser first.
                logger.info(f"开始获链接: {video_url}")
                if not self.init_browser():
                    return False
                self.page.get(video_url)
                time.sleep(2)  # wait for the page to load
            # Reset state for a fresh collection run.
            self.comments_data = []
            self.existing_user_links.clear()

            previous_count = 0
            no_increase_count = 0  # consecutive rounds without growth

            logger.info(f"开始滚动加载评论，共 {scroll_times} 次")
            for i in range(scroll_times):
                logger.info(f"第 {i+1} 次滚动加载评论...")
                for _ in range(5):
                    self.page.scroll.to_bottom()
                    time.sleep(0.2)

                # Check the stop flag after every scroll round.
                if self.stop_scroll:
                    logger.info("用户停止了滚动操作")
                    break

                self.extract_comments()
                current_count = len(self.comments_data)

                if current_count <= previous_count:
                    no_increase_count += 1
                    logger.warning(f"评论数量未增加，连续无增长次数: {no_increase_count}")

                    # Three stagnant rounds in a row means the list is exhausted.
                    if no_increase_count >= 3:
                        logger.warning("评论数量连续3次无增长，停止采集")
                        logger.info("采集结束：连续三次新增评论为0")
                        break
                else:
                    no_increase_count = 0
                    logger.info(f"评论数量从 {previous_count} 增加到 {current_count}")

                previous_count = current_count

            logger.info(f"最终获取 {len(self.comments_data)} 条评论")
            return True
        except Exception as e:
            logger.error(f"获取评论失败: {e}")
            return False
    
    def stop_scrolling(self):
        """Set the stop flag; the scroll loops poll it and exit at their next check."""
        self.stop_scroll = True
        logger.info("设置停止滚动标志")
    
    def extract_comments_from_current_page(self):
        """Run one extraction pass on the current page and track stagnation.

        Delegates to extract_comments(), then updates the no-growth
        counter. Returns False (and sets stop_scroll) once three
        consecutive passes added nothing; True otherwise.
        """
        try:
            logger.info("开始从当前页面提取评论数据...")

            before = len(self.comments_data)  # row count prior to this pass
            self.extract_comments()
            after = len(self.comments_data)

            logger.info(f"成功从当前页面获取 {after} 条评论")

            if after > before:
                # Growth: reset the stagnation counter and push only the
                # freshly appended rows to the GUI (never the old ones).
                self.no_increase_count = 0
                logger.info(f"评论数量从 {before} 增加到 {after}")
                if self.gui_callback:
                    try:
                        self.gui_callback(self.comments_data[before:after])
                    except Exception as e:
                        logger.error(f"调用GUI回调函数时出错: {e}")
            else:
                self.no_increase_count += 1
                logger.warning(f"评论数量未增加，连续无增长次数: {self.no_increase_count}")

                # Three stagnant passes in a row: signal the caller to stop.
                if self.no_increase_count >= 3:
                    logger.warning("评论数量连续3次无增长，停止采集")
                    self.stop_scroll = True
                    logger.info("采集结束：连续三次新增评论为0")
                    return False

            self.previous_comment_count = after

            if self.comments_data:
                print("\n=== 获取到的评论数据 ===")
                print(f"总共获取到 {len(self.comments_data)} 条评论，显示前 {len(self.comments_data)} 条:")
            else:
                print("\n未获取到任何评论数据")

            return True
        except Exception as e:
            logger.error(f"从当前页面提取评论数据失败: {e}")
            return False
    # Extraction of the followings-list entries
    def extract_following_info(self):
        """从页面提取关注列表信息"""
        try:
            # 尝试多种不同的选择器来查找评论项
            comment_selectors = [
                'xpath://div[contains(@class, "DivUserContainer")]',
            ]
            comment_items = []
            for selector in comment_selectors:
                try:
                    comment_items = self.page.eles(selector)
                    if comment_items:
                        print(f"使用选择器 '{selector}' 找到 {len(comment_items)} 个粉丝")
                        break
                except Exception as e:
                    print(f"使用选择器 '{selector}' 查找粉丝数时出错: {e}")
                    continue
            print(f"总共找到 {len(comment_items)} 个粉丝数")
            
            # 定义多种可能的选择器
            username_selectors = [
                'xpath://span[contains(@class, "SpanNickname")]'
            ]
            
            link_selectors = [
                'xpath://p[contains(@class, "PUniqueId")]'
            ]
            
            # 定义头像选择器
            avatar_selectors = [
                'tag:img@loading=lazy',
                'xpath:.//img[contains(@class, "ImgAvatar")]'
            ]
            
            # 初始化时，将已有评论数据中的用户链接添加到全局集合中
            if len(self.existing_user_links) == 0 and self.comments_data:
                for comment in self.comments_data:
                    user_link = comment.get('user_link')
                    if user_link and user_link != '无id':
                        self.existing_user_links.add(user_link)
            
            duplicate_count = 0
            new_comments_count = 0
            
            # 处理每条评论
            for i, item in enumerate(comment_items):
                try:
                    # 提取用户链接
                    user_link = "无id"
                    for selector in link_selectors:
                        try:
                            link_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if link_element:
                                user_link = link_element.text or "无id"
                        except:
                            continue
                    # 如果无id 跳过当前采集
                    if user_link == "无id":
                        logger.info(f"id为空跳过无效数据")
                        continue
                     # 检查用户链接是否已存在，如果存在则跳过
                    if user_link != "无id" and user_link in self.existing_user_links:
                        # 重复数据跳过
                        # logger.info(f"跳过重复数据：用户名={username}, 用户链接={user_link}")
                        duplicate_count += 1
                        continue
                    # 提取用户名
                    username = "未知用户"
                    for selector in username_selectors:
                        try:
                            username_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if username_element:
                                username = username_element.text or "未知用户"
                                if username.strip():
                                    break
                        except:
                            continue
                    
                    # 如果通过选择器没找到用户名，尝试查找所有a和span标签
                    if username == "未知用户":
                        links = item.eles('tag:a')[:5]  # 限制查找数量
                        for link in links:
                            text = link.text
                            if text and len(text.strip()) > 0 and text != "复制":
                                username = text
                                break
                    
                    # 提取评论内容
                    content = "不获取"
                    # 提取评论时间
                    comment_time = "未知"
                    # 提取头像URL
                    avatar_url = "无头像"
                    for selector in avatar_selectors:
                        try:
                            avatar_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if avatar_element:
                                avatar_url = avatar_element.attr('src') or "无头像"
                                if avatar_url.strip() and avatar_url != "无头像":
                                    break
                        except:
                            continue

                   
                    
                    # 存储到列表
                    comment_dict = {
                        'nickname': username,
                        'comment': content,
                        'time': comment_time,
                        'user_link': user_link,
                        'avatar_url': avatar_url
                    }
                    self.comments_data.append(comment_dict)
                    new_comments_count += 1
                    
                    # 将新的用户链接添加到全局集合中，用于后续的去重检查
                    if user_link != "无id":
                        self.existing_user_links.add(user_link)
                    
                    # 每处理1条评论就更新GUI显示
                    print(f"已处理 {i + 1}/{len(comment_items)} 条粉丝")
                    # 调用GUI回调函数更新界面
                    if self.gui_callback:
                        try:
                            # 立即调用回调函数更新GUI
                            self.gui_callback([comment_dict])  # 只传递当前评论
                        except Exception as e:
                            print(f"GUI更新出错: {e}")
                            pass
                except Exception as e:
                    print(f"解析第 {i+1} 条粉丝时出错: {e}")
                    continue
            
            print(f"关注提取完成，新增 {new_comments_count} 条有效粉丝，跳过 {duplicate_count} 条重复关注，当前总关注数 {len(self.comments_data)}")
            
        except Exception as e:
            logger.error(f"提取评论过程中发生错误: {e}")
            # self.comments_data = []
    def extract_comments(self):
        """从页面提取评论数据 - 优化版本"""
        try:
            # 尝试多种不同的选择器来查找评论项
            comment_selectors = [
                'xpath://div[contains(@class, "DivCommentObjectWrapper")]'
            ]
            comment_items = []
            for selector in comment_selectors:
                try:
                    comment_items = self.page.eles(selector)
                    if comment_items:
                        print(f"使用选择器 '{selector}' 找到 {len(comment_items)} 个评论项")
                        break
                except Exception as e:
                    print(f"使用选择器 '{selector}' 查找评论项时出错: {e}")
                    continue
            
            print(f"总共找到 {len(comment_items)} 个评论项")
            
            # 定义多种可能的选择器
            username_selectors = [
                'tag:div@data-e2e=comment-username-1',
            ]
            
            time_selectors = [
                'xpath://span[contains(@class, "TUXText TUXText--tiktok-sans TUXText--weight-normal")]',
            ]
            
            link_selectors = [
                'tag:a@class=link-a11y-focus'
            ]
            
            # 定义头像选择器
            avatar_selectors = [
                'tag:img@loading=lazy',
                'xpath:.//img[contains(@class, "ImgAvatar")]'
            ]
            
            # 初始化时，将已有评论数据中的用户链接添加到全局集合中
            if len(self.existing_user_links) == 0 and self.comments_data:
                for comment in self.comments_data:
                    user_link = comment.get('user_link')
                    if user_link and user_link != '无链接':
                        self.existing_user_links.add(user_link)
            
            duplicate_count = 0
            new_comments_count = 0
            
            # 处理每条评论
            for i, item in enumerate(comment_items):
                try:
                    # 提取用户链接
                    user_link = "无链接"
                    for selector in link_selectors:
                        try:
                            link_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if link_element:
                                user_link = link_element.attr('href') or "无链接"
                                if user_link.strip() and user_link != "无链接":
                                    # 补全为完整URL
                                    if user_link.startswith('/@'):
                                        # 删除/@前缀的正确方法
                                        user_link = user_link[2:]
                                    # 去掉"https://www.tiktok.com/@"前缀
                                    if user_link.startswith('https://www.tiktok.com/@'):
                                        user_link = user_link[24:]  # 删除前24个字符
                                    break
                        except:
                            continue
                     # 如果用户等无效链接这跳过
                    if user_link == "无链接":
                        continue
                     # 检查用户链接是否已存在，如果存在则跳过
                    if user_link != "无链接" and user_link in self.existing_user_links:
                        # 重复数据跳过
                        # logger.info(f"跳过重复数据：用户名={username}, 用户链接={user_link}")
                        duplicate_count += 1
                        continue
                    # 提取用户名
                    username = "未知用户"
                    for selector in username_selectors:
                        try:
                            username_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if username_element:
                                # 取username_element下的p标签
                                p_element = username_element.ele('tag:p')
                                username = p_element.text or "未知用户"
                                if username.strip():
                                    break
                        except:
                            continue
                    
                    # 提取评论内容
                    content = "无内容"

                    # 提取评论时间
                    comment_time = "未知"
                    for selector in time_selectors:
                        try:
                            time_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if time_element:
                                comment_time = time_element.text or "未知时间"
                                if comment_time.strip():
                                    break
                        except:
                            continue

                    # 提取头像URL
                    avatar_url = "无头像"
                    for selector in avatar_selectors:
                        try:
                            avatar_element = item.ele(selector, timeout=0.5)  # 添加超时控制
                            if avatar_element:
                                avatar_url = avatar_element.attr('src') or "无头像"
                                if avatar_url.strip() and avatar_url != "无头像":
                                    break
                        except:
                            continue

                   
                   
                    # 存储到列表
                    comment_dict = {
                        'nickname': username,
                        'comment': content,
                        'time': comment_time,
                        'user_link': user_link,
                        'avatar_url': avatar_url
                    }
                    self.comments_data.append(comment_dict)
                    new_comments_count += 1
                    
                    # 将新的用户链接添加到全局集合中，用于后续的去重检查
                    if user_link != "无链接":
                        self.existing_user_links.add(user_link)
                    
                    # 每处理1条评论就更新GUI显示
                    print(f"已处理 {i + 1}/{len(comment_items)} 条评论")
                    # 调用GUI回调函数更新界面
                    if self.gui_callback:
                        try:
                            # 立即调用回调函数更新GUI
                            # 打印data
                            print(comment_dict)
                            self.gui_callback([comment_dict])  # 只传递当前评论
                        except Exception as e:
                            print(f"GUI更新出错: {e}")
                            pass
                except Exception as e:
                    print(f"解析第 {i+1} 条评论时出错: {e}")
                    continue
            
            print(f"评论2提取完成，新增 {new_comments_count} 条有效评论，跳过 {duplicate_count} 条重复评论，当前总评论数 {len(self.comments_data)}")
            # 最后一次更新GUI界面，确保所有数据都显示
            # if self.gui_callback:
            #     try:
            #         import tkinter as tk
            #         if hasattr(self, '_root') and self._root:
            #             self._root.after(0, lambda: self.gui_callback(comments_data))
            #     except Exception as e:
            #         print(f"GUI最终更新出错: {e}")
            #         pass
            
        except Exception as e:
            logger.error(f"提取评论过程中发生错误: {e}")
            # self.comments_data = []
    
    def search_in_comments(self):
        """Interactively search collected rows by keyword (comment text or nickname)."""
        if not self.comments_data:
            print("没有评论数据可供检索")
            return

        print("\n=== 评论数据检索 ===")
        print(f"总共 {len(self.comments_data)} 条评论")

        # Cap the search window for speed unless the user opts out below.
        search_limit = 100

        keyword = input("\n请输入要检索的关键词 (直接回车跳过): ").strip()
        if not keyword:
            return

        search_all = False
        if len(self.comments_data) > search_limit:
            choice = input(f"数据量较大，只在前{search_limit}条中搜索? (y/n): ").strip().lower()
            if choice == 'n':
                search_all = True

        pool = self.comments_data if search_all else self.comments_data[:search_limit]
        needle = keyword.lower()
        hits = [
            row for row in pool
            if needle in (row.get('comment', '') or '').lower()
            or needle in (row.get('nickname', '') or '').lower()
        ]

        print(f"\n在{'全部' if search_all else f'前{search_limit}'}条评论中找到 {len(hits)} 条包含 '{keyword}' 的评论:")

        # Show at most 30 matches to keep the console readable.
        shown = min(len(hits), 30)
        for idx, row in enumerate(hits[:shown]):
            print(f"{idx+1}. 用户: {row.get('nickname', 'N/A')}")
            body = row.get('comment', 'N/A')
            if len(body) > 100:
                body = body[:100] + "..."
            print(f"   评论: {body}")
            print(f"   时间: {row.get('time', 'N/A')}")
            print("-" * 30)

        if len(hits) > shown:
            print(f"\n还有 {len(hits) - shown} 条匹配结果未显示")
    
    def save_to_csv(self, filename='comments.csv'):
        """Write collected rows to a CSV file.

        Args:
            filename: destination path; written as UTF-8 with BOM
                (utf-8-sig) so Excel detects the encoding correctly.

        Returns:
            bool: True on success, False on any write error (logged).
        """
        try:
            df = pd.DataFrame(self.comments_data)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Bug fix: the previous log message never included the filename.
            logger.info(f"评论数据已保存到 {filename}")
            return True
        except Exception as e:
            logger.error(f"保存CSV文件失败: {e}")
            return False
    
    def close(self):
        """Quit the browser (if one is open) and notify the GUI via the close callback."""
        try:
            if not self.page:
                return
            self.page.quit()
            # Let the GUI restore its controls once the browser is gone.
            callback = self.browser_close_callback
            if callback:
                try:
                    callback()
                except Exception as e:
                    logger.error(f"调用浏览器关闭回调函数时出错: {e}")
            logger.info("浏览器已关闭")
        except Exception as e:
            logger.error(f"关闭浏览器失败: {e}")
    # Reset collected data and counters
    def clear_data(self):
        """Reset collected rows and all paging/scrolling counters to their defaults."""
        defaults = {
            'comments_data': [],
            'current_page': 1,
            'total_pages': 1,
            'total_comments': 0,
            'scroll_count': 0,
            'scroll_delay': 2,
            'scroll_timeout': 10,
            'scroll_interval': 0.5,
        }
        for attr, value in defaults.items():
            setattr(self, attr, value)

# Module-level entry point
def crawl_douyin_comments(video_url, scroll_times=5):
    """Entry point: crawl comments from a video URL and save them to CSV.

    Returns True only when the browser started, comments were collected,
    and the CSV was written; the browser is always closed afterwards.
    """
    crawler = DouyinCrawler()
    saved = False
    try:
        if crawler.init_browser() and crawler.get_comments(video_url, scroll_times):
            saved = crawler.save_to_csv()
    finally:
        crawler.close()
    return saved