import re
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
import urllib.parse
from utils import convert_facebook_time_to_timestamp

class FacebookScraper:
  """Scrapes posts and comments from a given Facebook page.

  All navigation happens through Playwright ``page``/locator objects passed
  into each method; the instance itself only carries the target page name
  and a logger.
  """

  def __init__(self, page_name, logger):
    """Initialize the scraper.

    Args:
      page_name: Page slug as it appears in post URLs
        (``www.facebook.com/<page_name>/posts/...``).
      logger: Logger used for progress and error reporting.
    """
    self.logger = logger
    self.page_name = page_name

  def get_posts(self, page, limit=10):
    """Scroll the profile timeline until at least ``limit`` post links are
    present (or the timeline is exhausted), then collect up to ``limit``
    unique posts.

    Args:
      page: Playwright page currently showing the profile timeline.
      limit: Maximum number of posts to return.

    Returns:
      List of ``{"post_url": str, "post_id": str}`` dicts, deduplicated
      by post id.
    """
    posts = []
    unique_ids = set()

    while True:
      post_links_ele = page.query_selector_all(f'//div[@data-pagelet="ProfileTimeline"]//a[contains(@href, "www.facebook.com/{self.page_name}/posts") and not(contains(@href, "comment_id"))]')
      if len(post_links_ele) >= limit:
        break

      # This placeholder element is present only while more timeline
      # content can still load; once it is gone, stop scrolling.
      target_element = page.locator('div[data-pagelet="ProfileTimeline"] > div.x1xzczws')
      if target_element.count() <= 0:
        break

      page.mouse.wheel(0, 1000)
      page.wait_for_timeout(2000)

    for link in post_links_ele:
      href = link.get_attribute('href')
      if not href:
        continue

      post_id = self.get_post_id(href)
      # BUGFIX: deduplicate on post_id -- the set stores ids, but the
      # original code tested the full href against it, so the check never
      # matched and duplicate posts (same id, different query string)
      # slipped through.
      if post_id not in unique_ids:
        unique_ids.add(post_id)
        posts.append({
          "post_url": href,
          "post_id": post_id
        })
        self.logger.info(f"找到帖子: id={post_id}, url={href}")

      if len(posts) >= limit:
        break

    return posts

  def get_post_id(self, post_url):
    """Extract the post ID from a post URL.

    Returns the path segment after ``posts/``, or ``"unknown_id"`` when the
    URL does not match (or is not a string at all, e.g. ``None``).
    """
    try:
      match = re.search(r'posts/([^/?]+)', post_url)
      if match:
        return match.group(1)
      return "unknown_id"
    except Exception:
      # re.search raises TypeError when post_url is not str/bytes.
      return "unknown_id"

  def find_post_content(self, post_dialog_ele):
    """Return the post's text content, or "" when it cannot be read.

    Joins the text of every ``//*[@id]/div/div/span`` element inside the
    post dialog with single spaces.
    """
    try:
      content_elements = post_dialog_ele.locator('//*[@id]/div/div/span').all_text_contents()
      post_content = " ".join(content_elements).strip()
      return post_content
    except PlaywrightTimeoutError:
      self.logger.warning("获取帖子内容时超时")
      return ""
    except Exception as content_error:
      self.logger.warning(f"获取帖子内容时出错: {content_error}")
      return ""

  def load_all_comments(self, post_dialog_ele, page):
    """Switch comment ordering to "newest first" and scroll the post
    dialog until no more comments load.

    Scrolling stops when the loading placeholder disappears, when the
    comment count stays unchanged for 3 consecutive scrolls, or after a
    bounded number of attempts.
    """
    try:
      # Open the comment-sort dropdown (element with aria-haspopup="menu").
      self.logger.info("尝试点击评论排序菜单...")
      menu_button = post_dialog_ele.locator('//*[@aria-expanded][@aria-haspopup="menu"]/span').first
      if menu_button.count() <= 0:
        self.logger.info("评论排序菜单不存在")
        return

      menu_button.click()

      # Give the dropdown a moment to appear.
      page.wait_for_timeout(1000)

      # Select the "newest first" ("由新到旧") ordering option if present.
      self.logger.info("尝试选择'由新到旧'选项...")
      all_comments_option = page.get_by_text("由新到旧", exact=True)

      if all_comments_option.count() > 0:
        all_comments_option.click()
        self.logger.info("成功选择'由新到旧'选项")

      # Wait for the menu to close and the list to refresh.
      page.wait_for_timeout(1000)

      # Scroll the dialog until the loading placeholder has no children.
      self.logger.info("开始滚动加载更多评论...")
      scroll_container = post_dialog_ele.locator('> div > div > div > div:nth-child(2)')
      if scroll_container.count() <= 0:
        self.logger.info("滚动容器不存在，停止滚动")
        return

      comments_container = post_dialog_ele.locator('div[data-visualcompletion="ignore-dynamic"] > div > div[class] > div:nth-child(3)')

      if comments_container.count() <= 0:
        self.logger.info("评论容器不存在，停止滚动")
        return

      max_attempts = 50    # hard cap on scroll iterations
      no_change_count = 0  # consecutive scrolls with no new comments

      while max_attempts > 0:
        # BUGFIX: count the attempt up front. The original decremented at
        # the bottom of the loop, so the error paths that `continue` early
        # (count failure, scroll failure) never consumed an attempt and
        # could loop forever on a persistently failing page.
        max_attempts -= 1
        try:
          # Bail out if the page has gone away.
          if not page.url:
            self.logger.error("页面已失效，停止滚动")
            break

          # An empty loading div means every comment has been rendered.
          target_element = comments_container.locator('> div[class]')
          target_count = 0
          children_count = 0

          try:
            target_count = target_element.count()
            if target_count > 0:
              children_count = target_element.locator('> *').count()
          except Exception as count_error:
            self.logger.warning(f"获取元素数量时出错: {count_error}")
            # Wait, then retry on the next attempt.
            page.wait_for_timeout(2000)
            continue

          if target_count > 0:
            if children_count == 0:
              self.logger.info("已加载所有评论，停止滚动")
              break
            else:
              self.logger.info("loading div 有子元素，继续滚动")
          else:
            self.logger.info("loading div不存在，停止滚动")
            break

          # Snapshot the comment count before scrolling.
          comment_containers_count_before = 0
          try:
            comment_containers_count_before = comments_container.locator('> div:not([class])').count()
            self.logger.info(f"滚动前评论容器数量: {comment_containers_count_before}")
          except Exception as count_error:
            self.logger.warning(f"获取滚动前评论数量时出错: {count_error}")

          # Scroll down inside the dialog.
          try:
            scroll_container.evaluate("e => e.scrollTop += 500")
            page.wait_for_timeout(1500)  # give the page time to respond
          except Exception as scroll_error:
            self.logger.warning(f"滚动时出错: {scroll_error}")
            page.wait_for_timeout(2000)
            continue

          # Count again after scrolling.
          comment_containers_count = 0
          try:
            comment_containers_count = comments_container.locator('> div:not([class])').count()
            self.logger.info(f"滚动后评论容器数量: {comment_containers_count}")
          except Exception as count_error:
            self.logger.warning(f"获取滚动后评论数量时出错: {count_error}")
            # Counting failed; move on to the next attempt.
            continue

          if comment_containers_count == comment_containers_count_before:
            no_change_count += 1
            self.logger.info(f"评论容器数量没有变化 ({no_change_count}/3)")
            if no_change_count >= 3:  # stop only after 3 unchanged scrolls
              self.logger.info("连续多次无变化，停止滚动")
              break
          else:
            no_change_count = 0  # progress was made; reset the counter

        except PlaywrightTimeoutError as timeout_error:
          self.logger.error(f"滚动过程中超时: {timeout_error}")
          break
        except Exception as e:
          self.logger.error(f"滚动过程中发生未知错误: {e}")
          # Back off before retrying.
          page.wait_for_timeout(3000)
          if max_attempts <= 0:
            self.logger.error("达到最大重试次数，停止滚动")
            break
          continue

      self.logger.info("加载所有评论完成")

    except PlaywrightTimeoutError:
      self.logger.error("选择评论排序选项时超时")

  def get_comments(self, post_dialog_ele):
    """Extract all currently loaded comments from the post dialog.

    Returns:
      List of dicts with keys ``comment_id``, ``comment_author``,
      ``comment_time`` and ``comment_content``; empty list on failure.
      Comments with no readable text are skipped.
    """
    comments = []

    try:
      comments_container = post_dialog_ele.locator('div[data-visualcompletion="ignore-dynamic"] > div > div[class] > div:nth-child(3)')
      comment_containers = comments_container.locator('> div:not([class])')

      count = comment_containers.count()
      self.logger.info(f"找到 {count} 个评论容器")

      for i in range(count):
        try:
          container = comment_containers.nth(i)

          comment_id_ele = container.locator('> div > div > div > div:nth-child(2) a[role="link"]').first
          comment_url = comment_id_ele.get_attribute('href')
          comment_id = None

          # Author name; fall back to a placeholder on failure.
          try:
            comment_author = comment_id_ele.text_content(timeout=5000).strip()
          except Exception as author_error:
            self.logger.warning(f"获取第 {i+1} 个评论作者时出错: {author_error}")
            comment_author = "unknown_author"

          comment_time_ele = container.locator('li a[role="link"]').first
          # Relative timestamp text; fall back to a placeholder on failure.
          try:
            comment_time_str = comment_time_ele.text_content(timeout=5000).strip()
          except Exception as time_error:
            self.logger.warning(f"获取第 {i+1} 个评论时间时出错: {time_error}")
            comment_time_str = "未知时间"
          comment_time = convert_facebook_time_to_timestamp(comment_time_str)

          if comment_url:
            # The id lives in the URL's comment_id query parameter,
            # URL-encoded.
            match = re.search(r'comment_id=([^&]+)', comment_url)
            if match:
              encoded_id = match.group(1)
              comment_id = urllib.parse.unquote(encoded_id)
            else:
              comment_id = "unknown_comment_id"
          else:
            comment_id = "unknown_comment_id"

          # Comment text is spread over one or more dir="auto" divs.
          text_divs = container.locator('div[dir="auto"]')
          text_divs_count = text_divs.count()

          if text_divs_count == 0:
            continue

          if text_divs_count == 1:
            # Single text element: read it directly.
            try:
              comment_text = text_divs.first.text_content(timeout=5000).strip()
            except Exception as text_error:
              self.logger.warning(f"获取第 {i+1} 个评论文本时出错: {text_error}")
              comment_text = ""
          else:
            # Multiple text elements: join the non-empty fragments.
            combined_text = []
            for j in range(text_divs_count):
              try:
                div_text = text_divs.nth(j).text_content(timeout=5000).strip()
                if div_text:  # keep only non-empty fragments
                  combined_text.append(div_text)
              except Exception as text_error:
                self.logger.warning(f"获取第 {i+1} 个评论第 {j+1} 个文本片段时出错: {text_error}")
                continue
            if combined_text:
              comment_text = " | ".join(combined_text)
            else:
              comment_text = ""

          # Only keep comments that actually have text.
          if comment_text.strip():
            comments.append({
              "comment_id": comment_id,
              "comment_author": comment_author,
              "comment_time": comment_time,
              "comment_content": comment_text,
            })
          else:
            self.logger.warning(f"第 {i+1} 个评论内容为空，跳过")
        except Exception as comment_error:
          self.logger.error(f"获取第 {i+1} 个评论时出错: {comment_error}")
          continue
      return comments
    except Exception as e:
      self.logger.error(f"获取评论时出错: {e}")
      return []
  


