from playwright.sync_api import sync_playwright, TimeoutError as PlaywrightTimeoutError
import time
import os
import argparse
import logging
import json
import datetime
import re
from dotenv import load_dotenv

# Loading environment variables is no longer required, but the import is kept
# above for possible future extension.
# load_dotenv()

class FacebookScraper:
    """Scraper for a public Facebook page: collects post URLs, post content and comments.

    Drives a non-headless Chromium instance via Playwright, authenticates by
    injecting cookies loaded from a JSON file, and writes all results to a
    JSON file under the ``data/`` directory.
    """

    def __init__(self, page_name=None, max_posts=10, log_level=logging.INFO, cookies_file="cookies.json", output_file=None):
        """
        Initialize the Facebook scraper.

        Args:
            page_name (str): Name of the Facebook page to scrape
                (defaults to "AFKJourneyOfficial").
            max_posts (int): Maximum number of posts to collect.
            log_level (int): Logging level.
            cookies_file (str): Path to the cookie JSON file.
            output_file (str): Output JSON file path; defaults to
                "data/facebook_data_{page_name}_{timestamp}.json".
        """
        self.page_name = page_name or "AFKJourneyOfficial"  # default page
        self.max_posts = max_posts
        self.post_urls = []
        self.cookies_file = cookies_file

        # Make sure the data directory exists.
        os.makedirs("data", exist_ok=True)

        # Build the output file name.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        if output_file:
            # A bare file name (no directory part) is placed under data/.
            if not os.path.dirname(output_file):
                output_file = os.path.join("data", output_file)
        else:
            # Default output location under data/.
            output_file = os.path.join("data", f"facebook_data_{self.page_name}_{timestamp}.json")

        self.output_file = output_file

        # Initialize the result structure that _save_data_to_json serializes.
        self.data = {
            "scrape_info": {
                "page_name": self.page_name,
                "scrape_time": datetime.datetime.now().isoformat(),
                "total_posts": 0
            },
            "posts": []
        }

        # Set up logging.
        self.logger = self._setup_logger(log_level)

    def _setup_logger(self, log_level):
        """Create (or reuse) the console logger for this scraper.

        Returns:
            logging.Logger: The shared "FacebookScraper" logger.
        """
        logger = logging.getLogger("FacebookScraper")
        logger.setLevel(log_level)

        # BUGFIX: attach the console handler only once. logging.getLogger
        # returns the same logger object for the same name, so constructing
        # several scraper instances used to stack handlers and print every
        # log line multiple times.
        if not logger.handlers:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(log_level)

            # Log format.
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            console_handler.setFormatter(formatter)

            logger.addHandler(console_handler)

        return logger

    def _load_cookies(self):
        """Load cookies from the configured JSON file.

        Returns:
            list: The parsed cookie list, or [] on any failure (missing file,
            malformed JSON, or unexpected error) — failures are logged, not raised.
        """
        try:
            with open(self.cookies_file, 'r', encoding='utf-8') as f:
                cookies = json.load(f)
                self.logger.info(f"成功从 {self.cookies_file} 加载 {len(cookies)} 个cookie")
                return cookies
        except FileNotFoundError:
            self.logger.error(f"Cookie配置文件 {self.cookies_file} 不存在")
            return []
        except json.JSONDecodeError:
            self.logger.error(f"Cookie配置文件 {self.cookies_file} 格式错误")
            return []
        except Exception as e:
            self.logger.error(f"加载Cookie时出错: {e}")
            return []

    def _save_data_to_json(self):
        """Serialize the scraped data to ``self.output_file``.

        Returns:
            bool: True on success, False if writing failed (error is logged).
        """
        try:
            # Make sure the output directory exists.
            output_dir = os.path.dirname(self.output_file)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir, exist_ok=True)
                self.logger.info(f"创建输出目录: {output_dir}")

            # Refresh the total post count before writing.
            self.data["scrape_info"]["total_posts"] = len(self.data["posts"])

            with open(self.output_file, 'w', encoding='utf-8') as f:
                json.dump(self.data, f, ensure_ascii=False, indent=4)

            self.logger.info(f"数据已保存到 {self.output_file}")
            return True
        except Exception as e:
            self.logger.error(f"保存数据到JSON文件时出错: {e}")
            return False

    def _extract_post_id(self, post_url):
        """Extract the post ID from a post URL.

        Returns:
            str: The path segment following "posts/", or "unknown_id" when
            the URL does not match.
        """
        try:
            # The ID is the path segment after "posts/", up to a '/' or '?'.
            match = re.search(r'posts/([^/?]+)', post_url)
            if match:
                return match.group(1)
            return "unknown_id"
        except Exception:
            return "unknown_id"

    def start(self):
        """Run the full scrape and save the results to JSON."""
        self.logger.info(f"开始爬取Facebook页面: {self.page_name}")
        self.logger.info(f"最大爬取帖子数量: {self.max_posts}")
        self.logger.info(f"数据将保存到: {self.output_file}")

        self.login_to_facebook()

        # Persist whatever was scraped (possibly nothing on failure).
        self._save_data_to_json()

    def login_to_facebook(self):
        """Launch the browser, apply cookies, and scrape posts and comments."""
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=False, slow_mo=100)
            # BUGFIX: close the browser on every exit path (including
            # unexpected exceptions); the original leaked it on errors raised
            # after launch.
            try:
                page = browser.new_page()

                # Load cookies from the configuration file.
                cookies = self._load_cookies()
                if cookies:
                    page.context.add_cookies(cookies)
                    self.logger.info("已应用cookie，尝试使用cookie登录")
                else:
                    self.logger.warning("未加载到cookie，可能需要手动登录")

                page.set_viewport_size({"width": 1920, "height": 1080})

                # Navigate to the target Facebook page.
                page.goto(f"https://www.facebook.com/{self.page_name}")

                if not self._wait_for_page_to_load(page):
                    self.logger.error("页面加载失败，请检查网络连接或cookie是否有效")
                    return

                if not self._navigate_to_first_post(page):
                    self.logger.error("无法找到帖子，请检查页面名称是否正确或cookie是否有效")
                    return

                self.post_urls = self._get_post_urls(page)
                self.logger.info(f"找到的帖子链接数量: {len(self.post_urls)}")

                # If posts were found, scrape their content.
                if self.post_urls:
                    self._extract_posts(browser)
                else:
                    self.logger.warning("未找到任何帖子，请检查页面名称是否正确")
            finally:
                browser.close()

    def _wait_for_page_to_load(self, page):
        """Wait for the Facebook feed container to appear.

        NOTE(review): relies on an obfuscated Facebook class name — likely to
        break when Facebook changes its markup.
        """
        try:
            page.wait_for_selector('div.x1yztbdb.x1n2onr6.xh8yej3.x1ja2u2z', timeout=40000)
            return True
        except PlaywrightTimeoutError:
            self.logger.error("超时：等待页面加载超时")
            return False

    def _navigate_to_first_post(self, page):
        """Check that at least one post container is present on the page."""
        try:
            posts = page.locator("div.x1yztbdb.x1n2onr6.xh8yej3.x1ja2u2z")
            if posts.count() > 0:
                return True
            self.logger.warning("未找到帖子。")
            return False
        except PlaywrightTimeoutError:
            self.logger.error("超时：等待帖子加载超时")
            return False

    def _get_post_urls(self, page):
        """Collect up to ``self.max_posts`` post URLs from the timeline."""
        self.logger.info(f"获取帖子URL (最大数量: {self.max_posts})")

        # Scroll until enough posts are loaded to satisfy max_posts.
        self._load_posts_until_max(page)

        self.logger.info("正在提取帖子URL...")

        post_urls = []
        post_links = page.query_selector_all(f'//div[@data-pagelet="ProfileTimeline"]//a[contains(@href, "www.facebook.com/{self.page_name}/posts")]')

        self.logger.info(f"找到 {len(post_links)} 个帖子链接")

        # Process at most max_posts links.
        for i, link in enumerate(post_links[:self.max_posts]):
            href = link.get_attribute('href')
            if href:
                post_urls.append(href)
                self.logger.info(f"找到帖子链接 {i+1}/{min(len(post_links), self.max_posts)}: {href}")

        self.logger.info(f"已收集 {len(post_urls)} 个帖子URL (最大数量: {self.max_posts})")
        return post_urls

    def _load_posts_until_max(self, page):
        """Scroll the timeline until at least ``max_posts`` posts are loaded."""
        self.logger.info(f"加载帖子直到至少有 {self.max_posts} 个帖子")

        # Scroll counter and safety cap against infinite scrolling.
        scroll_count = 0
        max_scrolls = 100  # adjust as needed

        while scroll_count < max_scrolls:
            # Count the currently loaded post links.
            post_links = page.query_selector_all(f'//div[@data-pagelet="ProfileTimeline"]//a[contains(@href, "www.facebook.com/{self.page_name}/posts")]')
            current_post_count = len(post_links)

            self.logger.info(f"当前已加载 {current_post_count} 个帖子 (目标: {self.max_posts})")

            # Stop once enough posts are loaded.
            if current_post_count >= self.max_posts:
                self.logger.info(f"已达到目标帖子数量: {current_post_count} >= {self.max_posts}")
                break

            # Stop when the "loading more" indicator is gone (no more posts).
            target_element = page.locator('div[data-pagelet="ProfileTimeline"] > div.x1xzczws')
            if target_element.count() <= 0:
                self.logger.info("没有更多帖子可加载（未找到加载指示器）")
                break

            # Scroll down to trigger loading of more posts.
            page.mouse.wheel(0, 1000)
            page.wait_for_timeout(2000)  # wait_for_timeout instead of time.sleep
            scroll_count += 1

        if scroll_count >= max_scrolls:
            self.logger.warning(f"在加载 {self.max_posts} 个帖子之前达到最大滚动限制 ({max_scrolls})")

        self.logger.info("页面滚动完成")

    def _extract_posts(self, browser):
        """Visit each collected post URL and extract content and comments."""
        for i, post_url in enumerate(self.post_urls):
            self.logger.info(f"\n正在处理帖子 {i+1}/{len(self.post_urls)}: {post_url}")

            # Per-post result structure.
            post_data = {
                "post_id": self._extract_post_id(post_url),
                "post_url": post_url,
                "content": "",
                "comments_count": 0,
                "comments": []
            }

            page = browser.new_page()
            # BUGFIX: close the per-post page even if extraction raises;
            # the original leaked the page on any exception.
            try:
                page.goto(post_url)

                # Extract the post content.
                post_data["content"] = self._get_post_content(page)

                # Extract the comment count.
                comments_count = self._get_comments_count(page)
                post_data["comments_count"] = comments_count

                # Extract comments, if any.
                if comments_count > 0:
                    comments = self._get_comments(page)

                    for j, comment_text in enumerate(comments):
                        post_data["comments"].append({
                            "comment_id": j + 1,
                            "text": comment_text,
                            # Real timestamps are not scraped; record scrape time.
                            "timestamp": datetime.datetime.now().isoformat()
                        })
                else:
                    self.logger.info("没有评论")

                # Append the finished post to the aggregate data.
                self.data["posts"].append(post_data)
            finally:
                page.close()

    def _get_post_content(self, page):
        """Extract and return the post's text content (stripped)."""
        try:
            content_elements = page.locator('//*[contains(@id, ":")]//div[@dir="auto"]').all_text_contents()

            # Join all text fragments into one string.
            full_post_content = " ".join(content_elements)
            self.logger.info("帖子内容: " + full_post_content.strip())

            # Return the extracted content for use by callers.
            return full_post_content.strip()
        except PlaywrightTimeoutError:
            self.logger.error("获取帖子内容超时")
            return ""

    def _get_comments_count(self, page):
        """Extract the number of comments shown on the post.

        Returns:
            int: Parsed comment count, or 0 on any failure.
        """
        try:
            # Locate the button whose accessible name contains "条评论"
            # ("N comments"). BUGFIX: text_content() may return None, which
            # previously raised a TypeError on string concatenation below.
            comments_count_text = page.get_by_role("button", name="条评论").text_content() or ""
            self.logger.info("评论数文本: " + comments_count_text)

            # Parse the numeric part out of the label text.
            comments_count = 0
            try:
                if "条评论" in comments_count_text:
                    # e.g. extract "37" from "37条评论".
                    comments_count = int(comments_count_text.replace("条评论", "").strip())
                elif "评论" in comments_count_text:
                    # Handle alternative label formats.
                    comments_count = int(comments_count_text.split("评论")[0].strip())

                self.logger.info(f"评论数量（数字）: {comments_count}")
                return comments_count
            except ValueError:
                self.logger.warning("无法将评论数量转换为整数，使用默认值0")
                return 0
        except PlaywrightTimeoutError:
            self.logger.error("获取评论数量超时")
        except Exception as e:
            self.logger.error(f"获取评论数量时出错: {e}")

        return 0

    def _get_comments(self, page):
        """Extract comment texts from the post page.

        Returns:
            list[str]: One string per comment (multiple text divs within a
            comment are joined with " | "). Partial results are returned on error.
        """
        comments = []
        try:
            # First switch comment ordering to "newest first" and scroll to
            # load all comments.
            self._load_all_comments(page)

            # Give the comments time to render.
            page.wait_for_timeout(2000)

            comment_containers = page.locator('div.x1lliihq.xjkvuk6.x1iorvi4')
            count = comment_containers.count()
            self.logger.info(f"找到 {count} 个评论容器")

            for i in range(count):
                try:
                    # The i-th comment container.
                    container = comment_containers.nth(i)

                    # All dir="auto" divs hold the comment's text fragments.
                    text_divs = container.locator('div[dir="auto"]')
                    text_divs_count = text_divs.count()

                    if text_divs_count == 0:
                        continue

                    # Single fragment: take it directly.
                    if text_divs_count == 1:
                        comment_text = text_divs.first.text_content().strip()
                        comments.append(comment_text)
                    else:
                        # Multiple fragments: join the non-empty ones.
                        combined_text = []
                        for j in range(text_divs_count):
                            div_text = text_divs.nth(j).text_content().strip()
                            if div_text:  # keep only non-empty fragments
                                combined_text.append(div_text)

                        if combined_text:
                            comment_text = " | ".join(combined_text)
                            comments.append(comment_text)

                    self.logger.info(f"评论 {i+1}: {comments[-1] if comments else '(空)'}")

                except Exception as comment_error:
                    self.logger.error(f"获取第 {i+1} 个评论时出错: {comment_error}")
                    continue

            self.logger.info(f"成功获取 {len(comments)} 条评论")
            return comments
        except PlaywrightTimeoutError:
            self.logger.error("获取评论超时")
        except Exception as e:
            self.logger.error(f"获取评论时出错: {e}")

        return comments

    def _load_all_comments(self, page):
        """Switch comment ordering to newest-first and scroll to load all comments."""
        try:
            # Open the comment-sorting menu.
            self.logger.info("尝试点击评论排序菜单...")
            menu_button = page.locator('div.x6s0dn4.x78zum5.xdj266r.x11i5rnm.xat24cr.x1mh8g0r.xe0p6wg > div > span').first
            menu_button.click()

            # Give the menu time to appear.
            page.wait_for_timeout(1000)

            # Select the "newest first" ("由新到旧") option if present.
            self.logger.info("尝试选择'由新到旧'选项...")
            all_comments_option = page.get_by_text("由新到旧", exact=True)
            if all_comments_option.count() > 0:
                all_comments_option.click()
                self.logger.info("成功选择'由新到旧'选项")

            # Wait for the menu to close and the page to update.
            page.wait_for_timeout(1000)

            # Scroll the comment pane until its load indicator has no children.
            self.logger.info("开始滚动加载更多评论...")
            max_scrolls = 20  # cap scrolling to avoid an infinite loop
            scroll_count = 0

            while scroll_count < max_scrolls:
                # The loading indicator: empty children means everything loaded.
                target_element = page.locator('div.xdj266r.x11i5rnm.xat24cr.x1mh8g0r.xexx8yu.x4uap5.x18d9i69.xkhd6sd.x78zum5.x13a6bvl')
                if target_element.count() > 0:
                    children_count = target_element.locator('> *').count()
                    if children_count == 0:
                        self.logger.info("已加载所有评论，停止滚动")
                        break

                # Scroll inside the comments container (not the whole page).
                comments_container = page.locator('div.xb57i2i.x1q594ok.x5lxg6s.x78zum5.xdt5ytf.x6ikm8r.x1ja2u2z.x1pq812k.x1rohswg.xfk6m8.x1yqm8si.xjx87ck.xx8ngbg.xwo3gff.x1n2onr6.x1oyok0e.x1odjw0f.x1iyjqo2.xy5w88m').first
                comments_container.evaluate("e => e.scrollTop += 500")
                page.wait_for_timeout(1000)  # let new content load
                scroll_count += 1

            if scroll_count >= max_scrolls:
                self.logger.warning(f"已达到最大滚动次数({max_scrolls})，停止滚动")

        except PlaywrightTimeoutError:
            self.logger.error("选择评论排序选项时超时")
        except Exception as e:
            self.logger.error(f"选择评论排序选项时出错: {e}")


def main():
    """Entry point: parse command-line options and launch the scraper."""
    arg_parser = argparse.ArgumentParser(description='Facebook页面爬虫')
    arg_parser.add_argument('--page_name', type=str, help='要爬取的Facebook页面名称')
    arg_parser.add_argument('--max_posts', type=int, default=10, help='最大爬取帖子数量（默认：10）')
    arg_parser.add_argument('--log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                            default='INFO', help='日志级别（默认：INFO）')
    arg_parser.add_argument('--cookies_file', type=str, default='cookies.json',
                            help='cookie配置文件路径（默认：cookies.json）')
    arg_parser.add_argument('--output_file', type=str,
                            help='输出JSON文件路径（默认：data/facebook_data_{page_name}_{timestamp}.json）')

    opts = arg_parser.parse_args()

    # Map the textual log level onto the logging module's numeric constant,
    # then build and run the scraper in one go.
    FacebookScraper(
        page_name=opts.page_name,
        max_posts=opts.max_posts,
        log_level=getattr(logging, opts.log_level),
        cookies_file=opts.cookies_file,
        output_file=opts.output_file,
    ).start()


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main() 