import json
import os
import re
import threading
import time

from playwright.sync_api import sync_playwright

from crawl4ai import async_webcrawler as crawler


class PersistentBrowser:
    """Keep a persistent Chrome window open via Playwright.

    The user logs in manually once; cookies/session live in
    ``user_data_dir`` so later background crawls reuse the login.
    ``self.browser`` actually holds a persistent *BrowserContext*
    (that is what ``launch_persistent_context`` returns).
    """

    def __init__(self):
        self.playwright = None
        self.browser = None       # persistent BrowserContext, set in start()
        self.context = None       # unused; kept for backward compatibility
        self.page = None          # main page, set in start()
        self.user_data_dir = "runtime"
        # self.crawler = crawler()
        self.is_running = True

    def start(self):
        """Launch Chrome with a persistent profile and keep it open."""
        # Make sure the profile directory exists so the session survives restarts.
        os.makedirs(self.user_data_dir, exist_ok=True)

        self.playwright = sync_playwright().start()

        # NOTE(review): hard-coded per-machine Chrome path — adjust as needed.
        executable_path = "C:/Users/Administrator/AppData/Local/Google/Chrome/Bin/chrome.exe"
        self.browser = self.playwright.chromium.launch_persistent_context(
            user_data_dir=self.user_data_dir,
            headless=False,
            channel="chrome",
            executable_path=executable_path,
            args=["--start-maximized"],
            viewport={"width": 1920, "height": 1080},
        )

        # A persistent context may already have an initial tab — reuse it.
        if self.browser.pages:
            self.page = self.browser.pages[0]
        else:
            self.page = self.browser.new_page()

        print("浏览器已启动，现在可以手动操作...")

    def user_login(self, login_url):
        """Open the login page and block until the user confirms they logged in."""
        self.page.goto(login_url)
        print(f"请打开浏览器并登录: {login_url}")
        input("登录完成后按回车键继续...")
        print("登录状态已保存")

    @staticmethod
    def _safe_filename(url):
        """Strip the URL scheme and replace characters that are invalid in
        Windows filenames with underscores (the old code only replaced '/')."""
        bare = url.split("//")[-1]
        return bare.translate(str.maketrans({c: "_" for c in '\\/:*?"<>|'}))

    def crawl_in_background(self, url):
        """Fetch ``url`` in a new tab; return its HTML as a str, or None on failure.

        NOTE(review): sync Playwright objects are not thread-safe, and this is
        called from both the REPL thread and the crawl thread — confirm the
        calls never overlap, or serialize them with a lock.
        """
        new_page = None
        try:
            new_page = self.browser.new_page()
            new_page.goto(url)
            return new_page.content()
        except Exception as e:
            print(f"抓取失败: {str(e)}")
            return None
        finally:
            # Always close the tab, even when goto()/content() raised —
            # the old code leaked a tab on every failed crawl.
            if new_page is not None:
                new_page.close()

    def start_periodic_crawling(self, urls, interval=3):
        """Start a daemon thread that re-crawls ``urls`` every ``interval`` seconds."""

        def crawling_task():
            while self.is_running:
                print(f"开始抓取 {len(urls)} 个页面...")
                for url in urls:
                    result = self.crawl_in_background(url)
                    if result:
                        print(f"抓取成功: {url}")
                        # ``result`` is an HTML string — the old code called
                        # ``.model_dump_json()`` on it, which raised
                        # AttributeError, so no file was ever written.
                        filename = f"result_{self._safe_filename(url)}.html"
                        with open(filename, "w", encoding="utf-8") as f:
                            f.write(result)
                print(f"抓取完成，下次抓取将在 {interval} 秒后")
                time.sleep(interval)

        threading.Thread(target=crawling_task, daemon=True).start()

    def keep_alive(self):
        """Run a tiny REPL on the main thread so the process (and browser) stay alive."""
        try:
            while True:
                raw = input("输入命令 (stop 退出): ").strip()
                # Lower-case only the command keyword; the old code lowered
                # the whole line, corrupting case-sensitive URL paths.
                cmd = raw.lower()
                if cmd == "stop":
                    break
                elif cmd.startswith("crawl "):
                    url = raw[6:].strip()
                    result = self.crawl_in_background(url)
                    if result:
                        # ``result`` is raw HTML text, so extract <title>
                        # instead of reading nonexistent attributes on a str.
                        m = re.search(r"<title[^>]*>(.*?)</title>", result,
                                      re.IGNORECASE | re.DOTALL)
                        title = m.group(1).strip() if m else ""
                        print(f"标题: {title}")
                        print(f"内容摘要: {result[:200]}...")
        except KeyboardInterrupt:
            print("程序结束")
        finally:
            self.is_running = False
            # Deliberately leave the browser open so the login session persists.
            # Call self.browser.close() here if shutdown should close it.


# Usage example: open a persistent browser, have the user log in once,
# then crawl the order-list page on a timer while reusing the session.
if __name__ == "__main__":
    # Renamed from ``crawler``: that name shadowed the module-level
    # ``crawl4ai`` import alias ``crawler``.
    browser = PersistentBrowser()
    browser.start()

    # Walk the user through the manual login flow.
    browser.user_login("https://fxg.jinritemai.com/login/common")

    # Pages to scrape periodically.
    target_urls = [
        "https://fxg.jinritemai.com/ffa/morder/order/list",
    ]

    # Kick off the background crawl thread (every 5 minutes).
    browser.start_periodic_crawling(target_urls, interval=300)

    # Block the main thread so the daemon crawler and browser stay alive.
    browser.keep_alive()