import asyncio
import csv
import random
import time
import json
import base64
import platform
import os
import winreg  # 仅适用于Windows
import shutil  # 适用于Linux和macOS
import sys
import re
import aiohttp
import requests
from urllib.parse import urlparse, parse_qs
from PyQt5.QtWidgets import QApplication
from FB_loginwin import win_main
from playwright.async_api import async_playwright
from FB_status import StatusWindow
from database_manager import db_manager


class Crawler:
    def __init__(self, cookies, params):
        """Hold crawl configuration, session cookies, and run-time state."""
        # --- session / login state ---
        self.cookies = cookies
        self.username = None
        self.password = None
        self.is_logged_in = False

        # --- browser handles (populated by start()) ---
        self.browser = None
        self.page = None
        self.browser_path = get_chrome_path()

        # --- task parameters ---
        self.params = params
        self.IsKeys = params.get('search_content')  # search keyword(s), '#'-separated
        self.delay = 25

        # --- results / UI / progress ---
        self.search_results = []  # collected member/follower page URLs
        self.ui_update_lock = asyncio.Lock()  # serializes status-window updates
        self.status_window = None  # StatusWindow instance, set lazily in start()
        self.groups_name = ""
        self.groups_num = ""
        self.supportId = ""
        self.post_user_cunt = 0  # NOTE(review): likely a typo of "count"; kept for compatibility
        self.post_name = ""

    async def safe_update_status(self, text):
        """安全的异步状态更新"""
        async with self.ui_update_lock:
            if self.status_window:
                loop = asyncio.get_event_loop()
                loop.call_soon_threadsafe(
                    lambda: self.status_window.update_signal.emit(text)
                )
            await asyncio.sleep(0.01)  # 添加微小延迟

    async def robust_update_status(self, text, max_retries=3):
        """带重试机制的状态更新"""
        for attempt in range(max_retries):
            try:
                await self.safe_update_status(text)
                return True
            except Exception as e:
                print(f"状态更新失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                await asyncio.sleep(0.5 * (attempt + 1))
        return False

    async def cleanup(self):
        """清理浏览器资源"""
        try:
            if self.browser:
                await self.browser.close()
                self.browser = None
                self.page = None
        except Exception as e:
            print(f"清理资源时出错: {str(e)}")

    async def start(self):
        """Launch Chromium via Playwright, restore cookies, and run the task.

        Dispatch (driven by self.params):
          - params['type'] matching 'like': like feed posts — the home feed
            when params['links'] is empty, otherwise the given post links.
          - otherwise params['action'] == 'search': collect group / fan-page
            addresses by keyword; 'confirm': crawl member/follower lists.
        The browser and Playwright are always shut down in the finally block.
        """
        playwright = None
        try:
            playwright = await async_playwright().start()
            # Hardened launch flags: hide automation fingerprints and disable
            # background throttling / assorted Chrome features.
            browser_args = [
                '--no-first-run',
                '--no-default-browser-check',
                '--disable-background-timer-throttling',
                '--disable-backgrounding-occluded-windows',
                '--disable-renderer-backgrounding',
                '--disable-features=TranslateUI',
                '--disable-ipc-flooding-protection',
                '--disable-default-apps',
                '--disable-extensions',
                '--disable-component-extensions-with-background-pages',
                '--disable-blink-features=AutomationControlled',
                '--disable-dev-shm-usage',
                '--no-sandbox',
                '--disable-web-security',
                '--disable-features=VizDisplayCompositor',
                '--disable-back-forward-cache',
                '--disable-site-isolation-trials'
            ]
            self.browser = await playwright.chromium.launch(headless=False, args=browser_args,
                                                            executable_path=self.browser_path)
            context = await self.browser.new_context(
                user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
            )
            # Mask navigator.webdriver and fake window.chrome before any page
            # script runs, to reduce bot detection.
            await context.add_init_script("""
                   Object.defineProperty(navigator, 'webdriver', {
                       get: () => undefined,
                   });
                   window.chrome = {
                       runtime: {},
                   };
               """)
            if self.cookies:
                await context.add_cookies(self.cookies)

            self.page = await context.new_page()

            # NOTE(review): substring membership — any value whose characters
            # form a substring of 'like' (including '') passes; presumably
            # this was meant to be == 'like'. Confirm against callers.
            if self.params["type"] in 'like':
                app = QApplication.instance()
                if not app:
                    app = QApplication(sys.argv)

                # Create the status window and keep a reference for updates.
                self.status_window = StatusWindow()
                self.status_window.show()

                # Make sure the window is actually painted.
                QApplication.processEvents()
                if not self.params["links"]:
                    await self.getusers_like()  # no explicit links: like the home feed
                else:
                    await self.getusers_like_url(self.params["links"])  # like the given post links

            else:
                # Choose the operation based on the 'action' parameter.
                action = self.params.get('action', 'search')
                if action == 'search':
                    # NOTE(review): another substring test; the fallback value
                    # 'combo_value' is not in "社團", so missing combo_value
                    # routes to key_fans.
                    if self.params.get('combo_value', 'combo_value') in "社團":
                        await self.key_groups()
                    else:
                        await self.key_fans()
                elif action == 'confirm':
                    app = QApplication.instance()
                    if not app:
                        app = QApplication(sys.argv)

                    # Create the status window and keep a reference for updates.
                    self.status_window = StatusWindow()
                    self.status_window.show()

                    # Make sure the window is actually painted.
                    QApplication.processEvents()
                    await asyncio.sleep(0.5)  # give the window a moment to appear
                    if self.params.get('combo_value', 'combo_value') in "社團":
                        await self.getusers()
                    else:
                        await self.getusers_fans()
            await self.robust_update_status(f"全部任務完成")
        except Exception as e:
            print(f"任务执行出错: {str(e)}")
            raise
        finally:
            # Always release browser / Playwright resources.
            if self.browser:
                await self.browser.close()
            if playwright:
                await playwright.stop()

    async def start_confirm_action(self, params):
        """Run the 'confirm' step: store *params*, then crawl group members."""
        self.params = params
        await self.getusers()

    async def check_cookies_valid(self):
        """检查cookies是否有效"""
        try:
            c_user = next((c for c in self.cookies if c["name"] == "c_user"), None)
            xs = next((c for c in self.cookies if c["name"] == "xs"), None)
            current_time = time.time()
            return c_user and xs and xs["expires"] > current_time
        except:
            return False

    async def login_with_gui(self):
        """Prompt for credentials via the login window and log in in-browser.

        Any failure — including the user cancelling the dialog — is reported
        via print and leaves ``is_logged_in`` False instead of propagating.
        """
        try:
            credentials = win_main()
            if not credentials:
                raise Exception("用户取消登录")

            self.username = credentials["username"]
            self.password = credentials["password"]
            await self.perform_browser_login()
        except Exception as e:
            print(f"GUI登录失败: {str(e)}")
            self.is_logged_in = False

    async def perform_browser_login(self):
        """Log into Facebook with username/password in a visible browser.

        Fills the login form with human-like pauses, waits for the post-login
        redirect, persists session cookies to FB.json, and sets
        ``is_logged_in``. Raises on a detected login failure.

        NOTE(review): the Playwright instance started here is never
        ``stop()``-ed, and the inner except swallows navigation timeouts that
        are not login/checkpoint pages — flagged for a follow-up fix.
        """
        try:
            playwright = await async_playwright().start()
            self.browser = await playwright.chromium.launch(
                headless=False,
                executable_path=self.browser_path
            )
            page = await self.browser.new_page(
                user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36")
            await page.goto(url="https://www.facebook.com/login", wait_until='load', timeout=50000)

            await asyncio.sleep(random.uniform(1.5, 3.5))

            # Type with human-like pacing to reduce bot detection.
            await page.locator("//input[@id='email']").first.fill(self.username)
            await asyncio.sleep(random.uniform(0.5, 1.2))
            await page.locator("//input[@id='pass']").fill(self.password)
            await asyncio.sleep(random.uniform(0.8, 1.5))

            # Submit the login form.
            await page.click("//button[@id='loginbutton']")

            # Decide success by waiting for the post-login redirect URL.
            try:
                await page.wait_for_url("https://www.facebook.com/?lsrc=lb", timeout=120000)
                title = await page.title()
                if "Facebook" in title:
                    await asyncio.sleep(3)
                    self.is_logged_in = True
                else:
                    print(f"標題未包含 'Facebook'，當前標題: {title}，等待10秒後重試...")
                    await asyncio.sleep(10)  # wait 10 seconds before continuing
                await self.class_fb_set()
            except Exception as e:
                # Inspect the current URL to distinguish a real login failure
                # (login/checkpoint pages) from a mere navigation hiccup.
                current_url = await page.evaluate("() => window.location.href")
                if "login" in current_url or "authentication" in current_url or "checkpoint" in current_url:
                    print(f"登录失败，当前URL: {current_url}")
                    self.is_logged_in = False
                    await self.browser.close()
                    raise Exception("登录失败，请检查用户名和密码")

            # Login succeeded: persist cookies for later sessions.
            self.cookies = await page.context.cookies()
            with open("FB.json", "w") as f:
                json.dump(self.cookies, f, indent=4)

            print('登录成功')
            self.is_logged_in = True
            await self.browser.close()
        except Exception as e:
            print(f"浏览器登录过程中发生错误: {str(e)}")
            self.is_logged_in = False
            if self.browser:
                await self.browser.close()
            raise Exception(f"登录失败: {str(e)}")

    # 获取请求Auth签名
    def getBearerAuth(self):
        ds_user_id = next((cookie for cookie in self.cookies if cookie['name'] == "ds_user_id"), None)
        sessionid = next((cookie for cookie in self.cookies if cookie['name'] == "sessionid"), None)
        auth_str = "{\"ds_user_id\":\"" + ds_user_id['value'] + "\",\"sessionid\":\"" + sessionid['value'] + "\"}"
        auth_bytes = auth_str.encode('utf-8')
        auth_str = base64.b64encode(auth_bytes)
        return "Bearer IGT:2:" + auth_str.decode('utf-8')

    async def key_groups(self):
        """Keyword-search public groups and collect member-page URLs.

        Splits ``self.IsKeys`` on '#' and, for each keyword, opens Facebook's
        group search (public-groups filter), scrolling until
        ``params['search_count']`` group links are found or loading stalls.
        Member-page URLs are appended to ``self.search_results``, which is
        also returned.
        """
        print("關鍵字社團地址")
        key = self.IsKeys.split('#')

        # Process every keyword in turn.
        for key_index in range(len(key)):
            current_keyword = key[key_index]
            print(f"處理關鍵字 {key_index + 1}/{len(key)}: {current_keyword}")

            # Open the group-search results (filters= is a fixed base64
            # preset selecting public groups).
            await self.page.goto(
                url="https://www.facebook.com/search/groups?q=" + current_keyword + "&filters=eyJwdWJsaWNfZ3JvdXBzOjAiOiJ7XCJuYW1lXCI6XCJwdWJsaWNfZ3JvdXBzXCIsXCJhcmdzXCI6XCJcIn0ifQ%3D%3D",
                wait_until='load',
                timeout=50000
            )

            # Let the page settle (longer when the title looks wrong).
            title = await self.page.title()
            if "Facebook" in title:
                await asyncio.sleep(3)
            else:
                print(f"標題未包含 'Facebook'，當前標題: {title}，等待10秒後重試...")
                await asyncio.sleep(10)

            post = int(self.params.get('search_count'))  # number of groups to collect
            current_groups_postlist = []
            max_scroll_attempts = 10
            scroll_attempts = 0
            previous_count = 0

            try:
                # Batch mode: scroll and harvest every visible group link.
                while len(current_groups_postlist) < post and scroll_attempts < max_scroll_attempts:
                    # All group links currently rendered in the results feed
                    # (aria-label matches both Traditional and Simplified UI).
                    group_elements = await self.page.query_selector_all(
                        '//div[@aria-label="搜尋結果" or @aria-label="搜索结果"]//div[@role="feed"]//a[contains(@href, "/groups/") or contains(@href, "/profile.php")]'
                    )

                    current_count = len(group_elements)
                    print(f"当前找到 {current_count} 个群组链接")

                    # Only process links found since the previous pass.
                    # NOTE(review): slicing by len(current_groups_postlist)
                    # assumes one stored URL per element; duplicate or
                    # non-group links skew the offset — confirm acceptable.
                    for element in group_elements[len(current_groups_postlist):]:
                        try:
                            post_url_before = await element.get_attribute('href')

                            if post_url_before and len(current_groups_postlist) < post:
                                # Turn the group link into its members-page URL.
                                group_id = self.extract_group_id(post_url_before)
                                if group_id:
                                    members_url = f"https://www.facebook.com/groups/{group_id}/members"
                                    if members_url not in current_groups_postlist:
                                        current_groups_postlist.append(members_url)
                                        print(f"找到群组 {len(current_groups_postlist)}: {members_url}")

                                        # Stop early once the quota is reached.
                                        if len(current_groups_postlist) >= post:
                                            break
                        except Exception as e:
                            print(f"处理群组链接时出错: {str(e)}")
                            continue

                    # Quota reached for this keyword?
                    if len(current_groups_postlist) >= post:
                        break

                    # Detect stalled loading (no new links after scrolling).
                    if current_count == previous_count:
                        scroll_attempts += 1
                        print(f"没有新内容加载，尝试次数: {scroll_attempts}/{max_scroll_attempts}")
                    else:
                        scroll_attempts = 0

                    previous_count = current_count

                    # Scroll to the bottom to trigger lazy loading.
                    if len(current_groups_postlist) < post:
                        await self.page.evaluate("""
                            window.scrollTo({
                                top: document.body.scrollHeight,
                                behavior: 'smooth'
                            });
                        """)
                        await asyncio.sleep(2)  # wait for new content to load

            except Exception as e:
                print(f"搜索过程中出错: {str(e)}")

            # Merge this keyword's results into the overall list.
            self.search_results.extend(current_groups_postlist)
            print(f"关键字 '{current_keyword}' 完成，找到 {len(current_groups_postlist)} 个群组")

        print(f"搜索完成，共找到 {len(self.search_results)} 个地址")
        return self.search_results

    def extract_group_id(self, url):
        """从Facebook群组URL中提取群组ID"""
        try:
            # 移除查询参数
            clean_url = url.split('?')[0]

            # 匹配多种可能的群组URL格式
            patterns = [
                r'https?://(?:www\.)?facebook\.com/groups/([^/?]+)/?',
                r'/groups/([^/?]+)/?',
            ]

            for pattern in patterns:
                match = re.search(pattern, clean_url)
                if match:
                    return match.group(1)

            return None
        except Exception as e:
            print(f"提取群组ID时出错: {str(e)}")
            return None

    async def key_fans(self):
        """Keyword-search fan pages and collect follower-page URLs.

        Mirrors :meth:`key_groups` but searches pages instead of groups:
        splits ``self.IsKeys`` on '#', scrolls each result feed until
        ``params['search_count']`` page links are found or loading stalls,
        and appends follower-page URLs to ``self.search_results``.
        """
        print("關鍵字社團地址")
        key = self.IsKeys.split('#')

        # Process every keyword in turn.
        for key_index in range(len(key)):
            current_keyword = key[key_index]
            print(f"處理關鍵字 {key_index + 1}/{len(key)}: {current_keyword}")

            await self.page.goto(
                url="https://www.facebook.com/search/pages/?q=" + current_keyword,
                wait_until='load',
                timeout=50000
            )
            # Let the page settle (longer when the title looks wrong).
            title = await self.page.title()
            if "Facebook" in title:
                await asyncio.sleep(3)
            else:
                print(f"標題未包含 'Facebook'，當前標題: {title}，等待10秒後重試...")
                await asyncio.sleep(10)

            post = int(self.params.get('search_count'))  # number of pages to collect
            current_groups_postlist = []
            max_scroll_attempts = 10
            scroll_attempts = 0
            previous_count = 0

            try:
                # Batch mode: scroll and harvest every visible page link.
                while len(current_groups_postlist) < post and scroll_attempts < max_scroll_attempts:
                    # All page links currently rendered in the results feed.
                    group_elements = await self.page.query_selector_all(
                        '//div[@aria-label="搜尋結果" or @aria-label="搜索结果"]//div[@role="feed"]//a[contains(@href, "/www.facebook.com/") or contains(@href, "/profile.php")]'
                    )

                    current_count = len(group_elements)
                    print(f"当前找到 {current_count} 个群组链接")

                    # Only process links found since the previous pass.
                    # NOTE(review): same offset assumption as key_groups —
                    # one stored URL per element; duplicates skew the slice.
                    for element in group_elements[len(current_groups_postlist):]:
                        try:
                            post_url_before = await element.get_attribute('href')

                            if post_url_before and len(current_groups_postlist) < post:
                                # Turn the page link into its followers-page URL.
                                print(post_url_before)
                                group_id = self.extract_fans_id(post_url_before)
                                if group_id:
                                    members_url = f"https://www.facebook.com/profile.php?id={group_id}&sk=followers"
                                    if members_url not in current_groups_postlist:
                                        current_groups_postlist.append(members_url)
                                        print(f"找到群组 {len(current_groups_postlist)}: {members_url}")

                                        # Stop early once the quota is reached.
                                        if len(current_groups_postlist) >= post:
                                            break
                        except Exception as e:
                            print(f"处理群组链接时出错: {str(e)}")
                            continue

                    # Quota reached for this keyword?
                    if len(current_groups_postlist) >= post:
                        break

                    # Detect stalled loading (no new links after scrolling).
                    if current_count == previous_count:
                        scroll_attempts += 1
                        print(f"没有新内容加载，尝试次数: {scroll_attempts}/{max_scroll_attempts}")
                    else:
                        scroll_attempts = 0

                    previous_count = current_count

                    # Scroll to the bottom to trigger lazy loading.
                    if len(current_groups_postlist) < post:
                        await self.page.evaluate("""
                            window.scrollTo({
                                top: document.body.scrollHeight,
                                behavior: 'smooth'
                            });
                        """)
                        await asyncio.sleep(2)  # wait for new content to load

            except Exception as e:
                print(f"搜索过程中出错: {str(e)}")

            # Merge this keyword's results into the overall list.
            self.search_results.extend(current_groups_postlist)
            print(f"关键字 '{current_keyword}' 完成，找到 {len(current_groups_postlist)} 个群组")

        print(f"搜索完成，共找到 {len(self.search_results)} 个地址")
        return self.search_results

    def extract_fans_id(self, url):
        # 处理相对路径
        if url.startswith('/'):
            user_match = re.search(r'/user/(\d+)', url)
            if user_match:
                return user_match.group(1)
            return None

        # 解析完整URL
        parsed = urlparse(url)

        # 处理profile.php情况
        if parsed.path == '/profile.php':
            query_params = parse_qs(parsed.query)
            if 'id' in query_params:
                return query_params['id'][0]

        # 处理用户名情况
        if parsed.netloc == 'www.facebook.com':
            path_parts = parsed.path.strip('/').split('/')
            if path_parts and path_parts[0] != 'profile.php':
                return path_parts[0]

        return None

    async def getusers(self):
        """Crawl member names/ids from each group member page in params['addresses'].

        For every address: navigate, read the group's name and member count,
        then scroll the member list, de-duplicating by user id, until either
        ``params['crawl_count']`` users are collected (0 = unlimited — see the
        chained comparison note below) or scrolling stops yielding new rows.
        Results are written to ``data_<i>.csv`` per address.
        """
        addresses = self.params.get('addresses', [])
        if not addresses:
            print("没有提供地址列表")
            return

        for i in range(len(addresses)):
            url = addresses[i].strip()
            # One CSV output file per address.
            csv_filename = f'data_{i}.csv'
            in_csv_data = []
            print(f"开始爬取用户信息，地址: {url}")
            await self.robust_update_status(f"社團地址:{url}")
            await self.page.goto(url=url, wait_until='load')
            await asyncio.sleep(5)
            try:
                # Group title and member-count header (best effort).
                groups_name_selector = '//h1[@dir="auto"]//a[@role="link"]'
                groups_name_function = await self.page.wait_for_selector(groups_name_selector, timeout=10000)
                self.groups_name = await groups_name_function.inner_text()
                print(self.groups_name)
                groups_num_selector = '//span[@dir="auto"]/div//a[@role="link" and contains(text(), "成員")]'
                groups_num_function = await self.page.wait_for_selector(groups_num_selector, timeout=10000)
                self.groups_num = await groups_num_function.inner_text()
                print(self.groups_num)
                await self.robust_update_status(f"社團名:{self.groups_name} 社團人數：{self.groups_num}")
            except Exception as e:
                print(f"提取社团信息时出错: {str(e)}")
            await self.robust_update_status("开始爬取用户信息...")
            # Scroll to load more members.
            previous_count = 0
            current_count = 0
            scroll_attempts = 0
            max_scroll_attempts = 10
            users = []
            seen_user_ids = set()  # ids already collected, for de-duplication
            user_counter = 0  # counts unique users only

            while scroll_attempts < max_scroll_attempts:
                # Scroll to the bottom of the member list.
                await self.page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                await asyncio.sleep(3)

                # Member links currently rendered.
                user_links = await self.page.query_selector_all(
                    '//div[@role="list"]//a[@role="link" and @tabindex="-1" and contains(@href, "/user/") or @role="link" and @tabindex="-1" and contains(@href, "facebook.com/")]')
                current_count = len(user_links)
                print(f"滚动后用户数量: {current_count}")

                for n, link in enumerate(user_links):

                    try:
                        href = await link.get_attribute('href')
                        text = await link.get_attribute('aria-label')

                        if not href or not text or not text.strip():
                            continue

                        user_id = await self.extract_facebook_identifier(href)

                        # Skip users we have already recorded.
                        if user_id and user_id not in seen_user_ids:
                            seen_user_ids.add(user_id)
                            user_counter += 1  # only count newly-seen users
                            users.append({
                                'index': user_counter,
                                'name': text.strip(),
                                'user_id': user_id
                            })
                            print(f"{user_counter}：{user_id} {text.strip()}")
                            await self.robust_update_status(f"{user_counter}：{user_id} {text.strip()}")
                            in_csv_data.append([user_id, text.strip(), self.extract_group_id(url)])

                            # Chained comparison: counter >= target AND
                            # target != 0 (crawl_count 0 means "no limit").
                            if user_counter >= int(self.params.get('crawl_count')) != 0:
                                print(f"达到目标数量 {user_counter}，停止爬取")
                                break
                    except Exception as e:
                        print(f"提取用户信息时出错: {str(e)}")
                        continue
                # Target quota reached — leave the scroll loop too.
                if user_counter >= int(self.params.get('crawl_count')) != 0:
                    break

                if current_count == previous_count:
                    scroll_attempts += 1
                    print(f"用户数量未增加，尝试次数: {scroll_attempts}/{max_scroll_attempts}")
                else:
                    scroll_attempts = 0

                previous_count = current_count

                if scroll_attempts >= 3:  # stop after 3 scrolls with no new users
                    print("已加载所有用户")
                    break
            # Write this address's users to its CSV file.
            with open(csv_filename, 'w', newline='', encoding='utf-8') as csvfile:
                csv_writer = csv.writer(csvfile)
                csv_writer.writerow(['userid', 'username', 'societiesid'])  # header row
                csv_writer.writerows(in_csv_data)  # data rows
            print(f"爬取完成，共获取 {user_counter} 个用户信息")
            # return users

    async def getusers_fans(self):
        """Crawl follower names/ids from each fan-page URL in params['addresses'].

        Mirrors :meth:`getusers` for fan pages: navigate, read the page name
        and follower count, scroll the follower list de-duplicating by user
        id, stop at ``params['crawl_count']`` (0 = unlimited) or when
        scrolling stalls, then write ``data_<i>.csv`` per address.
        """
        addresses = self.params.get('addresses', [])
        if not addresses:
            print("没有提供地址列表")
            return

        for i in range(len(addresses)):
            url = addresses[i].strip()
            # One CSV output file per address.
            csv_filename = f'data_{i}.csv'
            in_csv_data = []
            print(f"开始爬取用户信息，地址: {url}")
            await self.robust_update_status(f"粉絲專頁地址:{url}")
            await self.page.goto(url=url, wait_until='load')
            await asyncio.sleep(5)
            try:
                # Page title and follower-count header (best effort; the
                # class-based selectors are brittle against FB UI changes).
                fans_name_selector = '//div[@class="x1e56ztr x1xmf6yo"]//span[@dir="auto"]//h1[contains(@class, "html-h1")]'
                fans_name_function = await self.page.wait_for_selector(fans_name_selector, timeout=10000)
                self.groups_name = await fans_name_function.inner_text()
                print(self.groups_name)
                fans_num_selector = '//span[@dir="auto"]//a[@role="link" and contains(text(), "追蹤者")]//strong'
                fans_num_function = await self.page.wait_for_selector(fans_num_selector, timeout=10000)
                self.groups_num = await fans_num_function.inner_text()
                print(self.groups_num)
                await self.robust_update_status(f"粉絲專頁名:{self.groups_name} 粉絲人數：{self.groups_num}")
            except Exception as e:
                print(f"提取粉丝专页信息时出错: {str(e)}")
            await self.robust_update_status("开始爬取用户信息...")
            # Scroll to load more followers.
            previous_count = 0
            current_count = 0
            scroll_attempts = 0
            max_scroll_attempts = 10
            users = []
            seen_user_ids = set()  # ids already collected, for de-duplication
            user_counter = 0  # counts unique users only

            while scroll_attempts < max_scroll_attempts:
                # Scroll to the bottom of the follower list.
                await self.page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                await asyncio.sleep(3)

                # Follower links currently rendered.
                user_links = await self.page.query_selector_all(
                    '//div[@class="x78zum5 x1q0g3np x1a02dak x1qughib"]//a[@role="link" and @tabindex="0" and contains(@href, "/profile.php?id=") or @role="link" and @tabindex="0" and contains(@href, "facebook.com/")]')

                current_count = len(user_links)
                print(f"滚动后用户数量: {current_count}")

                for n, link in enumerate(user_links):
                    try:
                        href = await link.get_attribute('href')
                        # Prefer the link's own text content for the name.
                        text = await link.inner_text()

                        # Fallbacks when inner_text is empty:
                        if not text or not text.strip():
                            # 1) aria-label attribute
                            text = await link.get_attribute('aria-label') or ''
                            # 2) text of a child <span>
                            if not text.strip():
                                span_element = await link.query_selector('span')
                                if span_element:
                                    text = await span_element.inner_text()

                        if not href or not text or not text.strip():
                            continue

                        user_id = await self.extract_facebook_identifier(href)

                        # Skip users we have already recorded.
                        if user_id and user_id not in seen_user_ids:
                            seen_user_ids.add(user_id)
                            user_counter += 1  # only count newly-seen users
                            users.append({
                                'index': user_counter,
                                'name': text.strip(),
                                'user_id': user_id
                            })
                            print(f"{user_counter}：{user_id} {text.strip()}")
                            await self.robust_update_status(f"{user_counter}：{user_id} {text.strip()}")
                            in_csv_data.append([user_id, text.strip(), await self.extract_facebook_identifier(url)])

                            # Chained comparison: counter >= target AND
                            # target != 0 (crawl_count 0 means "no limit").
                            if user_counter >= int(self.params.get('crawl_count')) != 0:
                                print(f"达到目标数量 {user_counter}，停止爬取")
                                break
                    except Exception as e:
                        print(f"提取用户信息时出错: {str(e)}")
                        continue
                # Target quota reached — leave the scroll loop too.
                if user_counter >= int(self.params.get('crawl_count')) != 0:
                    break

                if current_count == previous_count:
                    scroll_attempts += 1
                    print(f"用户数量未增加，尝试次数: {scroll_attempts}/{max_scroll_attempts}")
                else:
                    scroll_attempts = 0

                previous_count = current_count

                if scroll_attempts >= 3:  # stop after 3 scrolls with no new users
                    print("已加载所有用户")
                    break
            # Write this address's users to its CSV file.
            with open(csv_filename, 'w', newline='', encoding='utf-8') as csvfile:
                csv_writer = csv.writer(csvfile)
                csv_writer.writerow(['userid', 'username', 'societiesid'])  # header row
                csv_writer.writerows(in_csv_data)  # data rows
            print(f"爬取完成，共获取 {user_counter} 个用户信息")

    async def getusers_like(self):
        """Open the Facebook home feed and like sponsored posts.

        Reads ``self.params['post_count']`` as the number of posts to like
        and delegates the per-post scroll/like loop to :meth:`cycle_post`.
        (Two large commented-out variants of that loop were removed; the
        live logic already lives in cycle_post.)
        """
        await self.page.goto(url="https://www.facebook.com/", wait_until='load', timeout=50000)
        title = await self.page.title()
        if "Facebook" in title:
            await asyncio.sleep(3)
        else:
            print(f"標題未包含 'Facebook'，當前標題: {title}，等待10秒後重試...")
            await asyncio.sleep(10)  # give a possible redirect time to land

        num_posts = int(self.params["post_count"])
        like_count = 0
        seek_count = 0

        # The per-post interaction loop is shared with other entry points.
        like_count, seek_count = await self.cycle_post(num_posts, like_count, seek_count)

    async def cycle_post(self, num_posts, like_count, seek_count):
        """Walk the feed post by post (via aria-posinset) and like sponsored posts.

        Args:
            num_posts: target number of sponsored posts to like.
            like_count: running count of sponsored posts liked so far.
            seek_count: running count of consecutive non-sponsored posts seen.

        Returns:
            (like_count, seek_count): the updated running totals.
        """
        i = 1
        # Bug fix: the original loop never terminated when the feed stopped
        # producing posts — every wait_for_selector call timed out (15 s each),
        # forever. Bail out after enough consecutive lookup failures.
        consecutive_misses = 0
        max_consecutive_misses = 10
        print(f"准备与 {num_posts} 个帖子互动")
        while True:
            try:
                selector = f'//div[contains(@class, "x1hc1fzr x1unhpq9")]/div//div[@aria-posinset={i}]'
                element = await self.page.wait_for_selector(selector, timeout=15000)
                if element:
                    consecutive_misses = 0  # feed is still producing posts
                    await element.scroll_into_view_if_needed()
                    print(f"第 {i} 个帖子")
                    await self.robust_update_status(f"第 {i} 个帖子")
                    like_count, seek_count = await self.sponsor_like(selector, like_count, seek_count)
                    if like_count >= num_posts:
                        break
            except Exception as e:
                print(f"处理第 {i} 个帖子时出错: {str(e)}")
                consecutive_misses += 1
                if consecutive_misses >= max_consecutive_misses:
                    print(f"连续 {consecutive_misses} 次找不到帖子，提前结束")
                    break
            finally:
                i += 1
        return like_count, seek_count

    async def getusers_like_url(self, url):
        """Visit each post URL in *url* directly and harvest its reaction users.

        For every link: navigate, derive the post id (also used as the post
        name), wait longer when the page title suggests a checkpoint/login
        page, then open the reactions dialog and scrape the users.
        """
        for link in url:
            await self.page.goto(url=link, wait_until='load', timeout=50000)

            self.supportId = await self.get_support_id(link)
            self.post_name = self.supportId
            title = await self.page.title()
            if "Facebook" not in title:
                # Title without "Facebook" usually means a redirect/checkpoint.
                print(f"標題未包含 'Facebook'，當前標題: {title}，等待10秒後重試...")
                await asyncio.sleep(10)
            else:
                await asyncio.sleep(3)
            await self.sponsor_like_click('//div[@role="dialog"]')

    async def sponsor_like(self, selector, like_count, seek_count):
        """Check whether the post at *selector* is sponsored; if so, record it
        and harvest its reaction users.

        A span containing "助" (from 贊助, "sponsored") marks a sponsored post.
        On a hit: capture the post link, id and author name, bump like_count,
        reset seek_count, open the reactions dialog, and dismiss any warning
        popup. On a miss (selector timeout) just bump seek_count.

        Returns:
            (like_count, seek_count): the updated running totals.
        """
        try:
            badge = await self.page.wait_for_selector(selector + '//span[contains(text(), "助")]',
                                                      timeout=2000)
            if badge:
                await badge.scroll_into_view_if_needed()
                print("出現啦")
                link_xpath = selector + '//h4//a'
                link_el = await self.page.wait_for_selector(link_xpath)
                href = await link_el.get_attribute("href")
                self.supportId = await self.get_support_id(href)

                print('获取到贴文链接', type(href))
                name_el = await self.page.wait_for_selector(link_xpath + ' //span')
                self.post_name = await name_el.inner_text()

                print('获取到贴文id', self.supportId)
                print('获取到贴文name', self.post_name)

                await self.robust_update_status(f"出現贊助貼文.")
                like_count += 1
                seek_count = 0  # reset the "not sponsored" streak
                await self.sponsor_like_click(selector)
                await self.warning_prompt()
            await asyncio.sleep(random.uniform(5, 10))

        except Exception as e:
            # Timeout on the badge selector means this post is not sponsored.
            print(f"這个帖子不是贊助: {str(e)}")
            seek_count += 1
        return like_count, seek_count

    async def sponsor_like_click(self, selector):
        """Open the reactions dialog of the post at *selector* and scrape it.

        Clicks the element immediately following the reactions toolbar (the
        reaction count), which opens the "people who reacted" dialog, then
        delegates to get_sponsor_user() to collect the users.
        """
        try:
            sponsor_element = await self.page.wait_for_selector(
                selector + '//span[@role="toolbar"]/following-sibling::*[1]',
                timeout=4000)
            if sponsor_element:
                await sponsor_element.scroll_into_view_if_needed()
                await sponsor_element.click()
                await self.get_sponsor_user()
            await asyncio.sleep(random.uniform(5, 10))

        except Exception as e:
            # Bug fix: the original message ("這个帖子不是贊助") was copy-pasted
            # from sponsor_like() and misleading here — this failure means the
            # reactions dialog could not be opened, not "post is not sponsored".
            print(f"打開讚數彈窗失敗: {str(e)}")

    async def get_sponsor_user(self):
        """Scrape the users listed in the currently open reactions dialog.

        Repeatedly scrolls the dialog, collects profile-photo links,
        de-duplicates them by extracted user id, stops when the configured
        ``collect_count`` is reached (0 means unlimited) or when no new users
        appear after several scrolls, then persists the users and a post
        summary through ``db_manager``.
        """
        print("开始获取赞助帖子用户...")
        await self.robust_update_status("开始获取赞助帖子用户...")

        # Initialise all loop state outside the scroll loop.
        previous_count = 0
        current_count = 0
        scroll_attempts = 0        # consecutive scrolls that yielded no new links
        max_scroll_attempts = 10   # hard cap so the loop always terminates
        users = []                 # harvested {'index', 'name', 'user_id'} dicts
        seen_user_ids = set()      # user ids already processed (de-dup)
        user_counter = 0           # total unique users collected so far
        in_csv_data = []           # only used by the commented-out CSV export below

        # Bounded scroll loop — never scrolls forever.
        while scroll_attempts < max_scroll_attempts:
            await self.dialog_()  # scroll the dialog to lazy-load more entries
            await asyncio.sleep(3)

            # Collect every profile-photo link currently rendered in the dialog.
            # Matches /user/, /profile.php?id= and plain facebook.com/ hrefs.
            user_links = await self.page.query_selector_all(
                '//div[@role="dialog"]//div[@data-visualcompletion="ignore-dynamic"]//a[contains(@aria-label, "大頭貼照") and @role="link" and @tabindex="0" and contains(@href, "/user/") or contains(@aria-label, "大頭貼照") and @role="link" and @tabindex="0" and contains(@href, "/profile.php?id=") or contains(@aria-label, "大頭貼照") and @role="link" and @tabindex="0" and contains(@href, "facebook.com/")]'
            )

            current_count = len(user_links)
            print(f"滚动后用户数量: {current_count}")

            # Process the links found in this pass; only unseen ids count.
            new_users_found = 0
            for link in user_links:
                try:
                    href = await link.get_attribute('href')
                    text = await link.get_attribute('aria-label')

                    if not href or not text or not text.strip():
                        continue

                    # print(f"找到链接: {href}, {text}")
                    user_id = await self.extract_facebook_identifier(href)

                    # Only record users we have not seen before.
                    if user_id and user_id not in seen_user_ids:
                        seen_user_ids.add(user_id)
                        user_counter += 1
                        new_users_found += 1

                        # Strip the "的大頭貼照" ("...'s profile photo") suffix.
                        clean_name = text.strip().replace("的大頭貼照", "")

                        users.append({
                            'index': user_counter,
                            'name': clean_name,
                            'user_id': user_id
                        })

                        print(f"{user_counter}：{user_id} {clean_name}")
                        await self.robust_update_status(f"{user_counter}：{user_id} {clean_name}")

                        # # 添加到CSV数据
                        # # 注意：这里需要根据实际情况获取group_id
                        # group_id = "sponsor_post"  # 或者从其他地方获取
                        # in_csv_data.append([user_id, clean_name, group_id])

                        # Chained comparison: true when counter >= target AND
                        # target != 0 (collect_count of 0 means "unlimited").
                        if user_counter >= int(self.params.get('collect_count', 100)) != 0:
                            print(f"达到目标数量 {user_counter}，停止爬取")
                            break

                except Exception as e:
                    print(f"提取用户信息时出错: {str(e)}")
                    continue

            # Target reached inside the for-loop: leave the scroll loop too.
            if user_counter >= int(self.params.get('collect_count', 100)) != 0:
                break

            # Track whether scrolling loaded any new links at all.
            if current_count == previous_count:
                scroll_attempts += 1
                print(f"用户数量未增加，尝试次数: {scroll_attempts}/{max_scroll_attempts}")
            else:
                scroll_attempts = 0  # progress was made — reset the stall counter
                print(f"找到 {new_users_found} 个新用户")

            previous_count = current_count

            # Several stalled scrolls in a row: assume the list is exhausted.
            if scroll_attempts >= 3:
                print("已加载所有可用用户")
                break

            # Brief pause before the next scroll.
            await asyncio.sleep(2)

        # # 将数据写入CSV文件
        # if in_csv_data:
        #     csv_filename = f'sponsor_users_{int(time.time())}.csv'
        #     with open(csv_filename, 'w', newline='', encoding='utf-8') as csvfile:
        #         csv_writer = csv.writer(csvfile)
        #         csv_writer.writerow(['userid', 'username', 'fansid'])  # 写入表头
        #         csv_writer.writerows(in_csv_data)  # 写入数据
        #     print(f"数据已保存到 {csv_filename}")

        # Persist the harvested data.

        print('获取的数据', users, 'id')
        print(f"爬取完成，共获取 {user_counter} 个用户信息")
        user_data = [
            (user['user_id'], user['name'], self.supportId)
            for user in users
        ]
        self.post_user_cunt = db_manager.insert_supportUser(user_data)

        # Only record the post summary when at least one user was inserted.
        if self.post_user_cunt > 0:
            post_info = {
                'Supportid': self.supportId,
                'SupportName': self.post_name,
                'number': self.post_user_cunt
            }
            print('提交数据', post_info)
            db_manager.insert_post_info(post_info)
        else:
            print('数据为空不提交')
        # return users

    async def extract_facebook_identifier(self, url):
        """Extract a stable user identifier from a Facebook profile URL.

        Handles, in order:
          * relative links:  ``/user/<id>``  and  ``/profile.php?id=<id>``
          * absolute links:  ``.../profile.php?id=<id>``
          * absolute vanity links: ``https://www.facebook.com/<username>``

        Returns:
            The numeric id or username as a string, or None when no
            identifier can be recognized.
        """
        if not url:
            return None

        # Relative links (no scheme/host).
        if url.startswith('/'):
            user_match = re.search(r'/user/(\d+)', url)
            if user_match:
                return user_match.group(1)
            # Bug fix: relative /profile.php?id=... links (which the dialog
            # selector explicitly matches) were previously dropped here.
            parsed = urlparse(url)
            if parsed.path == '/profile.php':
                query_params = parse_qs(parsed.query)
                if 'id' in query_params:
                    return query_params['id'][0]
            return None

        # Absolute URL from here on.
        parsed = urlparse(url)

        # profile.php link: the ``id`` query parameter is the identifier.
        if parsed.path == '/profile.php':
            query_params = parse_qs(parsed.query)
            if 'id' in query_params:
                return query_params['id'][0]

        # Vanity URL: the first non-empty path segment is the username.
        # Bug fixes: accept any facebook.com host (m., mbasic., bare domain)
        # rather than only www.facebook.com, and never return an empty string
        # for a bare '/' path.
        host = parsed.netloc
        if host == 'facebook.com' or host.endswith('.facebook.com'):
            path_parts = parsed.path.strip('/').split('/')
            if path_parts and path_parts[0] and path_parts[0] != 'profile.php':
                return path_parts[0]

        return None

    # 贴文id
    async def get_support_id(self, url):
        """Pull a post (or profile) identifier out of a Facebook URL.

        Tries the known URL shapes in priority order and returns the first
        match; returns None (implicitly) when nothing matches or when a
        malformed URL makes one of the splits fail.
        """
        try:
            print(f"正在处理URL: {url}")
            if '/posts/' in url:
                return url.split('/posts/')[1].split('/')[0]
            if '/permalink/' in url:
                return url.split('/permalink/')[1]
            if 'multi_permalinks=' in url:
                return url.split('/?multi_permalinks=')[1].split('&')[0]
            if 'permalink.php?' in url:
                return url.split('/permalink.php?')[1]
            if '?__cft__' in url:
                return url.split('?__cft__')[0].split('https://www.facebook.com/')[1]
            if 'profile.php?id=' in url:
                return url.split('profile.php?id=')[1].split('&__cft__')[0]
        except Exception as e:
            print(f"处理URL时出错: {str(e)}")

    async def dialog_(self):
        """Scroll the currently open dialog to lazy-load more of its content.

        Waits for a dialog element, moves the mouse to its geometric center,
        then spins the wheel downwards. All failures are logged and swallowed
        so the caller's scroll loop keeps running.
        """
        try:
            dialog = await self.page.wait_for_selector('div[role="dialog"]', timeout=10000)
            if not dialog:
                print("未找到弹窗")
                return
        except Exception as e:
            print(f"等待弹窗出现时出错: {str(e)}")
            return

        try:
            # Compute the dialog's center from its bounding box.
            box = await dialog.bounding_box()
            if box:
                mid_x = box['x'] + box['width'] / 2
                mid_y = box['y'] + box['height'] / 2
                # The wheel only scrolls the dialog if the cursor is over it.
                await self.page.mouse.move(mid_x, mid_y)
                await self.page.mouse.wheel(0, 5000)  # scroll down 5000 px
                print(f"鼠标滚动，位置: ({mid_x}, {mid_y})")
        except Exception as e:
            print(f"鼠标滚动出错: {str(e)}")

    async def home_post(self):
        """Open the Facebook home page and disable noisy chat settings.

        Opens the chat "選項" (options) menu, then — when the corresponding
        enabled toggles exist — turns off the call sound ("來電音效") and the
        message pop-up ("彈出新訊息"). Each stage is wrapped in try/except
        because the menus only appear for some account/UI variants; the exact
        click order below matters for the menu to stay open.
        """
        await self.page.goto(url="https://www.facebook.com/", wait_until='load')
        await asyncio.sleep(10)
        try:
            Option_selector = '//div[@data-visualcompletion="ignore-dynamic"]//div[@role="button" and contains(@aria-label, "選項")]'
            Option_but = await self.page.wait_for_selector(Option_selector, timeout=10000)
            if Option_but:
                await Option_but.scroll_into_view_if_needed()
                await asyncio.sleep(1)
                await Option_but.click()
                await asyncio.sleep(random.uniform(5, 8))
                # Call-sound toggle ("來電音效"); only matched when enabled.
                disabled_selector = '//div[@aria-label="聊天室設定"]//div[@aria-checked="true" and contains(@aria-label, "來電音效")]'
                disabled_but = await self.page.wait_for_selector(disabled_selector, timeout=10000)
                if disabled_but:
                    await disabled_but.scroll_into_view_if_needed()
                    await asyncio.sleep(1)
                    await disabled_but.click()
                    await asyncio.sleep(random.uniform(3, 6))
                    # Third entry of the dialog's settings list.
                    input_selector = '//div[@role="dialog"]//div[@role="list"]/div[3]'
                    disabled_but = await self.page.wait_for_selector(input_selector, timeout=10000)
                    if disabled_but:
                        await disabled_but.scroll_into_view_if_needed()
                        await asyncio.sleep(1)
                        await disabled_but.click()
                        await asyncio.sleep(random.uniform(3, 5))
                        # Confirm with the "停用" (disable) button.
                        disabled_selector = '//div[@role="dialog"]//div[@role="button" and contains(@aria-label, "停用")]'
                        disabled_but = await self.page.wait_for_selector(disabled_selector, timeout=10000)
                        if disabled_but:
                            await disabled_but.scroll_into_view_if_needed()
                            await asyncio.sleep(1)
                            await disabled_but.click()
                            await asyncio.sleep(random.uniform(4, 6))
                            # Re-click the options button so the next lookup
                            # (the pop-up-message toggle) can be found.
                            await Option_but.scroll_into_view_if_needed()
                            await asyncio.sleep(1)
                            await Option_but.click()
                            await asyncio.sleep(random.uniform(5, 8))
        except Exception as e:
            print(f"没有找到來電音效: {str(e)}")
        try:
            # Message pop-up toggle ("彈出新訊息"); only matched when enabled.
            disabled_selector = '//div[@aria-label="聊天室設定"]//div[@aria-checked="true" and contains(@aria-label, "彈出新訊息")]'
            disabled_but = await self.page.wait_for_selector(disabled_selector, timeout=10000)
            if disabled_but:
                await disabled_but.scroll_into_view_if_needed()
                await asyncio.sleep(1)
                await disabled_but.click()
                await asyncio.sleep(random.uniform(3, 6))
            await Option_but.click()
        except Exception as e:
            print(f"没有找到新訊息关闭: {str(e)}")

    async def class_fb_set(self):
        """Open the chat "選項" (options) menu and switch off the message
        pop-up ("彈出新訊息") toggle when it is enabled, then close the menu."""
        try:
            options_btn = await self.page.wait_for_selector(
                '//div[@data-visualcompletion="ignore-dynamic"]//div[@role="button" and contains(@aria-label, "選項")]',
                timeout=10000)
            if options_btn:
                await options_btn.scroll_into_view_if_needed()
                await asyncio.sleep(1)
                await options_btn.click()
                await asyncio.sleep(random.uniform(5, 8))
                # The toggle only matches while it is checked (enabled).
                popup_toggle = await self.page.wait_for_selector(
                    '//div[@aria-label="聊天室設定"]//div[@aria-checked="true" and contains(@aria-label, "彈出新訊息")]',
                    timeout=10000)
                if popup_toggle:
                    await popup_toggle.scroll_into_view_if_needed()
                    await asyncio.sleep(1)
                    await popup_toggle.click()
                    await asyncio.sleep(random.uniform(5, 8))
                # Click the options button again to close the menu.
                await options_btn.click()
        except Exception as e:
            print(f"没有找到: {str(e)}")

    async def warning_prompt(self):
        """Dismiss a warning dialog by trying several close-button selectors.

        Returns True as soon as one variant is found and clicked; returns
        None (implicitly) when no close button could be located.
        """
        await asyncio.sleep(4)
        candidates = (
            '//div[@role="dialog"]//div[@aria-label="關閉"]',   # variant 1
            '//div[@aria-label="關閉" and @aria-hidden="false"]',  # variant 2
            '//div[@aria-label="關閉"]',                         # variant 3
        )
        for xpath in candidates:
            try:
                btn = await self.page.wait_for_selector(xpath, timeout=3000)
                if btn:
                    await btn.scroll_into_view_if_needed()
                    await asyncio.sleep(1)
                    await btn.click()
                    print("彈窗關閉！")
                    return True
            except Exception:
                continue  # this variant did not match — try the next one

    # 添加新的辅助方法
    def minimize_browser_window(self):
        """Minimize the browser window using a platform-specific mechanism.

        Windows: minimize the current foreground window via the win32 API.
        macOS: hide the Chrome process through AppleScript.
        Linux: intentionally unsupported (would require WM integration).
        """
        system = platform.system()
        try:
            if system == "Windows":
                import win32gui, win32con
                time.sleep(1)  # give the window a moment to be created
                hwnd = win32gui.GetForegroundWindow()
                if hwnd:
                    win32gui.ShowWindow(hwnd, win32con.SW_MINIMIZE)
            elif system == "Darwin":  # macOS
                import subprocess
                subprocess.run(["osascript", "-e",
                                'tell application "System Events" to set visible of process "Google Chrome" to false'])
        except Exception as e:
            print(f"最小化窗口失败: {e}")

    async def force_minimize_browser(self):
        """Best-effort minimize/hide of the automated browser window.

        First tries to move every Playwright page's window off-screen and
        shrink it via JavaScript (browsers may ignore scripted window moves),
        then falls back to the platform-specific minimize helper.
        """
        try:
            if self.browser:
                # All pages of the first context (Playwright keeps one window
                # per context in this setup).
                pages = self.browser.contexts[0].pages if self.browser.contexts else []
                for page in pages:
                    await page.evaluate("""() => {
                        if (window.moveTo && window.resizeTo) {
                            window.moveTo(-2000, -2000);
                            window.resizeTo(1, 1);
                        }
                    }""")
        except Exception as e:
            # Bug fix: this was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt and asyncio.CancelledError (BaseException
            # subclasses). Keep it best-effort, but narrow the catch and log.
            print(f"通过脚本最小化窗口失败: {e}")

        # Always fall back to the OS-level approach as well.
        self.minimize_browser_window()


def parse_bool(type_data):
    """Parse a loosely-typed flag into a bool.

    The value is stringified, lowercased and stripped, then matched against
    the accepted truthy spellings. Everything else (None, '0', 'false',
    '', arbitrary objects) is False.
    """
    # Bug fix: the original tuple listed 'yes' twice — deduplicated,
    # behavior unchanged.
    return str(type_data).lower().strip() in ('true', '1', 'yes')


def get_chrome_path():
    """Locate the Google Chrome executable for the current platform.

    Windows: registry lookup first, then the common install directories.
    macOS: the standard application-bundle path.
    Linux: PATH lookup, then the conventional /usr/bin location.

    Returns:
        Absolute path to the Chrome binary as a string, or None when Chrome
        cannot be found (or the platform is unsupported).
    """
    system = platform.system()

    if system == "Windows":
        # Prefer the install path recorded in the registry.
        try:
            import winreg  # local import: this module only exists on Windows
            reg_path = r"SOFTWARE\Google\Chrome\BLBeacon"
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, reg_path) as key:
                install_path = winreg.QueryValueEx(key, "InstallPath")[0]
                chrome_path = os.path.join(install_path, "chrome.exe")
                if os.path.exists(chrome_path):
                    return chrome_path
        except OSError:
            # Bug fix: the original only caught FileNotFoundError, but the
            # winreg calls can raise other OSError subclasses (e.g. a missing
            # value or access error). Fall through to the default paths.
            pass

        # Common default install locations.
        possible_paths = [
            os.path.expandvars(r"%PROGRAMFILES%\Google\Chrome\Application\chrome.exe"),
            os.path.expandvars(r"%PROGRAMFILES(X86)%\Google\Chrome\Application\chrome.exe"),
            os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome\Application\chrome.exe"),
        ]
        for path in possible_paths:
            if os.path.exists(path):
                return path
        return None

    if system == "Darwin":
        # Default macOS application-bundle path.
        chrome_path = "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
        return chrome_path if os.path.exists(chrome_path) else None

    if system == "Linux":
        # PATH lookup first, then the conventional location.
        chrome_path = shutil.which("google-chrome") or "/usr/bin/google-chrome"
        return chrome_path if os.path.exists(chrome_path) else None

    return None
