import asyncio

from bs4 import BeautifulSoup

from pyppeteer_util.base_pyppeteer import BasePyppeteer


class VideoScraper(BasePyppeteer):
    """Scrape a JavaScript-rendered video listing page.

    Loads ``self.url`` in a headless browser, waits until the client-side
    template has actually been filled in, then parses the resulting HTML
    with BeautifulSoup into a list of video dicts.
    """

    async def fetch_videos(self, selector):
        """Navigate to ``self.url`` and return the fully rendered HTML.

        Args:
            selector: CSS selector of the listing container. NOTE(review):
                currently unused — the readiness check below is hard-coded
                to ``.text-truncate.title``; confirm whether this parameter
                can be wired into the wait condition or dropped.

        Returns:
            str: the page's HTML once the dynamic content has loaded.

        Raises:
            pyppeteer.errors.TimeoutError: if the listing has not rendered
                within 30 seconds.
        """
        page = await self.setup_page()
        try:
            await page.goto(self.url)

            # The listing is populated by client-side JS and a plain
            # waitForSelector occasionally fires while the cards still hold
            # un-rendered template placeholders. Instead, poll until the
            # first title link's href differs from the raw template value
            # '/video/v.vId', i.e. the data has really been substituted in.
            await page.waitForFunction(
                '''
                () => {
                    try{
                        const element = document.querySelector(".text-truncate.title");
                        console.log(element)
                        return element.getAttribute('href') !== '/video/v.vId';
                    }catch(error){
                        console.log(error);
                        return false;
                    }
                }
                ''',
                {'timeout': 30000}  # give the page up to 30 s to render
            )

            return await page.content()
        finally:
            # Always release the tab, even if goto() or the wait above
            # raises — otherwise the page leaks until browser shutdown.
            await page.close()

    async def start(self):
        """Run the full scrape and return the parsed video entries.

        Returns:
            list[dict]: one dict per video card with keys ``video_name``,
            ``video_url``, ``image_url`` and ``play_time`` (any of which
            may be None when the card lacks that element); an empty list
            when nothing could be fetched or an error occurred.
        """
        try:
            path = self.get_chrome_path()
            await self.create_browser(path)
            html_content = await self.fetch_videos('.colVideoList')
            if not html_content:
                return []

            soup = BeautifulSoup(html_content, 'html.parser')
            videos = []
            for card in soup.find_all('div', class_='colVideoList'):
                title, href, image_url, play_time = None, None, None, None

                title_element = card.find('a', class_='title')
                if title_element:
                    # .get() instead of [] so a missing attribute doesn't
                    # raise KeyError and abort the whole scrape.
                    title = title_element.get('title', '').strip()
                    if not title:
                        # An empty title is treated as an un-rendered
                        # placeholder card and ends the scan.
                        # NOTE(review): consider `continue` if partially
                        # rendered pages are possible — confirm against
                        # the live markup.
                        break
                    href = title_element.get('href')

                img_element = card.find('img', class_='img')
                if img_element:
                    image_url = img_element.get('src')

                play_time_element = card.find('small', class_='layer')
                if play_time_element:
                    play_time = play_time_element.text.strip()

                videos.append({
                    "video_name": title,
                    "video_url": href,
                    "image_url": image_url,
                    "play_time": play_time,
                })
            return videos
        except Exception as e:
            print(f'Error: {e}')
            # Keep the return type consistent for callers: an empty list
            # rather than an implicit None.
            return []
        finally:
            await self.close_browser()


async def main():
    """Scrape the hot-videos listing and print each parsed entry."""
    scraper = VideoScraper('https://91porn.porn/videos?page=1&type=hot')
    results = await scraper.start()
    for item in results or []:
        print(item)


# Script entry point: run the scraper inside a fresh asyncio event loop.
if __name__ == '__main__':
    asyncio.run(main())
