import asyncio
import json
import os
from time import sleep

from src.utils.capture_web import scrape_website


class WebScraperService:
    """Scrape web pages (via ``scrape_website``) and store screenshots locally."""

    def __init__(self, screenshot_path):
        """Create the service and ensure the screenshot directory exists.

        Args:
            screenshot_path: Directory where screenshots will be written.
        """
        self.screenshot_path = screenshot_path
        # Idempotent: an already-existing directory is left untouched.
        os.makedirs(self.screenshot_path, exist_ok=True)

    async def scrape(self, url):
        """Scrape a single URL.

        Returns:
            Whatever ``scrape_website`` produces for this URL
            (shape defined by src.utils.capture_web — not visible here).
        """
        result = await scrape_website(url, self.screenshot_path)
        return result

    def run_scrape(self, url):
        """Synchronous wrapper around scrape(): print the result as JSON.

        Returns:
            The scrape result (now returned for programmatic use, consistent
            with run_scrape_multiple; previously the value was discarded).
        """
        result = asyncio.run(self.scrape(url))
        print(json.dumps(result, indent=4))
        return result

    async def scrape_multiple(self, urls):
        """Scrape URLs concurrently in batches, pausing 1s between batches.

        Args:
            urls: Sequence of URLs to scrape.

        Returns:
            List of per-URL results, in the same order as ``urls``.
        """
        batch_size = 5
        results = []
        for i in range(0, len(urls), batch_size):
            batch_urls = urls[i:i + batch_size]
            tasks = [self.scrape(url) for url in batch_urls]
            batch_results = await asyncio.gather(*tasks)
            results.extend(batch_results)
            # BUG FIX: the original called time.sleep(1) here, which blocks
            # the whole event loop inside a coroutine; use the asyncio-aware
            # sleep instead. Also skip the pause after the final batch --
            # there is nothing left to rate-limit.
            if i + batch_size < len(urls):
                await asyncio.sleep(1)
        return results

    def run_scrape_multiple(self, urls):
        """Synchronous wrapper around scrape_multiple(): print each result.

        Returns:
            The list of scrape results.
        """
        results = asyncio.run(self.scrape_multiple(urls))
        for result in results:
            # BUG FIX: the original built the JSON string but never printed
            # it -- the expression's value was silently discarded.
            print(json.dumps(result, indent=4))
        return results


# Example invocation
if __name__ == "__main__":
    target_urls = [
        "https://www.baidu.com",
        "https://www.github.com",
        "https://www.google.com",
        "https://www.qq.com",
        "https://www.jd.com",
        "https://www.weibo.com",
        "https://www.douyin.com",
    ]
    scraper = WebScraperService("D:\\code\\")
    scraper.run_scrape_multiple(target_urls)
