import hashlib
import os
import time

from pyppeteer import launch


async def scrape_website(url, screenshot_path):
    """Visit *url* in a headless browser and collect page metadata plus a screenshot.

    Args:
        url: The URL to scrape.
        screenshot_path: Directory in which the screenshot PNG is written.

    Returns:
        dict with keys: url, encoding, is_valid, request_headers, server,
        redirected_url, status_code, load_time_ms, favicon, title,
        screenshot_path, page_content. On any failure a fallback dict is
        returned with status_code 0 and is_valid False (never raises).
    """
    browser = None
    try:
        # Launch a 1920x1080 headless browser and ignore SSL certificate errors.
        # NOTE: raw string for the Windows path — the original "F:\workspace..."
        # relied on Python leaving the invalid "\w" escape literal.
        browser = await launch(headless=True,
                               args=["--no-sandbox", "--window-size=1920,1080", "--ignore-certificate-errors"],
                               executablePath=r"F:\workspace\chrome-win\chrome.exe")
        page = await browser.newPage()

        # Match the viewport to the window size.
        await page.setViewport({'width': 1920, 'height': 1080})

        start_time = time.time()
        # Wait until the network is (almost) idle, with a 30-second timeout.
        response = await page.goto(url, {'waitUntil': 'networkidle2', 'timeout': 30000})
        load_time = round((time.time() - start_time) * 1000)  # load time in ms

        # Character encoding from the Content-Type header,
        # e.g. "text/html; charset=utf-8" -> "utf-8".
        content_type = response.headers.get("content-type", "")
        encoding = content_type.split("charset=")[-1] if "charset=" in content_type else "unknown"

        # Headers that were sent with the (final) request.
        request_headers = response.request.headers

        # URL after any redirects.
        final_url = page.url

        # Page title.
        title = await page.title()

        # Favicon, if one is declared in the document head.
        favicon = await page.evaluate("""
               () => {
                   let link = document.querySelector("link[rel~='icon']");
                   return link ? link.href : null;
               }
           """)

        # Server header hints at the hosting container / stack.
        server = response.headers.get("server", "unknown")

        # HTTP status code.
        status_code = response.status
        # Full rendered page source.
        page_content = await page.content()
        # Name the screenshot after the MD5 of the URL: stable and unique per URL.
        url_md5 = hashlib.md5(url.encode()).hexdigest()
        screenshot_filename = os.path.join(screenshot_path, f"{url_md5}.png")

        # Capture only the visible viewport.
        await page.screenshot({'path': screenshot_filename, 'clip': {'x': 0, 'y': 0, 'width': 1920, 'height': 1080}})

        # Assemble the result.
        return {
            "url": url,
            "encoding": encoding,
            "is_valid": status_code == 200,
            "request_headers": request_headers,
            "server": server,
            "redirected_url": final_url,
            "status_code": status_code,
            "load_time_ms": load_time,
            "favicon": favicon,
            "title": title,
            "screenshot_path": screenshot_filename,
            "page_content": page_content  # rendered page source
        }
    except Exception as e:
        print(f"Error scraping {url}: {e}")
        # status_code 0 signals that the page could not be fetched at all.
        return {
            "url": url,
            "encoding": "unknown",
            "is_valid": False,
            "request_headers": {},
            "server": "unknown",
            "redirected_url": url,
            "status_code": 0,
            "load_time_ms": 0,
            "favicon": None,
            "title": None,
            "screenshot_path": None,
            "page_content": None  # no page source available
        }
    finally:
        # Always shut the browser down — the original only closed it on the
        # success path, leaking a Chrome process on every failed scrape.
        if browser is not None:
            try:
                await browser.close()
            except Exception:
                # Best-effort cleanup; never mask the scrape result.
                pass
