import asyncio
import os
from playwright.async_api import async_playwright
from bs4 import BeautifulSoup
from dotenv import load_dotenv

# Pull configuration from a local .env file into the process environment.
load_dotenv()

# Optional path to a specific Chromium/Chrome binary; None falls back to
# Playwright's bundled Chromium.
CHROME_PATH = os.getenv("CHROME_PATH")
# X11 display settings forwarded to the (headful) browser process.
DISPLAY_VAR = os.getenv("DISPLAY", ":0")
XAUTHORITY_VAR = os.getenv("XAUTHORITY", "")
# Close the browser automatically after scraping unless AUTO_CLOSE=false.
AUTO_CLOSE = os.getenv("AUTO_CLOSE", "True").lower() == "true"

if not CHROME_PATH:
    print("CHROME_PATH not found in .env, using the default Playwright Chromium browser.")

# The target URL to crawl.
URL = "http://www.docuchina.cn/2017/04/05/VIDEalYoTKi24hmjUCiYSF5u170405.shtml"

# Where the fetched page HTML is written (directory is created on demand).
OUTPUT_DIR = "data/download/html"
OUTPUT_FILE = "page_content.html"

async def main() -> None:
    """Fetch the target page in a visible Chromium browser and save its HTML.

    Prefers the binary at CHROME_PATH when set (falling back to Playwright's
    bundled Chromium on launch failure or when unset), navigates to URL,
    saves the full page HTML under OUTPUT_DIR/OUTPUT_FILE, and closes the
    browser according to AUTO_CLOSE. The browser is always closed on error.
    """
    print("Starting the browser...")
    async with async_playwright() as p:
        # Shared launch options: headful, pinned to the configured X display,
        # sandbox disabled (required when running as root / in containers).
        launch_kwargs = {
            "headless": False,
            "args": [
                f'--display={DISPLAY_VAR}',
                '--no-sandbox',
                '--disable-setuid-sandbox',
            ],
            "env": {
                'DISPLAY': DISPLAY_VAR,
                'XAUTHORITY': XAUTHORITY_VAR,
            },
        }

        browser = None
        if CHROME_PATH:
            # Only claim (and attempt) the dotenv-provided binary when one was
            # actually configured; previously this path ran with
            # executable_path=None and logged a misleading message.
            try:
                print(f"Using the Chromium binary from CHROME_PATH: {CHROME_PATH}")
                browser = await p.chromium.launch(
                    executable_path=CHROME_PATH, **launch_kwargs
                )
            except Exception as e:
                print(f"Failed to launch browser with CHROME_PATH and explicit args: {e}")
                print("Trying to launch with default Playwright browser...")
        if browser is None:
            # Fallback (or default) path: Playwright's bundled Chromium.
            browser = await p.chromium.launch(**launch_kwargs)

        # From here on the browser process exists; guarantee it is closed even
        # if navigation or file I/O fails (the original leaked it on error).
        try:
            print("Opening a new page...")
            page = await browser.new_page()

            print(f"Navigating to {URL}...")
            await page.goto(URL, wait_until="networkidle")

            # The previous wait_for_selector is removed as requested.
            print("Skipping wait for a specific selector.")

            print("Getting page HTML content...")
            content = await page.content()

            print("--- Page HTML Content (first 100 characters) ---")
            print(content[:100] + '...')
            print("--- End of HTML Content ---")

            # Create the output directory if it doesn't exist.
            os.makedirs(OUTPUT_DIR, exist_ok=True)

            # Save full HTML to a file for later debugging.
            output_path = os.path.join(OUTPUT_DIR, OUTPUT_FILE)
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(content)
            print(f"Full page HTML saved to {output_path}")

            if not AUTO_CLOSE:
                print("AUTO_CLOSE is False. The browser will remain open. Please close it manually to exit the script.")
                # Block until the user resumes/closes the Playwright inspector.
                await page.pause()
        finally:
            print("Closing the browser...")
            await browser.close()

if __name__ == "__main__":
    # Run the async scraper only when executed as a script, not on import.
    asyncio.run(main())