# RCS / scraper.py
from playwright.async_api import async_playwright
from urllib.parse import urljoin, urlparse
import logging
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
async def scrape_page(url: str, visited: set, base_domain: str) -> tuple[dict, set]:
    """Scrape a single page for text, images, and links using Playwright."""
    try:
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=True)
            context = await browser.new_context(
                user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
                viewport={"width": 1280, "height": 720}
            )
            page = await context.new_page()
            await page.goto(url, wait_until="networkidle", timeout=30000)
            # Scroll to the bottom to trigger lazy-loaded content, then give it time to render
            await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
            await page.wait_for_timeout(2000)

            # Extract text content and collapse whitespace
            text_content = await page.evaluate("document.body.innerText")
            text_content = ' '.join(text_content.split()) if text_content else ""

            # Extract images (only JPEG, PNG, WebP; exclude data URLs and SVGs)
            images = await page.evaluate(
                """() => {
                    const validExtensions = ['.jpg', '.jpeg', '.png', '.webp'];
                    const imgElements = document.querySelectorAll('img');
                    const imgUrls = new Set();
                    imgElements.forEach(img => {
                        const src = img.src || '';
                        const dataSrc = img.dataset.src || '';
                        const srcset = img.srcset || '';
                        // Check src
                        if (src && !src.startsWith('data:') && validExtensions.some(ext => src.toLowerCase().endsWith(ext))) {
                            imgUrls.add(src);
                        }
                        // Check data-src (lazy-loaded images)
                        if (dataSrc && !dataSrc.startsWith('data:') && validExtensions.some(ext => dataSrc.toLowerCase().endsWith(ext))) {
                            imgUrls.add(dataSrc);
                        }
                        // Check srcset (responsive image candidates)
                        if (srcset) {
                            srcset.split(',').forEach(candidate => {
                                const url = candidate.trim().split(' ')[0];
                                if (url && !url.startsWith('data:') && validExtensions.some(ext => url.toLowerCase().endsWith(ext))) {
                                    imgUrls.add(url);
                                }
                            });
                        }
                    });
                    return Array.from(imgUrls);
                }"""
            )
            images = [urljoin(url, img) for img in images if img]

            # Extract links, keeping only same-domain URLs that have not been visited yet
            links = await page.evaluate("Array.from(document.querySelectorAll('a')).map(a => a.href)")
            links = set(urljoin(url, link) for link in links
                        if urlparse(urljoin(url, link)).netloc == base_domain
                        and urljoin(url, link) not in visited)

            await browser.close()
            page_data = {"url": url, "text": text_content, "images": images}
            logging.info(f"Scraped data: url={url}, text_length={len(text_content)}, images={images}")
            return page_data, links
    except Exception as e:
        logging.error(f"Error scraping {url}: {e}")
        return {}, set()
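

# Example usage (a minimal sketch, not part of the original module): a simple
# breadth-first crawl that seeds a queue with a start URL, tracks visited pages,
# and follows the same-domain links returned by scrape_page. The seed URL and
# the page limit below are illustrative placeholders.
if __name__ == "__main__":
    import asyncio

    async def crawl(start_url: str, max_pages: int = 10) -> list[dict]:
        base_domain = urlparse(start_url).netloc
        visited: set = set()
        queue = [start_url]
        results: list[dict] = []
        while queue and len(visited) < max_pages:
            current = queue.pop(0)
            if current in visited:
                continue
            visited.add(current)
            page_data, links = await scrape_page(current, visited, base_domain)
            if page_data:
                results.append(page_data)
            # Enqueue newly discovered same-domain links for later visits
            queue.extend(links - visited)
        return results

    pages = asyncio.run(crawl("https://example.com"))
    logging.info(f"Crawled {len(pages)} pages")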