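"""Smart Web Scraper with Change Detection.

A Gradio app that scrapes web pages, captures screenshots with headless Chrome,
crawls linked pages to a configurable depth, and flags HTML or visual changes
between runs. Results are bundled into a downloadable ZIP archive.
"""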
import gradio as gr
import requests
import re
import logging
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from PIL import Image
import io
import zipfile
import os
import datetime
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import tempfile
# Configure logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
def sanitize_filename(filename):
    """Replace characters that are unsafe in file names with underscores."""
    return re.sub(r'[<>:"/\\|?*\n]+', '_', filename)
def validate_url(url):
"""Validate if the URL is properly formatted."""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
    except Exception:  # avoid a bare except; malformed input simply fails validation
        return False
def get_latest_data(url):
"""Get the latest HTML content of a webpage."""
try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # treat HTTP errors (4xx/5xx) as fetch failures
        return response.text
except Exception as e:
logging.error(f"Error fetching latest data from {url}: {str(e)}")
return None
def compare_html(old_html, new_html):
"""Compare two HTML contents to detect changes."""
if not old_html or not new_html:
return False
return old_html.strip() != new_html.strip()
def compare_screenshot(old_screenshot, new_screenshot):
"""Compare two screenshots to detect changes."""
try:
if not old_screenshot or not new_screenshot:
return False
        old_img = Image.open(io.BytesIO(old_screenshot))
        new_img = Image.open(io.BytesIO(new_screenshot))
        # Compare decoded pixel data explicitly rather than relying on PIL
        # object equality semantics.
        return old_img.tobytes() != new_img.tobytes()
except Exception as e:
logging.error(f"Error comparing screenshots: {str(e)}")
return False
def alert_changes(url, change_type):
"""Log detected changes."""
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logging.warning(f"[{timestamp}] Changes detected at {url}: {change_type}")
return f"[{timestamp}] {change_type}"
def extract_links_from_page(url):
"""Extract all links from a webpage."""
try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
links = [a['href'] for a in soup.find_all('a', href=True)]
return links
except Exception as e:
logging.error(f"Error extracting links from {url}: {str(e)}")
return []
def take_screenshot(url):
"""Take a screenshot of a webpage."""
try:
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--window-size=1920,1080")
        driver = webdriver.Chrome(options=chrome_options)
        try:
            driver.get(url)
            screenshot = driver.get_screenshot_as_png()
        finally:
            driver.quit()  # always release the browser, even if the page load fails
image = Image.open(io.BytesIO(screenshot))
max_size = (1024, 1024)
image.thumbnail(max_size, Image.LANCZOS)
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
return img_byte_arr.getvalue()
except Exception as e:
logging.error(f"Screenshot error for {url}: {str(e)}")
return None
def is_webpage(url):
"""Check if the URL points to a webpage (HTML)."""
try:
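        # Servers that reject HEAD requests will raise here; such URLs are then
        # treated as non-HTML and skipped.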
        response = requests.head(url, timeout=10, allow_redirects=True)
content_type = response.headers.get('Content-Type', '').lower()
return 'text/html' in content_type
except Exception as e:
logging.error(f"Error checking content type for {url}: {str(e)}")
return False
def crawl_url(url, depth, max_depth, visited=None):
"""Recursively crawl a URL up to a specified depth."""
if visited is None:
visited = set()
if depth > max_depth or url in visited:
return []
visited.add(url)
screenshots = []
if is_webpage(url):
links = extract_links_from_page(url)
screenshot = take_screenshot(url)
if screenshot:
screenshots.append((url, screenshot))
if depth < max_depth:
            for link in links:
                # Resolve relative links against the current page instead of
                # blindly prefixing "https://", which breaks paths like "/about".
                link = urljoin(url, link)
                if not link.startswith(('http://', 'https://')):
                    continue  # skip mailto:, javascript:, and other non-HTTP links
                screenshots.extend(crawl_url(link, depth + 1, max_depth, visited))
else:
logging.info(f"Skipping non-webpage content: {url}")
return screenshots
def process_urls(url_input, bulk_toggle, action_radio, max_urls, crawl_depth, progress=gr.Progress()):
"""Process URLs with crawl depth and change detection."""
# Validate URLs first
urls = re.split(r'[,\n]+', url_input.strip()) if bulk_toggle else [url_input]
urls = [url.strip() for url in urls if url.strip()]
urls = urls[:int(max_urls)]
# Validate all URLs
invalid_urls = [url for url in urls if not validate_url(url)]
if invalid_urls:
return None, json.dumps({"error": f"Invalid URLs detected: {', '.join(invalid_urls)}"}, indent=2)
scraped_data = []
screenshots = []
changes_log = []
# Initialize progress tracking
total_urls = len(urls)
progress(0)
# Directory to store scraped data
data_dir = 'scraped_data'
os.makedirs(data_dir, exist_ok=True)
# Process each URL
for idx, url in enumerate(urls):
if not url.startswith(('http://', 'https://')):
url = f'https://{url}'
# Sanitize URL for file naming
sanitized_url = sanitize_filename(url)
# Check for changes
old_html_path = os.path.join(data_dir, f"{sanitized_url}_html.txt")
old_screenshot_path = os.path.join(data_dir, f"{sanitized_url}_screenshot.png")
# Fetch latest data
latest_html = get_latest_data(url)
latest_screenshot = take_screenshot(url)
# Compare with previous data if available
if os.path.exists(old_html_path):
with open(old_html_path, 'r', encoding='utf-8') as f:
old_html = f.read()
if compare_html(old_html, latest_html):
changes_log.append(alert_changes(url, "HTML content has changed"))
if os.path.exists(old_screenshot_path):
with open(old_screenshot_path, 'rb') as f:
old_screenshot = f.read()
if latest_screenshot and compare_screenshot(old_screenshot, latest_screenshot):
changes_log.append(alert_changes(url, "Visual content has changed"))
# Store latest data
if latest_html:
with open(old_html_path, 'w', encoding='utf-8') as f:
f.write(latest_html)
if latest_screenshot:
with open(old_screenshot_path, 'wb') as f:
f.write(latest_screenshot)
# Prepare output data
if action_radio in ['Scrape data', 'Both']:
scraped_data.append({
'url': url,
'content': latest_html,
'timestamp': datetime.datetime.now().isoformat(),
                'changes_detected': changes_log.copy()  # snapshot, not a live alias
})
if action_radio in ['Capture image', 'Both']:
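            # depth starts at 1, so crawl_depth == 1 screenshots only this page;
            # larger values follow links up to (crawl_depth - 1) hops away.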
crawled_screenshots = crawl_url(url, depth=1, max_depth=int(crawl_depth))
screenshots.extend(crawled_screenshots)
# Update progress
progress((idx + 1) / total_urls)
# Create a temporary file to store the ZIP
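    # delete=False keeps the archive on disk so Gradio can serve it for download.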
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_file:
with zipfile.ZipFile(tmp_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
# Add screenshots to ZIP
for screenshot_url, screenshot_data in screenshots:
sanitized_screenshot_url = sanitize_filename(screenshot_url)
filename = f"{sanitized_screenshot_url}.png"
zipf.writestr(filename, screenshot_data)
# Add scraped data and changes log to ZIP
if scraped_data:
data_to_save = {
'scraped_data': scraped_data,
'changes_log': changes_log,
'timestamp': datetime.datetime.now().isoformat()
}
zipf.writestr('data.json', json.dumps(data_to_save, indent=2))
# Get the path to the temporary file
zip_file_path = tmp_file.name
# Prepare display data
display_data = {
'scraped_urls': len(scraped_data),
'screenshots_taken': len(screenshots),
'changes_detected': changes_log
}
# Return the path to the temporary ZIP file and display data
return zip_file_path, json.dumps(display_data, indent=2)
def create_interface():
"""Create the Gradio interface."""
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.Markdown(
"""
# Smart Web Scraper with Change Detection
Monitor and capture changes in web content automatically.
"""
)
with gr.Tabs():
with gr.Tab("URL Scrape/Screenshot"):
url_input = gr.Textbox(
label="Enter URL(s)",
value="https://example.com",
placeholder="Enter single URL or multiple URLs separated by commas"
)
with gr.Row():
bulk_toggle = gr.Checkbox(label="Bulk URLs", value=False)
action_radio = gr.Radio(
["Scrape data", "Capture image", "Both"],
label="Select Action",
value="Both"
)
with gr.Row():
max_urls = gr.Slider(
minimum=1,
maximum=20,
value=5,
step=1,
label="Max URLs to process"
)
crawl_depth = gr.Slider(
minimum=1,
maximum=3,
value=1,
step=1,
label="Crawl Depth"
)
process_button = gr.Button("Process URLs", variant="primary")
with gr.Column():
                    screenshot_zip = gr.File(label="Download Results")
scraped_data_output = gr.JSON(label="Results Summary")
process_button.click(
fn=process_urls,
inputs=[
url_input,
bulk_toggle,
action_radio,
max_urls,
crawl_depth
],
outputs=[
screenshot_zip,
scraped_data_output
],
show_progress=True
)
gr.Markdown(
"""
### Features
- Bulk URL processing
- Screenshot capture
- Content change detection
- Recursive crawling
- Automatic data storage
"""
)
return demo
if __name__ == "__main__":
demo = create_interface()
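    # launch() binds to localhost by default; demo.launch(share=True) would also
    # create a temporary public Gradio link.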
demo.launch()