''' A simple web scraping script using the requests and BeautifulSoup libraries. I am trying to achieve the following:

1. Send an HTTP GET request to a website.
2. Parse the HTML content of the page.
3. Extract relevant data (e.g., tables, headings, paragraphs).
4. Save the extracted data to a file or display it in the terminal.'''

import requests
from bs4 import BeautifulSoup

# Target page: visa processing times for the Irish embassy in New Delhi.
url = "https://www.ireland.ie/en/india/newdelhi/services/visas/processing-times-and-decisions/"

# Send a browser-like User-Agent; some sites block the default
# python-requests User-Agent string.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    )
}

# Fetch the page; the timeout keeps the script from hanging indefinitely.
response = requests.get(url, headers=headers, timeout=30)

if response.status_code == 200:
    soup = BeautifulSoup(response.content, 'html.parser')

    # Print every paragraph on the page, numbered, with a separator line.
    paragraphs = soup.find_all('p')
    for i, paragraph in enumerate(paragraphs, start=1):
        print(f"Paragraph {i}: {paragraph.get_text(strip=True)}")
        print("-" * 80)
    # Walk each table row by row, printing the cell text tab-separated.
    tables = soup.find_all('table')
    for table in tables:
        print("\nTable found:")
        rows = table.find_all('tr')
        for row in rows:
            cells = row.find_all(['th', 'td'])
            cell_data = [cell.get_text(strip=True) for cell in cells]
            print("\t".join(cell_data))
else:
    print(f"Failed to retrieve the webpage. Status code: {response.status_code}")