File size: 4,400 Bytes
2e1ba41
7dc760d
 
 
 
 
 
 
 
2e1ba41
7dc760d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2e1ba41
 
 
7dc760d
2e1ba41
 
7dc760d
 
 
2e1ba41
7dc760d
 
2e1ba41
 
 
 
7dc760d
 
 
 
 
 
2e1ba41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7dc760d
 
 
 
 
2e1ba41
 
 
 
 
7dc760d
2e1ba41
 
 
 
 
 
7dc760d
 
 
2e1ba41
 
 
 
 
 
 
 
 
 
7dc760d
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
# main.py
from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeout
from typing import List, Optional
import datetime
import logging

logging.basicConfig(level=logging.INFO)
app = FastAPI(title="RealEstateSnap", version="0.3.0")

class Listing(BaseModel):
    """A normalized rental listing scraped from one of the supported platforms."""

    title: str
    # Optional fields carry an explicit `= None` default: under Pydantic v2,
    # `Optional[str]` alone is still a *required* field, which would make the
    # model stricter than the scrapers (they legitimately pass None) intend.
    price: Optional[str] = None
    address: Optional[str] = None
    bedrooms: Optional[str] = None
    bathrooms: Optional[str] = None
    listing_url: str
    image_url: Optional[str] = None
    platform: str       # source site identifier, e.g. "craigslist" or "kijiji"
    timestamp: str      # ISO-8601 scrape time (UTC)

async def scrape_craigslist(location: str, limit: int = 10) -> List[Listing]:
    """Scrape apartment listings from a city's Craigslist housing board.

    Args:
        location: City name; spaces are removed and it is lowercased to form
            the subdomain (e.g. "New York" -> newyork.craigslist.org).
        limit: Maximum number of listings to return.

    Returns:
        Up to ``limit`` Listing objects. Rows that time out or are missing
        a title element are skipped rather than aborting the whole scrape.
    """
    listings: List[Listing] = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            site = location.replace(' ', '').lower()
            url = f"https://{site}.craigslist.org/search/apa"
            logging.info(f"📦 Scraping Craigslist: {url}")
            await page.goto(url)
            items = await page.query_selector_all(".result-row")
            for item in items[:limit]:
                try:
                    # ElementHandle.inner_text()/get_attribute() take no CSS
                    # selector argument — resolve the child elements first.
                    title_el = await item.query_selector(".result-title")
                    if title_el is None:
                        continue  # malformed row: nothing useful to extract
                    price_el = await item.query_selector(".result-price")
                    title = (await title_el.inner_text()).strip()
                    href = await title_el.get_attribute("href")
                    price = (await price_el.inner_text()).strip() if price_el else None
                    listings.append(Listing(
                        title=title,
                        price=price,
                        address=None,
                        bedrooms=None,
                        bathrooms=None,
                        listing_url=href or "",
                        image_url=None,
                        platform="craigslist",
                        # timezone-aware: datetime.utcnow() is deprecated
                        timestamp=datetime.datetime.now(datetime.timezone.utc).isoformat()
                    ))
                except PlaywrightTimeout:
                    logging.warning("⏱ Timeout — skipping a Craigslist item")
        finally:
            # Release the browser even if navigation or extraction raised.
            await browser.close()
    return listings

async def scrape_kijiji(location: str, limit: int = 10) -> List[Listing]:
    """Scrape apartment/condo listings from Kijiji for a Canadian city.

    Args:
        location: City name; spaces become hyphens and it is lowercased to
            build the Kijiji category URL.
        limit: Maximum number of listings to return.

    Returns:
        Up to ``limit`` Listing objects. Cards that time out or are missing
        a title element are skipped rather than aborting the whole scrape.
    """
    listings: List[Listing] = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            city = location.replace(' ', '-').lower()
            url = f"https://www.kijiji.ca/b-apartments-condos/{city}/c37l1700271"
            logging.info(f"📦 Scraping Kijiji: {url}")
            await page.goto(url)
            cards = await page.query_selector_all(".search-item")
            for card in cards[:limit]:
                try:
                    # ElementHandle.inner_text()/get_attribute() take no CSS
                    # selector argument — resolve the child elements first.
                    title_el = await card.query_selector(".title")
                    if title_el is None:
                        continue  # malformed card: nothing useful to extract
                    price_el = await card.query_selector(".price")
                    link_el = await card.query_selector("a.title")
                    title = (await title_el.inner_text()).strip()
                    price = (await price_el.inner_text()).strip() if price_el else None
                    href = (await link_el.get_attribute("href")) if link_el else None
                    listings.append(Listing(
                        title=title,
                        price=price,
                        address=None,
                        bedrooms=None,
                        bathrooms=None,
                        # Kijiji card links are site-relative; prefix the host.
                        listing_url=f"https://www.kijiji.ca{href or ''}",
                        image_url=None,
                        platform="kijiji",
                        # timezone-aware: datetime.utcnow() is deprecated
                        timestamp=datetime.datetime.now(datetime.timezone.utc).isoformat()
                    ))
                except PlaywrightTimeout:
                    logging.warning("⏱ Timeout — skipping a Kijiji item")
        finally:
            # Release the browser even if navigation or extraction raised.
            await browser.close()
    return listings

@app.get("/realestate", response_model=List[Listing])
async def get_listings(
    location: str = Query(..., description="City name or ZIP/postal code"),
    platform: Optional[List[str]] = Query(
        None,
        description="Platforms to scrape: craigslist, kijiji. Defaults to all."
    )
):
    selected = [p.lower() for p in platform] if platform else ["craigslist", "kijiji"]
    logging.info(f"🧭 Platforms selected: {selected}")

    results: List[Listing] = []

    if "craigslist" in selected:
        try:
            results += await scrape_craigslist(location)
        except Exception as e:
            logging.error(f"Craigslist scrape failed: {e}")
            raise HTTPException(status_code=500, detail="Craigslist scrape failed")

    if "kijiji" in selected:
        try:
            results += await scrape_kijiji(location)
        except Exception as e:
            logging.error(f"Kijiji scrape failed: {e}")
            raise HTTPException(status_code=500, detail="Kijiji scrape failed")

    if not results:
        raise HTTPException(status_code=404, detail="No listings found")
    return results