import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

def scrape_property_data(district_url):
    """Fetch one district's new-house listing page and extract property data.

    Args:
        district_url: URL of a district listing page on newhouse.fang.com.

    Returns:
        A list of dicts with keys 'name', 'price_per_sqm' and 'total_price'
        (stripped text, or "N/A" when the corresponding element is absent).
        Returns an empty list on any network failure or non-200 response,
        so one bad district never aborts a multi-district run.
    """
    # Browser-like headers to reduce the chance of being served a block page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Referer": "https://newhouse.fang.com/",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    }

    try:
        # timeout prevents one stalled connection from hanging the whole scrape.
        response = requests.get(district_url, headers=headers, timeout=15)
    except requests.RequestException as e:
        # Network errors (DNS, refused connection, timeout) are reported and
        # treated like any other failed fetch rather than crashing the caller.
        print(f"Request failed: {e}")
        return []

    print(f"Status Code: {response.status_code}")
    if response.status_code != 200:
        print("Failed to retrieve data")
        return []

    # Parse the HTML document.
    soup = BeautifulSoup(response.text, 'html.parser')

    # One 'nlc_details' div per listing card on the page.
    listings = soup.find_all('div', class_='nlc_details')
    print(f"Found {len(listings)} listings")

    properties = []
    for listing in listings:
        try:
            properties.append({
                'name': _text_or_na(listing.find('div', class_='nlcd_name')),
                'price_per_sqm': _text_or_na(listing.find('div', class_='nhouse_price')),
                'total_price': _text_or_na(listing.find('p', class_='zj_price')),
            })
        except Exception as e:
            # Skip a malformed card rather than losing the whole page.
            print(f"Skipped a listing due to error: {e}")

    print(f"Scraped {len(properties)} properties")
    return properties


def _text_or_na(element):
    """Return the stripped text of a bs4 element, or "N/A" if it is None."""
    return element.get_text(strip=True) if element else "N/A"

# Districts and their corresponding listing-page URLs
# (district name, URL slug) pairs for Beijing districts on newhouse.fang.com.
_DISTRICT_SLUGS = [
    ('丰台', 'fengtai'),
    ('西城', 'xicheng'),
    ('东城', 'dongcheng'),
    ('昌平', 'changping'),
    ('大兴', 'daxing'),
    ('通州', 'tongzhou'),
    ('房山', 'fangshan'),
    ('顺义', 'shunyi'),
    ('石景山', 'shijingshan'),
    ('密云', 'miyun'),
    ('门头沟', 'mentougou'),
    ('怀柔', 'huairou'),
    ('延庆', 'yanqing'),
    ('平谷', 'pinggu'),
    ('朝阳', 'chaoyang'),
    ('海淀', 'haidian'),
]

# District name -> full listing-page URL.
districts = {
    name: f'https://newhouse.fang.com/house/s/{slug}/'
    for name, slug in _DISTRICT_SLUGS
}

# Scrape every district in turn and dump each one's listings to its own CSV.
for district_name, district_url in districts.items():
    print(f"Scraping data for {district_name}...")
    records = scrape_property_data(district_url)
    if not records:
        print(f"No data found for {district_name}")
    else:
        frame = pd.DataFrame(records)
        # utf-8-sig BOM keeps the Chinese column values readable in Excel.
        frame.to_csv(f'{district_name}_properties.csv', index=False, encoding='utf-8-sig')
        print(f"Data for {district_name} saved to {district_name}_properties.csv")
    # Random 1-3 s pause between requests to be polite to the server.
    time.sleep(random.uniform(1, 3))

print("数据抓取完成，所有文件已保存。")