import csv
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from bs4 import BeautifulSoup
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# --- Chrome WebDriver setup ---
# Raw string (r'...') for the Windows path: the original plain string only
# worked because sequences like "\G" and "\A" happen to be *invalid* escapes
# that Python leaves alone — a SyntaxWarning on 3.12+ and a latent bug if the
# path ever contains "\n", "\t", etc.
chrome_driver_path = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver-win64\chromedriver.exe'  # replace with your local ChromeDriver path
service = Service(chrome_driver_path)
driver = webdriver.Chrome(service=service)

# Anjuke new-home listing page for one city (Chongqing); adjust the URL for
# other cities or search keywords.
url = 'https://cq.fang.anjuke.com/loupan/s?kw='

# Open the page.
driver.get(url)

# Fixed wait for the initial page load.
time.sleep(5)

# Number of result pages to scrape.
num_pages = 5

# Accumulates one [name, district, layouts, area, price] row per listing.
all_house_list = []

def _parse_item_mod(item_mod):
    """Extract one listing row from a div.item-mod element.

    Returns [title_name, address_text, room_types_joined, building_area,
    price]; any field whose markup is missing comes back as None (the
    room-types field as an empty string).
    """
    price = None
    title_name = None
    room_types = []
    building_area = None
    address_text = None

    # Price: <a class="favor-pos"> <p class="price"> <span>...</span>
    favor_pos = item_mod.find('a', class_='favor-pos')
    if favor_pos:
        price_p = favor_pos.find('p', class_='price')
        if price_p:
            price_span = price_p.find('span')
            if price_span:
                price = price_span.text

    # All remaining fields live under div.infos; one lookup, one guard
    # (the original re-tested infos_div three separate times).
    infos_div = item_mod.find('div', class_='infos')
    if infos_div:
        # Listing name: div.infos > a.lp-name > span.items-name
        lp_name_a = infos_div.find('a', class_='lp-name')
        if lp_name_a:
            items_name_span = lp_name_a.find('span', class_='items-name')
            if items_name_span:
                title_name = items_name_span.text

        # Room layouts: every span under a.huxing except the building-area one.
        huxing_a = infos_div.find('a', class_='huxing')
        if huxing_a:
            # NOTE(review): spans with no class attribute also pass this
            # filter (x is None) — presumably all layout spans are unclassed;
            # confirm against the live markup.
            room_type_spans = huxing_a.find_all('span', class_=lambda x: x != 'building-area')
            room_types = [span.text for span in room_type_spans]

            # Floor area: the one span explicitly classed building-area.
            building_area_span = huxing_a.find('span', class_='building-area')
            if building_area_span:
                building_area = building_area_span.text

        # District/location: a.address > span.list-map
        address_a = infos_div.find('a', class_='address')
        if address_a:
            list_map_span = address_a.find('span', class_='list-map')
            if list_map_span:
                address_text = list_map_span.text

    return [title_name, address_text, ', '.join(room_types), building_area, price]


for page in range(num_pages):
    print(f"正在获取第 {page + 1} 页的房源信息...")

    # Scroll to the bottom in small steps so lazy-loaded content renders.
    scroll_step = 200
    total_height = driver.execute_script("return document.body.scrollHeight")
    for offset in range(0, total_height, scroll_step):
        driver.execute_script(f"window.scrollTo(0, {offset});")
        time.sleep(0.5)  # brief pause to mimic slow, human-like scrolling

    # Parse the fully rendered page and collect every listing on it.
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    for item_mod in soup.find_all('div', class_='item-mod'):
        all_house_list.append(_parse_item_mod(item_mod))

    # Advance to the next page unless this was the last requested page.
    if page < num_pages - 1:
        try:
            # Wait (up to 10 s) for the "next page" link to be clickable.
            next_page_link = WebDriverWait(driver, 10).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.next-page.next-link'))
            )
            next_page_link.click()
            time.sleep(5)  # fixed wait for the next page to load
        except Exception as e:
            # No clickable "next" link (e.g. fewer result pages than
            # requested, or a timeout) — stop paginating.
            print(f"点击下一页时出现错误: {e}")
            break

# Persist the scraped rows to CSV.
# 'utf-8-sig' prepends a BOM so Excel on Windows auto-detects UTF-8 and the
# Chinese headers/values display correctly; plain 'utf-8' renders as mojibake
# there. Any UTF-8-aware reader still opens the file normally.
with open('anjuke_houses.csv', mode='w', newline='', encoding='utf-8-sig') as file:
    writer = csv.writer(file)
    writer.writerow(['名字', '地区', '户型', '面积', '价格'])
    writer.writerows(all_house_list)

# Release the browser and shut down the driver process.
driver.quit()