import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import datetime
import time
import random

def _node_text(parent, tag, class_name, default=''):
    """Return the stripped text of the first descendant matching tag/class.

    Falls back to *default* when the element is missing so that one absent
    field does not abort the whole page with an AttributeError.
    """
    node = parent.find(tag, class_=class_name)
    return node.get_text(strip=True) if node else default


def _save_to_excel(data_list, save_path):
    """Append the scraped rows to *save_path*, creating the file if needed.

    When the workbook already exists, rows are written below the current
    last row of 'Sheet1' without repeating the header.
    """
    if not data_list:
        # Nothing scraped (e.g. every request failed) -- do not write an
        # Excel file containing only a header row.
        print('没有爬取到任何数据，跳过保存')
        return

    df = pd.DataFrame(data_list)
    if os.path.exists(save_path):
        with pd.ExcelWriter(save_path, mode='a', engine='openpyxl',
                            if_sheet_exists='overlay') as writer:
            startrow = writer.sheets['Sheet1'].max_row  # current row count
            df.to_excel(writer, index=False, header=False, startrow=startrow)
    else:
        df.to_excel(save_path, index=False)

    print(f'数据已保存到 {save_path}')


def w2():
    """Scrape 50 pages of Wenzhou Anjuke second-hand listings into data.xlsx.

    For each listing on a results page, the detail page is fetched as well
    to extract the transaction-volume figure.  Results are appended to
    data.xlsx when the file already exists, otherwise a new workbook is
    created.  Network failures on a page are logged and the page is skipped.
    """
    url_template = "https://wenzhou.anjuke.com/sale/p{}/"
    # Pool of request-header sets; one is chosen at random per page so the
    # traffic looks less uniform.
    headers_list = [
        {
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/"
                           "537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,"
                      "image/avif,image/webp,image/apng,*/*;q=0.8,application/"
                      "signed-exchange;v=b3;q=0.7",
            "Referer": "https://wenzhou.anjuke.com/",
            "Cookie":""
        },
        # More header sets can be added here for more request variety.
        {
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 Firefox/89.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Referer": "https://wenzhou.anjuke.com/",
        },
    ]

    data_list = []
    timeout = 15  # seconds -- never hang indefinitely on a slow response

    for page in range(1, 51):  # scrape pages 1..50
        url = url_template.format(page)
        header = random.choice(headers_list)  # random header set per page

        try:
            rs1 = requests.get(url=url, headers=header, timeout=timeout)
            rs1.raise_for_status()  # raise on HTTP error status
            soup = BeautifulSoup(rs1.text, 'html.parser')

            for item in soup.find_all('div', class_='property'):
                link = item.find('a')
                detail_url = link.get('href') if link else None
                if not detail_url:
                    # Listing card without a link -- nothing to follow.
                    continue

                # Fetch the detail page for the transaction-volume figure.
                # NOTE(review): assumes href is an absolute URL -- appears to
                # hold for anjuke listing cards, verify if domains change.
                detail_response = requests.get(detail_url, headers=header,
                                               timeout=timeout)
                detail_response.raise_for_status()
                detail_soup = BeautifulSoup(detail_response.text, 'html.parser')
                transaction_volume = _node_text(
                    detail_soup, 'div',
                    'community-info-td community-info-right')

                data_list.append({
                    '网页链接': url,
                    'ID': detail_url,
                    '总价格': _node_text(item, 'p', 'property-price-total'),
                    '单位面积价格': _node_text(item, 'p', 'property-price-average'),
                    '小区': _node_text(item, 'p', 'property-content-info-comm-name'),
                    '地址': _node_text(item, 'p', 'property-content-info-comm-address'),
                    '面积和建成年份': _node_text(item, 'div', 'property-content-info'),
                    '成交量': transaction_volume,
                    '爬取时间': datetime.datetime.now(),
                })

            print(f'第 {page} 页数据已爬取完成')
            time.sleep(random.uniform(20, 40))  # pause 20-40 s between pages

        except requests.exceptions.RequestException as e:
            # Log and skip this page; back off briefly before the next one.
            print(f'请求出错: {e}')
            time.sleep(5)

    _save_to_excel(data_list, 'data.xlsx')

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    w2()
