import requests
from lxml import etree
import pandas as pd
import time
from datetime import datetime
import random
import re
from tqdm import tqdm
import os
from urllib.parse import urlparse

# Create the project folder / storage path for all scraped output.
file_path = r'E:\pycharm\pythonProject\4_Project\4_贝壳链家二手房\1_青岛新都心\1_成交_二手房'
os.makedirs(file_path,exist_ok=True)  # Ensure the storage directory exists; skipped if already present.

# Request headers mimicking a desktop Edge/Chrome browser so the site serves
# normal HTML instead of rejecting the client.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36 Edg/138.0.0.0',
    'referer': 'https://qd.ke.com/chengjiao/xindouxin/',
    'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-encoding':'gzip,deflate,br,zstd',
    'accept-language':'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'
}

# REQUIRED!!! Cookies copied from a logged-in browser session; without them the
# site's anti-scraping checks will likely block the requests.
# Must be converted to dict format — online converter e.g. https://www.kgtools.cn/compression/cookie
# NOTE(review): session cookies expire; refresh them from the browser when requests start failing.
cookies = {
    "SECKEY_ABVK": "nqPtROz/g4vFSkuTILmWfQ72ypL9/GnfMFH5CsNA/qs%3D",
    "BMAP_SECKEY": "nqPtROz_g4vFSkuTILmWfXlw8tESngkUzhFxVwfMRHdZDJ1nVa5KuJVbrhdMV0wAEz2TlTyHFBVtoMjfy8e-317roguHL_q9y-IdhMecJZOIzogfDhOi10v7QcDTRbIjXrsepya69KmQREPdakBrWPCi6ucbvH_UAZONxp7xZypZv6RGnazhKacxG4y_v5-t",
    "lianjia_uuid": "3e1f304e-9a05-438b-aec4-890d8bea9bd2",
    "ftkrc_": "a5d93584-9ba9-4e6e-9a32-685d98cde3e1",
    "lfrc_": "ba3533ea-18bf-4159-898c-819179bbff1d",
    "crosSdkDT2019DeviceId": "-729e0j--j43bu4-nqrgkqr049rdr08-hrfkoagle",
    "HMACCOUNT": "1222F827FD22A500",
    "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%221936182532b2526-0193abdbd024e4-4c657b58-2073600-1936182532c1ca8%22%2C%22%24device_id%22%3A%221936182532b2526-0193abdbd024e4-4c657b58-2073600-1936182532c1ca8%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_utm_source%22%3A%22biying%22%2C%22%24latest_utm_medium%22%3A%22pinzhuan%22%2C%22%24latest_utm_campaign%22%3A%22wymoren%22%2C%22%24latest_utm_content%22%3A%22biaotimiaoshu%22%2C%22%24latest_utm_term%22%3A%22biaoti%22%7D%7D",
    "Hm_lvt_b160d5571570fd63c347b9d4ab5ca610": "1751421313,1751870512,1751939252",
    "select_city": "370200",
    "login_ucid": "2000000024131671",
    "lianjia_token": "2.0014cad85a6d218de10567f16b84b52dcc",
    "lianjia_token_secure": "2.0014cad85a6d218de10567f16b84b52dcc",
    "security_ticket": "Or3mSQbRQY7tAbOywHIia7SE//DV+mNYqK2+7LBF/Xi4RAGboFQxyAN/5Es0qhArgWE0ufmtoXEPrJ6jdExNYGoP1VTXu9koLQ3UMjePQGW3DFgF0oQQdxKG4kWd6q4RkFLrk1hA7skq833JidbeKMawfvh8BapLdt4JLCUH/bs=",
    "lianjia_ssid": "0a5dc19f-ab00-4529-a336-5472c06cae19",
    "hip": "OLBbDD1V09IqJxLsuuLudUAASvhjw2my6HK3zZbmV-fW9nqlFbKXbrD3zxi-GmQPy7imDh1BWSo1ATFbJKdid5Po0-MbMbwMM9qD61xmX-3wxbRKMvUVJzpxmgvsMgScXL7KaECZzcg_nTOwj5tZ0OkhnpDoMQ-dMwbq0ptAnb8afsXZXtU%3D",
    "Hm_lpvt_b160d5571570fd63c347b9d4ab5ca610": "1752486305",
    "srcid": "eyJ0Ijoie1wiZGF0YVwiOlwiNDQ3MzViNjdkMTY3NDZmNzlkMDZhNDZhNzBhYTQyM2RlZmE2NTJiNjMxMWUxNjY5NDQyMTFkN2FjMTNiZDk1ODhlMTdmMmRmODBmMGI0YjRhNWNhZDdjMzRlYmU1ZTM0NTI0ZmQzOGI5Y2Y1MGNjZWY5N2EyMWNlZjRmZDNjYzFmYWVjYjA3NzQ3MDhhM2NjNjgxMzFmYTU3MDM4NjAyZGM0NTNkOWE2NTI0ZDI1M2U2YjU3NWI0ODAzYTVjZjU1N2Q1OGI3M2M0YTZmNDdkZWIzN2FkNTNiZTNkMzQyYmZhYTBjZjUwYTZjNTc1NzkxNDFjMzVhMGYyODA5OGZmNVwiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCI3ZGM1MjUzMFwifSIsInIiOiJodHRwczovL3FkLmtlLmNvbS9jaGVuZ2ppYW8veGluZG91eGluL3AyLyIsIm9zIjoid2ViIiwidiI6IjAuMSJ9"
}

# Base URL used later to build the per-page listing URLs (p2/, p3/, ...).
base_url = 'https://qd.ke.com/chengjiao/xindouxin/'

# Accumulates the merged data of ALL houses: [level-1 brief] + [level-2 detail].
# Each element is one dict == one output row: [{}, {}, ...]
all_houses = []

def _to_number(text):
    """Coerce a scraped string to int, then float; fall back to the raw str."""
    try:
        return int(text)
    except (ValueError, TypeError):
        try:
            return float(text)
        except (ValueError, TypeError):
            return text


def _clean_texts(raw_texts):
    """Strip each xpath text node and drop whitespace-only entries."""
    return [text.strip() for text in raw_texts if text.strip()]


# -----------------------------------------------------------------------------------------------
# 1. Scrape page by page: each level-1 listing page contains many house entries
#    (Beike/Lianjia: ~30 per page; 58.com: ~60 per page).
# -----------------------------------------------------------------------------------------------
for page in range(5, 47):
    # Page 1 has no /pN suffix; only append pN when page > 0.
    url = f'{base_url}p{page + 1}/' if page > 0 else base_url
    response = requests.get(url, headers=headers, cookies=cookies, timeout=3)
    response.encoding = 'utf-8'
    Pagelink = response.url
    print(f'✅ 正在采集[第{page + 1}页]：{Pagelink}')

    html = etree.HTML(response.text)
    time.sleep(random.uniform(0.7, 2))

    # All house <li> nodes on this level-1 listing page.
    HouseLists_Pagelink = html.xpath('//*[@id="beike"]/div[1]/div[5]/div[1]/div[3]/ul/li')

    if not HouseLists_Pagelink:
        print(f' → ⚠️ 未找到房源列表。可能XPath错误:页面结构发生变化，或触发人机验证')
    else:
        print(f' → 🔍 [第{page + 1}页]：查询到_{len(HouseLists_Pagelink)}条房源列表')

    # BUG FIX: count of detail pages fetched successfully on this page. The
    # original summary line printed len(detail_link) — the character count of
    # the last URL, not the number of detail records (and a NameError when a
    # page had no listings at all).
    detail_success = 0

    # Iterate over every house entry of the level-1 page.
    for idx, houselist in enumerate(HouseLists_Pagelink):

        # -----------------------------------------------
        # (1) Brief data scraped from the listing entry
        # -----------------------------------------------
        # One dict per entry: detail link + brief info from the listing page.
        HouseData_brief = {}

        # 1. Level-1 (listing) page URL
        HouseData_brief['1级网址'] = Pagelink

        # 2. Title. xpath() always returns a list even for a single hit, so
        #    [0] extracts the str and strip() trims whitespace/newlines.
        title_pagelink = houselist.xpath('.//div/div[1]/a/text()')[0].strip()
        HouseData_brief['Brief_房源标题'] = title_pagelink

        # 3. Level-2 detail-page link
        detail_link = houselist.xpath('.//a/@href')[0]
        HouseData_brief['2级房源详情链接'] = detail_link

        # 4. Orientation and decoration, e.g. "南 | 简装" -> [['南 ', ' 简装']]
        house_Icon_raw = houselist.xpath('.//div/div[2]/div[1]//text()')
        house_Icon = [text.strip().split('|') for text in house_Icon_raw if text.strip()]
        HouseData_brief['Brief_房源朝向'] = house_Icon[0][0].strip()
        HouseData_brief['Brief_房源装修'] = house_Icon[0][1].strip()

        # 5. Deal date
        dealDate = houselist.xpath('.//div/div[2]/div[2]/text()')[0].strip()
        HouseData_brief['Brief_成交日期'] = dealDate

        # 6. Total deal price (10k CNY)
        totalPrice = houselist.xpath('.//div/div[2]/div[3]/span/text()')[0].strip()
        HouseData_brief['Brief_成交总价（万）'] = float(totalPrice)

        # 7. Building info
        position_Icon = _clean_texts(houselist.xpath('.//div/div[3]/div[1]/text()'))
        HouseData_brief['Brief_建筑信息'] = position_Icon[0].strip()

        # 8. Unit price (CNY per m2)
        unitPrice = houselist.xpath('.//div/div[3]/div[2]/span/text()')[0].strip()
        HouseData_brief['Brief_成交单价（元/平）'] = float(unitPrice)

        # 9. Selling points: tax-exemption years (满2/满5), nearest metro, ...
        dealHouse_Icon = _clean_texts(houselist.xpath('.//div/div[4]/span[2]//text()'))
        HouseData_brief['Brief_成交房屋信息'] = '/'.join(dealHouse_Icon)

        # 10. Deal-cycle info. Located by class names rather than positional
        #     indexes ("dealCycleeInfo" is the site's own spelling).
        dealCycleeInfo = _clean_texts(houselist.xpath(
            './/div[@class="dealCycleeInfo"]//span[@class="dealCycleTxt"]//text()'))
        # Guard against entries that omit either value so one malformed
        # listing no longer aborts the whole run with an IndexError.
        if len(dealCycleeInfo) >= 2:
            HouseData_brief['Brief_挂牌价（万）'] = float(dealCycleeInfo[0].replace('挂牌', '').replace('万', ''))
            HouseData_brief['Brief_成交周期（天）'] = int(dealCycleeInfo[1].replace('成交周期', '').replace('天', ''))

        # ---------------------------------------------------------------
        # (2) Detail data scraped from the level-2 detail page
        # ---------------------------------------------------------------
        HouseData_detail = {}

        try:
            response = requests.get(detail_link, headers=headers, cookies=cookies, timeout=3)
            response.encoding = 'utf-8'
            print(f'   🧐 正在采集[第{page + 1}页]-的-[第{idx + 1}条]房源详情页：{detail_link}')
            detailHtml = etree.HTML(response.text)
            time.sleep(random.uniform(0.7, 2))
        except Exception as e:
            print(f'   ❌ 详情页请求失败：{e}')
            # Even when the detail request fails, keep the brief data already scraped.
            all_houses.append(HouseData_brief)
            continue

        detail_success += 1

        # 1. Title on the detail page
        title = _clean_texts(detailHtml.xpath('//div[@class="title"]//h1[@class="main"]//text()'))
        HouseData_detail['Detail_房源标题'] = ' '.join(title)

        # 2. Total deal price
        dealtotalPrice = detailHtml.xpath('//div[@class="price"]//span[@class="dealTotalPrice"]/i/text()')[0].strip()
        HouseData_detail['Detail_成交总价（万）'] = float(dealtotalPrice)

        # 3. Unit price
        unitPrice = detailHtml.xpath('//div[@class="price"]//b/text()')[0].strip()
        HouseData_detail['Detail_成交单价（元/平）'] = float(unitPrice)

        # 4. Deal info block: pair each field name (span text outside <label>)
        #    with its value (text inside <label>) by index.
        dealInfo_item = _clean_texts(
            detailHtml.xpath('//div[contains(@class,"msg")]//span/text()[not(parent::label)]'))
        dealInfo_label = _clean_texts(
            detailHtml.xpath('//div[contains(@class,"msg")]//span/label/text()'))
        for item, label in zip(dealInfo_item, dealInfo_label):
            HouseData_detail[f'Detail_{item}'] = _to_number(label)

        # Discount metrics, computed ONCE after all deal-info fields exist.
        # BUG FIX: the original ran this inside the loop above and read the
        # 'Detail_*' keys from HouseData_brief — which never contains them —
        # so it raised KeyError on the first iteration.
        listed = HouseData_detail.get('Detail_挂牌价格（万）')
        dealt = HouseData_detail.get('Detail_成交总价（万）')
        if isinstance(listed, (int, float)) and isinstance(dealt, (int, float)) and listed:
            HouseData_detail['降价金额（万）'] = listed - dealt
            HouseData_detail['降价幅度（%）'] = HouseData_detail['降价金额（万）'] / listed

        # 5. Basic attributes of the house (item names in <span>, values as
        #    the <li> tail text outside the <span>).
        baseInfo_item = _clean_texts(detailHtml.xpath('//div[@class="base"]//ul//li//span/text()'))
        baseInfo_value = _clean_texts(detailHtml.xpath('//div[@class="base"]//ul//li/text()[not(parent::span)]'))
        for item, value in zip(baseInfo_item, baseInfo_value):
            HouseData_detail[f'Detail_{item}'] = _to_number(value)

        # 6. Transaction attributes of the house
        transactionInfo_item = _clean_texts(detailHtml.xpath('//div[@class="transaction"]//ul//li//span/text()'))
        transactionInfo_value = _clean_texts(detailHtml.xpath('//div[@class="transaction"]//ul//li/text()[not(parent::span)]'))
        for item, value in zip(transactionInfo_item, transactionInfo_value):
            HouseData_detail[f'Detail_{item}'] = _to_number(value)

        # 7. Photo album / floor plan.
        # NOTE: the server-rendered response differs from the browser DOM —
        # in response.text the image URLs live in @data-src under
        # //div[@class="img"]//ul/li, not under //*[@id="topImg"]/div[1]/img.
        imgUrls_raw = detailHtml.xpath('//div[@class="img"]//ul/li/@data-src')
        imgUrls = imgUrls_raw[0] if imgUrls_raw else '暂无图片'
        HouseData_detail['Detail_成交房源相册_链接'] = imgUrls

        # 7.2 Download the floor-plan image only when one exists.
        if imgUrls_raw:
            # Create the image folder (idempotent).
            os.makedirs(f'{file_path}\\成交房源户型图', exist_ok=True)

            # Basename taken from the URL path: .../standard_xxx.jpg -> standard_xxx.jpg
            imgName_path = urlparse(imgUrls).path.split('/')[-1]

            # Replace characters that are illegal in Windows file names with spaces.
            replace_chars = r'[\\/:*?"<>|\.,;=+&%$#@!~`\']'
            title_name = re.sub(replace_chars, ' ', HouseData_detail['Detail_房源标题'])

            # Final file name: Page<N>_<title>_<url basename>
            imgName = f'Page{page + 1}_{title_name}_{imgName_path}'

            try:
                # stream=True downloads in chunks instead of loading the whole body in memory.
                response = requests.get(imgUrls, stream=True)
                if response.status_code == 200:
                    with open(f'{file_path}\\成交房源户型图\\{imgName}', 'wb') as f:
                        for chunk in response.iter_content(1024):
                            f.write(chunk)
                else:
                    print(f'   ❌下载失败，状态码：{response.status_code}')
            except Exception as e:
                # BUG FIX: the original printed the literal character 'e'
                # instead of interpolating the exception ({e}).
                print(f'     ❌发生错误,图片/视频未能下载：{e}')

        # 8. Community name and coordinates, embedded in an inline <script>.
        residentInfos_raw = detailHtml.xpath('//*[@id="beike"]/script[5]/text()')[0]

        # Regex patterns: capture the single-quoted value following each key.
        patterns = {
            '小区名称': r"resblockName:\s*'([^']+)'",
            '小区经纬度_百度坐标': r"resblockPosition:\s*'([^']+)'"
        }

        for item, pattern in patterns.items():
            match = re.search(pattern, residentInfos_raw)
            if match:
                # group(1) is the captured value only (group(0) would also
                # include the "resblockName:" prefix).
                HouseData_detail[f'Detail_{item}'] = match.group(1)

                # Split "lng,lat" into separate float columns (Baidu coordinates).
                if item == '小区经纬度_百度坐标':
                    position = HouseData_detail[f'Detail_{item}'].split(',')
                    HouseData_detail[f'Detail_小区经度_百度'] = float(position[0])
                    HouseData_detail[f'Detail_小区纬度_百度'] = float(position[1])

        # ---------------------------------------------------------------------------
        # (3) Merge brief + detail into ONE row per house (dict unpacking;
        #     detail keys win on collision).
        # ---------------------------------------------------------------------------
        HouseData_merge = {**HouseData_brief, **HouseData_detail}
        all_houses.append(HouseData_merge)

    print(f' → 🏡 [第{page + 1}页]——→成功_采集到[1级房源列表]_简要数据:{len(HouseLists_Pagelink)}条、[2级详情页]_详情数据：{detail_success}条。')

    # -----------------------------------------------------------------------------------------------
    # 2. Save to Excel after every page (incremental checkpoint).
    # BUG FIX: the whole save path now lives inside the `if all_houses:`
    # guard; the original used df_orded / total_houses unconditionally and
    # raised NameError whenever nothing had been collected yet.
    # -----------------------------------------------------------------------------------------------
    if all_houses:
        df = pd.DataFrame(all_houses)
        total_houses = len(df)

        # Preferred column order for the output sheet.
        column_order = ['1级网址',
                        '2级房源详情链接',
                        # 'Brief_房源标题',
                        'Detail_房源标题',
                        'Detail_链家编号',
                        'Detail_房屋年限',
                        'Detail_交易权属',
                        'Detail_房屋用途',
                        'Detail_房权所属',
                        'Detail_小区名称',
                        'Detail_小区经度_百度',
                        'Detail_小区纬度_百度',
                        'Detail_成交总价（万）',
                        # 'Brief_成交总价（万）',
                        'Detail_挂牌价格（万）',
                        # 'Brief_挂牌价（万）',
                        '降价金额（万）',
                        '降价幅度（%）',
                        'Detail_成交单价（元/平）',
                        # 'Brief_成交单价（元/平）',
                        'Detail_房屋户型',
                        'Detail_建筑面积',
                        'Detail_套内面积',
                        'Detail_房屋朝向',
                        # 'Brief_房源朝向',
                        'Detail_装修情况',
                        # 'Brief_房源装修',
                        'Detail_供暖方式',
                        'Detail_配备电梯',
                        'Detail_梯户比例',
                        'Detail_所在楼层',
                        'Detail_户型结构',
                        'Detail_建筑类型',
                        'Detail_建成年代',
                        'Detail_建筑结构',
                        'Detail_挂牌时间',
                        'Brief_成交日期',
                        'Detail_成交周期（天）',
                        # 'Brief_成交周期（天）',
                        'Brief_成交房屋信息',
                        # 'Brief_建筑信息',
                        'Detail_调价（次）',
                        'Detail_带看（次）',
                        'Detail_关注（人）',
                        'Detail_浏览（次）',
                        'Detail_成交房源相册_链接'
                        ]
        # Keep only the columns that actually appeared in the scraped data.
        col_orded = [col for col in column_order if col in df.columns]
        df_orded = df[col_orded]

        # Save to a timestamped Excel file under the project folder.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        excel_file = f'青岛新都心_成交二手房_{timestamp}.xlsx'
        save_path = os.path.join(file_path, excel_file)
        df_orded.to_excel(save_path, index=False)

        print(f"🤗 恭喜！成功采集到：{total_houses}条房源数据，已保存至：{save_path}")
