from lxml import etree
import re
import csv
import time
import random
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.edge.options import Options
import json


# Column order for one scraped listing. The Chinese field names are used
# verbatim as CSV headers and as dict keys throughout the scraper, so they
# are part of the output format and must not be renamed/translated.
# (编号=id, 标题=title, 小区名称=community, 区域=district, 户型=layout,
#  面积=area, 朝向=orientation, 装修情况=decoration, 楼层=floor,
#  建筑类型=building type, 关注人数=followers, 发布时间=posted time,
#  总价=total price, 单价=unit price)
DETAIL_FIELDS = [
    '编号', '标题', '小区名称', '区域', '户型', '面积',
    '朝向', '装修情况', '楼层', '建筑类型',
    '关注人数', '发布时间','总价', '单价'
]

def init_browser():
    """Launch an Edge browser configured to look less like an automated client.

    Picks a random User-Agent from a fixed pool and disables the usual
    Selenium automation fingerprints, then returns the ready WebDriver.
    """
    # Pool of real-world User-Agent strings; one is chosen at random per run.
    ua_pool = [
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
        'Opera/9.25 (Windows NT 5.1; U; en)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
        'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
        'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)',
        'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    ]

    opts = Options()
    opts.add_argument(f'--user-agent={random.choice(ua_pool)}')
    # Remove the "browser is being controlled by automated software" banner
    # and Selenium's automation extension.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    opts.add_experimental_option('useAutomationExtension', False)
    # Hide the Blink-side automation fingerprint (navigator.webdriver).
    opts.add_argument('--disable-blink-features=AutomationControlled')
    # Needed when running in sandboxed server/container environments.
    opts.add_argument('--no-sandbox')

    driver = webdriver.Edge(options=opts)
    # Desktop-sized viewport so the site serves its desktop layout.
    driver.set_window_size(1920, 1080)
    return driver

def parse_list_page(html):
    """Parse a listing page's HTML and return its house-entry <li> nodes.

    Each house card on a Lianjia listing page is an <li> carrying both the
    'LOGCLICKDATA' and 'clear' CSS classes; the returned elements are fed
    one by one to extract_house_data().
    """
    tree = etree.HTML(html)
    return tree.xpath(
        "//li[contains(@class, 'LOGCLICKDATA') and contains(@class, 'clear')]"
    )

def extract_house_data(li):
    """Extract one house record from a listing <li> element.

    Returns a dict keyed by DETAIL_FIELDS. Every field that cannot be
    located in the markup keeps the placeholder 'NULL' — a missing node
    no longer aborts extraction of the remaining fields (the original
    raised IndexError on the first absent node and lost everything after
    it to the broad except).
    """
    data = {field: 'NULL' for field in DETAIL_FIELDS}

    def first(xpath_expr, default='NULL'):
        # First stripped match of an XPath query, or `default` when the
        # node is absent — avoids IndexError on sparse listings.
        found = li.xpath(xpath_expr)
        return found[0].strip() if found else default

    try:
        # Listing id (attribute, not stripped — original kept it raw).
        ids = li.xpath("@data-lj_action_housedel_id")
        if ids:
            data['编号'] = ids[0]

        data['标题'] = first(".//div[@class='title']/a/text()")

        # Community name and district come as two consecutive <a> texts.
        pos_info = li.xpath(".//div[@class='positionInfo']/a/text()")
        if len(pos_info) >= 2:
            data['小区名称'] = pos_info[0].strip()
            data['区域'] = pos_info[1].strip()

        # '3室2厅 | 100平米 | 南 | 精装 | 高楼层 | 板楼' — split on '|'.
        house_info = first(".//div[@class='houseInfo']/text()", default='')
        parts = [p.strip() for p in house_info.split('|')]
        if len(parts) >= 6:
            (data['户型'], data['面积'], data['朝向'],
             data['装修情况'], data['楼层'], data['建筑类型']) = parts[:6]

        # 'N人关注 / <posted time>' — split once on '/ '.
        follow_info = first(".//div[@class='followInfo']/text()", default='')
        if '/ ' in follow_info:
            followers, post_time = follow_info.split('/ ', 1)
            data['关注人数'] = followers.replace('人关注', '').strip()
            data['发布时间'] = post_time.strip()

        # Total price is a bare number; append the 万 (10k CNY) unit.
        total = first(
            ".//div[@class='priceInfo']/div[contains(@class, 'totalPrice')]/span/text()",
            default=None)
        data['总价'] = f"{total}万" if total is not None else 'NULL'

        # Unit price already carries its unit in the page text.
        data['单价'] = first(
            ".//div[@class='priceInfo']/div[contains(@class, 'unitPrice')]/span/text()")

    except Exception as e:
        # Last-resort guard: log and return whatever was parsed so far.
        print(f"解析错误（编号{data['编号']}）: {e}")
    return data


if __name__ == "__main__":
    browser = init_browser()
    # Open the site first so the operator can log in by hand; the session
    # cookies then persist in the live browser for the rest of the crawl.
    browser.get('https://zhanjiang.lianjia.com/ershoufang/')
    print("请在30s内手动登录...")
    time.sleep(30)

    # Fixed: original had a double slash ('ershoufang//'), producing URLs
    # like .../ershoufang//pg1/; page URLs now read .../ershoufang/pg{n}/.
    base_url = "https://zhanjiang.lianjia.com/ershoufang/"
    csv_file = "lianjia_houses_ershoufang_zhanjiang.csv"

    # Append mode lets an interrupted run resume without losing rows;
    # the header is written only when the file is still empty.
    with open(csv_file, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=DETAIL_FIELDS)
        if f.tell() == 0:
            writer.writeheader()

        for page in range(1, 101):  # crawl pages 1..100
            page_url = f"{base_url}pg{page}/"
            print(f"正在处理第{page}页: {page_url}")

            try:
                browser.get(page_url)
                # Wait (up to 20s) until at least one house card renders.
                WebDriverWait(browser, 20).until(
                    EC.presence_of_element_located((By.CLASS_NAME, 'LOGCLICKDATA'))
                )

                # If a CAPTCHA page appears, pause for manual solving.
                if "人机验证" in browser.page_source:
                    print("检测到验证码，请手动处理...")
                    time.sleep(30)

                # Parse and persist every house entry on this page.
                house_li_list = parse_list_page(browser.page_source)
                for li in house_li_list:
                    writer.writerow(extract_house_data(li))

                # Human-like behaviour between pages: a few small random
                # scrolls with random pauses, to reduce bot detection.
                for _ in range(3):
                    scroll_distance = random.randint(100, 300)
                    browser.execute_script(f"window.scrollBy(0, {scroll_distance});")
                    time.sleep(random.uniform(1, 3))

            except Exception as e:
                # Any per-page failure (timeout, parse error) is logged and
                # the crawl continues with the next page.
                print(f"第{page}页爬取失败: {e}")
                continue

    browser.quit()
    print("数据爬取完成！")