import csv
import json
import random
import re
import time

from lxml import etree
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# CSV column order: listing id, community, layout, area, orientation,
# decoration, floor, deal-cycle days, listing price, deal date,
# deal price and unit price.
DETAIL_FIELDS = [
    '编号',
    '小区',
    '房型',
    '面积',
    '朝向',
    '装修',
    '楼层',
    '成交周期',
    '挂牌价格',
    '成交时间',
    '成交价格',
    '均价',
]

def init_browser():
    """Launch an Edge browser configured to look less like an automated client.

    Picks a random desktop User-Agent and disables the usual Selenium
    automation fingerprints.

    Returns:
        A ready-to-use ``selenium.webdriver.Edge`` instance sized to a
        typical desktop viewport.
    """
    options = Options()

    # Pool of desktop User-Agent strings; one is chosen at random per run.
    # NOTE: the original list contained several exact duplicates, which
    # silently biased random.choice towards those entries — deduplicated here.
    user_agents = [
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
        'Opera/9.25 (Windows NT 5.1; U; en)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
        'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7',
        'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
        'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)',
        'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
    ]
    options.add_argument(f'--user-agent={random.choice(user_agents)}')

    # Hide the automation fingerprints Selenium normally exposes.
    options.add_experimental_option("excludeSwitches", ["enable-automation"])
    options.add_experimental_option('useAutomationExtension', False)
    options.add_argument('--disable-blink-features=AutomationControlled')
    options.add_argument('--no-sandbox')  # needed in sandboxed/server environments
    # options.add_argument('--headless=new')  # optional headless mode; keep off while debugging

    # Start the browser and emulate a typical desktop viewport.
    browser = webdriver.Edge(options=options)
    browser.set_window_size(1920, 1080)
    return browser

def handle_captcha(browser):
    """Detect a captcha on the current page and pause for manual resolution.

    Waits up to 10 seconds for one of the known captcha containers to appear.
    If one shows up, sleeps 60 seconds so the operator can solve it by hand.

    Args:
        browser: the active Selenium WebDriver.

    Returns:
        True if a captcha container was detected, False otherwise.
    """
    try:
        WebDriverWait(browser, 10).until(EC.presence_of_element_located((
            By.CSS_SELECTOR, '.captcha-container, .verify-panel, .recaptcha-container'
        )))
    except TimeoutException:
        # No captcha container appeared within the wait window; anything
        # other than a timeout (e.g. a dead session) is a real error and
        # is deliberately allowed to propagate.
        return False
    print("检测到验证码，请手动处理...（60秒内完成）")
    time.sleep(60)  # give the operator time to solve the captcha manually
    return True

def parse_list_page(html):
    """Parse a listing page and return its per-house ``<li>`` elements.

    Args:
        html: raw page source of a listing page.

    Returns:
        A (possibly empty) list of lxml elements; an empty list on any
        parse failure. When the expected container is missing, sleeps for a
        long time on the assumption that the scraper is being throttled.
    """
    try:
        root = etree.HTML(html)
        if root is None:
            # etree.HTML() returns None for empty/unparseable input; the
            # original code let this surface as an AttributeError masked by
            # the broad except below.
            print("警告：未找到房源列表项，可能页面结构变化或被反爬拦截")
            return []
        li_list = root.xpath('//ul[@class="listContent"]/li')
        if not li_list:
            print("警告：未找到房源列表项，可能页面结构变化或被反爬拦截")
            time.sleep(400)  # hard back-off in case we are being rate-limited
        return li_list
    except Exception as e:
        print(f"列表页解析错误: {e}")
        return []

def extract_house_data(li_element: "etree._Element"):
    """Extract one deal record from a single listing ``<li>`` element.

    Fixes over the original: a detail-page URL whose path does not match
    ``/<digits>.html`` no longer raises AttributeError, a whitespace-only
    positionInfo no longer raises IndexError, and every xpath query is
    evaluated only once.

    Args:
        li_element: any object exposing ``.xpath(query) -> list`` (normally
            an lxml element from parse_list_page()).

    Returns:
        A dict with every DETAIL_FIELDS key assigned; any field that cannot
        be parsed holds the string 'NULL'.
    """

    def first(query, default=''):
        # First xpath hit for *query*, or *default* when there is none.
        hits = li_element.xpath(query)
        return hits[0] if hits else default

    def first_digits(text):
        # First run of digits in *text*, or 'NULL' when there is none.
        match = re.search(r'(\d+)', text)
        return match.group(1) if match else 'NULL'

    data = {}

    # 1. Listing id, taken from the detail-page URL (".../<id>.html").
    href = first('.//div[@class="title"]/a/@href')
    id_match = re.search(r'/(\d+)\.html', href) if href else None
    data['编号'] = id_match.group(1) if id_match else 'NULL'

    # 2. Community, layout and area: whitespace-separated title text.
    title_text = first('.//div[@class="title"]/a/text()')
    parts = title_text.split() if title_text else []
    data['小区'] = parts[0] if len(parts) >= 1 else 'NULL'
    data['房型'] = parts[1] if len(parts) >= 2 else 'NULL'
    data['面积'] = parts[2] if len(parts) >= 3 else 'NULL'

    # 3. Orientation and decoration: "|"-separated houseInfo text.
    house_info = first('.//div[@class="houseInfo"]/text()')
    segments = [p.strip() for p in house_info.split('|')] if house_info else []
    data['朝向'] = segments[0] if len(segments) >= 1 else 'NULL'
    data['装修'] = segments[1] if len(segments) >= 2 else 'NULL'

    # 4. Floor: first whitespace token of positionInfo.
    floor_tokens = first('.//div[@class="positionInfo"]/text()').split()
    data['楼层'] = floor_tokens[0] if floor_tokens else 'NULL'

    # 5. Deal cycle / listing price: first span carries the listing price,
    #    second the cycle length in days (numbers only are kept).
    cycle_spans = li_element.xpath('.//span[@class="dealCycleTxt"]/span/text()')
    data['成交周期'] = first_digits(cycle_spans[1]) if len(cycle_spans) >= 2 else 'NULL'
    data['挂牌价格'] = first_digits(cycle_spans[0]) if len(cycle_spans) >= 2 else 'NULL'

    # 6. Deal date, verbatim.
    data['成交时间'] = first('.//div[@class="dealDate"]/text()', 'NULL')

    # 7. Total deal price, suffixed with its unit (万).
    total_price = first('.//div[@class="totalPrice"]/span/text()', 'NULL')
    data['成交价格'] = f'{total_price}万' if total_price != 'NULL' else 'NULL'

    # 8. Unit price, suffixed with its unit (元/平).
    unit_price = first('.//div[@class="unitPrice"]/span/text()', 'NULL')
    data['均价'] = f'{unit_price}元/平' if unit_price != 'NULL' else 'NULL'

    return data

if __name__ == "__main__":
    browser = init_browser()
    browser.get('https://qy.lianjia.com/')
    print("请在30s内手动登录...")
    time.sleep(30)

    base_url = "https://qy.lianjia.com/chengjiao/"
    csv_file = "成交_清远.csv"

    with open(csv_file, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=DETAIL_FIELDS)
        if f.tell() == 0:  # 检查文件是否为空
            writer.writeheader()
        for page in range(1, 101):  # 爬取1-100页
            page_url = f"{base_url}pg{page}/"
            print(f"正在处理第{page}页: {page_url}")

            try:
                browser.get(page_url)
                WebDriverWait(browser,40).until(EC.presence_of_element_located((By.CLASS_NAME, 'LOGCLICKDATA')))
                time.sleep(10)
                if "人机验证" in browser.page_source:
                    print("检测到验证码，请手动处理...")
                    time.sleep(30)
                html = browser.page_source
                house_li_list = parse_list_page(html)

                for li in house_li_list:
                    house_data = extract_house_data(li)
                    if house_data:  # 确保数据不为None
                        writer.writerow(house_data)
                    else:
                        print('为None，未读取到任何数据')
                # 随机休眠防反爬
                 # 自动滑动屏幕
                scroll_height = browser.execute_script("return document.body.scrollHeight")
                for _ in range(4):
                  # 随机生成一个较小的滚动距离，模拟平滑滚动
                    scroll_distance = random.randint(200, 400)
                    browser.execute_script(f"window.scrollBy(0, {scroll_distance});")
                    time.sleep(random.uniform(1, 3))
            except Exception as e:
                print(f"第{page}页{page_url}爬取失败: {e}")
                continue
    print("数据爬取完成！")
    time.sleep(1000)
    browser.quit()