import requests
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.service import Service
import time
import random
import re
import os
from datetime import datetime
import logging
from urllib.parse import urlencode, quote

# Application-wide logging: INFO level, mirrored to a UTF-8 log file and stdout.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(
    level=logging.INFO,
    format=_LOG_FORMAT,
    handlers=[
        logging.FileHandler('house_crawler.log', encoding='utf-8'),
        logging.StreamHandler(),
    ],
)


class Five8Spider:
    """Selenium-driven crawler for 58.com (58同城) rental-housing listings.

    Scraped listings accumulate in ``data_list`` as dicts keyed by the
    Chinese column names used in the exported Excel file.
    """

    def __init__(self, headless=False, proxy=None, driver_path=None):
        """Create the crawler and immediately start a Chrome driver.

        Args:
            headless: run Chrome without a visible window.
            proxy: optional ``host:port`` proxy server passed to Chrome.
            driver_path: optional explicit path to a chromedriver binary.
        """
        self.driver = None
        self.session = requests.Session()
        self.data_list = []  # accumulated listing dicts (Chinese column keys)
        self.setup_headers()
        self.setup_driver(headless, proxy, driver_path)

    def setup_headers(self):
        """Install browser-like default headers on the requests session."""
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        self.session.headers.update(self.headers)

    def setup_driver(self, headless=False, proxy=None, driver_path=None):
        """Configure and launch ChromeDriver with automation checks disabled.

        Falls back to ``setup_driver_fallback`` when the primary start fails.
        """
        chrome_options = Options()

        # Disable auto-update checks, logging noise, and features that
        # either slow startup or advertise that the browser is automated.
        for flag in (
            '--disable-extensions',
            '--disable-browser-side-navigation',
            '--disable-gpu',
            '--no-sandbox',
            '--disable-dev-shm-usage',
            '--disable-infobars',
            '--disable-blink-features=AutomationControlled',
            '--log-level=3',
            '--disable-logging',
            '--silent',
            '--disable-background-networking',
            '--disable-default-apps',
            '--disable-sync',
            '--disable-translate',
            '--disable-client-side-phishing-detection',
            '--disable-software-rasterizer',
            '--disable-web-security',
            '--allow-running-insecure-content',
            '--disable-notifications',
            '--disable-popup-blocking',
            '--ignore-certificate-errors',
            '--ignore-ssl-errors',
        ):
            chrome_options.add_argument(flag)

        # Hide the "controlled by automated software" banner and Selenium's
        # extra command-line switches.
        chrome_options.add_experimental_option("excludeSwitches", [
            "enable-automation",
            "enable-logging",
            "ignore-certificate-errors",
            "test-type"
        ])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        if headless:
            chrome_options.add_argument('--headless')

        if proxy:
            chrome_options.add_argument(f'--proxy-server={proxy}')

        chrome_options.add_argument('--window-size=1920,1080')
        chrome_options.add_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')

        try:
            # Primary method: explicit Service with the console window hidden.
            service = Service()
            service.creationflags = 0x08000000  # CREATE_NO_WINDOW (Windows only)

            # Honour an explicitly supplied driver path when it exists.
            if driver_path and os.path.exists(driver_path):
                service = Service(executable_path=driver_path)

            # Zero out Selenium Manager's session timeouts so startup does
            # not stall on network/update checks.
            os.environ['SE_SESSION_REQUEST_TIMEOUT'] = '0'
            os.environ['SE_SESSION_REQUEST_RETRY_TIMEOUT'] = '0'
            os.environ['SE_NODE_SESSION_TIMEOUT'] = '0'

            self.driver = webdriver.Chrome(service=service, options=chrome_options)
            # Mask navigator.webdriver, which sites probe to detect Selenium.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            logging.info("ChromeDriver初始化成功")

        except Exception as e:
            logging.error(f"Chrome驱动初始化失败: {e}")
            # Try the fallback startup strategies before giving up.
            self.setup_driver_fallback(chrome_options)

    def setup_driver_fallback(self, chrome_options):
        """Fallback driver startup: probe well-known chromedriver locations,
        then let Selenium resolve a driver itself.

        Raises:
            Exception: re-raised when every startup strategy fails.
        """
        try:
            # Candidate chromedriver locations, checked in order.
            possible_paths = [
                'chromedriver.exe',
                'chromedriver',
                r'C:\chromedriver\chromedriver.exe',
                os.path.join(os.path.dirname(__file__), 'chromedriver.exe'),
                os.path.join(os.path.dirname(__file__), 'chromedriver')
            ]

            for path in possible_paths:
                if os.path.exists(path):
                    service = Service(executable_path=path)
                    self.driver = webdriver.Chrome(service=service, options=chrome_options)
                    self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
                    logging.info(f"使用备用驱动: {path}")
                    return

            # Last resort: default driver resolution via Selenium Manager.
            self.driver = webdriver.Chrome(options=chrome_options)
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            logging.info("使用默认驱动初始化成功")

        except Exception as e:
            logging.error(f"所有驱动初始化方法都失败: {e}")
            raise

    def random_delay(self, min_delay=1, max_delay=3):
        """Sleep a random interval to mimic human pacing between actions."""
        time.sleep(random.uniform(min_delay, max_delay))

    def human_like_behavior(self):
        """Scroll the page down by a random amount, then pause briefly."""
        scroll_height = random.randint(200, 800)
        self.driver.execute_script(f"window.scrollBy(0, {scroll_height});")
        self.random_delay(0.5, 1.5)

    def build_search_url(self, city='gz', district=None, price_min=None, price_max=None,
                         room_type=None, area_min=None, area_max=None, keywords=None):
        """Build a 58.com rental-search URL from the given filters.

        Args:
            city: 58.com city subdomain (e.g. ``'gz'`` for Guangzhou).
            district: optional district path segment.
            price_min/price_max: monthly-rent bounds (yuan); an open bound
                defaults to 0 / 99999.
            room_type: room count as a string ``'1'``-``'5'``.
            area_min/area_max: floor-area bounds (㎡); an open bound
                defaults to 0 / 999.
            keywords: free-text search keyword.

        Returns:
            The fully encoded search URL string.
        """
        if district:
            base_url = f"https://{city}.58.com/{district}/zufang/"
        else:
            base_url = f"https://{city}.58.com/zufang/"

        params = {}

        # Price filter: encoded as "min_max" with open bounds filled in.
        if price_min is not None or price_max is not None:
            price_min_val = price_min if price_min is not None else 0
            price_max_val = price_max if price_max is not None else 99999
            params['minprice'] = f"{price_min_val}_{price_max_val}"

        # Room-count filter; only the values '1'-'5' are recognised.
        if room_type and room_type in {'1', '2', '3', '4', '5'}:
            params['room'] = room_type

        # Area filter: encoded as "min_max" with integral bounds.
        if area_min is not None or area_max is not None:
            area_min_val = area_min if area_min is not None else 0
            area_max_val = area_max if area_max is not None else 999
            params['area'] = f"{int(area_min_val)}_{int(area_max_val)}"

        if keywords:
            params['key'] = keywords

        # Constant source marker expected by the site; params is therefore
        # never empty, so the query string is always appended.
        params['sourcetype'] = '5'

        query_string = urlencode(params)
        return f"{base_url}?{query_string}"

    def wait_for_page_load(self, timeout=10):
        """Block until ``document.readyState`` is 'complete' (best effort)."""
        try:
            WebDriverWait(self.driver, timeout).until(
                lambda driver: driver.execute_script("return document.readyState") == "complete"
            )
        except Exception:
            # Timeout is tolerated: callers proceed with a partial page.
            pass

    def extract_house_info(self, house_element):
        """Extract one listing's fields from its list-item WebElement.

        Any field whose element cannot be located falls back to '未知'
        ("unknown") so a partially rendered card still yields a usable row.

        Returns:
            dict of listing fields, or None on unexpected failure.
        """
        try:
            info = {}

            # Title and detail-page link.
            try:
                title_elem = house_element.find_element(By.CSS_SELECTOR, 'h2 a, .title a')
                info['标题'] = title_elem.text.strip() if title_elem.text.strip() else '未知'
                info['链接'] = title_elem.get_attribute('href')
            except Exception:
                info['标题'] = '未知'
                info['链接'] = '未知'

            # Monthly rent; normalise to carry a unit suffix.
            try:
                price_elem = house_element.find_element(By.CSS_SELECTOR, '.money b, .price b')
                price_text = price_elem.text.strip()
                if price_text and '元' not in price_text:
                    price_text += '元/月'
                info['价格'] = price_text
            except Exception:
                info['价格'] = '未知'

            # Room layout and floor area, parsed from one combined string.
            try:
                room_elem = house_element.find_element(By.CSS_SELECTOR, '.room, .house-room')
                room_text = room_elem.text

                room_match = re.search(r'(\d+)室', room_text)
                info['房型'] = room_match.group(1) + '室' if room_match else '未知'

                area_match = re.search(r'(\d+\.?\d*)\s*㎡', room_text)
                info['面积'] = area_match.group(1) + '㎡' if area_match else '未知'
            except Exception:
                info['房型'] = '未知'
                info['面积'] = '未知'

            # Location block: first line is the district, second the estate.
            try:
                location_elem = house_element.find_element(By.CSS_SELECTOR, '.infor, .location')
                location_text = location_elem.text
                parts = [part.strip() for part in location_text.split('\n') if part.strip()]

                info['区域'] = parts[0] if parts else '未知'
                info['小区'] = parts[1] if len(parts) > 1 else '未知'
            except Exception:
                info['区域'] = '未知'
                info['小区'] = '未知'

            info['爬取时间'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            return info

        except Exception as e:
            logging.error(f"提取房源信息失败: {e}")
            return None

    def crawl_page(self, url, max_pages=2):
        """Crawl up to ``max_pages`` result pages starting from ``url``.

        Extracted listings are appended to ``self.data_list``.
        """
        try:
            logging.info(f"访问URL: {url}")
            self.driver.get(url)
            self.wait_for_page_load(10)

            page_count = 0
            while page_count < max_pages:
                logging.info(f"正在爬取第 {page_count + 1} 页...")

                # Wait for the listing container; continue on timeout since
                # the page may still be partially usable.
                try:
                    WebDriverWait(self.driver, 10).until(
                        EC.presence_of_element_located((By.CLASS_NAME, "house-list"))
                    )
                except Exception:
                    logging.warning("页面加载超时，尝试继续...")

                # Scroll a bit to look human and trigger lazy loading.
                self.human_like_behavior()

                house_elements = self.driver.find_elements(By.CLASS_NAME, "house-cell")

                if not house_elements:
                    logging.warning("未找到房源元素")
                    break

                current_count = 0
                for house_element in house_elements:
                    try:
                        house_info = self.extract_house_info(house_element)
                        # Skip cards whose title could not be located.
                        if house_info and house_info['标题'] != '未知':
                            self.data_list.append(house_info)
                            current_count += 1
                    except Exception as e:
                        logging.warning(f"提取房源信息时出错: {e}")
                        continue

                logging.info(f"本页提取 {current_count} 个房源")

                # Advance via the "next page" link; stop when absent/disabled.
                try:
                    next_btn = self.driver.find_element(By.CSS_SELECTOR, 'a.next')
                    if next_btn.is_enabled():
                        next_btn.click()
                        page_count += 1
                        self.random_delay(2, 4)
                        self.wait_for_page_load(10)
                        logging.info("成功翻到下一页")
                    else:
                        logging.info("已到达最后一页")
                        break
                except Exception as e:
                    logging.info(f"翻页失败: {e}")
                    break

        except Exception as e:
            logging.error(f"爬取页面失败: {e}")

    def crawl_with_filters(self, city='gz', max_pages=2, district=None, price_min=None,
                           price_max=None, room_type=None, area_min=None, area_max=None, keywords=None):
        """Build a search URL from the given filters and crawl it."""
        search_url = self.build_search_url(
            city=city,
            district=district,
            price_min=price_min,
            price_max=price_max,
            room_type=room_type,
            area_min=area_min,
            area_max=area_max,
            keywords=keywords
        )

        self.crawl_page(search_url, max_pages)

    def save_to_excel(self, filename=None):
        """Write collected listings to an Excel file.

        Args:
            filename: output path; defaults to a timestamped name.

        Returns:
            The filename written, or None when there is no data.
        """
        if not filename:
            filename = f"58同城房源数据_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"

        if self.data_list:
            df = pd.DataFrame(self.data_list)
            df.to_excel(filename, index=False, engine='openpyxl')
            # Fix: the original logged a literal placeholder here instead of
            # the actual output filename.
            logging.info(f"数据已保存到: {filename}")

            self.show_statistics()
            return filename
        else:
            logging.warning("没有数据可保存")
            return None

    def show_statistics(self):
        """Log summary statistics (listing count and price aggregates)."""
        if not self.data_list:
            return

        logging.info("=" * 50)
        logging.info("爬取统计信息:")
        logging.info(f"总房源数量: {len(self.data_list)}")

        # Collect the leading integer from each known price string.
        prices = []
        for info in self.data_list:
            if info['价格'] != '未知':
                try:
                    price_num = int(re.search(r'(\d+)', info['价格']).group(1))
                    prices.append(price_num)
                except (AttributeError, ValueError):
                    # No digits found, or not parseable as int: skip it.
                    pass

        if prices:
            logging.info(f"平均价格: {sum(prices) / len(prices):.0f} 元/月")
            logging.info(f"最高价格: {max(prices)} 元/月")
            logging.info(f"最低价格: {min(prices)} 元/月")

        logging.info("=" * 50)

    def close(self):
        """Quit the browser if one was started."""
        if self.driver:
            self.driver.quit()
            logging.info("浏览器已关闭")


def get_user_input():
    """Interactively collect crawl filters from stdin.

    A blank answer skips the corresponding filter. Numeric answers that
    fail to parse are skipped with a warning instead of raising
    ValueError (the original crashed on any non-numeric input).

    Returns:
        dict with optional keys district, price_min, price_max, room_type,
        area_min, area_max, keywords, plus max_pages (default 2).
    """

    def _ask_number(prompt, caster):
        # Read one number; None means "skipped" (blank or unparseable).
        raw = input(prompt).strip()
        if not raw:
            return None
        try:
            return caster(raw)
        except ValueError:
            print(f"输入无效，已跳过: {raw}")
            return None

    print("=" * 60)
    print("58同城房源爬虫配置")
    print("=" * 60)

    filters = {}

    # Guangzhou district slugs as used in 58.com URL paths.
    districts = {
        '1': 'tianhe', '2': 'yuexiu', '3': 'liwan', '4': 'haizhu', '5': 'panyu',
        '6': 'baiyun', '7': 'huangpu', '8': 'huadu', '9': 'nansha', '10': 'conghua',
        '11': 'zengcheng'
    }

    print("\n区域选择:")
    for key, value in districts.items():
        print(f"{key}. {value}")

    district_choice = input("请选择区域编号(直接回车跳过): ")
    if district_choice in districts:
        filters['district'] = districts[district_choice]

    # Monthly-rent range.
    price_min = _ask_number("最低价格(元/月, 直接回车跳过): ", int)
    price_max = _ask_number("最高价格(元/月, 直接回车跳过): ", int)
    if price_min is not None:
        filters['price_min'] = price_min
    if price_max is not None:
        filters['price_max'] = price_max

    # Room count.
    print("\n房型选择:")
    print("1. 一室  2. 两室  3. 三室  4. 四室  5. 五室")
    room_type = input("请选择房型编号(直接回车跳过): ")
    if room_type in ['1', '2', '3', '4', '5']:
        filters['room_type'] = room_type

    # Floor-area range.
    area_min = _ask_number("最小面积(㎡, 直接回车跳过): ", float)
    area_max = _ask_number("最大面积(㎡, 直接回车跳过): ", float)
    if area_min is not None:
        filters['area_min'] = area_min
    if area_max is not None:
        filters['area_max'] = area_max

    # Free-text keyword.
    keywords = input("关键词搜索(小区/商圈/地铁, 直接回车跳过): ")
    if keywords:
        filters['keywords'] = keywords

    # Page count, defaulting to 2.
    max_pages = _ask_number("爬取页数(默认2页): ", int)
    filters['max_pages'] = max_pages if max_pages is not None else 2

    return filters


def main():
    """Entry point: collect filters, run the crawl, export to Excel."""
    print("=" * 60)
    print("58同城房源爬虫")
    print("=" * 60)

    # Interactive filter configuration.
    filters = get_user_input()

    spider = None
    try:
        # Launch the browser-backed crawler (visible window).
        spider = Five8Spider(headless=False)

        logging.info("开始爬取58同城房源数据...")
        spider.crawl_with_filters(
            city='gz',
            max_pages=filters.get('max_pages', 2),
            district=filters.get('district'),
            price_min=filters.get('price_min'),
            price_max=filters.get('price_max'),
            room_type=filters.get('room_type'),
            area_min=filters.get('area_min'),
            area_max=filters.get('area_max'),
            keywords=filters.get('keywords')
        )

        # Export results; returns None when nothing was collected.
        filename = spider.save_to_excel()

        if filename:
            logging.info(f"爬取完成！共获取 {len(spider.data_list)} 条房源信息")
            # Fix: the original logged a literal placeholder instead of
            # the actual output filename.
            logging.info(f"数据文件: {filename}")

    except Exception as e:
        logging.error(f"程序运行出错: {e}")
    finally:
        # Always release the browser, even after a crash mid-crawl.
        if spider:
            spider.close()


if __name__ == "__main__":
    main()