import json
import logging
import os
import random
import re
import time
from datetime import datetime
from urllib.parse import urljoin, urlparse

import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class Five8Spider:
    """Scraper for 58.com (58同城) rental listings.

    Drives a Chrome browser via Selenium with basic anti-bot-detection
    tweaks, walks listing pages, extracts per-listing fields into dicts
    (accumulated in ``data_list``), and can dump the rows to Excel.
    """

    def __init__(self, headless=True, proxy=None):
        """Create the scraper and immediately launch the browser.

        Args:
            headless: run Chrome without a visible window.
            proxy: optional proxy server string, e.g. "http://host:port".
        """
        self.driver = None
        self.session = requests.Session()
        self.data_list = []  # accumulated listing dicts
        self.setup_headers()
        self.setup_driver(headless, proxy)

    def setup_headers(self):
        """Install browser-like request headers on the requests session."""
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        self.session.headers.update(self.headers)

    def setup_driver(self, headless=True, proxy=None):
        """Configure and launch the Chrome WebDriver.

        Raises:
            Exception: re-raised if driver initialization fails.
        """
        chrome_options = Options()

        if headless:
            chrome_options.add_argument('--headless')

        if proxy:
            chrome_options.add_argument(f'--proxy-server={proxy}')

        # Anti-detection: strip the usual Selenium automation fingerprints.
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')

        try:
            self.driver = webdriver.Chrome(options=chrome_options)
            # Mask navigator.webdriver, which sites probe to detect automation.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
        except Exception as e:
            logging.error(f"Chrome驱动初始化失败: {e}")
            raise

    def random_delay(self, min_delay=1, max_delay=3):
        """Sleep a uniformly random duration in [min_delay, max_delay] seconds."""
        time.sleep(random.uniform(min_delay, max_delay))

    def human_like_behavior(self):
        """Scroll a random amount and pause briefly, to look less bot-like."""
        scroll_height = random.randint(200, 800)
        self.driver.execute_script(f"window.scrollBy(0, {scroll_height});")
        self.random_delay(0.5, 1.5)

    def _safe_text(self, parent, selector, default='未知'):
        """Return stripped text of the first CSS match under parent, or default if absent."""
        try:
            return parent.find_element(By.CSS_SELECTOR, selector).text.strip()
        except NoSuchElementException:
            return default

    def extract_house_info(self, house_element):
        """Extract one listing's fields from its list-page card element.

        Args:
            house_element: a Selenium WebElement for one ".house-cell" card.

        Returns:
            dict of listing fields, or None when extraction fails outright
            (e.g. the mandatory title/link element is missing).
        """
        try:
            info = {}

            # Title and detail-page link are mandatory; their absence aborts this row.
            title_elem = house_element.find_element(By.CSS_SELECTOR, 'h2 a')
            info['标题'] = title_elem.text.strip()
            info['链接'] = title_elem.get_attribute('href')

            # Optional single-element fields fall back to '未知' when missing.
            info['价格'] = self._safe_text(house_element, '.money b')

            # Layout and floor area are parsed out of the free-text ".room" blurb.
            try:
                room_info = house_element.find_element(By.CSS_SELECTOR, '.room').text
                room_match = re.search(r'(\d+)室', room_info)
                info['房型'] = room_match.group(1) + '室' if room_match else '未知'

                area_match = re.search(r'(\d+\.?\d*)\s*㎡', room_info)
                info['面积'] = area_match.group(1) + '㎡' if area_match else '未知'
            except NoSuchElementException:
                info['房型'] = '未知'
                info['面积'] = '未知'

            # District and estate name arrive on separate lines of ".infor".
            try:
                location_text = house_element.find_element(By.CSS_SELECTOR, '.infor').text
                parts = location_text.split('\n')
                if len(parts) > 0:
                    info['区域'] = parts[0].strip()
                if len(parts) > 1:
                    info['小区'] = parts[1].strip()
            except NoSuchElementException:
                info['区域'] = '未知'
                info['小区'] = '未知'

            info['经纪人'] = self._safe_text(house_element, '.jjr')
            info['发布时间'] = self._safe_text(house_element, '.send-time')

            info['爬取时间'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            return info

        except Exception as e:
            logging.error(f"提取房源信息失败: {e}")
            return None

    def crawl_list_page(self, url, max_pages=10):
        """Crawl up to max_pages listing pages starting at url.

        Extracted rows are appended to ``self.data_list``. Stops early when
        no usable "next page" control is found.
        """
        try:
            self.driver.get(url)
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "house-list"))
            )

            page_count = 0
            while page_count < max_pages:
                logging.info(f"正在爬取第 {page_count + 1} 页...")

                # Wait until at least one listing card has rendered.
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, "house-cell"))
                )

                self.human_like_behavior()

                house_elements = self.driver.find_elements(By.CLASS_NAME, "house-cell")

                for house_element in house_elements:
                    house_info = self.extract_house_info(house_element)
                    if house_info:
                        self.data_list.append(house_info)
                        logging.info(f"成功提取房源: {house_info['标题'][:20]}...")

                # Advance to the next page; any failure here ends pagination.
                try:
                    next_page = self.driver.find_element(By.CSS_SELECTOR, '.next')
                    if next_page.is_enabled():
                        next_page.click()
                        page_count += 1
                        self.random_delay(2, 4)
                    else:
                        logging.info("已到达最后一页")
                        break
                except Exception:
                    logging.info("无法找到下一页或已是最后一页")
                    break

        except Exception as e:
            logging.error(f"爬取列表页失败: {e}")

    def get_detailed_info(self, url):
        """Fetch a listing's detail page and return extra fields (optional step).

        Returns:
            dict with '详细描述' and '配套设施' keys, or {} on page-level failure.
        """
        try:
            self.driver.get(url)
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "house-detail-desc"))
            )

            detailed_info = {}

            # Free-text description block.
            try:
                desc_elem = self.driver.find_element(By.CLASS_NAME, "house-word-introduce")
                detailed_info['详细描述'] = desc_elem.text.strip()
            except NoSuchElementException:
                detailed_info['详细描述'] = '未知'

            # Amenity list, joined into one pipe-separated string.
            try:
                facility_elems = self.driver.find_elements(By.CSS_SELECTOR, '.house-disposal li')
                facilities = [elem.text.strip() for elem in facility_elems]
                detailed_info['配套设施'] = ' | '.join(facilities)
            except NoSuchElementException:
                detailed_info['配套设施'] = '未知'

            return detailed_info

        except Exception as e:
            logging.error(f"获取详细信息失败: {e}")
            return {}

    def save_to_excel(self, filename=None):
        """Write collected rows to an Excel file.

        Args:
            filename: output path; a timestamped default is used when falsy.

        Returns:
            The filename written, or None when there is no data.
        """
        if not filename:
            filename = f"58同城房源数据_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"

        if self.data_list:
            df = pd.DataFrame(self.data_list)
            df.to_excel(filename, index=False, engine='openpyxl')
            # BUG FIX: previously logged a literal placeholder instead of the path.
            logging.info(f"数据已保存到: {filename}")
            return filename
        else:
            logging.warning("没有数据可保存")
            return None

    def close(self):
        """Quit the browser if it was started."""
        if self.driver:
            self.driver.quit()


def main():
    """Entry point: scrape Guangzhou rental listings and save them to Excel."""
    start_url = "https://gz.58.com/zufang/"  # Guangzhou rentals listing page
    max_pages = 2  # maximum number of list pages to crawl

    spider = None
    try:
        # Non-headless so the run can be observed (and any CAPTCHA handled manually).
        spider = Five8Spider(headless=False)

        logging.info("开始爬取58同城房源数据...")
        spider.crawl_list_page(start_url, max_pages)

        filename = spider.save_to_excel()

        if filename:
            logging.info(f"爬取完成！共获取 {len(spider.data_list)} 条房源信息")
            # BUG FIX: previously logged a literal placeholder instead of the path.
            logging.info(f"数据文件: {filename}")

    except Exception as e:
        logging.error(f"爬虫运行出错: {e}")
    finally:
        # Always release the browser, even after a failure mid-run.
        if spider:
            spider.close()


if __name__ == "__main__":
    main()