import json
import logging
import os
import random
import re
import time
from datetime import datetime
from urllib.parse import urljoin, urlparse, urlencode, quote

import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class Five8Spider:
    """Scraper for 58.com (58同城) rental listings.

    Drives a Chrome browser through Selenium to open the listing page,
    apply search filters, page through results, extract per-listing fields
    into ``data_list`` and export them to an Excel file.
    """

    def __init__(self, headless=True, proxy=None):
        """
        :param headless: run Chrome without a visible window
        :param proxy: optional proxy server address, e.g. 'http://host:port'
        """
        self.driver = None
        self.session = requests.Session()
        self.data_list = []
        self.setup_headers()
        self.setup_driver(headless, proxy)

    def setup_headers(self):
        """Attach browser-like request headers to the requests session."""
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        self.session.headers.update(self.headers)

    def setup_driver(self, headless=True, proxy=None):
        """Create and configure the Chrome webdriver.

        :param headless: run without a visible window
        :param proxy: optional proxy server passed to Chrome
        :raises Exception: re-raised when the driver fails to start
        """
        chrome_options = Options()

        if headless:
            chrome_options.add_argument('--headless')

        if proxy:
            chrome_options.add_argument(f'--proxy-server={proxy}')

        # Anti-detection configuration: hide the usual Selenium fingerprints.
        chrome_options.add_argument('--disable-blink-features=AutomationControlled')
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--window-size=1920,1080')

        try:
            self.driver = webdriver.Chrome(options=chrome_options)
            # Mask navigator.webdriver, which sites inspect to detect automation.
            self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
        except Exception as e:
            logging.error(f"Chrome驱动初始化失败: {e}")
            raise

    def random_delay(self, min_delay=1, max_delay=3):
        """Sleep for a random interval to look less like a bot."""
        time.sleep(random.uniform(min_delay, max_delay))

    def human_like_behavior(self):
        """Simulate human browsing: random scroll plus a mouse jiggle."""
        scroll_height = random.randint(200, 800)
        self.driver.execute_script(f"window.scrollBy(0, {scroll_height});")
        self.random_delay(0.5, 1.5)

        # Best-effort mouse movement; the offset may land outside the
        # viewport, in which case Selenium raises and we skip the gesture.
        # (Fix: ActionChains was previously referenced without being
        # imported, so this branch always failed silently.)
        try:
            actions = ActionChains(self.driver)
            x_offset = random.randint(-100, 100)
            y_offset = random.randint(-100, 100)
            actions.move_by_offset(x_offset, y_offset).perform()
        except Exception:
            pass

    def build_search_url(self, city='gz', district=None, price_min=None, price_max=None,
                         room_type=None, area_min=None, area_max=None, keywords=None):
        """
        Build a search URL for the rental listing page.

        :param city: city pinyin abbreviation, e.g. 'gz' for Guangzhou
        :param district: district slug, e.g. 'tianhe'
        :param price_min: minimum monthly rent
        :param price_max: maximum monthly rent
        :param room_type: bedroom count as a string, '1' = one room, '2' = two rooms
        :param area_min: minimum floor area
        :param area_max: maximum floor area
        :param keywords: free-text search keywords
        :return: the assembled URL
        """
        base_url = f"https://{city}.58.com/zufang/"
        params = {}

        if district:
            params['district'] = district

        # `is not None` instead of truthiness so an explicit 0 is honoured.
        if price_min is not None or price_max is not None:
            price_params = []
            if price_min is not None:
                price_params.append(f"{price_min}")
            if price_max is not None:
                price_params.append(f"{price_max}")
            params['price'] = '_'.join(price_params)

        if room_type:
            params['room'] = room_type

        if area_min is not None or area_max is not None:
            area_params = []
            if area_min is not None:
                area_params.append(f"{area_min}")
            if area_max is not None:
                area_params.append(f"{area_max}")
            params['area'] = '_'.join(area_params)

        if keywords:
            # Pass the raw string: urlencode() percent-encodes it once.
            # (The original pre-quoted with quote(), which double-encoded.)
            params['key'] = keywords

        if params:
            query_string = urlencode(params)
            return f"{base_url}?{query_string}"
        return base_url

    def apply_filters(self, district=None, price_min=None, price_max=None,
                      room_type=None, area_min=None, area_max=None, keywords=None):
        """
        Apply filter conditions through the page's own filter widgets.

        Each filter is best-effort: if a control cannot be found or clicked,
        a warning is logged and the remaining filters are still attempted.
        """
        try:
            # District filter
            if district:
                try:
                    district_btn = self.driver.find_element(By.XPATH, f"//span[contains(text(), '{district}')]")
                    district_btn.click()
                    self.random_delay(2, 3)
                except Exception:
                    logging.warning(f"未找到区域: {district}")

            # Price filter: open the dropdown, fill min/max, confirm.
            if price_min or price_max:
                try:
                    price_dropdown = self.driver.find_element(By.XPATH, "//span[contains(text(), '租金')]")
                    price_dropdown.click()
                    self.random_delay(1, 2)

                    if price_min:
                        min_input = self.driver.find_element(By.XPATH, "//input[@placeholder='最低租金']")
                        min_input.clear()
                        min_input.send_keys(str(price_min))

                    if price_max:
                        max_input = self.driver.find_element(By.XPATH, "//input[@placeholder='最高租金']")
                        max_input.clear()
                        max_input.send_keys(str(price_max))

                    # Confirm the price range
                    confirm_btn = self.driver.find_element(By.XPATH, "//a[contains(text(), '确定')]")
                    confirm_btn.click()
                    self.random_delay(2, 3)
                except Exception:
                    logging.warning("价格筛选失败")

            # Room-type filter
            if room_type:
                try:
                    room_xpath_map = {
                        '1': "//span[contains(text(), '一室')]",
                        '2': "//span[contains(text(), '两室')]",
                        '3': "//span[contains(text(), '三室')]",
                        '4': "//span[contains(text(), '四室')]",
                        '5': "//span[contains(text(), '五室')]",
                    }
                    if room_type in room_xpath_map:
                        room_btn = self.driver.find_element(By.XPATH, room_xpath_map[room_type])
                        room_btn.click()
                        self.random_delay(2, 3)
                except Exception:
                    logging.warning(f"房型筛选失败: {room_type}")

            # Keyword search box
            if keywords:
                try:
                    search_input = self.driver.find_element(By.XPATH, "//input[@placeholder='请输入小区/商圈/地铁']")
                    search_input.clear()
                    search_input.send_keys(keywords)
                    search_input.send_keys(Keys.ENTER)
                    self.random_delay(2, 3)
                except Exception:
                    logging.warning("关键词搜索失败")

        except Exception as e:
            logging.error(f"应用筛选条件失败: {e}")

    def extract_house_info(self, house_element):
        """Extract the fields of one listing element.

        :param house_element: a Selenium WebElement for one `.house-cell`
        :return: dict of listing fields, or None on failure.
                 Missing optional fields default to '未知'.
        """
        try:
            info = {}

            # Title and detail-page link (mandatory: failure aborts the listing).
            title_elem = house_element.find_element(By.CSS_SELECTOR, 'h2 a')
            info['标题'] = title_elem.text.strip()
            info['链接'] = title_elem.get_attribute('href')

            # Monthly rent
            try:
                price_elem = house_element.find_element(By.CSS_SELECTOR, '.money b')
                info['价格'] = price_elem.text.strip() + '元/月'
            except Exception:
                info['价格'] = '未知'

            # Room layout and floor area, parsed out of the free-text blob.
            try:
                room_info = house_element.find_element(By.CSS_SELECTOR, '.room').text
                room_match = re.search(r'(\d+)室', room_info)
                info['房型'] = room_match.group(1) + '室' if room_match else '未知'

                area_match = re.search(r'(\d+\.?\d*)\s*㎡', room_info)
                info['面积'] = area_match.group(1) + '㎡' if area_match else '未知'
            except Exception:
                info['房型'] = '未知'
                info['面积'] = '未知'

            # Location: first line is the district, second the estate name.
            try:
                location_elem = house_element.find_element(By.CSS_SELECTOR, '.infor')
                location_text = location_elem.text
                parts = location_text.split('\n')
                if len(parts) > 0:
                    info['区域'] = parts[0].strip()
                if len(parts) > 1:
                    info['小区'] = parts[1].strip()

                # Nearest-metro blurb, when present
                try:
                    metro_elem = house_element.find_element(By.CSS_SELECTOR, 'em')
                    info['地铁'] = metro_elem.text.strip() if metro_elem else '未知'
                except Exception:
                    info['地铁'] = '未知'
            except Exception:
                info['区域'] = '未知'
                info['小区'] = '未知'
                info['地铁'] = '未知'

            # Listing agent
            try:
                agent_elem = house_element.find_element(By.CSS_SELECTOR, '.jjr')
                info['经纪人'] = agent_elem.text.strip()
            except Exception:
                info['经纪人'] = '未知'

            # Publication time
            try:
                time_elem = house_element.find_element(By.CSS_SELECTOR, '.send-time')
                info['发布时间'] = time_elem.text.strip()
            except Exception:
                info['发布时间'] = '未知'

            info['爬取时间'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            return info

        except Exception as e:
            logging.error(f"提取房源信息失败: {e}")
            return None

    def crawl_list_page(self, base_url, max_pages=10, district=None, price_min=None,
                        price_max=None, room_type=None, area_min=None, area_max=None, keywords=None):
        """Crawl up to ``max_pages`` listing pages, appending matches to ``data_list``.

        :param base_url: kept for interface compatibility; the actual URL is
                         rebuilt by build_search_url from the filter arguments
        :param max_pages: maximum number of result pages to visit
        """
        try:
            # Build a parameterized URL (defaults to Guangzhou).
            search_url = self.build_search_url(
                city='gz',  # 默认广州，可根据需要修改
                district=district,
                price_min=price_min,
                price_max=price_max,
                room_type=room_type,
                area_min=area_min,
                area_max=area_max,
                keywords=keywords
            )

            logging.info(f"访问URL: {search_url}")
            self.driver.get(search_url)

            # Wait for the result list container to appear.
            WebDriverWait(self.driver, 15).until(
                EC.presence_of_element_located((By.CLASS_NAME, "house-list"))
            )

            # Apply the remaining filters through the on-page widgets.
            self.apply_filters(district, price_min, price_max, room_type, area_min, area_max, keywords)

            page_count = 0
            while page_count < max_pages:
                logging.info(f"正在爬取第 {page_count + 1} 页...")

                # Wait for the listing cells to load.
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, "house-cell"))
                )

                # Scroll/move like a human before scraping.
                self.human_like_behavior()

                house_elements = self.driver.find_elements(By.CLASS_NAME, "house-cell")

                for house_element in house_elements:
                    house_info = self.extract_house_info(house_element)
                    if house_info:
                        # Client-side re-check, since on-page filters are best-effort.
                        if self.check_filter_conditions(house_info, price_min, price_max, room_type, area_min,
                                                        area_max):
                            self.data_list.append(house_info)
                            logging.info(f"成功提取房源: {house_info['标题'][:20]}...")

                # Advance to the next page, stopping at the last one.
                try:
                    next_page = self.driver.find_element(By.CSS_SELECTOR, '.next')
                    if next_page.is_enabled():
                        next_page.click()
                        page_count += 1
                        self.random_delay(2, 4)

                        # Wait for the new page's cells to load.
                        WebDriverWait(self.driver, 10).until(
                            EC.presence_of_element_located((By.CLASS_NAME, "house-cell"))
                        )
                    else:
                        logging.info("已到达最后一页")
                        break
                except Exception as e:
                    logging.info(f"无法找到下一页或已是最后一页: {e}")
                    break

        except Exception as e:
            logging.error(f"爬取列表页失败: {e}")

    def check_filter_conditions(self, house_info, price_min, price_max, room_type, area_min, area_max):
        """Return True when a listing satisfies the numeric/room filters.

        Unparseable fields (e.g. '未知') are treated as matching, so a
        listing is only rejected on positive evidence.
        """
        # Price range ('is not None' so an explicit bound of 0 is honoured)
        if price_min is not None or price_max is not None:
            try:
                price = int(house_info['价格'].replace('元/月', '').strip())
                if price_min is not None and price < price_min:
                    return False
                if price_max is not None and price > price_max:
                    return False
            except (KeyError, ValueError):
                pass

        # Room count: compare the leading digit of e.g. '2室'.
        if room_type:
            try:
                current_rooms = house_info['房型'][0]
                if current_rooms != room_type:
                    return False
            except (KeyError, IndexError):
                pass

        # Floor-area range
        if area_min is not None or area_max is not None:
            try:
                area = float(house_info['面积'].replace('㎡', '').strip())
                if area_min is not None and area < area_min:
                    return False
                if area_max is not None and area > area_max:
                    return False
            except (KeyError, ValueError):
                pass

        return True

    def save_to_excel(self, filename=None):
        """Write ``data_list`` to an Excel file.

        :param filename: output path; defaults to a timestamped name
        :return: the filename written, or None when there is no data
        """
        if not filename:
            filename = f"58同城房源数据_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"

        if self.data_list:
            df = pd.DataFrame(self.data_list)

            # Put the known columns first, in a fixed order; keep any extras after.
            column_order = ['标题', '价格', '房型', '面积', '区域', '小区', '地铁', '经纪人', '发布时间', '爬取时间',
                            '链接']
            existing_columns = [col for col in column_order if col in df.columns]
            df = df[existing_columns + [col for col in df.columns if col not in column_order]]

            df.to_excel(filename, index=False, engine='openpyxl')
            # Bug fix: previously logged a literal placeholder instead of the path.
            logging.info(f"数据已保存到: {filename}")
            return filename
        else:
            logging.warning("没有数据可保存")
            return None

    def close(self):
        """Quit the browser if it was started."""
        if self.driver:
            self.driver.quit()


def get_user_input():
    """Interactively collect scraping filter options from stdin.

    A blank answer skips the corresponding filter. Malformed numeric
    input is ignored with a message instead of crashing the program
    (the original raised ValueError on e.g. a stray non-digit).

    :return: dict of filter keyword arguments for crawl_list_page
    """
    def _parse_number(text, conv):
        # Best-effort conversion: None for blank or invalid input.
        text = text.strip()
        if not text:
            return None
        try:
            return conv(text)
        except ValueError:
            print(f"无效的数字输入: {text}，已跳过该条件")
            return None

    print("=" * 50)
    print("58同城房源爬虫配置")
    print("=" * 50)

    filters = {}

    # District menu: choice number -> pinyin slug used in the URL.
    districts = {
        '1': 'tianhe', '2': 'yuexiu', '3': 'liwan', '4': 'haizhu', '5': 'panyu',
        '6': 'baiyun', '7': 'huangpu', '8': 'huadu', '9': 'nansha', '10': 'conghua',
        '11': 'zengcheng'
    }

    print("\n区域选择:")
    for key, value in districts.items():
        print(f"{key}. {value}")
    district_choice = input("请选择区域编号(直接回车跳过): ")
    if district_choice in districts:
        filters['district'] = districts[district_choice]

    # Price range
    price_min = _parse_number(input("最低价格(元/月, 直接回车跳过): "), int)
    price_max = _parse_number(input("最高价格(元/月, 直接回车跳过): "), int)
    if price_min is not None:
        filters['price_min'] = price_min
    if price_max is not None:
        filters['price_max'] = price_max

    # Room type
    print("\n房型选择:")
    print("1. 一室  2. 两室  3. 三室  4. 四室  5. 五室")
    room_type = input("请选择房型编号(直接回车跳过): ")
    if room_type in ['1', '2', '3', '4', '5']:
        filters['room_type'] = room_type

    # Floor-area range
    area_min = _parse_number(input("最小面积(㎡, 直接回车跳过): "), float)
    area_max = _parse_number(input("最大面积(㎡, 直接回车跳过): "), float)
    if area_min is not None:
        filters['area_min'] = area_min
    if area_max is not None:
        filters['area_max'] = area_max

    # Keyword search
    keywords = input("关键词搜索(小区/商圈/地铁, 直接回车跳过): ")
    if keywords:
        filters['keywords'] = keywords

    # Page count, defaulting to 10
    max_pages = _parse_number(input("爬取页数(默认10页): "), int)
    filters['max_pages'] = max_pages if max_pages is not None else 10

    return filters


def main():
    """Entry point: collect filters, run the spider, save and summarise results."""
    # Gather the filter configuration interactively.
    filters = get_user_input()

    spider = None
    try:
        # headless=False keeps the browser visible for debugging/observation.
        spider = Five8Spider(headless=False)

        logging.info("开始爬取58同城房源数据...")
        spider.crawl_list_page(
            base_url="https://gz.58.com/zufang/",
            max_pages=filters.get('max_pages', 10),
            district=filters.get('district'),
            price_min=filters.get('price_min'),
            price_max=filters.get('price_max'),
            room_type=filters.get('room_type'),
            area_min=filters.get('area_min'),
            area_max=filters.get('area_max'),
            keywords=filters.get('keywords')
        )

        # Persist whatever was collected.
        filename = spider.save_to_excel()

        if filename:
            logging.info(f"爬取完成！共获取 {len(spider.data_list)} 条房源信息")
            # Bug fix: previously logged a literal placeholder instead of the path.
            logging.info(f"数据文件: {filename}")

            # Price summary over listings with a known price.
            if spider.data_list:
                prices = [int(info['价格'].replace('元/月', '').strip()) for info in spider.data_list if
                          info['价格'] != '未知']
                if prices:
                    avg_price = sum(prices) / len(prices)
                    logging.info(f"平均价格: {avg_price:.2f} 元/月")
                    logging.info(f"最高价格: {max(prices)} 元/月")
                    logging.info(f"最低价格: {min(prices)} 元/月")

    except Exception as e:
        logging.error(f"爬虫运行出错: {e}")
    finally:
        # Always release the browser, even when crawling failed.
        if spider:
            spider.close()


if __name__ == "__main__":
    main()