import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import re
from urllib.parse import urlencode, urljoin
import os
import json
import logging
from datetime import datetime
import sys
from typing import List, Dict, Optional
import concurrent.futures
from spider.database import DatabaseManager

# Ensure the project root is on sys.path so the top-level ``config`` module
# can be imported regardless of the current working directory.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import config as config_module

# Logging configuration: mirror every record to a UTF-8 log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('anjuke_spider.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)

class EnhancedAnjukeRentSpider:
    def __init__(self, use_proxy=False, proxy_pool=None, config=None):
        """Create a spider instance.

        Args:
            use_proxy: when True, each request picks a random proxy from the pool.
            proxy_pool: optional list of proxy entries to rotate through.
            config: spider configuration mapping; falls back to the module-level
                SPIDER_CONFIG when not supplied.
        """
        self.session = requests.Session()
        self.use_proxy = use_proxy
        self.proxy_pool = proxy_pool or []
        self.request_count = 0
        self.start_time = time.time()
        # Fall back to the global spider configuration when none is injected.
        self.config = config if config is not None else config_module.SPIDER_CONFIG
        # Database access layer.
        self.db_manager = DatabaseManager(config_module.DATABASE_CONFIG)

        # Output destinations; only MySQL persistence is enabled by default.
        self.storage_options = {
            'save_to_mysql': True,
            'save_to_excel': False,
            'save_to_csv': False,
            'save_analysis_report': False
        }

        # Browser-like default headers applied to every request of this session.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
        }
        self.session.headers.update(self.headers)

        # City -> district-code lookup table.
        self.city_data = self.load_city_data()

    def load_city_data(self) -> Dict:
        """Return the supported-city lookup table.

        Maps a Chinese city name to its Anjuke subdomain code (``code``) and a
        ``districts`` mapping of district names to the URL path segments used
        by ``build_search_url``.
        """
        city_data = {
            '广州': {
                'code': 'gz',
                'districts': {
                    '白云': 'baiyun', '天河': 'tianhe', '越秀': 'yuexiu', '海珠': 'haizhu',
                    '荔湾': 'liwan', '番禺': 'panyu', '黄埔': 'huangpugz', '花都': 'huadu',
                    '南沙': 'nansha', '从化': 'conghua', '增城': 'zengcheng'
                }
            },
            '北京': {
                'code': 'bj',
                'districts': {
                    '朝阳': 'chaoyang', '海淀': 'haidian', '西城': 'xicheng', '东城': 'dongcheng',
                    '丰台': 'fengtai', '石景山': 'shijingshan', '通州': 'tongzhou', '昌平': 'changping',
                    '大兴': 'daxing', '顺义': 'shunyi', '房山': 'fangshan', '门头沟': 'mentougou'
                }
            },
            '上海': {
                'code': 'sh',
                'districts': {
                    '浦东': 'pudong', '徐汇': 'xuhui', '长宁': 'changning', '静安': 'jingan',
                    '普陀': 'putuo', '虹口': 'hongkou', '杨浦': 'yangpu', '闵行': 'minhang',
                    '宝山': 'baoshan', '嘉定': 'jiading', '金山': 'jinshan', '松江': 'songjiang'
                }
            },
            '深圳': {
                'code': 'sz',
                'districts': {
                    '福田': 'futian', '南山': 'nanshan', '罗湖': 'luohu', '宝安': 'baoan',
                    '龙岗': 'longgang', '盐田': 'yantian', '龙华': 'longhua', '坪山': 'pingshan'
                }
            },
            '杭州': {
                'code': 'hz',
                'districts': {
                    '西湖': 'xihu', '上城': 'shangcheng', '下城': 'xiacheng', '江干': 'jianggan',
                    '拱墅': 'gongshu', '滨江': 'binjiang', '萧山': 'xiaoshan', '余杭': 'yuhang'
                }
            },
            '南京': {
                'code': 'nj',
                'districts': {
                    '玄武': 'xuanwu', '秦淮': 'qinhuai', '建邺': 'jianye', '鼓楼': 'gulou',
                    '浦口': 'pukou', '栖霞': 'qixia', '雨花台': 'yuhuatai', '江宁': 'jiangning'
                }
            },
            '成都': {
                'code': 'cd',
                'districts': {
                    '锦江': 'jinjiang', '青羊': 'qingyang', '金牛': 'jinniu', '武侯': 'wuhou',
                    '成华': 'chenghua', '龙泉驿': 'longquanyi', '新都': 'xindu', '温江': 'wenjiang'
                }
            },
            '武汉': {
                'code': 'wh',
                'districts': {
                    '江岸': 'jiangan', '江汉': 'jianghan', '硚口': 'qiaokou', '汉阳': 'hanyang',
                    '武昌': 'wuchang', '青山': 'qingshan', '洪山': 'hongshan', '东西湖': 'dongxihu'
                }
            },
            '西安': {
                'code': 'xa',
                'districts': {
                    '新城': 'xincheng', '碑林': 'beilin', '莲湖': 'lianhu', '灞桥': 'baqiao',
                    '未央': 'weiyang', '雁塔': 'yanta', '阎良': 'yanliang', '临潼': 'lintong'
                }
            },
            '重庆': {
                'code': 'cq',
                'districts': {
                    '渝中': 'yuzhong', '大渡口': 'dadukou', '江北': 'jiangbei', '沙坪坝': 'shapingba',
                    '九龙坡': 'jiulongpo', '南岸': 'nanan', '北碚': 'beibei', '渝北': 'yubei'
                }
            },
            '天津': {
                'code': 'tj',
                'districts': {
                    '和平': 'heping', '河东': 'hedong', '河西': 'hexi', '南开': 'nankai',
                    '河北': 'hebei', '红桥': 'hongqiao', '东丽': 'dongli', '西青': 'xiqing'
                }
            },
            '苏州': {
                'code': 'su',
                'districts': {
                    '姑苏': 'gus', '虎丘': 'huqiu', '吴中': 'wuzhong', '相城': 'xiangcheng',
                    '吴江': 'wujiang', '工业园区': 'gongyeyuanqu', '新区': 'xinqu'
                }
            },
            '长沙': {
                'code': 'cs',
                'districts': {
                    '芙蓉': 'furong', '天心': 'tianxin', '岳麓': 'yuelu', '开福': 'kaifu',
                    '雨花': 'yuhua', '望城': 'wangcheng', '宁乡': 'ningxiang', '浏阳': 'liuyang'
                }
            },
            '郑州': {
                'code': 'zz',
                'districts': {
                    '中原': 'zhongyuan', '二七': 'erqi', '管城': 'guancheng', '金水': 'jinshui',
                    '上街': 'shangjie', '惠济': 'huiji', '中牟': 'zhongmou', '巩义': 'gongyi'
                }
            },
            '东莞': {
                'code': 'dg',
                'districts': {
                    '东城': 'dongcheng', '南城': 'nancheng', '万江': 'wanjiang', '莞城': 'guancheng',
                    '石碣': 'shijie', '石龙': 'shilong', '茶山': 'chashan', '虎门': 'humen'
                }
            },
            '佛山': {
                'code': 'fs',
                'districts': {
                    '禅城': 'chancheng', '南海': 'nanhai', '顺德': 'shunde', '三水': 'sanshui',
                    '高明': 'gaoming'
                }
            }
        }
        return city_data

    def get_random_proxy(self) -> Optional[Dict]:
        """Pick a random entry from the proxy pool, or None when the pool is empty."""
        return random.choice(self.proxy_pool) if self.proxy_pool else None

    def make_request(self, url: str, max_retries: int = 2) -> Optional[requests.Response]:
        """Fetch *url* with throttling, optional proxying and limited retries.

        Returns the response object on HTTP 200; None once all retries are
        exhausted. A 403 triggers a User-Agent rotation before the next try.
        """
        for attempt in range(max_retries):
            try:
                proxies = None
                if self.use_proxy:
                    chosen = self.get_random_proxy()
                    if chosen:
                        proxies = {'http': chosen, 'https': chosen}

                # Short randomized pause to stay under rate limits.
                time.sleep(random.uniform(0.5, 1.5))

                response = self.session.get(url, timeout=10, proxies=proxies)
                self.request_count += 1

                if response.status_code == 200:
                    return response
                if response.status_code == 403:
                    logging.warning(f"请求被拒绝 (403)，尝试更换代理或User-Agent")
                    self.rotate_user_agent()
                else:
                    logging.warning(f"请求失败，状态码: {response.status_code}")

            except requests.exceptions.RequestException as e:
                logging.warning(f"请求异常 (尝试 {attempt + 1}/{max_retries}): {e}")

            # Brief back-off before the next attempt (skipped after the last one).
            if attempt < max_retries - 1:
                time.sleep(random.uniform(2, 4))

        return None

    def rotate_user_agent(self):
        """Switch the session to a new randomly chosen User-Agent.

        Called after a 403 so follow-up requests look like a different
        browser. The candidate list previously contained three exact
        duplicate entries, which silently skewed the random choice; the
        duplicates have been removed.
        """
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        ]
        new_ua = random.choice(user_agents)
        self.session.headers.update({'User-Agent': new_ua})
        logging.info(f"更换User-Agent: {new_ua[:50]}...")

    def get_city_info(self, city_name: str) -> Optional[Dict]:
        """Look up the code/district table for *city_name*; None when unsupported."""
        city_info = self.city_data.get(city_name)
        return city_info

    def build_search_url(self, city: str, district: str = None,
                         max_price: str = None, min_price: str = None,
                         room_type: str = None, source_type: str = None,
                         page: int = 1) -> Optional[str]:
        """Build an Anjuke rental-listing search URL.

        Args:
            city: Chinese city name; must exist in ``self.city_data``.
            district: optional Chinese district name within the city.
            max_price/min_price: price bounds in yuan/month, as strings.
            room_type: layout filter, '一室' .. '五室'.
            source_type: listing source ('个人房源' / '经纪房源' / '品牌公寓').
            page: 1-based page number; pages > 1 add a ``p<N>/`` path segment.

        Returns:
            The full URL string, or None when the city is not supported.
        """
        city_info = self.get_city_info(city)
        if not city_info:
            logging.error(f"不支持的城市: {city}")
            return None

        city_code = city_info['code']

        # Path: fangyuan/[<district-code>/]
        if district and district in city_info['districts']:
            district_code = city_info['districts'][district]
            base_path = f"fangyuan/{district_code}/"
        else:
            base_path = "fangyuan/"

        # Pagination: base_path always ends with '/', so the page segment can
        # simply be appended (the old endswith('/') check was unreachable).
        if page > 1:
            base_path += f"p{page}/"

        base_url = f"https://{city_code}.zu.anjuke.com/{base_path}"

        # Query-string filters.
        params = {}
        if max_price:
            params['to_price'] = max_price
        if min_price:
            params['from_price'] = min_price

        # Layout filter.
        if room_type:
            room_mapping = {'一室': 'w1', '二室': 'w2', '三室': 'w3', '四室': 'w4', '五室': 'w5'}
            if room_type in room_mapping:
                params['room'] = room_mapping[room_type]

        # Listing-source filter.
        if source_type:
            source_mapping = {'个人房源': 'l2', '经纪房源': 'l1', '品牌公寓': 'jg1'}
            if source_type in source_mapping:
                params['type'] = source_mapping[source_type]

        if params:
            url = base_url + '?' + urlencode(params)
        else:
            url = base_url

        logging.debug(f"构建的URL: {url}")
        return url

    def parse_list_page(self, html: str) -> List[Dict]:
        """Parse a listing page and return one dict per house found."""
        soup = BeautifulSoup(html, 'html.parser')
        houses = []

        # Try selectors from most to least specific; keep the first that matches.
        candidate_selectors = (
            'div.zu-itemmod',
            'div[class*="zu-itemmod"]',
            'div[link*="anjuke"]',
        )

        house_items = []
        for css in candidate_selectors:
            house_items = soup.select(css)
            if house_items:
                break

        if not house_items:
            logging.warning("未找到房源列表元素")
            return houses

        for entry in house_items:
            try:
                parsed = self.parse_house_item_fast(entry)
            except Exception as e:
                logging.error(f"解析房源信息时出错: {e}")
                continue
            if parsed:
                houses.append(parsed)

        return houses

    def parse_house_item_fast(self, item) -> Optional[Dict]:
        """Extract one listing's fields from its list-page element.

        Returns a dict of Chinese-keyed fields, or None when parsing fails.
        """
        try:
            # Title and detail-page link live inside the <h3><a> element; fall
            # back to the element's own 'link' attribute when <h3> is missing.
            title = '未知'
            link = item.get('link', '')
            heading = item.find('h3')
            if heading:
                anchor = heading.find('a')
                if anchor:
                    title = anchor.get_text(strip=True)
                    link = anchor.get('href', '')
                else:
                    link = ''

            # Monthly price: digits only; '0' when absent.
            price_node = item.find('strong', class_='price')
            raw_price = price_node.get_text(strip=True) if price_node else '0'
            raw_price = re.sub(r'[^\d]', '', raw_price) or '0'

            # Delegate the remaining fields to the specialised fast parsers.
            rooms, living_rooms, area, floor_info, total_floors = self.parse_house_details_fast(item)
            address = self.parse_address_fast(item)
            tags = self.parse_tags_fast(item)
            agent = self.parse_agent_info_fast(item)
            features = self.parse_features_fast(item)

            return {
                '标题': title,
                '价格(元/月)': int(raw_price) if raw_price.isdigit() else 0,
                '户型': f"{rooms}室{living_rooms}厅",
                '面积(平米)': area,
                '楼层': floor_info,
                '总楼层': total_floors,
                '地址': address,
                '标签': '|'.join(tags),
                '经纪人': agent,
                '特色': '|'.join(features),
                '链接': link,
                '爬取时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

        except Exception as e:
            logging.error(f"解析房源项时出错: {e}")
            return None

    def parse_house_details_fast(self, item) -> tuple:
        """Parse layout, area and floor info from the details line.

        Returns:
            (rooms, living_rooms, area, floor_info, total_floors) — all
            strings; '未知' where a value could not be extracted ('0' for
            missing room / living-room counts).
        """
        details_elem = item.find('p', class_='details-item')
        rooms = living_rooms = area = floor_info = total_floors = '未知'

        if details_elem:
            details_text = details_elem.get_text()

            # Layout, e.g. "3室2厅".
            room_match = re.search(r'(\d+)室', details_text)
            living_room_match = re.search(r'(\d+)厅', details_text)
            rooms = room_match.group(1) if room_match else '0'
            living_rooms = living_room_match.group(1) if living_room_match else '0'

            # Area in square meters, possibly fractional.
            area_match = re.search(r'(\d+(?:\.\d+)?)平米', details_text)
            area = area_match.group(1) if area_match else '未知'

            # Floor description, e.g. "高层(共30层)". The character class now
            # also accepts 中层 (middle floor), which the old [高低] pattern
            # silently missed.
            floor_match = re.search(r'([高中低]层)\(共(\d+)层\)', details_text)
            if floor_match:
                floor_info = floor_match.group(1)
                total_floors = floor_match.group(2)

        return rooms, living_rooms, area, floor_info, total_floors

    def parse_address_fast(self, item) -> str:
        """Return the listing's address text, or '未知' when absent."""
        address_elem = item.find('address', class_='details-item')
        if not address_elem:
            return '未知'
        text = address_elem.get_text(strip=True)
        # Strip two known boilerplate link texts that leak into the address.
        for noise in (r'\s*田心江中街三巷自建房\s*', r'\s*良城一路-三路自建房\s*'):
            text = re.sub(noise, '', text)
        return text

    def parse_tags_fast(self, item) -> List[str]:
        """Collect the listing's tag labels; empty list when the tag row is missing."""
        container = item.find('p', class_='details-item bot-tag')
        if not container:
            return []
        return [span.get_text(strip=True)
                for span in container.find_all('span', class_='cls-common')]

    def parse_agent_info_fast(self, item) -> str:
        """Return the agent line's text, or '未知' when absent or empty."""
        agent_elem = item.find('p', class_='detail-jjr')
        if agent_elem:
            text = agent_elem.get_text(strip=True)
            if text:
                return text
        return '未知'

    def parse_features_fast(self, item) -> List[str]:
        """Detect listing badges; currently only the '安选' (verified) icon."""
        has_anxuan = item.find('em', class_='anxvan-ico') is not None
        return ['安选'] if has_anxuan else []

    def get_house_detail_fast(self, url: str) -> Dict:
        """Fetch a listing's detail page and extract extra fields.

        Returns a (possibly empty) dict with keys such as 房屋编码, 付款方式,
        详细户型, 朝向, 装修, 配套设施, 房源描述 and 出租要求. Any failure is
        logged and yields an empty dict.
        """
        try:
            # Extra delay on detail pages to reduce the risk of being blocked.
            time.sleep(random.uniform(2, 4))
            logging.debug(f"开始获取详情: {url}")

            response = self.make_request(url)

            if not response:
                logging.warning(f"获取详情页失败: {url}")
                return {}

            soup = BeautifulSoup(response.text, 'html.parser')
            detail_info = {}

            # House code, e.g. "房屋编码：123456".
            house_code_elem = soup.find('span', id='houseCode')
            if house_code_elem:
                code_text = house_code_elem.get_text(strip=True)
                code_match = re.search(r'房屋编码：(\d+)', code_text)
                if code_match:
                    detail_info['房屋编码'] = code_match.group(1)

            # Payment scheme: first full-line item mentioning '付'.
            payment_elems = soup.find_all('li', class_='full-line')
            for elem in payment_elems:
                if '付' in elem.get_text():
                    detail_info['付款方式'] = elem.get_text(strip=True)
                    break

            # Layout / orientation / decoration rows.
            layout_elems = soup.find_all('li', class_='house-info-item')
            for elem in layout_elems:
                text = elem.get_text(strip=True)
                if '户型：' in text:
                    detail_info['详细户型'] = text.replace('户型：', '')
                elif '朝向：' in text:
                    detail_info['朝向'] = text.replace('朝向：', '')
                elif '装修：' in text:
                    detail_info['装修'] = text.replace('装修：', '')

            # Facilities, ignoring explicit '无' (none) entries.
            facilities = []
            facility_elems = soup.find_all('li', class_='peitao-item')
            for elem in facility_elems:
                facility_text = elem.get_text(strip=True)
                if facility_text and facility_text != '无':
                    facilities.append(facility_text)
            if facilities:
                detail_info['配套设施'] = '|'.join(facilities)

            # Free-text description, truncated to 200 characters.
            desc_elem = soup.find('div', class_='auto-general')
            if desc_elem:
                description = desc_elem.get_text(strip=True)
                if len(description) > 200:
                    description = description[:200] + '...'
                detail_info['房源描述'] = description

            # Rental requirements: the auto-general block following the
            # '出租要求' heading. Reuses desc_elem — the old code re-ran the
            # identical soup.find('div', class_='auto-general') lookup.
            if desc_elem and '出租要求' in str(soup):
                requirement_title = soup.find('h2', string='出租要求')
                if requirement_title:
                    requirement_div = requirement_title.find_next('div', class_='auto-general')
                    if requirement_div:
                        detail_info['出租要求'] = requirement_div.get_text(strip=True)

            logging.debug(f"详情获取成功: {url}, 获取到 {len(detail_info)} 个字段")
            return detail_info

        except Exception as e:
            logging.error(f"获取房源详情时出错 {url}: {e}")
            return {}

    def get_house_details_batch(self, houses: List[Dict], city: str = None, district: str = None,
                                source_type: str = None) -> List[Dict]:
        """Fetch detail pages for *houses* concurrently and merge results in place.

        Each listing dict is mutated via ``dict.update``; when MySQL storage is
        enabled, finished listings are flushed to the database in batches so a
        crash loses at most one batch. Returns the same *houses* list.
        """
        # Keep only listings with an absolute http(s) link.
        valid_houses = []
        urls = []

        for house in houses:
            if house.get('链接') and house['链接'].startswith('http'):
                valid_houses.append(house)
                urls.append(house['链接'])

        if not urls:
            logging.info("没有有效的房源链接需要获取详情")
            return houses

        logging.info(f"开始批量获取详情，共 {len(urls)} 个链接")

        # Thread-pool size comes from the spider configuration (default 3).
        max_workers = self.config.get('concurrent_workers', 3)

        # Flush completed listings to the DB every `save_batch_size` details.
        # (An earlier comment claimed 20; the actual batch size is 50.)
        save_batch_size = 50
        completed_houses = []  # listings finished since the last flush

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tasks, keeping the future -> listing mapping.
            future_to_house = {}
            for i, url in enumerate(urls):
                house = valid_houses[i]
                future = executor.submit(self.get_house_detail_fast, url)
                future_to_house[future] = house

            # Collect results as they complete (unordered).
            completed = 0
            total = len(urls)

            for future in concurrent.futures.as_completed(future_to_house):
                house = future_to_house[future]
                try:
                    result = future.result()
                    # Merge the detail fields into the listing dict.
                    house.update(result)
                    completed += 1
                    completed_houses.append(house)

                    # Periodic flush to the database.
                    if completed % save_batch_size == 0:
                        logging.info(f"保存进度: 正在保存第 {completed} 个房源的详情数据...")
                        if self.storage_options.get('save_to_mysql', True):
                            # Only the listings completed since the last flush.
                            success_count = self.db_manager.insert_batch_houses(
                                completed_houses, city, district, source_type
                            )
                            logging.info(f"已保存 {success_count} 条详情数据到数据库")
                            # Reset so the same rows are not written twice.
                            completed_houses = []

                    # Progress log every 5 items and at the very end.
                    if completed % 5 == 0 or completed == total:
                        logging.info(f"详情获取进度: {completed}/{total}")

                except Exception as e:
                    logging.error(f"获取详情时出错: {e}")
                    completed += 1

        # Flush whatever remains after the pool drains.
        if completed_houses and self.storage_options.get('save_to_mysql', True):
            logging.info(f"保存剩余的 {len(completed_houses)} 条详情数据...")
            success_count = self.db_manager.insert_batch_houses(
                completed_houses, city, district, source_type
            )
            logging.info(f"剩余详情保存完成: 成功 {success_count}/{len(completed_houses)} 条")

        logging.info(f"详情获取完成，成功处理 {completed} 个房源")
        return houses

    def crawl(self, city: str, district: str = None, max_price: str = None,
              min_price: str = None, room_type: str = None, source_type: str = None,
              max_pages: int = 5, get_details: bool = False,
              delay: tuple = (1, 3)) -> List[Dict]:
        """Main crawl loop.

        Iterates list pages for the given city/filters, optionally fetches
        per-listing details, and persists results to MySQL when enabled.

        Args:
            city: Chinese city name (must be supported by load_city_data).
            district: optional district name.
            max_price/min_price: price bounds as strings (yuan/month).
            room_type: layout filter, e.g. '二室'.
            source_type: listing-source filter.
            max_pages: maximum number of list pages to fetch.
            get_details: also fetch each listing's detail page when True.
            delay: (min, max) seconds to sleep between list pages.

        Returns:
            All parsed listing dicts.
        """
        all_houses = []
        successful_pages = 0

        logging.info(f"开始爬取 {city}{f'-{district}' if district else ''} 的租房信息...")
        logging.info(f"搜索条件: 价格{min_price or '0'}-{max_price or '不限'} {room_type or ''} {source_type or ''}")

        # Persist periodically so a crash does not lose everything.
        save_interval = 3  # save every 3 pages

        for page in range(1, max_pages + 1):
            try:
                logging.info(f"正在爬取第 {page} 页...")

                url = self.build_search_url(city, district, max_price, min_price,
                                            room_type, source_type, page)
                if not url:
                    break

                response = self.make_request(url)
                if not response:
                    logging.warning(f"第 {page} 页请求失败")
                    continue

                houses = self.parse_list_page(response.text)
                if not houses:
                    logging.info(f"第 {page} 页没有找到房源信息，可能已到最后一页")
                    break

                all_houses.extend(houses)
                successful_pages += 1
                logging.info(f"第 {page} 页爬取完成，共 {len(houses)} 条房源信息")

                # Periodic save.
                # NOTE(review): each periodic save writes the ENTIRE all_houses
                # list (and the final save writes it again), so earlier rows are
                # re-inserted every time — this is only correct if
                # db_manager.insert_batch_houses deduplicates/upserts; confirm
                # against DatabaseManager.
                if page % save_interval == 0 and all_houses:
                    logging.info(f"定期保存数据，当前已爬取 {len(all_houses)} 条")
                    if self.storage_options.get('save_to_mysql', True):
                        success_count = self.db_manager.insert_batch_houses(all_houses, city, district, source_type)
                        logging.info(f"已保存 {success_count} 条数据到数据库")

                # Short inter-page delay.
                if page < max_pages:
                    sleep_time = random.uniform(*delay)
                    time.sleep(sleep_time)

            except Exception as e:
                logging.error(f"爬取第 {page} 页时出错: {e}")
                continue

        # Batch-fetch details; city/district/source_type are forwarded so the
        # detail fetcher can persist incrementally as it goes.
        if get_details and all_houses:
            logging.info(f"开始批量获取详情信息，共 {len(all_houses)} 条房源...")
            self.get_house_details_batch(all_houses, city, district, source_type)
        else:
            # Persist to MySQL.
            if all_houses and self.storage_options.get('save_to_mysql', True):
                logging.info(f"开始保存数据到MySQL，共 {len(all_houses)} 条记录...")
                success_count = self.db_manager.insert_batch_houses(all_houses, city, district, source_type)
                logging.info(f"MySQL保存完成: 成功 {success_count}/{len(all_houses)} 条记录")

        # Summary statistics.
        total_time = time.time() - self.start_time
        logging.info(f"爬取完成！共处理 {successful_pages} 页，获取 {len(all_houses)} 条房源信息")
        logging.info(f"总请求数: {self.request_count}, 总耗时: {total_time:.1f} 秒")

        return all_houses

    def crawl_fast(self, city: str, district: str = None, max_price: str = None,
                   min_price: str = None, max_pages: int = 3, get_details: bool = False) -> List[Dict]:
        """Speed-oriented crawl: minimal delays, up to *max_pages* list pages.

        Bug fix: ``page`` was previously passed positionally into
        ``build_search_url``'s ``room_type`` slot, so every iteration
        re-fetched page 1; it is now passed as the ``page`` keyword.

        Returns all parsed listing dicts (enriched with details when
        *get_details* is True).
        """
        all_houses = []

        logging.info(f"开始快速爬取 {city}{f'-{district}' if district else ''}")

        for page in range(1, max_pages + 1):
            try:
                logging.info(f"正在爬取第 {page} 页...")

                url = self.build_search_url(city, district, max_price, min_price, page=page)
                if not url:
                    break

                response = self.make_request(url)
                if not response:
                    continue

                houses = self.parse_list_page(response.text)
                if not houses:
                    break

                all_houses.extend(houses)
                logging.info(f"第 {page} 页爬取完成，共 {len(houses)} 条房源信息")

                # Very short inter-page delay.
                if page < max_pages:
                    time.sleep(random.uniform(0.5, 1))

            except Exception as e:
                logging.error(f"爬取第 {page} 页时出错: {e}")
                continue

        # Optionally enrich each listing with its detail page.
        if get_details and all_houses:
            logging.info(f"快速获取详情信息...")
            all_houses = self.get_house_details_batch(all_houses)

        # Persist to MySQL when enabled.
        if all_houses and self.storage_options.get('save_to_mysql', True):
            logging.info(f"开始保存数据到MySQL，共 {len(all_houses)} 条记录...")
            success_count = self.db_manager.insert_batch_houses(all_houses, city, district)
            logging.info(f"MySQL保存完成: 成功 {success_count}/{len(all_houses)} 条记录")

        total_time = time.time() - self.start_time
        logging.info(f"快速爬取完成！获取 {len(all_houses)} 条房源信息，总耗时: {total_time:.1f} 秒")

        return all_houses

    def remove_duplicates(self, houses: List[Dict]) -> List[Dict]:
        """Drop listings whose (title, price) pair has already been seen."""
        seen_keys = set()
        unique_houses = []

        for entry in houses:
            # (title, monthly price) serves as the identity key.
            key = (entry.get('标题', ''), entry.get('价格(元/月)', 0))
            if key in seen_keys:
                continue
            seen_keys.add(key)
            unique_houses.append(entry)

        removed_count = len(houses) - len(unique_houses)
        if removed_count > 0:
            logging.info(f"去除 {removed_count} 条重复房源")

        return unique_houses

    def analyze_data(self, houses: List[Dict]) -> Dict:
        """Compute summary statistics: totals, price stats and layout distribution."""
        if not houses:
            return {}

        df = pd.DataFrame(houses)

        analysis = {
            '总房源数': len(houses),
            '价格统计': {},
            '户型分布': {},
            '区域分布': {}
        }

        # Price statistics over rows with a numeric monthly price.
        if '价格(元/月)' in df.columns:
            prices = pd.to_numeric(df['价格(元/月)'], errors='coerce').dropna()
            if len(prices) > 0:
                lowest, highest = int(prices.min()), int(prices.max())
                analysis['价格统计'] = {
                    '平均价格': int(prices.mean()),
                    '中位数': int(prices.median()),
                    '最低价格': lowest,
                    '最高价格': highest,
                    '价格区间': f"{lowest} - {highest}"
                }

        # Ten most common layouts.
        if '户型' in df.columns:
            analysis['户型分布'] = df['户型'].value_counts().head(10).to_dict()

        return analysis

    def save_to_excel(self, houses: List[Dict], filename: str = None) -> str:
        """Write the listings to an .xlsx file.

        Args:
            houses: list of per-listing dicts (as produced by the parsers).
            filename: target path; a timestamped default is used when omitted.

        Returns:
            The filename on success, '' on failure or empty input.

        Fix: the success message previously printed the literal text
        "(unknown)" instead of the actual filename (corrupted f-string).
        """
        if not houses:
            logging.warning("没有数据可保存")
            return ""

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.xlsx"

        try:
            # Create the target directory if needed.
            directory = os.path.dirname(filename) if os.path.dirname(filename) else '.'
            os.makedirs(directory, exist_ok=True)

            print(f"正在保存Excel文件到: {os.path.abspath(filename)}")

            df = pd.DataFrame(houses)

            if df.empty:
                logging.warning("数据框为空，无法保存")
                return ""

            print(f"数据框形状: {df.shape}")
            print(f"数据列: {list(df.columns)}")

            df.to_excel(filename, index=False, engine='openpyxl')

            # Confirm the file was actually created, then verify readability.
            if os.path.exists(filename):
                file_size = os.path.getsize(filename)
                print(f"✓ Excel文件保存成功: {filename}")
                print(f"✓ 文件大小: {file_size} 字节")
                print(f"✓ 完整路径: {os.path.abspath(filename)}")

                try:
                    test_df = pd.read_excel(filename, engine='openpyxl')
                    print(f"✓ 文件验证成功，包含 {len(test_df)} 行数据")
                except Exception as e:
                    print(f"⚠ 文件保存但验证失败: {e}")

                return filename
            else:
                print("✗✗ 文件未创建成功")
                return ""

        except Exception as e:
            error_msg = f"保存Excel文件时出错: {e}"
            logging.error(error_msg)
            print(error_msg)
            import traceback
            error_details = traceback.format_exc()
            logging.error(f"错误详情: {error_details}")
            print(f"错误详情: {error_details}")
            return ""

    def save_to_csv(self, houses: List[Dict], filename: str = None) -> str:
        """Write the listings to a CSV file (UTF-8 with BOM, Excel-friendly).

        Returns the filename on success, '' on failure or empty input.

        Fix: the success log previously printed the literal "(unknown)"
        instead of the actual filename (corrupted f-string).
        """
        if not houses:
            logging.warning("没有数据可保存")
            return ""

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.csv"

        try:
            df = pd.DataFrame(houses)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            logging.info(f"数据已保存到: {filename}")
            return filename
        except Exception as e:
            logging.error(f"保存CSV文件时出错: {e}")
            return ""

    def save_analysis_report(self, houses: List[Dict], filename: str = None) -> str:
        """Write a plain-text analysis report for the crawled listings.

        Returns the report filename.

        Fix: the completion log previously printed the literal "(unknown)"
        instead of the actual filename (corrupted f-string).
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"租房分析报告_{timestamp}.txt"

        analysis = self.analyze_data(houses)

        with open(filename, 'w', encoding='utf-8') as f:
            f.write("=" * 50 + "\n")
            f.write("           安居客租房数据分析报告\n")
            f.write("=" * 50 + "\n\n")

            f.write(f"报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"分析房源数量: {len(houses)}\n\n")

            # Price section (only present when analyze_data produced stats).
            if analysis.get('价格统计'):
                f.write("【价格分析】\n")
                for key, value in analysis['价格统计'].items():
                    f.write(f"  {key}: {value}\n")
                f.write("\n")

            # Layout-distribution section.
            if analysis.get('户型分布'):
                f.write("【热门户型】\n")
                for room_type, count in analysis['户型分布'].items():
                    percentage = (count / len(houses)) * 100
                    f.write(f"  {room_type}: {count}套 ({percentage:.1f}%)\n")

        logging.info(f"分析报告已保存到: {filename}")
        return filename

    def save_to_mysql(self, houses: List[Dict], city: str = None, district: str = None, source_type: str = None) -> int:
        """Persist listings via the database manager; returns the success count.

        Standalone entry point — usable independently of the crawl loop.
        """
        if not houses:
            logging.warning("没有数据可保存到MySQL")
            return 0

        total = len(houses)
        logging.info(f"开始保存数据到MySQL，共 {total} 条记录...")
        success_count = self.db_manager.insert_batch_houses(houses, city, district, source_type)
        logging.info(f"MySQL保存完成: 成功 {success_count}/{total} 条记录")
        return success_count

    def close(self):
        """Release held resources: the database connection, then the HTTP session.

        hasattr guards keep this safe even when __init__ did not run to
        completion.
        """
        if hasattr(self, 'db_manager'):
            self.db_manager.close()
        if hasattr(self, 'session'):
            self.session.close()
        logging.info("爬虫资源已关闭")

    def __del__(self):
        """Best-effort cleanup when the object is garbage collected.

        Swallows all exceptions: __del__ may run during interpreter shutdown,
        when logging handlers or attributes from a partially-run __init__ can
        already be gone, and exceptions raised here are unraisable anyway.
        """
        try:
            self.close()
        except Exception:
            pass