import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import re
from urllib.parse import urlencode, urljoin
import os
import json
import logging
from datetime import datetime
import sys
from typing import List, Dict, Optional

# Logging setup: emit INFO-and-above records, timestamped, to both a
# UTF-8 log file and the console.
_log_handlers = [
    logging.FileHandler('anjuke_spider.log', encoding='utf-8'),
    logging.StreamHandler(),
]
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
    handlers=_log_handlers,
)


class EnhancedAnjukeRentSpider:
    """Scraper for rental listings on Anjuke (anjuke.com).

    Capabilities:
      * builds per-city/district search URLs with price/room/source filters,
      * fetches pages with retry, random delays, and optional proxy plus
        User-Agent rotation,
      * parses list pages and (optionally) each listing's detail page,
      * deduplicates results, runs a simple price/layout analysis, and
        exports to Excel, CSV, and a plain-text report.

    Runtime strings (log messages, dict keys, filenames) are deliberately
    kept in Chinese to match the scraped site and downstream consumers.
    """

    def __init__(self, use_proxy: bool = False, proxy_pool: Optional[List[str]] = None):
        """
        Args:
            use_proxy: when True, route requests through a random proxy
                drawn from ``proxy_pool``.
            proxy_pool: list of proxy URLs; each entry is used for both
                the http and https schemes.
        """
        self.session = requests.Session()
        self.use_proxy = use_proxy
        self.proxy_pool = proxy_pool or []
        self.request_count = 0          # total HTTP requests issued so far
        self.start_time = time.time()   # used by crawl()'s timing summary

        # Realistic desktop-Chrome headers to lower the chance of being
        # flagged as a bot.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Cache-Control': 'max-age=0',
        }
        self.session.headers.update(self.headers)

        # City -> subdomain code, district -> URL slug lookup tables.
        self.city_data = self.load_city_data()

    def load_city_data(self) -> Dict:
        """Return the static city/district table used to build search URLs.

        Keys are Chinese city names; each value holds the Anjuke subdomain
        ``code`` and a district-name -> URL-slug mapping.
        """
        city_data = {
            '广州': {
                'code': 'gz',
                'districts': {
                    '白云': 'baiyun', '天河': 'tianhe', '越秀': 'yuexiu', '海珠': 'haizhu',
                    '荔湾': 'liwan', '番禺': 'panyu', '黄埔': 'huangpugz', '花都': 'huadu',
                    '南沙': 'nansha', '从化': 'conghua', '增城': 'zengcheng'
                }
            },
            '北京': {
                'code': 'bj',
                'districts': {
                    '朝阳': 'chaoyang', '海淀': 'haidian', '西城': 'xicheng', '东城': 'dongcheng',
                    '丰台': 'fengtai', '石景山': 'shijingshan', '通州': 'tongzhou', '昌平': 'changping',
                    '大兴': 'daxing', '顺义': 'shunyi', '房山': 'fangshan', '门头沟': 'mentougou'
                }
            },
            '上海': {
                'code': 'sh',
                'districts': {
                    '浦东': 'pudong', '徐汇': 'xuhui', '长宁': 'changning', '静安': 'jingan',
                    '普陀': 'putuo', '虹口': 'hongkou', '杨浦': 'yangpu', '闵行': 'minhang',
                    '宝山': 'baoshan', '嘉定': 'jiading', '金山': 'jinshan', '松江': 'songjiang'
                }
            },
            '深圳': {
                'code': 'sz',
                'districts': {
                    '福田': 'futian', '南山': 'nanshan', '罗湖': 'luohu', '宝安': 'baoan',
                    '龙岗': 'longgang', '盐田': 'yantian', '龙华': 'longhua', '坪山': 'pingshan'
                }
            },
            # More cities can be added here...
        }
        return city_data

    def get_random_proxy(self) -> Optional[str]:
        """Return a random proxy URL from the pool, or None when empty.

        Entries are plain proxy URL strings: ``make_request`` assigns the
        same value to both the http and https schemes. (The original
        ``Optional[Dict]`` annotation was wrong.)
        """
        if not self.proxy_pool:
            return None
        return random.choice(self.proxy_pool)

    def make_request(self, url: str, max_retries: int = 3) -> Optional["requests.Response"]:
        """GET ``url`` with retries, random delays, and optional proxying.

        Returns the response on HTTP 200, otherwise None after
        ``max_retries`` attempts. A 403 triggers a User-Agent rotation
        before the next attempt; network errors are logged and retried.
        """
        for attempt in range(max_retries):
            try:
                proxies = None
                if self.use_proxy:
                    proxy = self.get_random_proxy()
                    if proxy:
                        # Same proxy endpoint for both schemes.
                        proxies = {'http': proxy, 'https': proxy}

                # Random pre-request delay so traffic looks less mechanical.
                delay = random.uniform(1, 3)
                time.sleep(delay)

                response = self.session.get(url, timeout=15, proxies=proxies)
                self.request_count += 1

                if response.status_code == 200:
                    return response
                elif response.status_code == 403:
                    logging.warning("请求被拒绝 (403)，尝试更换代理或User-Agent")
                    self.rotate_user_agent()
                else:
                    logging.warning(f"请求失败，状态码: {response.status_code}")

            except requests.exceptions.RequestException as e:
                logging.warning(f"请求异常 (尝试 {attempt + 1}/{max_retries}): {e}")

            # Back off for longer between retries.
            if attempt < max_retries - 1:
                time.sleep(random.uniform(5, 10))

        return None

    def rotate_user_agent(self):
        """Swap the session's User-Agent for a random desktop browser UA."""
        user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        ]
        new_ua = random.choice(user_agents)
        self.session.headers.update({'User-Agent': new_ua})
        logging.info(f"更换User-Agent: {new_ua[:50]}...")

    def get_city_info(self, city_name: str) -> Optional[Dict]:
        """Return the city entry from ``self.city_data``, or None."""
        return self.city_data.get(city_name)

    def build_search_url(self, city: str, district: str = None,
                         max_price: str = None, min_price: str = None,
                         room_type: str = None, source_type: str = None,
                         page: int = 1) -> Optional[str]:
        """Build a search URL, e.g.
        ``https://gz.zu.anjuke.com/fangyuan/tianhe/p2/?to_price=3000``.

        Returns None for an unsupported city; an unknown district falls
        back to a city-wide search. (Return annotation fixed: the original
        claimed ``str`` but returned None on failure.)
        """
        city_info = self.get_city_info(city)
        if not city_info:
            logging.error(f"不支持的城市: {city}")
            return None

        city_code = city_info['code']

        # Path: optional district slug under "fangyuan/".
        if district and district in city_info['districts']:
            district_code = city_info['districts'][district]
            base_path = f"fangyuan/{district_code}/"
        else:
            base_path = "fangyuan/"

        # Pagination: both path variants above always end with '/', so the
        # page segment can be appended directly. (The original code carried
        # an unreachable branch guarding a missing trailing slash.)
        if page > 1:
            base_path += f"p{page}/"

        base_url = f"https://{city_code}.zu.anjuke.com/{base_path}"

        # Query-string filters.
        params = {}
        if max_price:
            params['to_price'] = max_price
        if min_price:
            params['from_price'] = min_price

        # Room-count filter (Chinese label -> site code).
        if room_type:
            room_mapping = {'一室': 'w1', '二室': 'w2', '三室': 'w3', '四室': 'w4', '五室': 'w5'}
            if room_type in room_mapping:
                params['room'] = room_mapping[room_type]

        # Listing-source filter (individual / agent / branded apartment).
        if source_type:
            source_mapping = {'个人': 'l2', '经纪人': 'l1', '品牌公寓': 'jg1'}
            if source_type in source_mapping:
                params['type'] = source_mapping[source_type]

        url = base_url + '?' + urlencode(params) if params else base_url

        logging.debug(f"构建的URL: {url}")
        return url

    def parse_list_page(self, html: str) -> List[Dict]:
        """Parse one list page's HTML into a list of house-info dicts.

        Returns an empty list (with a warning) when no listing container
        matches any known selector.
        """
        soup = BeautifulSoup(html, 'html.parser')
        houses = []

        # Try several selectors; the site's markup varies between rollouts.
        selectors = [
            'div.zu-itemmod',
            'div[class*="zu-itemmod"]',
            'div[link*="anjuke"]'
        ]

        house_items = []
        for selector in selectors:
            house_items = soup.select(selector)
            if house_items:
                break

        if not house_items:
            logging.warning("未找到房源列表元素")
            return houses

        for item in house_items:
            try:
                house_info = self.parse_house_item(item)
                if house_info:
                    houses.append(house_info)
            except Exception as e:
                logging.error(f"解析房源信息时出错: {e}")
                continue

        return houses

    def parse_house_item(self, item) -> Optional[Dict]:
        """Extract one listing's fields from its container element.

        Returns a dict with Chinese keys (title, price, layout, area,
        floor, address, tags, agent, features, link, crawl time), or None
        if parsing fails.
        """
        try:
            # Title and detail-page link.
            title_elem = item.find('h3')
            if title_elem:
                title_link = title_elem.find('a')
                title = title_link.get_text(strip=True) if title_link else '未知'
                link = title_link.get('href', '') if title_link else ''
            else:
                title = '未知'
                link = item.get('link', '')

            # Monthly price.
            price_elem = item.find('strong', class_='price')
            price = price_elem.get_text(strip=True) if price_elem else '0'

            # Strip everything but digits; fall back to '0'.
            price = re.sub(r'[^\d]', '', price) or '0'

            # Layout, area, floor.
            rooms, living_rooms, area, floor_info, total_floors = self.parse_house_details(item)

            # Address.
            address = self.parse_address(item)

            # Tags.
            tags = self.parse_tags(item)

            # Agent info.
            agent = self.parse_agent_info(item)

            # Listing features (verified, video tour, ...).
            features = self.parse_features(item)

            house_info = {
                '标题': title,
                '价格(元/月)': int(price) if price.isdigit() else 0,
                '户型': f"{rooms}室{living_rooms}厅",
                '面积(平米)': area,
                '楼层': floor_info,
                '总楼层': total_floors,
                '地址': address,
                '标签': '|'.join(tags),
                '经纪人': agent,
                '特色': '|'.join(features),
                '链接': link,
                '爬取时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            return house_info

        except Exception as e:
            logging.error(f"解析房源项时出错: {e}")
            return None

    def parse_house_details(self, item) -> tuple:
        """Extract (rooms, living_rooms, area, floor, total_floors).

        All values are strings; fields that cannot be parsed stay '未知'
        (rooms/living_rooms fall back to '0' once a details element exists).
        """
        details_elem = item.find('p', class_='details-item')
        rooms = living_rooms = area = floor_info = total_floors = '未知'

        if details_elem:
            details_text = details_elem.get_text()

            # Layout: "N室M厅".
            room_match = re.search(r'(\d+)室', details_text)
            living_room_match = re.search(r'(\d+)厅', details_text)
            rooms = room_match.group(1) if room_match else '0'
            living_rooms = living_room_match.group(1) if living_room_match else '0'

            # Area in square metres (may be fractional).
            area_match = re.search(r'(\d+(?:\.\d+)?)平米', details_text)
            area = area_match.group(1) if area_match else '未知'

            # Floor: "高层(共N层)" style first, then a bare "高/中/低层".
            floor_match = re.search(r'([高低]层)\(共(\d+)层\)', details_text)
            if floor_match:
                floor_info = floor_match.group(1)
                total_floors = floor_match.group(2)
            else:
                floor_alt = re.search(r'([高中低]层)', details_text)
                if floor_alt:
                    floor_info = floor_alt.group(1)

        return rooms, living_rooms, area, floor_info, total_floors

    def parse_address(self, item) -> str:
        """Extract the listing address, or '未知' when absent."""
        address_elem = item.find('address', class_='details-item')
        if address_elem:
            # Drop embedded links so only the plain address text remains.
            for a in address_elem.find_all('a'):
                a.decompose()
            address = address_elem.get_text(strip=True)
            # Collapse runs of whitespace/newlines.
            address = re.sub(r'\s+', ' ', address)
            return address
        return '未知'

    def parse_tags(self, item) -> List[str]:
        """Extract the listing's tag labels (may be empty)."""
        tags = []
        tags_elem = item.find('p', class_='details-item bot-tag')
        if tags_elem:
            tag_spans = tags_elem.find_all('span', class_='cls-common')
            tags = [span.get_text(strip=True) for span in tag_spans]
        return tags

    def parse_agent_info(self, item) -> str:
        """Extract the agent name, or '未知' when absent."""
        agent_elem = item.find('p', class_='detail-jjr')
        if agent_elem:
            agent_text = agent_elem.get_text(strip=True)
            # Strip the private-use-area icon glyph the site embeds.
            agent_text = re.sub(r'[\uf047]', '', agent_text).strip()
            return agent_text if agent_text else '未知'
        return '未知'

    def parse_features(self, item) -> List[str]:
        """Extract feature badges (verified / real-photo / video tour)."""
        features = []
        # "安选" verified badge.
        if item.find('em', class_='anxvan-ico'):
            features.append('安选')
        # Real-photo verification badge.
        if item.find('i', class_='shipai'):
            features.append('实拍验真')
        # Video-tour badge.
        if item.find('i', class_='v-small'):
            features.append('视频看房')
        return features

    def get_house_detail(self, url: str) -> Dict:
        """Fetch and parse one listing's detail page; {} on any failure.

        Note: sleeps 2-4s here on top of make_request's own 1-3s delay,
        keeping detail crawling deliberately slow.
        """
        try:
            time.sleep(random.uniform(2, 4))
            response = self.make_request(url)

            if not response:
                return {}

            soup = BeautifulSoup(response.text, 'html.parser')
            detail_info = {}

            # House code and last-update time share one span.
            house_code_elem = soup.find('span', id='houseCode')
            if house_code_elem:
                code_text = house_code_elem.get_text(strip=True)
                code_match = re.search(r'房屋编码：(\d+)', code_text)
                if code_match:
                    detail_info['房屋编码'] = code_match.group(1)

                update_match = re.search(r'更新时间：(.+)$', code_text)
                if update_match:
                    detail_info['更新时间'] = update_match.group(1)

            # Payment terms; the last matching row wins.
            payment_elems = soup.find_all('li', class_='full-line')
            for elem in payment_elems:
                payment_elem = elem.find('span', class_='type')
                if payment_elem and '付' in payment_elem.get_text():
                    detail_info['付款方式'] = elem.get_text(strip=True)

            # Detailed attributes: layout / orientation / decoration / ...
            layout_elems = soup.find_all('li', class_='house-info-item')
            for elem in layout_elems:
                text = elem.get_text(strip=True)
                if '户型：' in text:
                    detail_info['详细户型'] = text.replace('户型：', '')
                elif '朝向：' in text:
                    detail_info['朝向'] = text.replace('朝向：', '')
                elif '装修：' in text:
                    detail_info['装修'] = text.replace('装修：', '')
                elif '类型：' in text:
                    detail_info['房屋类型'] = text.replace('类型：', '')
                elif '楼层：' in text:
                    detail_info['详细楼层'] = text.replace('楼层：', '')

            # Facilities; '无' entries are skipped.
            facilities = []
            facility_elems = soup.find_all('li', class_='peitao-item')
            for elem in facility_elems:
                facility_text = elem.get_text(strip=True)
                if facility_text and facility_text != '无':
                    facilities.append(facility_text)
            if facilities:
                detail_info['配套设施'] = '|'.join(facilities)

            # Description, truncated to 500 characters.
            desc_elem = soup.find('div', class_='auto-general')
            if desc_elem:
                description = desc_elem.get_text(strip=True)
                if len(description) > 500:
                    description = description[:500] + '...'
                detail_info['房源描述'] = description

            # Rental requirements: the first 'auto-general' div after the
            # "出租要求" heading. (The original guarded this with an
            # unrelated find() plus a str(soup) substring test; this direct
            # lookup selects the same element in the same cases.)
            requirement_title = soup.find('h2', string='出租要求')
            if requirement_title:
                requirement_div = requirement_title.find_next('div', class_='auto-general')
                if requirement_div:
                    detail_info['出租要求'] = requirement_div.get_text(strip=True)

            return detail_info

        except Exception as e:
            logging.error(f"获取房源详情时出错 {url}: {e}")
            return {}

    def crawl(self, city: str, district: str = None, max_price: str = None,
              min_price: str = None, room_type: str = None, source_type: str = None,
              max_pages: int = 5, get_details: bool = False,
              delay: tuple = (2, 5)) -> List[Dict]:
        """Crawl up to ``max_pages`` list pages and return house dicts.

        Args:
            city, district: location filter (see ``load_city_data``).
            max_price, min_price: monthly-rent bounds as strings.
            room_type, source_type: optional Chinese-label filters.
            max_pages: upper bound on list pages to fetch.
            get_details: also fetch each listing's detail page (slow).
            delay: (min, max) seconds to sleep between pages.

        Stops early when a page yields no listings (assumed last page);
        failed pages are skipped, not retried at this level.
        """
        all_houses = []
        successful_pages = 0

        logging.info(f"开始爬取 {city}{f'-{district}' if district else ''} 的租房信息...")
        logging.info(f"搜索条件: 价格{min_price or '0'}-{max_price or '不限'} {room_type or ''} {source_type or ''}")

        for page in range(1, max_pages + 1):
            try:
                logging.info(f"正在爬取第 {page} 页...")

                url = self.build_search_url(city, district, max_price, min_price,
                                            room_type, source_type, page)
                if not url:
                    break

                response = self.make_request(url)
                if not response:
                    logging.warning(f"第 {page} 页请求失败")
                    continue

                houses = self.parse_list_page(response.text)
                if not houses:
                    logging.info(f"第 {page} 页没有找到房源信息，可能已到最后一页")
                    break

                # Optionally enrich each listing with its detail page.
                if get_details:
                    logging.info(f"正在获取第 {page} 页的详情信息...")
                    for i, house in enumerate(houses):
                        if house.get('链接') and house['链接'].startswith('http'):
                            detail_info = self.get_house_detail(house['链接'])
                            house.update(detail_info)
                            # Progress log every 5 listings.
                            if (i + 1) % 5 == 0:
                                logging.info(f"  已处理 {i + 1}/{len(houses)} 条房源详情")

                all_houses.extend(houses)
                successful_pages += 1
                logging.info(f"第 {page} 页爬取完成，共 {len(houses)} 条房源信息")

                # Inter-page delay.
                if page < max_pages:
                    sleep_time = random.uniform(*delay)
                    logging.info(f"等待 {sleep_time:.1f} 秒后继续...")
                    time.sleep(sleep_time)

            except Exception as e:
                logging.error(f"爬取第 {page} 页时出错: {e}")
                continue

        # Final summary.
        total_time = time.time() - self.start_time
        logging.info(f"爬取完成！共处理 {successful_pages} 页，获取 {len(all_houses)} 条房源信息")
        logging.info(f"总请求数: {self.request_count}, 总耗时: {total_time:.1f} 秒")

        return all_houses

    def remove_duplicates(self, houses: List[Dict]) -> List[Dict]:
        """Drop duplicates keyed on (title, price), preserving order."""
        seen = set()
        unique_houses = []

        for house in houses:
            # (title, price) is treated as the listing's identity.
            identifier = (house.get('标题', ''), house.get('价格(元/月)', 0))
            if identifier not in seen:
                seen.add(identifier)
                unique_houses.append(house)

        removed_count = len(houses) - len(unique_houses)
        if removed_count > 0:
            logging.info(f"去除 {removed_count} 条重复房源")

        return unique_houses

    def analyze_data(self, houses: List[Dict]) -> Dict:
        """Compute simple statistics: counts, price stats, layout mix.

        Note: '区域分布' is kept in the result for schema stability but is
        not currently populated.
        """
        if not houses:
            return {}

        df = pd.DataFrame(houses)

        # Result skeleton.
        analysis = {
            '总房源数': len(houses),
            '价格统计': {},
            '户型分布': {},
            '区域分布': {}
        }

        # Price statistics over the numeric subset.
        if '价格(元/月)' in df.columns:
            prices = pd.to_numeric(df['价格(元/月)'], errors='coerce').dropna()
            if len(prices) > 0:
                analysis['价格统计'] = {
                    '平均价格': int(prices.mean()),
                    '中位数': int(prices.median()),
                    '最低价格': int(prices.min()),
                    '最高价格': int(prices.max()),
                    '价格区间': f"{int(prices.min())} - {int(prices.max())}"
                }

        # Top-10 layout distribution.
        if '户型' in df.columns:
            room_type_counts = df['户型'].value_counts().head(10).to_dict()
            analysis['户型分布'] = room_type_counts

        return analysis

    def save_to_excel(self, houses: List[Dict], filename: str = None) -> str:
        """Write the listings plus an analysis sheet to an .xlsx file.

        Returns the filename on success, '' when there is nothing to save
        or writing fails. A timestamped default name is used when
        ``filename`` is None.
        """
        if not houses:
            logging.warning("没有数据可保存")
            return ""

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.xlsx"

        try:
            df = pd.DataFrame(houses)

            # Analysis for the second sheet.
            analysis = self.analyze_data(houses)

            with pd.ExcelWriter(filename, engine='openpyxl') as writer:
                # Main data sheet.
                df.to_excel(writer, sheet_name='房源数据', index=False)

                # Flatten the analysis dict into rows.
                analysis_data = []
                for category, stats in analysis.items():
                    if isinstance(stats, dict):
                        for key, value in stats.items():
                            analysis_data.append({'分类': category, '指标': key, '数值': value})
                    else:
                        analysis_data.append({'分类': category, '指标': '统计', '数值': stats})

                if analysis_data:
                    analysis_df = pd.DataFrame(analysis_data)
                    analysis_df.to_excel(writer, sheet_name='数据分析', index=False)

            # Fixed: the message previously never contained the filename.
            logging.info(f"数据已保存到: {filename}")
            return filename

        except Exception as e:
            logging.error(f"保存Excel文件时出错: {e}")
            return ""

    def save_to_csv(self, houses: List[Dict], filename: str = None) -> str:
        """Write the listings to a UTF-8-BOM CSV (Excel-friendly).

        Returns the filename on success, '' when there is nothing to save
        or writing fails.
        """
        if not houses:
            logging.warning("没有数据可保存")
            return ""

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.csv"

        try:
            df = pd.DataFrame(houses)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fixed: the message previously never contained the filename.
            logging.info(f"数据已保存到: {filename}")
            return filename
        except Exception as e:
            logging.error(f"保存CSV文件时出错: {e}")
            return ""

    def save_analysis_report(self, houses: List[Dict], filename: str = None) -> str:
        """Write a plain-text analysis report and return its filename."""
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"租房分析报告_{timestamp}.txt"

        analysis = self.analyze_data(houses)

        with open(filename, 'w', encoding='utf-8') as f:
            f.write("=" * 50 + "\n")
            f.write("           安居客租房数据分析报告\n")
            f.write("=" * 50 + "\n\n")

            f.write(f"报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"分析房源数量: {len(houses)}\n\n")

            # Price section.
            if analysis.get('价格统计'):
                f.write("【价格分析】\n")
                for key, value in analysis['价格统计'].items():
                    f.write(f"  {key}: {value}\n")
                f.write("\n")

            # Layout section.
            if analysis.get('户型分布'):
                f.write("【热门户型】\n")
                for room_type, count in analysis['户型分布'].items():
                    percentage = (count / len(houses)) * 100
                    f.write(f"  {room_type}: {count}套 ({percentage:.1f}%)\n")

        # Fixed: the message previously never contained the filename.
        logging.info(f"分析报告已保存到: {filename}")
        return filename