import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import re
from urllib.parse import urlencode
import os


class AnjukeRentSpider:
    """Scraper for rental-housing listings on Anjuke (zu.anjuke.com).

    Builds search URLs per city/district/price range, parses listing pages
    with BeautifulSoup, optionally fetches per-listing detail pages, and
    saves the collected rows to Excel or CSV via pandas.
    """

    # Seconds to wait for any HTTP response; without a timeout a stalled
    # connection would hang the whole crawl indefinitely.
    REQUEST_TIMEOUT = 15

    def __init__(self):
        # One session so headers (and any cookies the site sets) persist
        # across all requests.
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            # Do NOT advertise 'br' here: requests can only decode brotli
            # when the optional brotli package is installed, and hard-coding
            # 'br' would make servers send undecodable response bodies.
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        }
        self.session.headers.update(self.headers)

    def get_city_code(self, city_name):
        """Map a Chinese city name to its Anjuke subdomain code.

        Unknown cities fall back to ``city_name.lower()`` so callers may
        pass the subdomain code directly (e.g. ``'gz'``).
        """
        city_mapping = {
            '广州': 'gz', '北京': 'bj', '上海': 'sh', '深圳': 'sz',
            '杭州': 'hz', '南京': 'nj', '成都': 'cd', '武汉': 'wh',
            '西安': 'xa', '重庆': 'cq', '天津': 'tj', '苏州': 'su',
            '长沙': 'cs', '郑州': 'zz', '东莞': 'dg', '佛山': 'fs',
            '宁波': 'nb', '青岛': 'qd', '大连': 'dl', '厦门': 'xm'
        }
        return city_mapping.get(city_name, city_name.lower())

    def get_district_code(self, city, district):
        """Map a district name to its URL slug (partial table).

        Only a few cities are covered; extend the table as needed for
        Anjuke's real URL structure.  Unknown city/district pairs fall back
        to ``district.lower()`` so a slug can be passed through directly.
        """
        district_mapping = {
            '广州': {
                '白云': 'baiyun', '天河': 'tianhe', '越秀': 'yuexiu',
                '海珠': 'haizhu', '荔湾': 'liwan', '番禺': 'panyu'
            },
            '北京': {
                '朝阳': 'chaoyang', '海淀': 'haidian', '西城': 'xicheng',
                '东城': 'dongcheng', '丰台': 'fengtai', '通州': 'tongzhou'
            },
            '上海': {
                '浦东': 'pudong', '徐汇': 'xuhui', '长宁': 'changning',
                '静安': 'jingan', '普陀': 'putuo', '虹口': 'hongkou'
            }
        }
        return district_mapping.get(city, {}).get(district, district.lower())

    def build_search_url(self, city, district=None, max_price=None, min_price=None, page=1):
        """Build a listing-search URL for the given city/district/price/page.

        Returns a URL of the form
        ``https://<city>.zu.anjuke.com/fangyuan/[<district>/]?<params>``
        where the query carries ``from_price``/``to_price`` (when given)
        and the page number ``p``.
        """
        city_code = self.get_city_code(city)

        if district:
            district_code = self.get_district_code(city, district)
            base_url = f"https://{city_code}.zu.anjuke.com/fangyuan/{district_code}/"
        else:
            base_url = f"https://{city_code}.zu.anjuke.com/fangyuan/"

        params = {}
        if max_price:
            params['to_price'] = max_price
        if min_price:
            params['from_price'] = min_price
        params['p'] = page

        # params always contains at least 'p', but keep the guard so the
        # method stays correct if the pagination key ever becomes optional.
        if params:
            url = base_url + '?' + urlencode(params)
        else:
            url = base_url

        return url

    def parse_list_page(self, html):
        """Parse one listing page; return a list of house-info dicts.

        Items that fail to parse are reported and skipped rather than
        aborting the whole page.
        """
        soup = BeautifulSoup(html, 'html.parser')
        houses = []

        # Each listing card is a <div class="zu-itemmod"> on Anjuke's
        # rental list pages.
        house_items = soup.find_all('div', class_='zu-itemmod')

        for item in house_items:
            try:
                house_info = self.parse_house_item(item)
                if house_info:
                    houses.append(house_info)
            except Exception as e:
                print(f"解析房源信息时出错: {e}")
                continue

        return houses

    def parse_house_item(self, item):
        """Parse a single listing card into a dict (keys in Chinese).

        Missing fields default to '未知' ("unknown"); returns ``None`` when
        the card cannot be parsed at all.
        """
        try:
            # Title and detail-page link; guard the <h3> lookup so a
            # malformed card degrades to '未知' instead of raising.
            h3 = item.find('h3')
            title_elem = h3.find('a') if h3 else None
            title = title_elem.get_text(strip=True) if title_elem else '未知'
            link = title_elem.get('href', '') if title_elem else ''

            # Monthly rent.
            price_elem = item.find('strong', class_='price')
            price = price_elem.get_text(strip=True) if price_elem else '未知'

            # Layout, area and floor information live in one text blob.
            details_elem = item.find('p', class_='details-item')
            if details_elem:
                details_text = details_elem.get_text()
                room_match = re.search(r'(\d+)室', details_text)
                living_room_match = re.search(r'(\d+)厅', details_text)
                area_match = re.search(r'(\d+(?:\.\d+)?)平米', details_text)
                # 高/中/低层 = high/middle/low floor; the middle case is
                # common on Anjuke and must be matched too.
                floor_match = re.search(r'([高中低]层)\(共(\d+)层\)', details_text)

                rooms = room_match.group(1) if room_match else '未知'
                living_rooms = living_room_match.group(1) if living_room_match else '未知'
                area = area_match.group(1) if area_match else '未知'
                floor_info = floor_match.group(1) if floor_match else '未知'
                total_floors = floor_match.group(2) if floor_match else '未知'
            else:
                rooms = living_rooms = area = floor_info = total_floors = '未知'

            # Address.
            address_elem = item.find('address', class_='details-item')
            address = address_elem.get_text(strip=True) if address_elem else '未知'

            # Tags (whole-rent, orientation, near-subway, ...).
            tags_elem = item.find('p', class_='details-item bot-tag')
            tags = []
            if tags_elem:
                tag_spans = tags_elem.find_all('span', class_='cls-common')
                tags = [span.get_text(strip=True) for span in tag_spans]

            # Listing agent.
            agent_elem = item.find('p', class_='detail-jjr')
            agent = agent_elem.get_text(strip=True) if agent_elem else '未知'

            house_info = {
                '标题': title,
                '价格(元/月)': price,
                '户型': f"{rooms}室{living_rooms}厅",
                '面积(平米)': area,
                '楼层': floor_info,
                '总楼层': total_floors,
                '地址': address,
                '标签': '|'.join(tags),
                '经纪人': agent,
                '链接': link
            }

            return house_info

        except Exception as e:
            print(f"解析房源项时出错: {e}")
            return None

    def get_house_detail(self, url):
        """Fetch and parse a listing's detail page.

        Returns a dict of extra fields (possibly empty); network or parse
        failures are reported and yield ``{}`` so the crawl continues.
        """
        try:
            # Random delay so the request pattern looks less bot-like.
            time.sleep(random.uniform(1, 3))
            response = self.session.get(url, timeout=self.REQUEST_TIMEOUT)
            response.encoding = 'utf-8'

            if response.status_code != 200:
                print(f"获取详情页失败: {response.status_code}")
                return {}

            soup = BeautifulSoup(response.text, 'html.parser')
            detail_info = {}

            # House code.
            house_code_elem = soup.find('span', id='houseCode')
            if house_code_elem:
                detail_info['房屋编码'] = house_code_elem.get_text(strip=True)

            # Payment schedule (e.g. "押一付三").
            payment_elem = soup.find('span', class_='type')
            if payment_elem and '付' in payment_elem.get_text():
                detail_info['付款方式'] = payment_elem.get_text(strip=True)

            # Detailed layout / orientation / decoration / property type.
            layout_elems = soup.find_all('li', class_='house-info-item')
            for elem in layout_elems:
                text = elem.get_text()
                if '户型：' in text:
                    detail_info['详细户型'] = text.replace('户型：', '').strip()
                elif '朝向：' in text:
                    detail_info['朝向'] = text.replace('朝向：', '').strip()
                elif '装修：' in text:
                    detail_info['装修'] = text.replace('装修：', '').strip()
                elif '类型：' in text:
                    detail_info['房屋类型'] = text.replace('类型：', '').strip()

            # Amenities.
            facilities = []
            facility_elems = soup.find_all('li', class_='peitao-item')
            for elem in facility_elems:
                facility_text = elem.get_text(strip=True)
                if facility_text:
                    facilities.append(facility_text)
            if facilities:
                detail_info['配套设施'] = '|'.join(facilities)

            # Free-text description.
            desc_elem = soup.find('div', class_='auto-general')
            if desc_elem:
                detail_info['房源描述'] = desc_elem.get_text(strip=True)

            return detail_info

        except Exception as e:
            print(f"获取房源详情时出错: {e}")
            return {}

    def crawl(self, city, district=None, max_price=None, min_price=None, max_pages=5, get_details=False):
        """Crawl up to ``max_pages`` listing pages and return all rows.

        Stops early on a failed request or an empty page (assumed to be
        past the last page).  When ``get_details`` is true, each listing's
        detail page is fetched and merged into its row.
        """
        all_houses = []

        print(f"开始爬取 {city}{f'-{district}' if district else ''} 的租房信息...")

        for page in range(1, max_pages + 1):
            try:
                print(f"正在爬取第 {page} 页...")

                url = self.build_search_url(city, district, max_price, min_price, page)
                response = self.session.get(url, timeout=self.REQUEST_TIMEOUT)
                response.encoding = 'utf-8'

                if response.status_code != 200:
                    print(f"第 {page} 页请求失败: {response.status_code}")
                    break

                houses = self.parse_list_page(response.text)
                if not houses:
                    print(f"第 {page} 页没有找到房源信息，可能已到最后一页")
                    break

                if get_details:
                    for house in houses:
                        if house['链接'] and house['链接'].startswith('http'):
                            detail_info = self.get_house_detail(house['链接'])
                            house.update(detail_info)

                all_houses.extend(houses)
                print(f"第 {page} 页爬取完成，共 {len(houses)} 条房源信息")

                # Randomized inter-page delay to avoid hammering the site.
                time.sleep(random.uniform(2, 5))

            except Exception as e:
                print(f"爬取第 {page} 页时出错: {e}")
                continue

        print(f"爬取完成！共获取 {len(all_houses)} 条房源信息")
        return all_houses

    def save_to_excel(self, houses, filename=None):
        """Save rows to an Excel file; returns the filename used.

        When ``filename`` is omitted, a timestamped name is generated.
        Returns ``None`` (after a message) when there is nothing to save.
        """
        if not houses:
            print("没有数据可保存")
            return

        if not filename:
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.xlsx"

        df = pd.DataFrame(houses)
        df.to_excel(filename, index=False, engine='openpyxl')
        # Report the actual path (was a broken placeholder before).
        print(f"数据已保存到: {filename}")
        return filename

    def save_to_csv(self, houses, filename=None):
        """Save rows to a CSV file; returns the filename used.

        Uses utf-8-sig so Excel opens the Chinese headers correctly.
        Returns ``None`` (after a message) when there is nothing to save.
        """
        if not houses:
            print("没有数据可保存")
            return

        if not filename:
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filename = f"安居客租房信息_{timestamp}.csv"

        df = pd.DataFrame(houses)
        df.to_csv(filename, index=False, encoding='utf-8-sig')
        # Report the actual path (was a broken placeholder before).
        print(f"数据已保存到: {filename}")
        return filename


def main():
    """Interactive CLI: prompt for search parameters, crawl, and save.

    Empty answers fall back to defaults (city 广州, 5 pages, Excel output,
    no detail pages, no price limits).
    """
    spider = AnjukeRentSpider()

    print("=" * 50)
    print("          安居客租房信息爬虫工具")
    print("=" * 50)

    # Collect user input; blank answers mean "use the default".
    city = input("请输入城市名称 (如: 广州、北京、上海): ").strip()
    if not city:
        city = "广州"
        print(f"使用默认城市: {city}")

    district = input("请输入区域名称 (如: 白云、天河，留空则搜索全市): ").strip()

    min_price = input("请输入最低价格 (元/月，留空则不限制): ").strip()
    max_price = input("请输入最高价格 (元/月，留空则不限制): ").strip()

    try:
        max_pages = int(input("请输入要爬取的页数 (默认5页): ") or "5")
    except ValueError:
        # Only swallow bad numeric input; a bare except would also hide
        # KeyboardInterrupt / SystemExit.
        max_pages = 5

    get_details = input("是否获取详细房源信息? (y/n，默认n): ").strip().lower() == 'y'

    file_format = input("保存格式 (1: Excel, 2: CSV，默认Excel): ").strip()
    if file_format == '2':
        save_func = spider.save_to_csv
    else:
        save_func = spider.save_to_excel

    print("\n开始爬取...")

    houses = spider.crawl(
        city=city,
        district=district if district else None,
        min_price=min_price if min_price else None,
        max_price=max_price if max_price else None,
        max_pages=max_pages,
        get_details=get_details
    )

    if houses:
        filename = save_func(houses)

        # Summary statistics; prices that are not plain integers (e.g.
        # ranges or '未知') are excluded from the average/min/max.
        print("\n爬取统计:")
        print(f"总房源数: {len(houses)}")
        prices = [int(h['价格(元/月)']) for h in houses if h['价格(元/月)'].isdigit()]
        if prices:
            print(f"平均价格: {sum(prices) / len(prices):.2f} 元/月")
            print(f"最低价格: {min(prices)} 元/月")
            print(f"最高价格: {max(prices)} 元/月")

        # Report the actual saved path (was a broken placeholder before).
        print(f"\n数据已保存到: {filename}")
    else:
        print("没有找到符合条件的房源信息")


# Launch the interactive CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()