import asyncio
import random

import aiohttp
import pandas as pd
from parsel import Selector
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Pool of request headers; fetch_page picks one at random per request
# to vary the browser fingerprint.
HEADERS_list = [
    {'User-Agent': ua}
    for ua in (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0',
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2946.89 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2945.74 Safari/537.36',
    )
]
async def fetch_page(session, url, timeout=30, retries=3):
    """Fetch *url* and return the response body text, retrying on failure.

    Args:
        session: aiohttp.ClientSession used to issue the GET request.
        url: absolute URL to fetch.
        timeout: per-request timeout in seconds.
        retries: maximum number of attempts before giving up.

    Returns:
        The response body as text, or None if every attempt failed.
    """
    for attempt in range(retries):
        try:
            # Pick a random request header so successive requests vary.
            headers = random.choice(HEADERS_list)
            # Small random delay to avoid hammering the server.
            await asyncio.sleep(random.uniform(1, 2))
            async with session.get(url, headers=headers, timeout=timeout) as response:
                response.raise_for_status()
                return await response.text()
        except Exception as e:
            logging.error(f"Attempt {attempt + 1} failed to fetch {url}: {e}")
            # Exponential backoff before the next attempt.
            await asyncio.sleep(2 ** attempt)
    # Fix: the original `return None` sat inside the loop body, so the
    # function bailed out after the first failure and never retried.
    return None


def extract_region_links(html_content):
    """Parse a listings page and map each region's name to its relative URL.

    Args:
        html_content: HTML text of the city's ershoufang landing page.

    Returns:
        dict mapping region display name -> href of that region's page.
    """
    page = Selector(text=html_content)
    anchors = page.css('div[data-role="ershoufang"] a')
    # Later duplicates overwrite earlier ones, same as repeated dict.update().
    return {
        anchor.css('::text').get(): anchor.css('::attr(href)').get()
        for anchor in anchors
    }
async def get_region(city_name):
    """Fetch the city's second-hand-housing page and return its region links.

    Args:
        city_name: Lianjia city subdomain, e.g. 'sh' for Shanghai.

    Returns:
        dict mapping region name -> relative URL, as parsed by
        extract_region_links().
    """
    listing_url = f'https://{city_name}.lianjia.com/ershoufang/'
    async with aiohttp.ClientSession() as session:
        page_html = await fetch_page(session, listing_url)
        return extract_region_links(page_html)


async def process_region(city_name, session, key, value, semaphore, pageRange):
    """Scrape all listing pages for one region and return the extracted rows.

    Args:
        city_name: Lianjia city subdomain, e.g. 'sh'.
        session: shared aiohttp.ClientSession.
        key: region display name (written to the '区' column).
        value: region URL path fragment, e.g. '/pudong/ershoufang/'.
        semaphore: asyncio.Semaphore limiting concurrent fetches.
        pageRange: iterable of page numbers to scrape.

    Returns:
        A list of dicts, one per listing.
    """
    logging.info(f"开始抓取 {value} 数据......")
    all_data = []
    for page in pageRange:
        url = f'https://{city_name}.lianjia.com{value}pg{page}'
        logging.info(url)  # was a stray print(); keep progress output in the log
        async with semaphore:
            res_text = await fetch_page(session, url)
            if res_text is None:
                # Fix: fetch_page returns None after exhausting retries; the
                # original passed that straight to Selector() and crashed.
                break
            selector = Selector(text=res_text)
            infos = selector.css('.sellListContent li .info')
            if not infos:
                break  # No more listings found, exit loop
            for info in infos:
                try:
                    title = info.css('.title a::text').get()
                    house_info = info.css('.address .houseInfo::text').get()
                    house_region = key
                    house_location = info.css('.flood .positionInfo a::text').get()
                    # Unit price looks like '12,345元/平' -> '12345'.
                    house_unitPrice = info.css('div.unitPrice span::text').get().split('元/平')[0].replace(',', '')
                    price = info.css('div.totalPrice span::text').get().replace('万', '')
                    # Fix: query inside this listing ('info'), not the whole
                    # page ('selector') — the original repeated the first
                    # listing's follow count / community / town on every row.
                    follow_info = info.css('.followInfo::text').get()
                    # Follow count and publish time are '/'-separated; keep the first part.
                    followers_count = follow_info.split('/')[0].strip() if follow_info else '未知'
                    # Community (小区) is the first link in the position info.
                    community_name = info.css('.positionInfo a:first-of-type::text').get()
                    # Town/street (镇/街道) is the last link in the position info.
                    town_name = info.css('.positionInfo a:last-of-type::text').get()
                    all_data.append({
                        '标题': title,
                        '区': house_region,
                        '镇/街道': town_name,
                        '小区': community_name,
                        '位置': house_location,
                        '单价（元/平米）': int(house_unitPrice),
                        '总价（万）': float(price),
                        '房屋信息': house_info,
                        '网址': url,
                        '关注数': followers_count,
                    })
                except Exception as e:
                    # Skip malformed listings but keep scraping the page.
                    logging.error(f"Error processing {url}: {e}")
    return all_data


async def main():
    """Entry point: discover city regions, scrape them concurrently, save a CSV."""
    from pathlib import Path  # stdlib; local import keeps the top of file unchanged

    # Lianjia city subdomain, e.g. 'sh' for Shanghai.
    city_name = 'sh'
    # Region name -> relative URL for each district of the city.
    regional = await get_region(city_name)
    # Pages to scrape per region.
    pageRange = range(1, 101)
    # Output file name (without extension).
    fileName = "上海市二手房数据"
    print("获取该城市有以下区域")
    print(regional)
    print("抓取开始，在页面数量过多的情况下可能会耗时过长....")
    # NOTE(review): one permit per region means the semaphore never actually
    # throttles (task count == permit count) — confirm intended concurrency.
    semaphore = asyncio.Semaphore(len(regional))  # 控制并发数
    async with aiohttp.ClientSession() as session:
        tasks = [
            process_region(city_name, session, key, value, semaphore, pageRange)
            for key, value in regional.items()
        ]
        all_regions_data = await asyncio.gather(*tasks)

    # Flatten the per-region lists into one list of rows.
    combined_data = [item for region_data in all_regions_data for item in region_data]
    df = pd.DataFrame(combined_data)
    # Fix: create the output directory first — the original to_csv() raised
    # FileNotFoundError whenever ./data did not already exist.
    out_dir = Path('./data')
    out_dir.mkdir(parents=True, exist_ok=True)
    df.to_csv(out_dir / f'{fileName}.csv', index=False, encoding='utf-8')


# Run the async entry point only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())
