import scrapy

from anjuke.items import AnjukeItem
class HouseSpider(scrapy.Spider):
    """Spider that scrapes second-hand house listings from chongqing.anjuke.com.

    Yields one ``AnjukeItem`` per listing card on each results page, then
    follows the pagination link until the "next" button is no longer active.
    """
    name = "house"
    allowed_domains = ["chongqing.anjuke.com"]
    start_urls = ["https://chongqing.anjuke.com/sale/?from=HomePage_TopBar"]
    # Browser-like User-Agent so requests are not rejected by the site's
    # basic bot filtering.
    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'}
    }

    def parse(self, response):
        """Extract listing fields from a results page and follow pagination.

        :param response: scrapy Response for one search-results page.
        :yields: ``AnjukeItem`` instances and a ``scrapy.Request`` for the
            next page when one exists.
        """
        # Each listing card is a <div class="property"> element.
        node_list = response.xpath('//div[@class="property"]')

        for node in node_list:
            item = AnjukeItem()
            # House title.
            item['name'] = node.xpath('.//h3[@class="property-content-title-name"]/text()').get()
            # Total price (number only; unit is in a sibling element).
            item['price'] = node.xpath('.//span[@class="property-price-total-num"]/text()').get()
            # Average price per square meter.
            item['average'] = node.xpath('.//p[@class="property-price-average"]/text()').re_first(r'(\d+)元/㎡')
            # Floor area in ㎡. re_first scans every matching <p> text node,
            # so the same selector serves area / year / orientation below.
            item['area'] = node.xpath('.//p[@class="property-content-info-text"]/text()').re_first(r'(\d+\.?\d*)㎡')
            # Year built.
            item['year'] = node.xpath('.//p[@class="property-content-info-text"]/text()').re_first(r'(\d+)年建造')
            # Orientation. BUG FIX: regex alternation is leftmost-first, so
            # '南北' must precede '南' — with the old order (南|北|南北) a
            # "南北" (south-north) listing always matched as just "南".
            # (Field name 'posttion' is a typo but matches AnjukeItem's
            # declared field, so it must stay.)
            item['posttion'] = node.xpath('.//p[@class="property-content-info-text"]/text()').re_first(r'(南北|南|北)')
            # Address: the spans hold district / area / street fragments;
            # join them into one string, or None when the element is missing.
            # ('adress' spelling kept to match AnjukeItem's field.)
            adress_parts = node.xpath('.//p[@class="property-content-info-comm-address"]/span/text()').getall()
            item['adress'] = ''.join(adress_parts) if adress_parts else None
            # Layout (rooms/halls, e.g. "3室2厅"), assembled from its spans.
            ting_parts = node.xpath(
                './/p[@class="property-content-info-text property-content-info-attribute"]/span/text()').getall()
            item['ting'] = ''.join(ting_parts) if ting_parts else None
            yield item

        # Follow the "next page" link only while it carries the
        # 'next-active' class — the last page drops it, ending the crawl.
        next_page = response.xpath('//a[contains(@class, "next") and contains(@class, "next-active")]/@href').get()
        if next_page:
            # urljoin resolves a relative href against the current page URL.
            next_url = response.urljoin(next_page)
            yield scrapy.Request(url=next_url, callback=self.parse)