# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from fangdd_test1.items import FangddTest1Item

class FangddTestSpiderSpider(CrawlSpider):
    """Crawl fangdd.com: discover regional subdomains, paginate through each
    region's neighborhood ("xiaoqu") listing, and scrape every neighborhood
    detail page into a FangddTest1Item.
    """
    name = 'fangdd_test_spider'
    allowed_domains = ['fangdd.com']
    start_urls = ['https://www.fangdd.com/']

    rules = (
        # Collect every regional subdomain homepage (e.g. xx.fangdd.com/).
        Rule(LinkExtractor(allow=r'\w+\.fangdd\.com/$'), callback='get_urls', follow=True),
        # Follow pagination links inside a region's neighborhood listing.
        Rule(LinkExtractor(allow=r'fangdd\.com/xiaoqu/\?pageNo=\d+'), follow=True),
        # Each individual neighborhood detail page.
        Rule(LinkExtractor(allow=r'/xiaoqu/n-\d+\.html'), callback='parse_detail', follow=False)
    )

    # NOTE: the original no-op __init__ (only forwarded to super()) was
    # removed; the inherited CrawlSpider.__init__ is equivalent.

    def get_urls(self, response):
        """From a regional homepage, request that region's neighborhood index.

        Uses response.urljoin instead of raw string concatenation so the
        result is a valid absolute URL even if response.url has no trailing
        slash.
        """
        yield scrapy.Request(response.urljoin('xiaoqu/'))

    def parse_detail(self, response):
        """Extract name, price and source URL from a neighborhood detail page.

        name/price may be None when the page layout changes and the XPath
        matches nothing; downstream pipelines should tolerate that.
        """
        name = response.xpath('//h1[@class="TopHeader-title"]/text()').get()
        price = response.xpath('//p[@class="BasicDetail-base-price"]/strong/text()').get()
        yield FangddTest1Item(name=name, price=price, url=response.url)