# -*- coding: utf-8 -*-
import scrapy
from myspider.items import MyspiderItem


class A71abSpider(scrapy.Spider):
    """Spider for 71ab.com company listings.

    Crawls a province listing page, yields one detail-page request per
    company row, and follows pagination until the site emits its
    last-page placeholder.
    """
    name = '71ab'
    allowed_domains = ['71ab.com']  # restrict crawling to this domain
    start_urls = ['https://www.71ab.com/province_9_87.html']  # first listing page

    def parse(self, response):
        """Parse a listing page.

        Yields a ``scrapy.Request`` for each company's detail page (the
        partially-filled item travels in ``meta``), then a request for the
        next listing page when one exists.
        """
        li_list = response.xpath('//div[@class="m2l"]//div[@class="list"]')
        for li in li_list:
            item = MyspiderItem()
            item['url'] = li.xpath('./table//ul//li//a/@href').extract_first()

            # The location cell looks like "[province/city]"; extract once,
            # strip the surrounding brackets, then split — instead of the
            # original double extract + slice, which crashed with TypeError
            # when the cell was missing and IndexError when it had no '/'.
            location = li.xpath('./table//tr//td[2]//text()').extract_first()
            if location:
                parts = location[1:-1].split('/')
                item['province'] = parts[0]
                item['city'] = parts[1] if len(parts) > 1 else None
            else:
                item['province'] = None
                item['city'] = None

            # Skip rows without a detail link: scrapy.Request(None) raises
            # and would abort the rest of this page's items.
            if item['url']:
                yield scrapy.Request(
                    item['url'],
                    callback=self.parse_detail,
                    meta={'item': item},
                )

        # Follow pagination. This Destoon-based site leaves the literal
        # placeholder '{destoon_page}' in the last-page link, and the link
        # may be absent entirely; the original substring test
        # (`next_url in '{destoon_page}'`) raised TypeError on None.
        next_url = response.xpath('//div[@class="pages"]/a[last()]/@href').extract_first()
        if next_url and next_url != '{destoon_page}':
            yield scrapy.Request('https://www.71ab.com' + next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a company detail page and fill in the contact fields.

        Completes the item started in :meth:`parse` (passed via ``meta``)
        and yields it. Missing fields come back as ``None`` from
        ``extract_first()``.
        """
        item = response.meta['item']
        # All contact fields live under the same container; hoist the prefix.
        contact = '//div[@id="lianxi"]/div[@class="boxcontent"]'
        item['company'] = response.xpath(contact + '/strong/text()').extract_first()
        item['address'] = response.xpath(contact + '/li[1]/text()').extract_first()
        item['telephone'] = response.xpath(contact + '/li[3]/text()').extract_first()
        item['contacts'] = response.xpath(contact + '/li[4]/text()').extract_first()
        item['phone'] = response.xpath(contact + '/li[5]/text()').extract_first()
        item['introduction'] = response.xpath('//*[@id="jieshao"]/div/p/text()').extract_first()
        yield item
