# -*- coding: utf-8 -*-
import scrapy
from myspider.items import MyspiderItem
import re


class QishunnewsSpider(scrapy.Spider):
    """Spider for company listings on 11467.com (Changchun sitemap).

    Walks the paginated company list, yields one request per company
    detail page, and fills in contact fields on ``MyspiderItem``.
    """
    name = 'qishunnews'
    allowed_domains = ['11467.com']
    start_urls = ['http://www.11467.com/changchun/sitemap/']

    def parse(self, response):
        """Parse one listing page: emit detail requests, then paginate.

        :param response: listing-page response.
        :yields: ``scrapy.Request`` objects for detail pages and the
            next listing page.
        """
        li_list = response.xpath('//div[@id="il"]//ul[@class="companylist"]//li//div[@class="f_l"]')
        for li in li_list:
            href = li.xpath('.//h4//a//@href').extract_first()
            if not href:
                # Entry without a link cannot be followed; skipping also
                # avoids a TypeError from 'http:' + None.
                continue
            item = MyspiderItem()
            item['url'] = 'http:' + href  # hrefs on the site are protocol-relative
            item['company'] = li.xpath('.//h4//text()').extract_first()
            item['province'] = '吉林省'
            item['city'] = '长春市'
            yield scrapy.Request(
                item['url'],
                callback=self.parse_detail,
                meta={'item': item}
            )
        # Locate the next-page URL; fall back to the "last page" link
        # when no explicit "next page" anchor is present.
        next_url = response.xpath('//div[@id="il"]//div[@class="pages"]//a[text()="下一页"]//@href').extract_first()
        if next_url is None:
            next_url = response.xpath('//div[@id="il"]//div[@class="pages"]//a[text()="尾页"]//@href').extract_first()
        if next_url is None:
            # No pagination links at all: crawl of this listing is done.
            # (Previously this fell through to 'http:' + None -> TypeError.)
            return
        next_url = 'http:' + next_url
        # Stop once the "next" link points back at the current page
        # (happens on the final page via the "尾页" fallback).
        if next_url != response.url:
            yield scrapy.Request(next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a company detail page and yield the completed item.

        Expects the partially-filled item in ``response.meta['item']``.
        Fields left as ``None`` when the page does not provide them.
        """
        item = response.meta['item']
        item['address'] = response.xpath('//div[@id="contact"]/div[@class="boxcontent"]/dl/dd[1]/text()').extract_first()
        item['telephone'] = response.xpath('//div[@id="contact"]/div[@class="boxcontent"]/dl/dd[2]/text()').extract_first()
        # The '未提供' ("not provided") placeholder rows are filtered out
        # directly in the xpath predicate.
        item['contacts'] = response.xpath('//div[@id="contact"]/div[@class="boxcontent"]/dl/dd[3][text()!="未提供"]/text()').extract_first()
        item['phone'] = response.xpath('//div[@id="contact"]/div[@class="boxcontent"]/dl/dd[4][text()!="未提供"]/text()').extract_first()
        item['qq'] = response.xpath('//div[@id="contact"]/div/dl/dd[6]/a/@href').extract_first()
        if item['qq']:
            # Pull the QQ number out of the link's query string.
            # '[^&]+' also matches when 'uin' is the last parameter,
            # which the old pattern 'uin=(.+?)&' silently missed.
            item['qq'] = ''.join(re.findall(r'uin=([^&]+)', item['qq']))
        item['introduction'] = response.xpath('//div[@id="aboutuscontent"]//text()').extract_first()
        yield item
