import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from gz_spider.items import ShenZhenSpiderItem

class ShenzhenSpiderSpider(CrawlSpider):
    """Crawl normative policy documents from the Shenzhen Planning and
    Natural Resources Bureau site (pnr.sz.gov.cn), following index
    pagination and yielding one item per detail page.
    """

    name = 'shenzhen_spider'
    allowed_domains = ['pnr.sz.gov.cn']
    start_urls = ['http://pnr.sz.gov.cn/xxgk/zcwj/gfxwj2/index.html']

    rules = (
        # Pagination URLs: follow to discover more listing pages, no callback.
        Rule(LinkExtractor(allow=r'http://pnr.sz.gov.cn/xxgk/zcwj/gfxwj2/index_\d+\.html'), follow=True),
        # Detail-page URLs: parsed by parse_detail, links inside not followed.
        Rule(LinkExtractor(allow=r'http://pnr.sz.gov.cn/xxgk/zcwj/gfxwj2/content/post_\d+\.html'),
             callback='parse_detail', follow=False),
    )

    def parse_detail(self, response):
        """Extract title, date, body text and attachment links from one
        policy detail page and yield a ShenZhenSpiderItem.

        :param response: scrapy Response for a post_\\d+.html detail page.
        :yields: ShenZhenSpiderItem with title/date/content/image fields.
        """
        title = response.xpath('//h4/text()').get()
        des = response.xpath('//h5//text()').getall()
        # The second <h5> text node starts with the publish date; keep its
        # first 18 characters. Guard against pages missing that node
        # (the original indexed des[1] unconditionally and could raise
        # IndexError; ''.join() on a string was also a no-op and is dropped).
        date = des[1][:18].strip() if len(des) > 1 else ''
        contents = response.xpath('//font[@id="Zoom"]//p//text()').getall()
        # BUG FIX: the original called .replace(r'\n\t', ''), where the raw
        # string is the literal four characters backslash-n-backslash-t, so
        # real newlines/tabs were never removed. Strip actual newline, tab
        # and space characters from the concatenated body text.
        content = ''.join(contents).replace('\n', '').replace('\t', '').replace(' ', '').strip()
        image_urls = response.xpath('//p[@id="appendix"]//a/@href').getall()
        image_names = response.xpath('//p[@id="appendix"]//a/text()').getall()
        # Pair each attachment href with its link text (filename label).
        image_info = list(zip(image_urls, image_names))
        item = ShenZhenSpiderItem(title=title, date=date, content=content,
                                  image_urls=image_info, images=image_names)
        yield item
