# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from gov.items import GovItem
from gov.public import sub
import re
class GzSpider(CrawlSpider):
    """Crawl www.gz.gov.cn and scrape articles into ``GovItem``s.

    The site serves several distinct page layouts; ``parse_item`` probes for
    each layout's container div and fills the item from whichever one matches:
    regulations/policy/news pages, press-conference pages, and executive-meeting
    pages.
    """
    name = 'gz'
    allowed_domains = ['www.gz.gov.cn']
    # start_urls = ['http://www.gz.gov.cn/']
    start_urls = ['http://www.gz.gov.cn/2017cwhy/s20452/201707/80dfdca542374708a6f52297bdfa057b.shtml']

    rules = (
        Rule(LinkExtractor(allow=r'\.shtml', unique=True), follow=True, callback='parse_item'),
        # Rule(LinkExtractor(allow=r'\d{6}/.*\.shtml',unique=True), callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract title/date/source/content from a detail page.

        Yields a ``GovItem`` when at least one known layout matched; yields
        nothing for URLs that do not look like article pages.
        """
        # Article pages live under a YYYYMM directory; skip everything else.
        # (renamed from `id` to avoid shadowing the builtin)
        article_id = re.findall(r'\d{6}/(.*)\.shtml', response.url)
        if not article_id:
            return
        item = GovItem()
        # 法规公文,政策解读,通知公告,新闻,政务公开 (regulations / policy / notices / news)
        div = response.xpath('//div[@class="container clearfix"]')
        if div:
            container = div[0]
            slide = container.xpath('.//div[@class="content_slide"]')
            if slide:
                slide = slide[0]
                item['title'] = sub(slide.xpath('.//h1[@class="content_title"]/text()').extract_first())
                item['date'] = sub(slide.xpath('.//li[@class="date"]/span/text()').extract_first())
                # BUG FIX: extract_first() may return None; the original chained
                # .replace() calls crashed with AttributeError on such pages.
                source = container.xpath('.//li[@class="ly"]/span/b/text() | .//li[@class="ly"]/text()').extract_first()
                if source:
                    source = source.replace('\r\n', '').replace('来源：', '').replace('&nbsp;', '').strip()
                item['source'] = source
                if not item['source']:
                    # Fall back to meta tags when the visible "source" node is empty.
                    item['source'] = response.xpath('//meta[@name="ContentSource"]/@content').extract_first()
                    if not item['source']:
                        item['source'] = response.xpath('//meta[@name="ColumnName"]/@content').extract_first()
                item['content'] = sub(''.join(container.xpath('.//div[@class="content_article"]//text()').extract()))
                item['url'] = response.url
        # 新闻发布会 (press conferences)
        div = response.xpath('//div[@class="fbt clearfix"]')
        if div:
            tpxw = div[0].xpath('.//div[@class="tpxw"]')
            xwdt = div[0].xpath('.//div[@class="xwdt"]')
            # BUG FIX: the original checked only tpxw, then indexed xwdt[0]
            # unguarded, raising IndexError when the news div was missing.
            if tpxw and xwdt:
                tpxw = tpxw[0]
                xwdt = xwdt[0]
                item['title'] = sub(tpxw.xpath('.//div[@class="tpxw_xxbt"]/text()').extract_first())
                item['date'] = sub(tpxw.xpath('.//span[@class="date"]/text()').extract_first())
                item['source'] = sub(tpxw.xpath('.//span[@class="ly"]/text()').extract_first())
                item['content'] = sub(''.join(xwdt.xpath('.//div[@id="demo"]//text()').extract()))
                item['url'] = response.url
        # 常务会议 (executive meetings)
        div = response.xpath('//div[@class="cwhy"]')
        if div:
            # BUG FIX: the original used an absolute XPath ('//div[...]'), which
            # searches the whole document instead of inside div[0], and indexed
            # [0] unguarded (IndexError when the node is absent).
            text_nodes = div[0].xpath('.//div[@class="text"]')
            if text_nodes:
                text = text_nodes[0]
                item['title'] = sub(text.xpath('.//div[@class="cwhy_cont"]//center/text() | .//div[@class="cwhy_cont"]/text()').extract_first())
                item['source'] = sub(text.xpath('.//*[@class="ly"]/text()').extract_first())
                # BUG FIX: guard against a None/empty source (TypeError on `in`)
                # and against findall() matching nothing (IndexError on [0]).
                if item['source'] and '来源' in item['source']:
                    matches = re.findall('来源：(.*).*发布时间：(.*)', item['source'], re.S)
                    if matches:
                        item['source'], item['date'] = matches[0][0].strip(), matches[0][1]
                    else:
                        item['date'] = sub(text.xpath('.//span[@class="time"]/text()').extract_first())
                else:
                    item['date'] = sub(text.xpath('.//span[@class="time"]/text()').extract_first())
                item['content'] = sub(''.join(text.xpath('.//div[@class="wb"]//text()').extract()))
                item['url'] = response.url
        # An empty GovItem is falsy; only yield when some layout populated it.
        if item:
            yield item