#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class ShanxiYcsrmzf9737Policy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Yuncheng municipal
    government (运城市人民政府), Shanxi province.

    Listing pages are fetched from the site's JSON "getDocuments" endpoint;
    each listed document is then parsed into a NetTaxPolicyItem.
    """

    name = 'shanxi_ycsrmzf_9737_policy'

    # Provenance constants copied verbatim into every emitted item.
    province = '山西省'
    city = '运城市'
    county = ''
    park = ''
    source = '运城市人民政府'
    url = 'https://www.yuncheng.gov.cn/zwgk_1/xxgkml/zfwj/szfwj/index.shtml'

    def start_requests(self):
        """Kick off crawling via the base-class pagination helper."""
        # Query parameters for the site's paged document-listing API;
        # chanId 578 selects the municipal-government-documents channel.
        query = {
            "pageIndex": "1",
            "pageSize": "20",
            "siteId": "1",
            "ChannelType": "1",
            "KeyWord": "",
            "KeyWordType": "",
            "chanId": "578",
            "order": "1",
        }
        yield from self.gen_request_by_get_documents(
            url="https://www.yuncheng.gov.cn/intertidwebapp/govChanInfo/getDocuments",
            data=query,
            callback=self.parse_detail,
        )

    def parse_detail(self, response, **kwargs):
        """Extract a single policy-document detail page into an Item."""
        item = Item()
        # Provenance fields come straight from the class-level constants.
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        item['source'] = self.source
        item['source_url'] = response.request.url
        # Page-specific fields scraped from the detail HTML.
        item['title'] = response.xpath('//*[@class="info_title"]/text()').get()
        item['publish_date'] = response.xpath('//*[@class="infoe_time"]/text()').get()
        # NOTE(review): the '.' XPath serializes the entire document node as
        # "content" — this looks like a generator placeholder; confirm whether
        # a narrower content-container selector was intended.
        item['content'] = response.xpath('.').get()
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly with `python <file>.py`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "shanxi_ycsrmzf_9737_policy"])
