#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, NetTaxPolicyItem, make_item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class NeimengguAlsmxzgssgyhxxhj9c50Policy(BaseTaxPolicySpider):
    """Spider for policy announcements from the open-information search
    endpoint of 阿拉善盟行政公署市工业和信息化局 (Alxa League Bureau of
    Industry and Information Technology, Inner Mongolia).

    Pagination and request generation are delegated to helpers on
    ``BaseTaxPolicySpider`` (defined outside this file).
    """
    name = 'neimenggu_alsmxzgssgyhxxhj_9c50_policy'

    # Source-attribution fields copied onto every scraped item.
    source = '阿拉善盟行政公署市工业和信息化局'
    province = '内蒙古自治区'
    city = '阿拉善盟'
    county = ''
    park = ''

    def start_requests(self):
        """Kick off paginated search requests, one per info-type id.

        ``gen_request_by_search_jsp`` (base-class helper) handles paging;
        ``parse_response`` extracts detail links from each listing page and
        ``parse_content`` builds items from the detail pages.
        """
        url = "https://www.als.gov.cn/module/xxgk/search.jsp?"
        params = {'currpage': '1'}
        for infotypeId in ['ALSA35']:
            data = {
                "divid": "div210",
                "infotypeId": infotypeId,
                "jdid": "1",
                "area": "152900000000004",
                "standardXxgk": "0"
            }
            yield from self.gen_request_by_search_jsp(
                url=url, params=params, data=data, callback=self.parse_content, parse_response=self.parse_response
            )

    def parse_response(self, response, **kwargs):
        """Yield requests for detail pages linked from a listing page.

        Only anchors whose href contains 'htm' are followed; anything else
        (javascript links, anchors without an href) is skipped.
        """
        callback = kwargs.get("callback")
        list_select = response.xpath("//tr")
        for i in list_select.xpath(".//a"):
            url = i.xpath("./@href").get()
            # BUGFIX: .get() returns None for an <a> with no href attribute,
            # and `'htm' not in None` raises TypeError — guard against None
            # before the substring test so one bad anchor can't abort parsing.
            if not url or 'htm' not in url:
                continue
            yield self.Request(url, callback=callback)

    def parse_content(self, response, **kwargs):
        """Build and yield a NetTaxPolicyItem from a policy detail page."""
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        # Prefer explicit meta tags for the publish date; fall back to the
        # value recovered by the generic title/date parser.
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        content = self.process_content(response.text)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=response.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Entry point for running this spider directly during local debugging.
    cmdline.execute(["scrapy", "crawl", "neimenggu_alsmxzgssgyhxxhj_9c50_policy"])
