
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem, urllib
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item


class ZhejiangQuzhouPolicy(BaseTaxPolicySpider):
    """Spider for tax-policy documents published on the Quzhou (Zhejiang) portal.

    Sends one POST search request per (source, infotypeId, area) combination to
    the portal's xxgk ``search.jsp`` endpoint, follows the links found on each
    result page to the detail pages, and yields ``NetTaxPolicyItem`` records.
    """

    name = "zhejiang_quzhou_policy"
    province: str = "浙江省"  # taken from the source table
    city: str = "衢州市"  # taken from the source table
    county: str = ""  # taken from the source table
    park: str = ""  # taken from the source table
    source: str = ""  # taken from the source table; identical sources are merged
    url: str = "https://www.qz.gov.cn/col/col1229136124/index.html?cid=0"  # entry URL, recorded for later troubleshooting
    auto_next: bool = True  # forwarded to the base spider's pagination helper

    def start_requests(self):
        """Yield one POST search request per (source, infotypeId, area) tuple.

        The page number is carried in the query string (``currpage``) while the
        search filters travel in the POST body; the base-class callback
        ``gen_request_by_search_jsp_mid`` uses the ``cb_kwargs`` to paginate and
        to dispatch result pages to ``parse_response``.
        """
        base_url = "https://www.qz.gov.cn/module/xxgk/search.jsp"
        params = {'currpage': '1'}
        # Normalize once, outside the loop, so the query string can simply be
        # appended (the original mutated the URL on the first iteration only).
        if '?' not in base_url:
            base_url += '?'
        for source, infotype_id, area in [
            ['衢州市人民政府', 'A0206', ''],
            ['衢州市人民政府', 'A0201', ''],
            ['衢州市人民政府', 'A0207', ''],
            ['衢州市人民政府', 'A0211', ''],
            ['衢州市人民政府', 'A0212', ''],
            ['衢州市人民政府', 'A0202', ''],
            ['衢州市财政局', 'A0207', '11330800002618111W'],
            ['衢州市财政局', 'A0202', '11330800002618111W'],
        ]:
            data = {'infotypeId': infotype_id, 'divid': 'div1543161', "area": area, "jdid": "3084", "standardXxgk": "1"}
            request_url = base_url + urllib.parse.urlencode(params)
            yield self.FormRequest(
                request_url,
                formdata=data,
                method="POST",
                callback=self.gen_request_by_search_jsp_mid,
                cb_kwargs={
                    "params": params,
                    "data": data,
                    "callback": self.parse_content,
                    "auto_next": self.auto_next,
                    "url": base_url,
                    "parse_response": self.parse_response,
                },
                meta={'source': source},
            )

    def parse_response(self, response, **kwargs):
        """Extract detail-page links from a search-result page.

        Result rows may be rendered either as ``<li>`` or ``<td>`` elements;
        each anchor inside them is followed with the callback supplied via
        ``kwargs`` (``parse_content``), propagating ``response.meta`` so the
        per-request ``source`` survives to the detail page.
        """
        callback = kwargs.get("callback")
        list_select = response.xpath("//li") or response.xpath("//td")
        for anchor in list_select.xpath(".//a"):
            url = anchor.xpath("./@href").get()
            # NOTE(review): hrefs are passed through as-is; presumably
            # self.Request (base class) resolves relative URLs — confirm.
            yield self.Request(url, callback=callback, meta=response.meta)

    def parse_content(self, response, **kwargs):
        """Build a NetTaxPolicyItem from a policy detail page.

        Title and a fallback publish time come from the base-class helper;
        the publish date prefers the page's ``PubDate``/``pub_date`` meta
        tags. The raw HTML is normalized via ``process_content``.
        """
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
        )
        content = self.process_content(response.text)
        meta = response.meta
        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=response.url,
            source=meta.get('source') or self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly when the module is executed as a script.
    cmdline.execute(["scrapy", "crawl", "zhejiang_quzhou_policy"])
