
import scrapy

from apps.tax_honor.tax_honor.base_clean_spider import BaseCleanMixin
from apps.tax_honor.tax_honor.items import NetTaxHonorResponseItem
from utils.tools import urlencode, parse_url_params, del_redundant_blank_character


class ZjHzzjxxhjTaxHonorSpider(scrapy.Spider, BaseCleanMixin):
    """Crawl honor announcements published by the Huzhou Bureau of Economy
    and Information Technology (湖州市经济和信息化局).

    The site exposes a JSP column-data endpoint
    (``morecolumndataproxy.jsp``) that answers POSTs with an XML-like
    ``<datastore>`` payload, paginated in 45-record windows.  List records
    link to HTML detail pages which are parsed into
    ``NetTaxHonorResponseItem``s.
    """

    name = "zj_huzjjxxhj_honor"
    # Human-facing landing page of the crawled column (kept for reference;
    # the spider itself talks to the JSP proxy endpoint below).
    url = "https://hzjx.huzhou.gov.cn/col/col1229208168/index.html?uid=6217941&pageNum=1"
    province = "浙江省"
    city = "湖州市"
    county = ""
    hierarchy = "市级"

    def start_requests(self):
        """Issue the initial POST covering records 1-45 of the column list."""
        datas = [
            {
                "url": "https://hzjx.huzhou.gov.cn/module/jpage/morecolumndataproxy.jsp",
                "params": {"startrecord": "1", "endrecord": "45", "perpage": "15"},
                "data": {
                    "col": "1",
                    "appid": "1",
                    "webid": "3617",
                    "path": "/",
                    "columnid": "1229208168,1229208169",
                    "b_vir": "0",
                    "virinfos": "",
                    "sourceContentType": "3",
                    "unitid": "6217941",
                    "keyWordCount": "999",
                    "webname": "湖州市经济和信息化局",
                },
                "cb_kwargs": {
                    "city": "湖州市",
                    "county": "",
                    "hierarchy": "市级",
                },
            },
        ]

        for entry in datas:
            # Pagination parameters travel in the query string while the
            # column selection travels in the POST body.
            yield scrapy.FormRequest(
                method="POST",
                url=entry["url"] + "?" + urlencode(entry["params"]),
                formdata=entry["data"],
                callback=self.parse,
                dont_filter=True,
                cb_kwargs=entry["cb_kwargs"],
            )

    def parse(self, response, **kwargs):
        """Parse one list page.

        On the very first response (no ``page`` cb_kwarg) schedule every
        remaining 45-record window, then — on every response — request the
        detail page of each record whose title passes validation.
        """
        city = kwargs["city"]
        county = kwargs["county"]
        hierarchy = kwargs["hierarchy"]
        root_url, params = parse_url_params(response.request.url)
        _, data = parse_url_params(response.request.body.decode())

        # Only the first response fans out pagination; follow-up pages carry
        # a "page" cb_kwarg and skip this branch.
        if not kwargs.get("page"):
            total_record = int(response.xpath("//datastore//totalrecord/text()").get())
            # Records 1-45 were covered by the initial request; fetch the
            # rest in 45-record windows.
            for start in range(46, total_record + 1, 45):
                yield scrapy.FormRequest(
                    method="POST",
                    url=root_url
                    + "?"
                    + urlencode({**params, "startrecord": str(start), "endrecord": str(start + 44)}),
                    formdata=data,
                    callback=self.parse,
                    dont_filter=True,
                    cb_kwargs={"page": start, **kwargs},
                )

        for record in response.xpath("//datastore//recordset//record"):
            url = response.urljoin(record.xpath(".//a/@href").get())
            # Prefer the @title attribute; fall back to the anchor text.
            document_title = record.xpath(".//a/@title").get() or record.xpath(".//a/text()").get()
            publish_date = record.xpath(".//span/text()").get()
            if document_title and self.check_utils.check_title_is_valid(document_title):
                yield scrapy.Request(
                    url=url,
                    callback=self.parse_detail,
                    cb_kwargs={
                        "document_title": document_title,
                        "publish_date": publish_date,
                        "document_url": url,
                        "city": city,
                        "county": county,
                        "hierarchy": hierarchy,
                    },
                )

    def parse_detail(self, response, **kwargs):
        """Parse a detail page into a ``NetTaxHonorResponseItem`` and
        schedule the attachments (and the page itself) for further parsing.
        """
        # BUGFIX: the original expression '//*[@class="content_bt"]/@text()'
        # is not valid XPath (the attribute axis cannot take a text() node
        # test); lxml raises on evaluation, crashing this callback.  Select
        # the element's text node instead.
        response_title = response.xpath('//*[@class="content_bt"]/text()').get() or ""
        city = kwargs["city"]
        county = kwargs["county"]
        hierarchy = kwargs["hierarchy"]
        # On-page title wins over the list-page title when present.
        document_title = response_title.strip() or kwargs.get("document_title")
        publish_date = kwargs.get("publish_date")
        # Issuing department, e.g. "发布机构：XXX".  NOTE(review): the
        # character class also excludes a literal "*" — presumably to trim
        # trailing markers; confirm against live pages.
        decare_department = response.xpath("string(.)").re_first(r"发布机构\s*[:：]\s*([^\s*]{1,50})")
        document_url = kwargs.get("document_url")
        # Collect absolute URLs of downloadable attachments linked anywhere
        # on the page.
        attachment_url = [
            response.urljoin(href)
            for href in response.xpath("//a/@href").getall()
            if href.endswith((".pdf", ".docx", ".doc", ".xlsx", ".xls", ".zip", ".et"))
        ]
        response_item = {
            "province": self.province,
            "city": city,
            "county": county,
            "document_title": document_title,
            "publish_date": publish_date,
            "decare_department": decare_department,
            "hierarchy": hierarchy,
            "document_url": document_url,
            "attachment_url": attachment_url,
            "deadline": None,
            "source": self.name,
            "item_count": 0,
            "response": del_redundant_blank_character(response.body.decode()),
        }
        yield NetTaxHonorResponseItem(**response_item)
        for url in attachment_url:
            yield scrapy.Request(
                url=url,
                callback=self.parse_spider_detail,
                cb_kwargs={"response_item": response_item},
            )
        # NOTE(review): this re-requests the page just parsed; without
        # dont_filter the default dupefilter may drop it — confirm intended.
        yield scrapy.Request(
            url=response_item["document_url"],
            callback=self.parse_spider_detail,
            cb_kwargs={"response_item": response_item},
        )


if __name__ == "__main__":
    # Local debug entry point: run this spider via the Scrapy CLI.
    # (The unused `Selector` import was removed.)
    from scrapy import cmdline

    cmdline.execute(argv=["scrapy", "crawl", "zj_huzjjxxhj_honor"])
