import scrapy

from apps.tax_honor.tax_honor.base_clean_spider import BaseCleanMixin
from apps.tax_honor.tax_honor.items import NetTaxHonorResponseItem
from utils.tools import urlencode, parse_url_params, del_redundant_blank_character


class ZjHzzjxxhjTaxHonorSpider(scrapy.Spider, BaseCleanMixin):
    """Crawl tax-honor announcements from the Hangzhou Bureau of Economy and
    Information Technology (jxj.hangzhou.gov.cn).

    Two listing categories (infotypeId A007A001 / A007A002) are paged through;
    each listing row leads to a detail page, which is emitted as a
    ``NetTaxHonorResponseItem`` plus follow-up requests for its attachments.
    """

    name = "zj_hzjjxxhj_honor"
    url = "https://jxj.hangzhou.gov.cn/col/col1229279601/index.html?number=A007"
    province = "浙江省"
    city = "杭州市"
    county = ""

    # POST endpoint that backs the public listing page in `url`.
    search_url = "https://jxj.hangzhou.gov.cn/module/xxgk/search.jsp"
    # The two honor sub-categories crawled by this spider.
    infotype_ids = ("A007A001", "A007A002")
    # File suffixes treated as downloadable attachments on detail pages.
    attachment_suffixes = (".pdf", ".docx", ".doc", ".xlsx", ".xls", ".zip", ".et")

    def _list_request(self, infotype_id, page=1, cb_kwargs=None):
        """Build the POST request for one listing page of one category.

        The site expects the same search fields both as query-string params
        and in the form body (plus a few body-only fields), so both are built
        here from a single source of truth.
        """
        params = {
            "standardXxgk": "0",
            "isAllList": "1",
            "texttype": "0",
            "fbtime": "-1",
            "vc_all": "",
            "vc_filenumber": "",
            "vc_title": "",
            "vc_number": "",
            "currpage": str(page),
            "sortfield": "b_settop:0,createdatetime:0,orderid:0",
        }
        data = {
            "infotypeId": infotype_id,
            "jdid": "3244",
            "area": " ",
            "divid": "div1692658",
            **params,
        }
        return scrapy.FormRequest(
            method="POST",
            url=self.search_url + "?" + urlencode(params),
            formdata=data,
            callback=self.parse,
            dont_filter=True,
            cb_kwargs=cb_kwargs or {},
        )

    def start_requests(self):
        # One initial (page 1) request per category; `parse` fans out the
        # remaining pages once the total page count is known.
        for infotype_id in self.infotype_ids:
            yield self._list_request(infotype_id)

    def parse(self, response, **kwargs):
        """Parse a listing page: fan out pagination, then yield detail requests."""
        root_url, params = parse_url_params(response.request.url)
        _, data = parse_url_params(response.request.body.decode())
        page = kwargs.get("page")
        if not page:
            # First response of a category: discover the page count from the
            # "共 N 页" text. Guard against a missing match (original code
            # raised TypeError on int(None)); fall back to a single page.
            total_page_text = response.xpath("string(.)").re_first(r"共\s+(\d+)\s+页")
            total_page = int(total_page_text) if total_page_text else 1
            # Start at page 2: this very response already carries page 1,
            # whose rows are parsed below (the original re-requested page 1
            # and scraped its items twice).
            for i in range(2, total_page + 1):
                yield scrapy.FormRequest(
                    method="POST",
                    url=root_url + "?" + urlencode({**params, "currpage": str(i)}),
                    formdata={**data, "currpage": str(i)},
                    callback=self.parse,
                    dont_filter=True,
                    cb_kwargs={"page": i},
                )

        for li in response.xpath('//*[@class="tab_box"]'):
            url = li.xpath(".//a/@href").extract_first()
            document_title = li.xpath(".//*[@onmouseover]/@mc").extract_first()
            publish_date = li.xpath('.//*[@class="sub_tab_span3"]/text()').extract_first()
            decare_department = li.xpath('.//*[@class="sub_tab_span4"]/text()').extract_first()
            # check_utils is presumably provided by BaseCleanMixin — it
            # filters out rows whose title is not a valid honor document.
            if self.check_utils.check_title_is_valid(document_title):
                yield scrapy.Request(
                    url=url,
                    callback=self.parse_detail,
                    cb_kwargs={
                        "document_title": document_title,
                        "publish_date": publish_date,
                        "decare_department": decare_department,
                        "document_url": url,
                    },
                )

    def parse_detail(self, response, **kwargs):
        """Parse a detail page: emit the response item and follow attachments."""
        document_title = kwargs.get("document_title")
        publish_date = kwargs.get("publish_date")
        decare_department = kwargs.get("decare_department")
        document_url = kwargs.get("document_url")
        # Collect absolute URLs of every link that looks like an attachment.
        attachment_url = [
            response.urljoin(href)
            for href in response.xpath("//a/@href").getall()
            if href.endswith(self.attachment_suffixes)
        ]
        response_item = {
            "province": self.province,
            "city": self.city,
            "county": self.county,
            "document_title": document_title,
            "publish_date": publish_date,
            "decare_department": decare_department,
            "hierarchy": None,
            "document_url": document_url,
            "attachment_url": attachment_url,
            "deadline": None,
            "source": self.name,
            "item_count": 0,
            "response": del_redundant_blank_character(response.body.decode()),
        }
        yield NetTaxHonorResponseItem(**response_item)
        for url in attachment_url:
            yield scrapy.Request(
                url=url,
                callback=self.parse_spider_detail,
                cb_kwargs={"response_item": response_item},
            )
        # NOTE(review): this re-requests the page we are currently parsing
        # without dont_filter, so the default dupefilter will normally drop
        # it — confirm whether a custom dupefilter is configured.
        yield scrapy.Request(
            url=response_item["document_url"],
            callback=self.parse_spider_detail,
            cb_kwargs={"response_item": response_item},
        )


if __name__ == "__main__":
    # Convenience entry point: launch this spider through the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute("scrapy crawl zj_hzjjxxhj_honor".split())
