import json
import re

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item
from utils.tools import urlencode, urldecode, parse_url_params


class HenanLyskxjsjEe25Policy(BaseTaxPolicySpider):
    """Spider collecting tax-policy documents from the Epoint-based portal.

    Flow: fetch the list landing page -> extract ``infoid`` values from the
    ``onclick`` handlers of list anchors -> POST each id to the detail API ->
    build a ``NetTaxPolicyItem`` from the JSON payload.

    NOTE(review): the metadata below claims 洛阳市 / 洛阳市科学技术局 (Luoyang),
    but every URL in this spider points at xuchang.gov.cn (Xuchang) — confirm
    the province/city/source fields against the site actually crawled.
    """

    name = "henan_lyskxjsj_ee25_policy"
    province: str = "河南省"  # taken from the tracking sheet
    city: str = "洛阳市"  # taken from the tracking sheet
    county: str = ""  # taken from the tracking sheet
    park: str = ""  # taken from the tracking sheet
    source: str = "洛阳市科学技术局"  # taken from the tracking sheet; items with the same source are merged
    url: str = "http://czj.xuchang.gov.cn/govxxgk/11411000005747357M/openSubPage.html?specialurl=/govxxgk/11411000005747357M/category/009/009005/009005002/govlist.html&righttitle=%E5%85%B6%E4%BB%96%E6%96%87%E4%BB%B6"  # entry URL recorded for later troubleshooting
    auto_next: bool = True

    def start_requests(self):
        """Yield the initial request for the policy list page."""
        url = "http://www.xuchang.gov.cn/govxxgk/11411000005747357M/category/009/009005/govlist.html?%E6%94%BF%E7%AD%96?righttitle=%E6%94%BF%E7%AD%96"
        yield self.Request(url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse the first list page, then request any remaining pages.

        The landing response IS page 1 and is parsed directly below, so the
        pagination loop starts at page 2 — the original ``range(1, ...)``
        re-fetched page 1 a second time.
        """
        yield from self.parse_list(response, **kwargs)
        page_num = 1  # total number of list pages; raise this if the site grows
        for page in range(2, page_num + 1):
            root_url, params = parse_url_params(response.request.url)
            params["page"] = page
            new_url = root_url + "?" + urlencode(params)
            yield self.Request(new_url, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Extract detail ids from the list page and request each detail API.

        Anchors carry an ``onclick`` like ``linkToNew('<infoid>', ...)``;
        the id is POSTed to the Epoint detail endpoint.
        """
        for onclick in response.xpath('//td//a/@onclick').extract():
            match = re.search(r"linkToNew\(\'(.*?)\'", onclick)
            if match is None:
                # Not a linkToNew(...) handler — skip it rather than crash
                # with an IndexError on findall(...)[0].
                continue
            url = "http://www.xuchang.gov.cn/EpointWebBuilder/zNJSAction.action?cmd=getOpenDetail"
            data = {
                "infoid": match.group(1),
                "siteguid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a"
            }
            yield self.FormRequest(url, method="POST", formdata=data, callback=self.parse_content)

    def parse_content(self, response, **kwargs):
        """Build an item from the detail API response.

        The endpoint returns JSON whose ``custom`` field is itself a JSON
        string; the first element of its ``data`` list holds the document.
        Items without a publish date are dropped.
        """
        data = json.loads(response.json()['custom'])["data"][0]
        title = data['title']
        publish_date = data['infodate']
        content = data['infocontent']
        content = self.process_content(content)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=response.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        if item["publish_date"]:
            yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "henan_lyskxjsj_ee25_policy"])
