from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item
from utils.tools import urlencode, urldecode, parse_url_params


class HenanLysswj1677Policy(BaseTaxPolicySpider):
    """Spider for tax-policy articles published by the Luoyang Bureau of Commerce.

    Fetches the article list from the site's JSON API (channel 120) and emits
    one ``NetTaxPolicyItem`` per article that carries a publish date.
    """

    name = "henan_lysswj_1677_policy"
    province: str = "河南省"  # from the source table
    city: str = "洛阳市"  # from the source table
    county: str = ""  # from the source table
    park: str = ""  # from the source table
    source: str = "洛阳市商务局"  # from the source table; identical sources are merged
    # Entry URL recorded for later troubleshooting.
    url: str = "http://swj.ly.gov.cn/zwgklist?channelChildrenId=120&channelChildrenName=%E8%A1%8C%E6%94%BF%E8%A7%84%E8%8C%83%E6%80%A7%E6%96%87%E4%BB%B6"
    auto_next: bool = True

    def start_requests(self):
        """Request the article-list JSON endpoint (up to 100 entries)."""
        api_url = "http://swj.ly.gov.cn/api/article/newsByChannelIdTopLimit?channelId=120&limit=100"
        yield self.Request(api_url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Yield one item per article record in the JSON response.

        Records without a publish date are skipped.
        """
        for record in response.json()['data']:
            if not record['createTime']:
                # No publish date — skip, matching downstream requirements.
                continue
            # NOTE(review): entry URL uses channelChildrenId=120 but detail
            # links use 121 — presumably the detail channel id; confirm on site.
            detail_url = f"http://swj.ly.gov.cn/zwgkdetail?newsId={record['id']}&channelChildrenId=121"
            yield make_item(NetTaxPolicyItem, dict(
                title=record['newsTitle'],
                publish_date=record['createTime'],
                content=record['content'],
                source_url=detail_url,
                source=self.source,
                province=self.province,
                city=self.city,
                county=self.county,
                park=self.park,
            ))


if __name__ == "__main__":
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_lysswj_1677_policy"])
