import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for government-information notices published by the Dalian
    Shahekou District People's Government (大连市沙河口区人民政府).

    Fetches the notice list via a single POST (pageSize=100), verifies no
    pagination is needed, then follows each detail link and yields one
    ``NetTaxPolicyItem`` per notice page.
    """

    name = "dalian_dlsshkqrmzf"

    province: str = "辽宁省"  # from region table
    city: str = "大连市"  # from region table
    county: str = "沙河口区"  # from region table
    park: str = "None"  # from region table
    source: str = "大连市沙河口区人民政府"  # from region table; same source is merged downstream
    url: str = "http://www.dlshk.gov.cn/web/shk/zfxxgk?index=2"  # entry URL, kept for troubleshooting

    def start_requests(self):
        """Issue the list-page POST request for the first 100 notices."""
        url = 'http://www.dlshk.gov.cn/web/shk/zfxxgk/morelist?t1=zhengwugongkai&t2=43145f51066d48cbbc8fb8761a7313a0&t3='
        payload = {
            'pageNo': '1',
            'pageSize': '100',
            'Type': '',
            'Year': '',
            'orderBy': 'a.istopic desc,a.userPublishDate desc'
        }
        # FormRequest posts `payload` as form data; no custom headers are needed.
        yield scrapy.FormRequest(url=url, formdata=payload, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse the list response, check the record count, and follow each
        detail link to ``parse_detail``.

        Raises:
            RuntimeError: if the site reports more than 100 records, since
                this spider only fetches a single page (pageSize=100).
        """
        # Extract the total record count ("总共 N 条记录") from the page text.
        match = re.search(r"总共 \s*(\d+)\s* 条记录", response.text)
        if match is None:
            # Fail loudly but with context instead of an opaque IndexError.
            self.logger.warning("record-count marker not found in %s", response.url)
            return
        notice_num = int(match.group(1))
        self.logger.info("总条数：%s", notice_num)
        if notice_num > 100:
            # Fix: `raise("翻页了")` raised a plain str, which is a TypeError
            # in Python 3; raise a real exception so the guard actually works.
            raise RuntimeError("翻页了")
        links = response.xpath('//table/tr//a/@href').getall()
        for link in links:
            # Hrefs are site-relative; prepend the host to build absolute URLs.
            real_url = "http://www.dlshk.gov.cn" + link
            yield response.follow(real_url, callback=self.parse_detail)

    def parse_list(self, response, **kwargs):
        # Not used: list handling is done in `detail_requests`; kept for the
        # base-class interface.
        pass

    def parse_detail(self, response, **kwargs):
        """Extract one notice item from a detail page.

        Reuses an item passed via ``response.meta['item']`` if present,
        otherwise starts a fresh one.
        """
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # xpath(".") selects the document root, so this stores the full page
        # HTML as the content — presumably cleaned downstream; TODO confirm.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Run this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "dalian_dlsshkqrmzf"])
