import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for tax-policy notices published by the Xigang District
    People's Government of Dalian (大连市西岗区人民政府).

    Crawls the public-information list page, follows each article link,
    and yields one ``NetTaxPolicyItem`` per detail page.
    """

    name = "dalian_dlsxgqrmzf"

    province: str = "辽宁省"  # from the source spreadsheet
    city: str = "大连市"  # from the source spreadsheet
    county: str = "西岗区"  # from the source spreadsheet
    park: str = "None"  # from the source spreadsheet
    source: str = "大连市西岗区人民政府"  # from the spreadsheet; items with the same source are merged
    url: str = "https://www.dlxg.gov.cn/zwxxgk/xxgk.html"  # entry URL, recorded for later troubleshooting

    # Publish dates appear as e.g. 2023-01-31; compiled once at class level.
    _DATE_RE = re.compile(r'\d{4}-\d{2}-\d{2}')

    def start_requests(self):
        """Request the article-list page.

        TLS verification is disabled via ``_request_kwargs`` — presumably the
        site serves a certificate that fails validation (TODO confirm).
        """
        url = "https://www.dlxg.gov.cn/info/iList.jsp?cat_id=15159&tm_id=482"
        yield scrapy.Request(
            url=url,
            callback=self.detail_requests,
            meta={
                '_request_kwargs': {'verify': False},
            },
            headers={'User-Agent': 'Mozilla/5.0'},
        )

    def detail_requests(self, response, **kwargs):
        """Follow every article link on the list page.

        The first ``<li>`` is skipped (``position() > 1``) — presumably a
        header/banner entry; verify against the live page.
        """
        links = response.xpath('//ul[@class="WorkPicList1"]/li[position() > 1]/a/@href').getall()
        for link in links:
            yield response.follow("https://www.dlxg.gov.cn" + link, callback=self.parse_detail)

    def parse_list(self, response, **kwargs):
        """Unused; present to satisfy the BaseTaxPolicySpider interface."""
        pass

    def parse_detail(self, response, **kwargs):
        """Extract one policy item from an article detail page and yield it."""
        item = response.meta.get('item')
        if item is None:
            item = Item()
        item['title'] = response.xpath("string(//div[@class='info_title'])").get()
        item['source_url'] = response.url
        # Guard against a missing/empty date node: the previous
        # ``re.findall(...)[0]`` raised TypeError (xpath .get() -> None)
        # or IndexError (no date in the text) on such pages.
        date_text = response.xpath("string(//p[@class='news_tim']/text()[1])").get() or ""
        match = self._DATE_RE.search(date_text)
        item['publish_date'] = match.group(0) if match else None
        # NOTE(review): xpath(".") serializes the entire document, so
        # 'content' holds the full page HTML. Looks like a deliberate raw
        # capture — confirm a narrower content node isn't intended.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly for local debugging.
    from scrapy import cmdline

    command = "scrapy crawl dalian_dlsxgqrmzf"
    cmdline.execute(command.split())
