import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode
import requests
from lxml import etree


class spider(BaseTaxPolicySpider):
    """Spider for the Kuandian County (Dandong City) government site.

    Crawls the normative-document listing page, follows each policy link,
    and yields one ``NetTaxPolicyItem`` per detail page tagged with the
    fixed region metadata below.
    """

    name = "dandong_ddskdxrmzf"

    province: str = "辽宁省"  # from spec table
    city: str = "丹东市"  # from spec table
    county: str = "宽甸县"  # from spec table
    park: str = "None"  # from spec table
    source: str = "丹东市宽甸县人民政府"  # from spec table; items with the same source are merged
    url: str = "https://www.lnkd.gov.cn/policy/policyList.ct?siteCode=KDXZF&type=gfxwj"

    # Compiled once at class level instead of rebuilding the pattern on
    # every loop iteration in detail_requests().
    _DATE_RE = re.compile(r"\d{4}-\d{1,2}-\d{1,2}")

    def start_requests(self):
        """Issue the single request for the policy listing page."""
        url = "https://www.lnkd.gov.cn/policy/policyList.ct?siteCode=KDXZF&type=gfxwj"
        yield scrapy.Request(url=url, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse the listing page and request each policy detail page.

        Extracts the detail links and their publish dates (two parallel
        node lists on the page), pairs them up, and yields one request per
        link with the date passed along in ``meta``.

        Raises:
            ValueError: if the link and date counts differ, a date string
                cannot be parsed, or a link is not absolute — all treated
                as "page layout changed, fail loudly".
        """
        links = response.xpath("//div[@class='title']/a/@href").getall()  # detail links
        date_list = response.xpath("//div[@class='time1']/p/text()").getall()  # publish dates
        print("links", len(links), links)
        print("date_list", len(date_list), date_list)

        # Guard clause: the two lists are positionally paired, so a length
        # mismatch means the page structure changed.
        if len(links) != len(date_list):
            raise ValueError("出错了")

        # zip() replaces the original index-based iteration over the two
        # parallel lists.
        for link, raw_date in zip(links, date_list):
            match = self._DATE_RE.search(raw_date)
            if not match:
                print("未找到日期", raw_date)
                raise ValueError("error")

            # Skip javascript: pseudo-links (e.g. pager controls).
            if 'javascript' in link:
                continue

            # Loose absolute-URL check kept from the original: any link
            # without "http" anywhere in it is rejected.
            if 'http' not in link:
                raise ValueError("链接出错")

            yield scrapy.Request(link, callback=self.parse_detail,
                                 meta={'date': match.group()})

        # NOTE(review): pagination is intentionally not implemented here.
        # The previous draft contained a commented-out FormRequest loop
        # copied from another spider (zaq.gov.cn, siteCode=ZAQZF) that did
        # not match this site; it has been removed. If paging is ever
        # needed, read the "共N页" total from the page and POST pageNum
        # back to this site's policyList.ct endpoint.

    def parse_list(self, response, **kwargs):
        # Required by the base-spider interface; listing is handled in
        # detail_requests() instead.
        pass

    def parse_detail(self, response, **kwargs):
        """Build and yield one item from a policy detail page.

        For PDF / .do download URLs the content field stores the URL
        itself; otherwise it stores the serialized page root node.
        """
        item = Item()
        if '.pdf' in response.url or '.do' in response.url:
            content = response.url
        else:
            # xpath(".") serializes the document root as the raw content.
            content = response.xpath(".").get()
        item['title'] = response.xpath("""string(//head/title/text())""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('date')  # set in detail_requests
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Run this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "dandong_ddskdxrmzf"])
