import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode
import requests
from lxml import etree


class spider(BaseTaxPolicySpider):
    """Spider for regulatory documents (规范性文件) on the Dandong Municipal
    People's Government website.

    Flow:
      1. ``start_requests`` fetches page 1 of the document list.
      2. ``detail_requests`` reads the total page count from page 1,
         collects (link, date) pairs from page 1, POSTs for pages 2..N
         and collects their pairs, then yields one request per article.
      3. ``parse_detail`` extracts each article page into an item.
    """

    name = "dandong_ddsrmzf"

    province: str = "辽宁省"  # from the assignment sheet
    city: str = "丹东市"  # from the assignment sheet
    county: str = ""  # from the assignment sheet
    park: str = "None"  # from the assignment sheet
    source: str = "丹东市人民政府"  # rows with the same source are merged downstream
    url: str = "https://www.dandong.gov.cn/policy/policyList.ct?siteCode=DDSZF&type=gfxwj"  # entry URL, kept for troubleshooting

    # Patterns compiled once instead of per loop iteration.
    # Total page count, e.g. "共12页</span>".
    _PAGES_RE = re.compile(r"共(\d+)页</span>")
    # Page-1 dates come as dash-separated digits, possibly unpadded.
    _DASH_DATE_RE = re.compile(r"(\d{4})-(\d{1,2})-(\d{1,2})")
    # Pages 2+ dates come in Chinese form, e.g. "2023年1月5日".
    _CN_DATE_RE = re.compile(r"(\d{4})年(\d{1,2})月(\d{1,2})日")

    def start_requests(self):
        """Kick off the crawl with list page 1."""
        url = "https://www.dandong.gov.cn/policy/policyList.ct?siteCode=DDSZF&type=gfxwj"
        yield scrapy.Request(url=url, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Collect (link, date) pairs from every list page, then yield one
        detail request per article.

        Raises:
            ValueError: when the page-count marker is missing, when the
                link/date column counts disagree ('error1'/'error2'/'error3'),
                or when page 1 contains a non-absolute link ('error').

        NOTE(review): pages 2..N are fetched with the blocking ``requests``
        library inside a Scrapy callback, which stalls the Twisted reactor
        for the duration of each POST. Tolerable for a small page count;
        ``scrapy.FormRequest`` would be the non-blocking fix.
        """
        links_list = []
        tot_date_list = []

        # Fail with a clear message (instead of AttributeError on None)
        # if the site layout changes and the page-count span disappears.
        pages_match = self._PAGES_RE.search(response.text)
        if pages_match is None:
            raise ValueError('total page count not found on list page')
        pages = int(pages_match.group(1))
        self.logger.info("总页数：%s", pages)

        # ---- page 1: parsed from the Scrapy response -------------------
        links = response.xpath("//div[@class='mid']//a/@href").getall()
        date_list = response.xpath("//div[@class='mid']//span/text()").getall()
        if len(date_list) != len(links):
            raise ValueError('error1')
        for link, raw_date in zip(links, date_list):
            if 'http' not in link:
                # A relative link means the page layout changed; fail loudly.
                raise ValueError('error')
            m = self._DASH_DATE_RE.search(raw_date)
            if m:
                links_list.append(link)
                # Zero-pad month/day so page-1 dates use the same
                # YYYY-MM-DD format as pages 2..N below.
                tot_date_list.append(
                    f"{m.group(1)}-{m.group(2).zfill(2)}-{m.group(3).zfill(2)}"
                )
            else:
                self.logger.warning("未找到日期 %s", raw_date)

        # ---- pages 2..N: fetched via POST -----------------------------
        # Headers are loop-invariant; build them once.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.dandong.gov.cn',
            'Pragma': 'no-cache',
            'Referer': 'https://www.dandong.gov.cn/policy/policyList.ct?siteCode=DDSZF&type=gfxwj&organCode=CMS&pageMax=15',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"'
        }
        list_url = "https://www.dandong.gov.cn/policy/policyList.ct?siteCode=DDSZF&type=gfxwj&organCode=CMS&pageMax=15"
        for page in range(2, pages + 1):
            self.logger.info("当前页：%s", page)
            payload = f'pageNum={page}&limit=15&tzNum=2'
            # Blocking call -- see NOTE in the docstring.
            page_resp = requests.post(list_url, headers=headers, data=payload)
            root = etree.fromstring(page_resp.text, etree.HTMLParser())

            links = root.xpath("//div[@class='mid']//a/@href")
            date_list = root.xpath("//div[@class='mid']//span/text()")
            if len(date_list) != len(links):
                raise ValueError('error2')
            for link, raw_date in zip(links, date_list):
                m = self._CN_DATE_RE.search(raw_date)
                if m:
                    # Normalise "YYYY年M月D日" to zero-padded YYYY-MM-DD.
                    date_str = f"{m.group(1)}-{m.group(2).zfill(2)}-{m.group(3).zfill(2)}"
                else:
                    self.logger.warning("日期格式不正确")
                    # Keep the raw text rather than dropping the row, so the
                    # link/date lists stay aligned (matches original behavior).
                    date_str = raw_date
                links_list.append(link)
                tot_date_list.append(date_str)

        self.logger.info("去重链接数：%s", len(set(links_list)))
        if len(links_list) != len(tot_date_list):
            raise ValueError('error3')
        for link, date in zip(links_list, tot_date_list):
            # Duplicate URLs are left to Scrapy's built-in dupefilter.
            yield scrapy.Request(link, callback=self.parse_detail, meta={'date': date})

    def parse_list(self, response, **kwargs):
        # Intentionally unused; list handling lives in detail_requests.
        # Presumably a hook required by BaseTaxPolicySpider -- TODO confirm.
        pass

    def parse_detail(self, response=None, **kwargs):
        """Extract a single article page into a NetTaxPolicyItem.

        PDF and ``.do`` download links cannot be parsed as HTML, so the
        URL itself is stored as the content for those responses.
        """
        item = Item()
        if '.pdf' in response.url or '.do' in response.url:
            content = response.url
        else:
            # Serialize the whole document; downstream consumers get raw HTML.
            content = response.xpath(".").get()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('date')  # set by detail_requests
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Local debug entry point: launch this spider through the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "dandong_ddsrmzf"])
