import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy documents published by the Dongfeng District
    People's Government (Jiamusi, Heilongjiang).

    Crawls the public-information list page, extracts detail-page links
    from each table row's ``onclick`` handler, then parses the title,
    publication date and content from every detail page into an Item.
    """

    name = "heilongjiang_dfqrmzfgfxwj"

    province: str = "黑龙江省"  # from the spec table
    city: str = "佳木斯市"  # from the spec table
    county: str = "东风区"  # from the spec table
    park: str = "None"  # from the spec table
    source: str = "东风区人民政府"  # from the spec table; identical sources are merged downstream
    url: str = "https://www.jmsdf.gov.cn/html/-publicinfo-.html"  # entry URL, kept for later troubleshooting

    # Absolute URL wrapped in single quotes inside an onclick handler.
    _LINK_RE = re.compile(r"'(https?://[^\']+)'")
    # Chinese-style date; month/day may be 1 or 2 digits (site is usually
    # zero-padded, but accept both to be safe).
    _DATE_RE = re.compile(r"\d{4}年\d{1,2}月\d{1,2}日")

    def start_requests(self):
        """Issue the initial GET request for the document list page."""
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "https://www.jmsdf.gov.cn/html/-publicinfo-.html",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\""
        }
        url = "https://www.jmsdf.gov.cn/html/-list-%20050002.html"

        yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Extract detail-page URLs from the list page and request each one.

        Links live in the ``onclick`` attribute of each table row; mangled
        or scheme-less links are heuristically normalised into absolute
        URLs before being requested.
        """
        page = response.meta.get('page', "")
        links = response.xpath("//tbody/tr/@onclick").getall()  # raw onclick handlers
        print(f"links{len(links)}", links, page)
        for raw in links:
            match = self._LINK_RE.search(raw)
            if match:
                link = match.group(1)
                print("匹配到的URL:", link)
            else:
                link = raw
                print("未找到匹配项")
            if 'http' not in link:
                # Heuristic repair keyed on the number of dots in the
                # mangled link.  The branches MUST be mutually exclusive
                # (elif): a transformed link can satisfy a later dot-count
                # and would otherwise get a second scheme stacked onto it
                # (e.g. "https://http://...").
                if link.count(".") == 5:
                    print(link)
                    link = f"http://{link[5:]}"
                elif link.count(".") == 7:
                    print(link)
                    link = f"http://{link[8:]}"
                elif link.count(".") == 2:
                    print(link)
                    link = f"http://{link[1:]}"
                elif link.count(".") == 1:
                    link = f"https://{link}"
            print("links[i]", link, 'page', page)
            yield scrapy.Request(link, callback=self.parse_detail)
        # Pagination: the source page currently has no paging controls, so
        # no follow-up list requests are issued.  If the site adds paging,
        # read the total count from the page and yield further list
        # requests here with meta={'is_next': False, 'page': page}.

    def parse_list(self, response, **kwargs):
        """Unused list-page callback required by the base spider interface."""
        pass

    def parse_detail(self, response, **kwargs):
        """Parse a single detail page into an Item.

        Raises:
            Exception: when no publication date can be found on the page —
                failing loudly here usually means the page layout changed.
        """
        item = Item()
        title = response.xpath("//span[@id='title001']/text()").get()
        pub_date = response.xpath("//span[@id='time001']/text()").get()
        if pub_date is None:
            # Previously this fell through to re.search(pattern, None) and
            # crashed with an opaque TypeError; raise a clear error instead.
            raise Exception(f"未找到匹配的日期: {response.url}")

        match = self._DATE_RE.search(pub_date)
        if match:
            date_matched = match.group()
            print(date_matched)  # e.g. 2024年11月16日
        else:
            raise Exception("未找到匹配的日期")

        if any(ext in response.url for ext in ('pdf', 'doc', 'xls', 'jpg')):
            # Attachment-style pages: store the URL itself as the content.
            print("特殊url：", response.url)
            content = response.url
        else:
            # Serialize the whole document as the content.
            content = response.xpath(".").get()

        item['title'] = title
        item['source_url'] = response.url
        item['publish_date'] = date_matched
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly with `python <file>`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "heilongjiang_dfqrmzfgfxwj"])
