import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Scrapy spider for administrative normative documents (行政规范性文件)
    published by the Meishan Pengshan District People's Government.

    Flow: start_requests -> detail_requests (listing pages; also discovers
    pagination on the first response) -> parse_detail (one item per document).
    """

    name = "sichuan_msspsqrmzfxzgfxwj"

    # Fixed metadata copied into every emitted item (from the tracking table).
    province: str = "四川省"
    city: str = "眉山市"
    county: str = "彭山区"
    park: str = "None"
    source: str = "眉山市彭山区人民政府"  # same source values are merged downstream
    url: str = "http://www.scps.gov.cn/zfxxgk/zc/xzgfxwj.htm"  # entry URL, kept for debugging
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Referer': 'https://www.ms.gov.cn/zfxxgk/z__c/gfxwj/bmgfxwj/10.htm',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Not A(Brand";v="8", "Chromium";v="132", "Google Chrome";v="132"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"'
    }

    def start_requests(self):
        """Entry point: fetch the first listing page of normative documents."""
        url = "https://www.scps.gov.cn/zfxxgk/zc/xzgfxwj.htm"
        yield scrapy.Request(url=url, method='GET', headers=self.headers, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one listing page.

        Emits a request per document link; on the entry response only (no
        'is_next' in meta), reads the total page count from the "Next" link
        and schedules every listing page.

        Raises:
            ValueError: if the pagination link is missing or unrecognized.
        """
        page = response.meta.get('page', "")
        links = response.xpath("//table/ul/li/a/@href").getall()
        for link in links:
            if 'http' not in link:
                # Relative hrefs such as "../../xxx.htm": resolve against the
                # current response URL. This replaces the old dot-counting
                # heuristic, which silently left unexpected forms unresolved.
                link = response.urljoin(link)
            yield scrapy.Request(link, headers=self.headers, callback=self.parse_detail,
                                 meta={'is_next': False, 'page': page})

        # Pagination: scheduled pages carry is_next=False, so only the entry
        # request (meta lookup yields None) runs this branch — no recursion.
        if response.meta.get("is_next") is not False:
            next_href = response.xpath("//div/a[@class='Next'][1]/@href").get()
            # Guard against a missing href so a clear ValueError is raised
            # instead of re.search(None) blowing up with a TypeError.
            match = re.search(r'xzgfxwj/(\d+)\.htm', next_href) if next_href else None
            if match:
                total_pages = int(match.group(1))
                for page in range(1, total_pages + 1):
                    next_page = f"https://www.scps.gov.cn/zfxxgk/zc/xzgfxwj/{page}.htm"
                    yield scrapy.Request(url=next_page, method='GET', headers=self.headers,
                                         callback=self.detail_requests, meta={'is_next': False, 'page': page})
            else:
                raise ValueError("找不到下一页")

    def parse_list(self, response, **kwargs):
        pass

    def parse_detail(self, response, **kwargs):
        """Extract a single document page into a NetTaxPolicyItem."""
        item = Item()
        # Binary attachments cannot be parsed as HTML; keep the URL as content.
        # Extension check (not substring) so paths merely containing "doc"/"xls"
        # are not misclassified.
        if response.url.lower().endswith(('.pdf', '.doc', '.docx', '.xls', '.xlsx', '.jpg')):
            content = response.url
        else:
            content = response.xpath(".").get()  # whole HTML document
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "sichuan_msspsqrmzfxzgfxwj"])
