import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Scrapy spider for tax-policy announcements published by the Huludao
    Municipal People's Government (www.hld.gov.cn, Liaoning province).

    Crawl flow:
        start_requests  -> fetch the 2024 index page
        year_requests   -> enumerate the year links on that page
        page_requests   -> read the page count per year (createPageHTML)
        parse_list      -> collect article links + publish dates per page
        parse_detail    -> build one NetTaxPolicyItem per article
    """

    name = "huludao_lnshldsrmzf3"

    # Region/source metadata copied verbatim into every emitted item
    # (values come from the project's tracking sheet).
    province: str = "辽宁省"  # from tracking sheet
    city: str = "葫芦岛市"  # from tracking sheet
    county: str = ""  # from tracking sheet
    park: str = "None"  # from tracking sheet
    source: str = "辽宁省葫芦岛市人民政府"  # from tracking sheet; same-source items are merged downstream
    url: str = "https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/2023n/index.html"  # entry URL, recorded for troubleshooting

    def start_requests(self):
        """Entry point: request the 2024 index page, which carries the
        year-navigation list consumed by ``year_requests``."""
        print("111")
        url = "https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/2024n/index.html"

        # Browser-like request headers (the site may reject bare clients).
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"'
        }

        # Issue the GET request for the entry page.
        yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.year_requests)

    def year_requests(self, response, **kwargs):
        """Extract every year from the year-navigation list and request
        that year's listing index page."""
        print("222")

        # Year-pagination logic: "<year>年" entries -> numeric year strings.
        year_list = response.xpath("//div[@class='yearlist']//li/a/text()").getall()
        years_list = [year.replace('年', '') for year in year_list]
        for year in years_list:
            year_url = f"https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/{year}n/"

            # Browser-like request headers.
            year_headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Referer': 'https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzf/2024n/index.html',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'same-origin',
                'Sec-Fetch-User': '?1',
                'Upgrade-Insecure-Requests': '1',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
                'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"'
            }
            print("year_url", year_url)
            # Issue the GET request; carry the year forward via meta.
            yield scrapy.Request(url=year_url, method='GET', headers=year_headers, callback=self.page_requests,
                                 meta={'year': year})

    def page_requests(self, response, **kwargs):
        """Read the total page count from the inline ``createPageHTML(N...``
        call and request every listing page of the year."""
        print("333")

        year = response.meta['year']
        # First argument of createPageHTML(...) is the total page count.
        pattern = r"""createPageHTML\((\d+)"""
        match = re.search(pattern, response.text)
        # NOTE(review): re.search can return None if the marker is absent;
        # this would raise AttributeError here — confirm the marker is
        # always present on these listing pages.
        pages = int(match.group(1))
        if pages > 1:
            print(f"总页数：{pages},年份：{year}")
            for page in range(1, pages + 1):
                print("当前页：", page)
                # NOTE(review): page 1 maps to index_0.html here, while the
                # single-page branch below uses index.html — verify the site
                # really serves index_0.html for the first page. TODO confirm.
                not_first_url = f"https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/{year}n/index_{page - 1}.html"
                # Headers intentionally empty: scrapy defaults suffice here.
                headers = {
                }
                # Issue the request; carry year/page forward via meta.
                yield scrapy.Request(url=not_first_url, method='GET', headers=headers, callback=self.parse_list,
                                     meta={'year': year, 'page': page})
        else:
            print(f"总页数：{pages},年份：{year}")

            print("当前页：", pages)
            # Single page: the plain index.html is the only listing page.
            not_first_url = f"https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/{year}n/index.html"
            # Headers intentionally empty: scrapy defaults suffice here.
            headers = {
            }
            # Issue the request; carry year/page forward via meta.
            yield scrapy.Request(url=not_first_url, method='GET', headers=headers, callback=self.parse_list,
                                 meta={'year': year, 'page': pages})

    def parse_list(self, response, **kwargs):
        """Pair each article link with its publish date on a listing page
        and request the detail page for each article."""
        print("444")
        page = response.meta['page']
        year = response.meta['year']
        links = response.xpath("//td[@class='bt']/a/@href").getall()  # article links
        pubdate_list = response.xpath("//td[@class='fbrq']/text()").getall()  # publish dates
        print("links", links)
        print("date_list", pubdate_list)
        if len(pubdate_list) == len(links):
            for i in range(len(pubdate_list)):
                # Relative hrefs start with "."; rebase them onto the year URL.
                links[i] = f"https://www.hld.gov.cn/zwgk/fdzdgknr/lzyj/hzbf/{year}n{links[i][1:]}"
                print(f"detail_url: {links[i]},page: {page}, year: {year}")
                yield scrapy.Request(links[i], callback=self.parse_detail, meta={'pubdate': pubdate_list[i]})
        else:
            # BUG FIX: the original `raise ("出错了")` raised a plain string,
            # which fails with "TypeError: exceptions must derive from
            # BaseException". Raise a proper exception type instead.
            raise ValueError("出错了")

    def parse_detail(self, response, **kwargs):
        """Build one item from an article detail page."""
        print("555")
        item = Item()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # NOTE(review): xpath(".") serializes the whole page, so `content`
        # holds the full HTML document rather than just the article body —
        # confirm this is what the downstream pipeline expects.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        # print("item", item)
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "huludao_lnshldsrmzf3"])
