import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Scrape "县市部门文件" policy documents from the Ali Prefecture
    government site (阿里地区行政公署) and yield one Item per article.

    Flow: start_requests fetches list page 1 -> detail_requests schedules
    one request per article plus, on the first page only, requests for all
    remaining static list pages -> parse_detail builds the item.
    """

    name = "xizang_aldqxzgs2"

    province: str = "西藏自治区"  # 取表格
    city: str = "阿里地区"  # 取表格
    county: str = ""  # 取表格
    park: str = "None"  # 取表格
    source: str = "阿里地区行政公署"  # 取表格 同一个来源合并
    # NOTE(review): this recorded entry URL (/zwgk/zcwj/...) differs from the
    # URL actually requested in start_requests (/gk/zcwjk/...) — confirm which
    # one is current and keep the two in sync.
    url: str = "https://www.al.gov.cn/zwgk/zcwj/xsbmwj.htm"  # 注明入口网址，以便后续排错

    def start_requests(self):
        """Issue the initial GET for page 1 of the document list."""
        url = "https://www.al.gov.cn/gk/zcwjk/xsbmwj.htm"

        # Browser-like headers; the site may reject requests without them.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        }

        yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one list page.

        Schedules a parse_detail request per article; on the first page only
        (``is_next`` not set in meta) it also discovers the page count from
        the "下页" link and schedules every remaining list page.

        Raises:
            Exception: when the counts of links/titles/dates disagree
                (layout change) or when the pagination link is missing.
        """
        page = response.meta.get('page', "")
        pub_dates = response.xpath("//div[@class='newsList']//li/a/div[@class='time']/text()").getall()  # 取日期
        links = response.xpath("//div[@class='newsList']//li/a/@href").getall()  # 取链接
        titles = response.xpath("//div[@class='newsList']//li/a/div[@class='txt']/text()").getall()  # 取标题
        self.logger.debug("links=%d page=%s titles=%d", len(links), page, len(titles))

        # A count mismatch means the page layout changed: fail loudly.
        # (BUGFIX: the original `raise (tuple)` was itself a TypeError.)
        if not (len(titles) == len(links) == len(pub_dates)):
            raise Exception(f"链接，标题，时间数量出错了 page={page}")

        for i, link in enumerate(links):
            if 'http' not in link:
                # Normalise the site's relative-link shapes to absolute URLs.
                # BUGFIX: these branches are mutually exclusive (`elif`); the
                # original re-tested the already-rewritten string, so the
                # 2-dot rewrite accidentally fell into the 1-dot branch.
                dots = link.count(".")
                if dots == 5:  # e.g. "../../x.htm"
                    link = f"https://www.al.gov.cn{link[5:]}"
                elif dots == 7:  # e.g. "../../../x.htm"
                    link = f"https://www.al.gov.cn{link[8:]}"
                elif dots == 2:  # e.g. "./x.htm"
                    link = f"http://{link[1:]}"
                # dots == 1: already usable as-is (original branch was a no-op).

            if any(ext in link for ext in ('pdf', 'doc', 'xls', 'jpg')):
                # Binary attachments cannot be parsed as HTML detail pages.
                # BUGFIX: the original called the generator parse_detail(None)
                # and discarded it — dead code; skip these URLs explicitly.
                self.logger.info("特殊url：%s (%s)", link, titles[i])
            else:
                yield scrapy.Request(link, callback=self.parse_detail,
                                     meta={'title': titles[i], 'pub_date': pub_dates[i], 'page': page})

        # Pagination: only the very first request enumerates the sub-pages.
        if response.meta.get("is_next") is not False:
            # The "下页" link carries the page number of the next static page.
            match = re.search(r'<a href="xsbmwj\/(\d+)\.htm">下页</a>', response.text)
            if match is None:
                # Kept loud on purpose: a missing link likely means the site
                # markup changed and the spider needs attention.
                raise Exception('No match found')
            pages = int(match.group(1))
            self.logger.info("总页数：%s", pages + 1)
            if pages > 1:
                # Sub-pages are static files named 1.htm .. {pages}.htm.
                for page_no in range(1, pages + 1):
                    next_url = f"https://www.al.gov.cn/gk/zcwjk/xsbmwj/{page_no}.htm"
                    yield scrapy.Request(url=next_url, method='GET',
                                         callback=self.detail_requests,
                                         meta={'is_next': False, 'page': page_no + 1})

    def parse_list(self, response, **kwargs):
        # Required by the base-spider interface; listing is handled in
        # detail_requests for this site.
        pass

    def parse_detail(self, response=None, **kwargs):
        """Build an Item from a detail page.

        ``response is None`` was the legacy signal for non-HTML attachment
        URLs; those are now filtered out in detail_requests, but the guard
        is kept so a stray direct call still fails loudly.
        """
        if response is None:
            raise Exception("特殊url")
        item = Item()
        item['title'] = response.xpath("string(//meta[@name='ArticleTitle']/@content)").get()
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('pub_date')
        # Whole-document markup; downstream pipelines presumably extract the
        # text — TODO confirm against the pipeline.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly when the module is run as a script.
    cmdline.execute(["scrapy", "crawl", "xizang_aldqxzgs2"])
