import time
import re
import json
import urllib
import requests
import scrapy
from lxml import etree
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy documents published by the Fuyuan municipal
    government portal (www.hljfy.gov.cn).

    Crawl flow:
      1. ``start_requests``  — GET the policy list page; its inline JS carries
         the STL widget token needed by the dynamic-content API.
      2. ``detail_requests`` — POST the API once to learn the total page
         count, then schedule one POST per result page.
      3. ``parse_list``      — pull detail links/titles/dates out of the HTML
         fragment the API returns.
      4. ``parse_detail``    — assemble the final item from each detail page.
    """

    name = "heilongjiang_fysrmzfzfwj"

    province: str = "黑龙江省"  # from the tracking spreadsheet
    city: str = "佳木斯市"  # from the tracking spreadsheet
    county: str = "抚远市"  # from the tracking spreadsheet
    park: str = "None"  # from the tracking spreadsheet
    source: str = "抚远市人民政府"  # from the spreadsheet; identical sources are merged downstream
    url: str = "http://www.hljfy.gov.cn/govinfo/"  # entry URL, kept for later troubleshooting

    def start_requests(self):
        """Fetch the list page whose inline script embeds the API token."""
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "http://www.hljfy.gov.cn/govinfo/Policy.html",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        }
        url = "http://www.hljfy.gov.cn/govinfo/Policylist.html"
        yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Extract the STL token, probe the API for the page count, then
        schedule one API POST per page.

        Raises:
            IndexError: if the list page no longer embeds a second
                ``value: '...'`` token.
            Exception: if the page-count pattern is missing from the probe
                response.
        """
        tokens = re.findall(r"value: '(.*?)'", response.text)
        # The page embeds several STL widget tokens; index 1 is the one that
        # drives the policy list — TODO confirm if the site layout changes.
        value = tokens[1]
        self.logger.debug("STL token: %s", value)
        headers = {
            "Accept": "application/vnd.siteserver+json; version=1",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/json;charset=UTF-8",
            "Origin": "http://www.hljfy.gov.cn",
            "Pragma": "no-cache",
            "Referer": "http://www.hljfy.gov.cn/govinfo/Policylist.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        }
        url = "http://www.hljfy.gov.cn/api/sys/stl/actions/dynamic?fc=1&&1735089635572"
        cookies = {
        }
        probe_body = json.dumps({"value": value, "page": 1}, separators=(',', ':'))
        # NOTE(review): a blocking requests.post inside a Scrapy callback
        # stalls the reactor; tolerable for this single bootstrap probe, but
        # it should not be repeated per page. verify=False mirrors the
        # original site quirk — confirm whether TLS verification can be
        # re-enabled.
        probe = requests.post(url, headers=headers, cookies=cookies, data=probe_body, verify=False)
        # The probe response contains a "共 [N] 页" fragment with the total
        # page count.
        match = re.search(r'共 \[(\d+)\] 页', probe.text)
        if not match:
            raise Exception("没有找到匹配项")
        pages = int(match.group(1))
        self.logger.info("总页数：%s", pages)
        for page in range(1, pages + 1):
            page_body = json.dumps({"value": value, "page": page}, separators=(',', ':'))
            yield scrapy.Request(
                url=url,
                method='POST',
                headers=headers,
                cookies=cookies,
                body=page_body,
                callback=self.parse_list,
            )

    def parse_list(self, response, **kwargs):
        """Parse one API page: the JSON payload carries an HTML fragment with
        the per-document links, titles and publish dates."""
        fragment = json.loads(response.text)['html']
        tree = etree.HTML(fragment)
        links = tree.xpath('//ul/li/a/@href')
        titles = tree.xpath('//ul/li/a/text()')
        dates = tree.xpath('//ul/li/span/text()')
        if not (len(links) == len(titles) == len(dates)):
            # Misaligned columns mean the fragment layout changed; skip the
            # page (original behaviour) but leave a trace instead of failing
            # silently.
            self.logger.warning(
                "parse_list skipped a page: %d links / %d titles / %d dates",
                len(links), len(titles), len(dates),
            )
            return
        # Identical for every detail request, so build it once outside the loop.
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "http://www.hljfy.gov.cn/govinfo/Policylist.html",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "Accept-Encoding": "gzip, deflate, br"
        }
        for link, title, pub_date in zip(links, titles, dates):
            url = f"http://www.hljfy.gov.cn{link}"
            yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.parse_detail,
                                 meta={'title': title, 'pub_date': pub_date})

    def parse_detail(self, response=None, **kwargs):
        """Build the item for one detail page, combining the page body with
        the title/date captured from the list fragment."""
        if response is None:
            raise Exception("特殊url")
        item = Item()
        item['title'] = response.meta.get('title')
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('pub_date')
        # Serializes the entire document — presumably a downstream pipeline
        # extracts the article body; TODO confirm.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Local debug entry point: launch this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    command = "scrapy crawl heilongjiang_fysrmzfzfwj"
    cmdline.execute(command.split())
