import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for regulatory documents (行政规范性文件) published by the
    Liaoyang municipal government (辽宁省辽阳市人民政府).

    Flow:
        start_requests  -> one probe POST per keyword to read the total count
        detail_requests -> fans out one POST per result page
        parse_list      -> extracts detail-page links from the JSON payload
        parse_detail    -> builds one NetTaxPolicyItem per document
    """

    name = "liaoyang_lnslysrmzfszfxz"

    province: str = "辽宁省"  # written into each item
    city: str = "辽阳市"  # written into each item
    county: str = ""  # written into each item
    park: str = "None"  # written into each item
    source: str = "辽宁省辽阳市人民政府"  # items with the same source are merged downstream
    # Entry URL of the public listing page, kept for troubleshooting.
    url: str = "http://www.liaoyang.gov.cn/gfxwj_inventory.html?deptcode=033"

    # JSON list API behind the listing page above.
    LIST_API = "http://www.liaoyang.gov.cn/EWB-FRONT/rest/commonapiaction/getgovinfolist"
    # Records per API page; used both in the request and for paging math.
    PAGE_SIZE = 5

    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Origin': 'http://www.liaoyang.gov.cn',
        'Referer': 'http://www.liaoyang.gov.cn/gfxwj_inventory.html?deptcode=033',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'Cookie': 'sid=CFE7D528B8D24067BC6B6A6EB297035F; fontZoomState=0; oauthClientId=demoClient; oauthPath=http://10.9.6.89:8080/EWB-FRONT; oauthLoginUrl=http://127.0.0.1:1112/membercenter/login.html?redirect_uri=; oauthLogoutUrl=; noOauthRefreshToken=277bdcce3bd9d684650831c043c81948; noOauthAccessToken=de598547d9f6c3406ed185108c649e89'
    }
    cookies = {}

    # Search keywords, one crawl branch per entry.
    # BUG FIX: the original list was missing the comma between the two
    # strings, so implicit concatenation fused them into a single bogus
    # keyword and the second category was never searched.
    keys = [
        "市政府行政规范性文件",
        "市政府办公室行政规范性文件",
    ]

    def _list_request(self, key, page_number, callback, meta):
        """Build a POST FormRequest for one page of the list API.

        The API expects a single form field ``params`` whose value is a
        JSON-encoded dict of the real query parameters.
        """
        params = {
            "siteGuid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a",
            "keywords": key,
            "title": "",
            "categorynum": "016001",
            "deptcode": "033",
            "gktype": "1",
            "documentnumber": "",
            "pageNumber": page_number,
            "pageSize": self.PAGE_SIZE,
        }
        return scrapy.FormRequest(
            url=self.LIST_API,
            headers=self.headers,
            cookies=self.cookies,
            formdata={"params": json.dumps(params, ensure_ascii=False)},
            method="POST",
            callback=callback,
            dont_filter=True,  # identical URL, differs only in POST body pages
            meta=meta,
        )

    def start_requests(self):
        # One probe request per keyword; its only purpose is to learn the
        # total record count so detail_requests can fan out the real pages.
        for key in self.keys:
            yield self._list_request(key, 1, self.detail_requests, {"key": key})

    def detail_requests(self, response, **kwargs):
        """Read the total count and schedule one request per result page."""
        total_num = json.loads(response.body)["custom"]["total"]
        pages = math.ceil(total_num / self.PAGE_SIZE)
        self.logger.info(
            "keyword=%s total=%s pages=%s", response.meta["key"], total_num, pages
        )
        # BUG FIX: the original only paginated when pages > 1, so result sets
        # that fit on a single page (total <= PAGE_SIZE) were silently dropped.
        # Every page, including the only one, is now fetched.
        for page in range(1, pages + 1):
            # NOTE(review): the API is assumed 0-based here (page - 1), which
            # matches the original pagination loop — verify against the site
            # if items appear to be missing from the last page.
            yield self._list_request(
                response.meta["key"], page - 1, self.parse_list, {"page": page}
            )

    def parse_list(self, response, **kwargs):
        """Extract detail-page links from one page of the JSON list payload."""
        page = response.meta.get("page", "")
        rows = json.loads(response.body)["custom"]["data"]
        for row in rows:
            link = "http://www.liaoyang.gov.cn" + row["visiturl"] + "?deptcode=033"
            self.logger.debug("page=%s link=%s", page, link)
            yield scrapy.Request(
                link,
                headers=self.headers,
                callback=self.parse_detail,
                meta={"pubdate": row["infodate"], "title": row["title"]},
            )

    def parse_detail(self, response, **kwargs):
        """Build one item from a detail page (or an attachment URL)."""
        item = Item()
        # Attachments cannot be parsed as HTML; store their URL as content.
        if any(ext in response.url for ext in (".pdf", ".doc", ".xls", ".jpg")):
            self.logger.info("attachment url: %s", response.url)
            content = response.url
        else:
            content = response.xpath(".").get()  # full document markup
        item["title"] = response.meta.get("title")
        item["source_url"] = response.url
        item["publish_date"] = response.meta.get("pubdate")
        item["content"] = content
        item["source"] = self.source
        item["province"] = self.province
        item["city"] = self.city
        item["county"] = self.county
        item["park"] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    cmdline.execute("scrapy crawl liaoyang_lnslysrmzfszfxz".split())
