import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Scrape tax-policy documents from the Gongchangling District
    (Liaoyang, Liaoning) government site via its JSON listing API.

    Flow: ``start_requests`` POSTs page 0 of each source only to learn the
    total record count; ``detail_requests`` then POSTs every listing page;
    ``parse_list`` follows each entry's detail link; ``parse_detail``
    emits one item per document.
    """

    name = "liaoyang_lnslysgclqrmzf1"

    province: str = "辽宁省"  # from the source table
    city: str = "辽阳市"  # from the source table
    county: str = "弓长岭区"  # from the source table
    park: str = "None"  # from the source table
    source: str = "辽宁省辽阳市弓长岭区人民政府"  # from the source table; identical sources are merged
    url: str = "http://www.lygcl.gov.cn/column_list_wenjian.html?categorynum=025001"  # entry URL, kept for troubleshooting
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin": "http://www.lygcl.gov.cn",
        "Referer": "http://www.lygcl.gov.cn/column_list_wenjian.html?categorynum=025001",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }
    cookies = {
    }
    # Each row: [source name, base URL, siteGuid, categorynum, list-API URL].
    sources = [
        ["辽宁省辽阳市弓长岭区人民政府", "http://www.lygcl.gov.cn", "1c075522-dec2-4006-b24e-6c0d86abab23",
         "006006", "http://www.lygcl.gov.cn/EWB-FRONT/rest/commonapiaction/getgovinfolist"],
    ]

    # Listing-page size; used both in the request payload and in the
    # page-count arithmetic so the two can never drift apart.
    PAGE_SIZE = 21

    @staticmethod
    def _form_data(site_guid: str, categorynum: str, page_number: int) -> dict:
        """Build the POST form body for one listing-page request.

        The API expects a single ``params`` form field whose value is a
        JSON document; serializing it with ``json.dumps`` avoids manual
        escaping of the nested quotes. ``page_number`` is 0-based.
        """
        payload = {
            "siteGuid": site_guid,
            "categorynum": categorynum,
            "deptcode": "001",
            "gktype": "1",
            "documentnumber": "",
            "pageNumber": page_number,
            "pageSize": spider.PAGE_SIZE
        }
        return {"params": json.dumps(payload, ensure_ascii=False)}

    def start_requests(self):
        """POST page 0 of every source purely to obtain the total count."""
        for source in self.sources:
            yield scrapy.FormRequest(
                url=source[4],
                headers=self.headers,
                cookies=self.cookies,
                formdata=self._form_data(source[2], source[3], 0),
                method='POST',
                callback=self.detail_requests,
                meta={'source': source[0], 'base_url': source[1],
                      'siteGuid': source[2], 'categorynum': source[3]}
                # SSL assumed to be handled by middleware
            )

    def detail_requests(self, response, **kwargs):
        """Read the total record count, then request every listing page."""
        total_num = json.loads(response.body)['custom']['total']
        pages = math.ceil(total_num / self.PAGE_SIZE)
        if pages >= 1:
            print("总页数：", pages)

            for page in range(1, pages + 1):
                print("当前页：", page)
                yield scrapy.FormRequest(
                    url=response.url,
                    headers=self.headers,
                    cookies=self.cookies,
                    # The API's pageNumber is 0-based; our loop is 1-based.
                    formdata=self._form_data(response.meta['siteGuid'],
                                             response.meta['categorynum'],
                                             page - 1),
                    method='POST',
                    callback=self.parse_list,
                    # Page 0 was already fetched once in start_requests, so
                    # disable the dupe filter for these identical POSTs.
                    dont_filter=True,
                    meta={'page': page, 'source': response.meta['source'],
                          'base_url': response.meta['base_url']}
                    # SSL assumed to be handled by middleware
                )

    def parse_list(self, response, **kwargs):
        """Follow the detail link of every entry on one listing page."""
        page = response.meta.get('page', "")
        if page:
            print(response.text)
        item_list = json.loads(response.body)['custom']['data']
        for item in item_list:
            link = response.meta['base_url'] + item['visiturl']
            print("links[i]", link, 'page', page)
            yield scrapy.Request(link, headers=self.headers, callback=self.parse_detail,
                                 meta={'pubdate': item["infodate"],
                                       'title': item["title"], 'source': response.meta['source']})

    def parse_detail(self, response, **kwargs):
        """Emit one item per document; attachments keep the URL as content."""
        item = Item()
        title = response.meta.get('title')
        pub_date = response.meta.get('pubdate')
        # Binary attachments (pdf/doc/xls/jpg) cannot be parsed as HTML,
        # so store the URL itself as the content.
        if any(ext in response.url for ext in ('.pdf', '.doc', '.xls', '.jpg')):
            print("特殊url：", response.url)
            content = response.url

        else:
            # XPath "." selects the document root: the full page HTML.
            content = response.xpath(".").get()
        item['title'] = title
        item['source_url'] = response.url
        item['publish_date'] = pub_date
        item['content'] = content
        item['source'] = response.meta['source']
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Launch this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "liaoyang_lnslysgclqrmzf1"])
