import time
import re
import json
import urllib
from curl_cffi import requests
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode
from lxml import html


class spider(BaseTaxPolicySpider):
    """Spider for government-information-disclosure announcements of the
    Pulandian District People's Government (Dalian).

    The listing page is fetched synchronously with curl_cffi inside
    ``start_requests`` (presumably to get past the site's TLS/bot
    fingerprinting — TODO confirm), then each detail page is handed to
    Scrapy for normal crawling.

    NOTE(review): only ``pageNo=1`` of the listing is requested — confirm
    whether deeper pagination is required.
    """

    name = "dalian_dlspldqrmzf"

    # Location metadata, copied verbatim into every yielded item
    # (values taken from the source registry table).
    province: str = "辽宁省"
    city: str = "大连市"
    county: str = "普兰店区"
    park: str = "None"
    # Items sharing the same source string are merged downstream.
    source: str = "大连市普兰店区人民政府"
    # Entry URL of the target site, recorded to ease later troubleshooting.
    url: str = "https://www.dlpld.gov.cn/xxgk/index.html"

    def start_requests(self):
        """Fetch the listing page and yield one request per article link."""
        list_url = "https://www.dlpld.gov.cn/front/list-xxgk.jhtml"
        form_body = "category.id=3bde0194c0c041618a28847472acb6cc&pageNo=1"
        # NOTE(review): hard-coded session cookie — likely to expire; verify
        # whether the endpoint actually requires it.
        post_headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Cookie': 'JSESSIONID=C986C8E18901CD7225E92427B41792D1; pageNo=2; jeeh5.session.id=d3a8a1db08d743fe9f948b928f61ede8; jeeh5.session.id=aa0c3710939649e2ade853230e268b43; pageNo=1',
        }

        listing = requests.request("POST", list_url, headers=post_headers, data=form_body)
        doc = html.fromstring(listing.text)

        # Each article link on the listing carries class="list-demo";
        # hrefs are site-relative, so prepend the host.
        for href in doc.xpath("//a[@class='list-demo']/@href"):
            detail_url = f"https://www.dlpld.gov.cn{href}"
            yield scrapy.Request(url=detail_url, method='GET', callback=self.parse_detail)

    def detail_requests(self, response, **kwargs):
        """Unused hook required by the base spider interface."""
        pass

    def parse_list(self, response, **kwargs):
        """Unused hook required by the base spider interface."""
        pass

    def parse_detail(self, response, **kwargs):
        """Extract one article's metadata and content into an item."""
        item = Item()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # NOTE(review): xpath "." serializes the entire page — confirm the
        # whole document (not just the article body) is the intended content.
        item['content'] = response.xpath(""".""").get()
        # Static location/source fields come straight off the class.
        for field_name in ('source', 'province', 'city', 'county', 'park'):
            item[field_name] = getattr(self, field_name)
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Convenience entry point: run this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "dalian_dlspldqrmzf"])
