#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import json
import math

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urllib, time
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class HenanXuchangPolicy(BaseTaxPolicySpider):
    """Spider for tax-policy announcements published by the district/county
    governments under Xuchang, Henan (www.xuchang.gov.cn).

    Flow: one list-page request per (source, county, url) triple ->
    ``parse_list`` extracts rows and paginates -> detail content is fetched
    via a POST to the Epoint ``getOpenDetail`` JSON endpoint in
    ``parse_detail``.
    """

    name = 'henan_xuchang_jianan2_policy'

    province = '河南省'
    city = '许昌市'
    # county varies per start URL and is carried through request meta,
    # not set as a class attribute.
    park = ''

    custom_settings = {
        "DOWNLOAD_TIMEOUT": 60
    }
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
    }

    def start_requests(self):
        """Yield one list-page request per (source, county, url) triple."""
        for source, county, url in [
            ['许昌市建安区人民政府', "建安区", "https://www.xuchang.gov.cn/govxxgk/11411023071371351L/category/031/031002/031002004/031002004002/031002004002001/govlist.html"],
            ['许昌市建安区人民政府', "建安区", "https://www.xuchang.gov.cn/govxxgk/11411023071371351L/category/031/031002/031002004/031002004002/031002004002002/govlist.html"],
            ['许昌市魏都区人民政府', "魏都区", "https://www.xuchang.gov.cn/govxxgk/11411002005750994L/category/035/035005/govlist.html"],
            ['许昌市魏都区人民政府', "魏都区", "https://www.xuchang.gov.cn/govxxgk/11411002005750994L/category/035/035002/035002004/035002004004/govlist.html"],
            ['许昌市襄城县人民政府', "襄城县", "https://www.xuchang.gov.cn/govxxgk/11411025005497706A/category/033/033002/033002004/033002004002/govlist.html"],
            ['许昌市禹州市人民政府', "禹州市", "https://www.xuchang.gov.cn/govxxgk/11411081764879393M/category/029/029007/govlist.html"],
            ['许昌市禹州市人民政府', "禹州市", "https://www.xuchang.gov.cn/govxxgk/11411081764879393M/category/029/029002/029002004/029002004002/govlist.html"],
            ['许昌市长葛市人民政府', "长葛市", "https://www.xuchang.gov.cn/govxxgk/11411082094577395T/category/002/002005/govlist.html"],
            ['许昌市长葛市人民政府', "长葛市", "https://www.xuchang.gov.cn/govxxgk/11411082094577395T/category/002/002002/002002004/002002004005/govlist.html"],
            ['许昌市长葛市人民政府', "长葛市", "https://www.xuchang.gov.cn/govxxgk/11411082094577395T/category/002/002002/002002004/002002004004/govlist.html"],
        ]:
            # source/county are merged into every item scraped from this list.
            item = {'source': source, 'county': county}
            yield scrapy.Request(url, callback=self.parse_list, meta={'item': item}, dont_filter=True)

    def parse_list(self, response, **kwargs):
        """Parse one page of a government list: yield a detail POST per row,
        then follow the next-page link if one exists.

        ``meta['item']`` carries the per-source fields; ``meta['page']`` is
        the 1-based page counter used by the pagination fallback XPath.
        """
        prev_item = response.meta.get('item')
        page = response.meta.get('page') or 1
        elem_list = response.xpath(
            "//tr//td[@class='ewb-has-detail ewb-xxnr']//a[@onclick]/ancestor::td[string-length(string(.))>=10]"
        )

        for elem in elem_list:
            item = Item()
            # Rows link via a JS handler: onclick="linkToNew('<infoid>', ...)".
            infoid = elem.xpath(".//a/@onclick").re_first(r"linkToNew\('(.*)',")
            if not infoid:
                # Without an infoid we can build neither the detail URL nor
                # the POST body (FormRequest requires string form values).
                self.logger.warning('no infoid extracted from row on %s', response.url)
                continue
            item['source_url'] = response.urljoin(f"/openDetailDynamic.html?infoid={infoid}")
            # .get() returns None when the node is missing; fall back to ''
            # instead of crashing the whole page on .strip().
            item['publish_date'] = (elem.xpath(
                ".//*[contains(text(), '发布日期：')]/following-sibling::p/text()").get() or '').strip()
            item['title'] = (elem.xpath(".//a/text()").get() or '').strip()
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            # Only keep URLs that look like real pages.
            if '.htm' not in item['source_url'] and '.shtm' not in item['source_url'] and '.php' not in item['source_url']:
                continue
            if 'relativeInfo' in item['source_url']:
                continue
            self.logger.debug('%s -> %s', response.url, item['source_url'])
            data = {
                "infoid": infoid,
                # Site GUID of the Epoint CMS instance, taken from the site's
                # own AJAX calls.
                "siteguid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a"
            }
            yield scrapy.FormRequest('https://www.xuchang.gov.cn/EpointWebBuilder/zNJSAction.action?cmd=getOpenDetail', formdata=data, callback=self.parse_detail, meta={'item': item}, dont_filter=True)

        # Primary pagination: an explicit "next page" anchor in any of the
        # variants this site family uses.
        next_url = response.xpath(
            """//a[text()='下页' or ./text()='下页>' or text()='»' or contains(./text(), '下一页') or text()='下一页»']/@href""").get()
        if not next_url:
            # Fallback: the current page number is wrapped in <b>; the next
            # <a> after it links to the following page.
            next_url = response.xpath(f"""//b[text()='{page}']/following::a/@href""").get()
        if next_url and next_url not in ['javascript:void(0);'] and response.urljoin(next_url) != response.url:
            self.logger.debug('%s next page: %s', response.url, next_url)
            yield response.follow(next_url, callback=self.parse_list, meta={'item': prev_item, "page": page + 1})
        else:
            self.logger.debug('%s has no next page', response.url)

    def parse_detail(self, response, **kwargs):
        """Fill the item with the detail content from the JSON endpoint.

        The POST response is a JSON object whose ``custom`` key holds the
        article body HTML.
        """
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['content'] = response.json()['custom']
        item['province'] = self.province
        item['city'] = self.city
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Allow running the spider directly: equivalent to
    # `scrapy crawl henan_xuchang_jianan2_policy` from the shell.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_xuchang_jianan2_policy"])
