#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import json
import math

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urllib, time
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class HenanZhumadianPolicy(BaseTaxPolicySpider):
    """Spider for tax-policy notices published by county governments of
    Zhumadian, Henan province.

    Every county site runs the same "ims" CMS, exposing a JSON list
    endpoint (``imsNewsAction.php``) keyed by an ``itemscode`` category
    id, so a single spider covers all the counties listed in
    ``start_requests``.
    """

    name = 'henan_zhumadian_itemscode_policy'

    province = '河南省'
    city = '驻马店市'
    # county varies per start URL and travels through request meta.
    park = ''

    # Entries requested per list page; must agree with the "length" form
    # field and the pagination arithmetic in parse_list.
    PAGE_SIZE = 30

    custom_settings = {
        "DOWNLOAD_TIMEOUT": 60
    }
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
    }

    def start_requests(self):
        """Yield one request per (source site, county, category URL)."""
        for source, county, url in [
            ['驻马店市新蔡县人民政府', "新蔡县", "https://www.xincai.gov.cn/web/front/gk/index.php?itemscode=133"],
            ['驻马店市泌阳县人民政府', "泌阳县", "https://www.biyang.gov.cn/web/front/news/news.php?itemscode=122"],
            ['驻马店市泌阳县人民政府', "泌阳县", "https://www.biyang.gov.cn/web/front/news/news.php?itemscode=123"],
            ['驻马店市泌阳县人民政府', "泌阳县", "https://www.biyang.gov.cn/web/front/news/news.php?itemscode=124"],
            ['驻马店市汝南县人民政府', "汝南县", "https://www.runan.gov.cn/web/front/news/news.php?itemscode=162"],
            ['驻马店市汝南县人民政府', "汝南县", "https://www.runan.gov.cn/web/front/news/news.php?itemscode=163"],
            ['驻马店市汝南县人民政府', "汝南县", "https://www.runan.gov.cn/web/front/news/news.php?itemscode=164"],
            ['驻马店市汝南县人民政府', "汝南县", "https://www.runan.gov.cn/web/front/news/news.php?itemscode=165"],
            ['驻马店市遂平县人民政府', "遂平县", "https://www.suiping.gov.cn/web/front/news/news.php?itemscode=204"],
            ['驻马店市遂平县人民政府', "遂平县", "https://www.suiping.gov.cn/web/front/news/news.php?itemscode=205"],
            ['驻马店市遂平县人民政府', "遂平县", "https://www.suiping.gov.cn/web/front/news/news.php?itemscode=206"],
            ['驻马店市正阳县人民政府', "正阳县", "https://www.zhengyang.gov.cn/web/front/gk2020/index.php?itemscode=522"],
            ['驻马店市上蔡县人民政府', "上蔡县", "https://www.shangcai.gov.cn/web/front/special/zhengwugongkai.php?itemscode=154"],
        ]:
            item = {'source': source, 'county': county}
            yield scrapy.Request(url, callback=self.parse_content, meta={'item': item}, dont_filter=True)

    def parse_content(self, response, **kwargs):
        """Translate the landing-page URL into the first JSON list request.

        The ``itemscode`` query parameter identifies the article category
        and is forwarded verbatim to the CMS list endpoint.  The form
        data is kept in meta so parse_list can paginate with it.
        """
        t = int(time.time() * 1000)
        url = response.urljoin(f"/web/front/news/imsNewsAction.php?action=list&random={t}")
        data = {
            "itemscode": response.url.split("itemscode=")[-1],
            "title": "",
            "start": "0",
            "length": str(self.PAGE_SIZE),
            "ts": f"{t}",
        }
        meta = response.meta
        meta['data'] = data
        yield scrapy.FormRequest(url, formdata=data, callback=self.parse_list, meta=meta, headers=self.headers,
                                 method="POST", dont_filter=True)

    def parse_list(self, response, **kwargs):
        """Parse one JSON list page and fan out to article detail pages.

        Only the first page of a category (meta['is_next'] unset) also
        schedules the remaining pages; follow-up pages carry
        ``is_next=False`` so pagination runs exactly once per category.
        """
        prev_item = response.meta.get('item')
        payload = response.json()
        for elem in payload['root']:
            news_id = elem.get('ID')
            if not news_id:
                # No ID means no detail page to follow (the old check ran
                # after urljoin and therefore could never skip anything).
                continue
            item = Item()
            item['source_url'] = response.urljoin('/web/front/news/detail.php?newsid=' + news_id)
            item['publish_date'] = elem['ADDTIME'][:10]  # "YYYY-MM-DD hh:mm" -> date part
            item['title'] = elem['TITLE']
            if prev_item is not None:
                # Carry source/county gathered in start_requests.
                for key, value in prev_item.items():
                    item[key] = value
            self.logger.debug("list %s -> %s", response.url, item['source_url'])
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})

        if response.meta.get("is_next") is not False:
            total = int(payload['totalProperty'])
            total_page = math.ceil(total / self.PAGE_SIZE)
            self.logger.debug("%s total pages: %s", response.url, total_page)
            base_data = response.meta['data']
            # Page 0 (start=0) is this very response, so only pages
            # 1 .. total_page-1 remain; the previous range(1, total_page+1)
            # always requested one empty page past the end of the data.
            for page_num in range(1, total_page):
                t = int(time.time() * 1000)
                url = response.urljoin(f"/web/front/news/imsNewsAction.php?action=list&random={t}")
                data = dict(base_data)  # copy: never mutate the shared meta dict
                data["start"] = str(page_num * self.PAGE_SIZE)
                data["ts"] = f"{t}"
                yield scrapy.FormRequest(url, formdata=data, callback=self.parse_list,
                                         meta={'item': prev_item, 'is_next': False},
                                         headers=self.headers, method="POST", dont_filter=True)

    def parse_detail(self, response, **kwargs):
        """Attach the page content and fixed location fields to the item."""
        item = response.meta.get('item')
        if item is None:
            item = Item()
        # xpath(".") serialises the entire document; presumably the article
        # body is extracted downstream in a pipeline — TODO confirm.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly from the file.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_zhumadian_itemscode_policy"])
