# -*- coding: utf-8 -*-
# @Time   : 2024-05-20 16:25
# @Author : 大龙 🚀
import json

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import urlencode


class GxGxsbssllgzzzxrmzf22F2Policy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Longlin county government
    site (www.gxll.gov.cn), fetched through its JSON ``/irs/front/list`` API
    and followed to each document's detail page.
    """

    name = 'gx_gxsbssllgzzzxrmzf_22f2_policy'

    # Region metadata stamped onto every yielded item.
    province = "广西壮族自治区"
    city = "百色市"
    county = "隆林各族自治县"
    park = ""
    source = "广西省百色市隆林各族自治县人民政府"

    # Single source of truth for the list endpoint. The original code paginated
    # against www.baise.gov.cn while page 1 used www.gxll.gov.cn — an apparent
    # copy-paste inconsistency; both now hit the same host.
    list_api_url = "http://www.gxll.gov.cn/irs/front/list"

    def start_requests(self):
        """Issue the POST request for page 1 of the document list.

        The API is driven entirely by the JSON body; ``pageNo`` selects the
        page. The serialized body is carried in ``meta['data']`` so
        ``parse_list`` can clone it for the remaining pages.
        """
        payload = {
            "code": "189682f495f",
            "tableName": "t_18797a40156",
            "granularity": "ALL",
            "searchFields": [
                {
                    "fieldName": "site_id",
                    "searchWord": 25,
                    "withHighLight": True
                }
            ],
            "sorts": [
                {
                    "sortField": "save_time",
                    "sortOrder": "DESC"
                }
            ],
            "customFilter": {
                "operator": "and",
                "properties": [],
                "filters": [
                    {
                        "operator": "or",
                        "properties": [
                            {
                                "property": "channel_id",
                                "operator": "eq",
                                "value": "132390"
                            }
                        ]
                    }
                ]
            },
            "pageNo": 1,
            "pageSize": 10
        }
        # Compact separators: keep the wire format identical to what the site
        # itself sends.
        body = json.dumps(payload, separators=(',', ':'))
        yield scrapy.http.JsonRequest(self.list_api_url, callback=self.parse_list,
                                      body=body, method='POST', meta={'data': body})

    def parse_list(self, response, **kwargs):
        """Yield one detail-page request per list entry, then (first page only)
        fan out requests for pages 2..pageCount.

        :param response: JSON response from ``list_api_url``.
        """
        prev_item = response.meta.get('item')
        request_body = response.meta.get('data')
        for info in response.json()['data']['list']:
            item = Item()
            item['source_url'] = info['doc_pub_url']
            item['publish_date'] = info['save_time']
            # Fields inherited from an upstream item (if any) take precedence,
            # matching the original merge order.
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            # Entries without a publish URL cannot be followed.
            if not item['source_url']:
                continue
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        if response.meta.get("is_next") is not False:
            total_page = response.json()['data']['pager']['pageCount']
            for page_num in range(2, int(total_page) + 1):
                # Clone the original payload per page instead of clobbering one
                # shared variable.
                page_payload = json.loads(request_body)
                page_payload['pageNo'] = str(page_num)
                page_body = json.dumps(page_payload, separators=(',', ':'))
                # is_next=False stops every paginated response from re-spawning
                # pages 2..N again (previously only the dupefilter prevented an
                # explosion of duplicate requests).
                yield scrapy.http.JsonRequest(self.list_api_url, callback=self.parse_list,
                                              body=page_body, method='POST',
                                              meta={'data': page_body, 'is_next': False})

    def parse_detail(self, response, **kwargs):
        """Fill the item from the detail page's meta tags and emit it.

        ``publish_date`` from the list API is overwritten here by the page's
        own ``PubDate`` meta tag.
        """
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # NOTE(review): xpath "." serializes the ENTIRE document into content —
        # looks like a placeholder selector; confirm the intended content node.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        item['source'] = self.source
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly when the module is run as a script.
    crawl_command = "scrapy crawl gx_gxsbssllgzzzxrmzf_22f2_policy"
    cmdline.execute(crawl_command.split())
